max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
navitia_client/departures.py | leonardbinet/navitia-python | 9 | 12770651 | """
Departures
http://doc.navitia.io/#departures
Also known as /departures service.
This endpoint retrieves a list of departures from a specific datetime for a selected object. Departures are ordered chronologically in ascending order.
url                                                Result
/coverage/{region_id}/{resource_path}/departures  List of the next departures, multi-route oriented, time-sorted only (not grouped by stop_point/route)
/coverage/{lon;lat}/coords/{lon;lat}/departures   List of the next departures, multi-route oriented, time-sorted only (not grouped by stop_point/route)
"""
import os
def departures(client, collection_name=None, object_id=None, coords=None, region=None, extra_params=None, verbose=False):
# Construct url
if coords and region:
raise ValueError(
"Cannot specifiy both coords and region, you must choose one.")
if coords:
# TODO: check coords format
# /coverage/{lon;lat}/coords/{lon;lat}/departures
url = os.path.join("coverage", coords, "coords",
coords, "departures")
else:
# /coverage/{region_id}/{resource_path}/departures
# First choose region
if not region and not hasattr(client, 'region'):
raise ValueError(
"You must specifiy coords or region, either here or in client")
elif region:
if isinstance(region, str):
# region argument overrides client specified region
used_region = region
else:
raise ValueError("Region must be a string")
elif not region and hasattr(client, 'region'):
# Takes already specified region
used_region = client.region
else:
# shouldn't be possible
raise ValueError("Weird error, caused by region")
# /coverage/{region_id}/{collection_name}
if not object_id or not collection_name:
raise ValueError("of correct type")
url = os.path.join("coverage", used_region,
collection_name, object_id, "departures")
return client._get(url=url, extra_params=extra_params, verbose=verbose)
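# A minimal usage sketch (hypothetical client construction and object id; the
# client is only assumed to expose a `_get` method and an optional `region`
# attribute, as used above):
#
#   client = Client(user="...")  # hypothetical navitia_client constructor
#   departures(client, collection_name="stop_areas",
#              object_id="stop_area:XXX", region="fr-idf")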
| 3.015625 | 3 |
molecule/debian/tests/test_default.py | hspaans/ansible-role-certificates | 2 | 12770652 | <reponame>hspaans/ansible-role-certificates
"""Role testing files using testinfra."""
import pytest
@pytest.mark.parametrize("pkg", ["openssl"])
def test_pkg_installed(host, pkg):
"""Test if package installed."""
package = host.package(pkg)
assert package.is_installed
@pytest.mark.parametrize("directory", [
"/etc/ssl/private",
"/etc/ssl/private/www.example.org"
])
def test_directory_present(host, directory):
"""Test if directory is present."""
item = host.file(directory)
assert item.exists
@pytest.mark.parametrize("directory, file", [
("/etc/ssl/private/www.example.org", "cert.pem"),
("/etc/ssl/private/www.example.org", "chain.pem"),
("/etc/ssl/private/www.example.org", "fullchain.pem"),
("/etc/ssl/private/www.example.org", "privkey.pem")
])
def test_file_present(host, directory, file):
"""Test if directory is present."""
item = host.file(directory+"/"+file)
assert item.exists
| 2.484375 | 2 |
tools/project_gen/proj/handlego.py | qsock/qim | 0 | 12770653 | <filename>tools/project_gen/proj/handlego.py
content = '''package main
import (
"context"
"github.com/qsock/qim/lib/proto/ret"
)
type Server struct{}
func (*Server) Ping(ctx context.Context, req *ret.NoArgs) (*ret.NoArgs, error) {
return new(ret.NoArgs), nil
}'''
def gen(name, srv_dir):
    with open(srv_dir + "/handle.go", "w") as f:
f.write(content) | 1.914063 | 2 |
tests/components/vera/test_init.py | tbarbette/core | 4 | 12770654 | """Vera tests."""
from unittest.mock import MagicMock
import pytest
import pyvera as pv
from requests.exceptions import RequestException
from homeassistant.components.vera import (
CONF_CONTROLLER,
CONF_EXCLUDE,
CONF_LIGHTS,
DOMAIN,
)
from homeassistant.config_entries import ENTRY_STATE_NOT_LOADED
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, ConfigSource, new_simple_controller_config
from tests.common import MockConfigEntry, mock_registry
async def test_init(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.CONFIG_FLOW,
serial_number="first_serial",
devices=(vera_device1,),
),
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "vera_first_serial_1"
async def test_init_from_file(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://1172.16.17.32:111"},
config_source=ConfigSource.FILE,
serial_number="first_serial",
devices=(vera_device1,),
),
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "vera_first_serial_1"
async def test_multiple_controllers_with_legacy_one(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test multiple controllers with one legacy controller."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
vera_device2 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device2.device_id = 2
vera_device2.vera_device_id = vera_device2.device_id
vera_device2.name = "second_dev"
vera_device2.is_tripped = False
entity2_id = "binary_sensor.second_dev_2"
# Add existing entity registry entry from previous setup.
entity_registry = mock_registry(hass)
entity_registry.async_get_or_create(
domain="switch", platform=DOMAIN, unique_id="12"
)
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.FILE,
serial_number="first_serial",
devices=(vera_device1,),
),
)
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:222"},
config_source=ConfigSource.CONFIG_FLOW,
serial_number="second_serial",
devices=(vera_device2,),
),
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "1"
entry2 = entity_registry.async_get(entity2_id)
assert entry2
assert entry2.unique_id == "vera_second_serial_2"
async def test_unload(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
await vera_component_factory.configure_component(
hass=hass, controller_config=new_simple_controller_config()
)
entries = hass.config_entries.async_entries(DOMAIN)
assert entries
for config_entry in entries:
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert config_entry.state == ENTRY_STATE_NOT_LOADED
async def test_async_setup_entry_error(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
def setup_callback(controller: pv.VeraController) -> None:
controller.get_devices.side_effect = RequestException()
controller.get_scenes.side_effect = RequestException()
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(setup_callback=setup_callback),
)
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_CONTROLLER: "http://127.0.0.1"},
options={},
unique_id="12345",
)
entry.add_to_hass(hass)
assert not await hass.config_entries.async_setup(entry.entry_id)
@pytest.mark.parametrize(
["options"],
[
[{CONF_LIGHTS: [4, 10, 12, "AAA"], CONF_EXCLUDE: [1, "BBB"]}],
[{CONF_LIGHTS: ["4", "10", "12", "AAA"], CONF_EXCLUDE: ["1", "BBB"]}],
],
)
async def test_exclude_and_light_ids(
hass: HomeAssistant, vera_component_factory: ComponentFactory, options
) -> None:
"""Test device exclusion, marking switches as lights and fixing the data type."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = 1
vera_device1.name = "dev1"
vera_device1.is_tripped = False
entity_id1 = "binary_sensor.dev1_1"
vera_device2 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device2.device_id = 2
vera_device2.vera_device_id = 2
vera_device2.name = "dev2"
vera_device2.is_tripped = False
entity_id2 = "binary_sensor.dev2_2"
vera_device3 = MagicMock(spec=pv.VeraSwitch) # type: pv.VeraSwitch
vera_device3.device_id = 3
vera_device3.vera_device_id = 3
vera_device3.name = "dev3"
vera_device3.category = pv.CATEGORY_SWITCH
vera_device3.is_switched_on = MagicMock(return_value=False)
entity_id3 = "switch.dev3_3"
vera_device4 = MagicMock(spec=pv.VeraSwitch) # type: pv.VeraSwitch
vera_device4.device_id = 4
vera_device4.vera_device_id = 4
vera_device4.name = "dev4"
vera_device4.category = pv.CATEGORY_SWITCH
vera_device4.is_switched_on = MagicMock(return_value=False)
vera_device4.get_brightness = MagicMock(return_value=0)
vera_device4.get_color = MagicMock(return_value=[0, 0, 0])
vera_device4.is_dimmable = True
entity_id4 = "light.dev4_4"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config_source=ConfigSource.CONFIG_ENTRY,
devices=(vera_device1, vera_device2, vera_device3, vera_device4),
config={**{CONF_CONTROLLER: "http://127.0.0.1:123"}, **options},
),
)
# Assert the entries were setup correctly.
config_entry = next(iter(hass.config_entries.async_entries(DOMAIN)))
assert config_entry.options[CONF_LIGHTS] == [4, 10, 12]
assert config_entry.options[CONF_EXCLUDE] == [1]
update_callback = component_data.controller_data[0].update_callback
update_callback(vera_device1)
update_callback(vera_device2)
update_callback(vera_device3)
update_callback(vera_device4)
await hass.async_block_till_done()
assert hass.states.get(entity_id1) is None
assert hass.states.get(entity_id2) is not None
assert hass.states.get(entity_id3) is not None
assert hass.states.get(entity_id4) is not None
| 2.0625 | 2 |
repo_utils/coverage_maker.py | ACEnglish/truvari | 16 | 12770655 | <filename>repo_utils/coverage_maker.py
"""
Utility to read a coverage.py JSON report and update the coverage badge
"""
import sys
import json
import anybadge
# Get the score from the coverage.py JSON report
with open(sys.argv[1], 'r') as handle:
    data = json.load(handle)
coverage_pct = round(data['totals']['percent_covered'])
# Define thresholds: <20=red, <40=orange, <70=yellow, otherwise green
thresholds = {20: 'red',
40: 'orange',
70: 'yellow',
90: 'green'}
badge = anybadge.Badge('coverage', coverage_pct,
thresholds=thresholds, value_suffix="%")
badge.write_badge('imgs/coverage.svg', overwrite=True)
# Fail the build when coverage is below 90%
if coverage_pct < 90:
    sys.exit(1)
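# Usage sketch (assumes a JSON report produced by coverage.py, e.g. via
# `coverage json -o coverage.json`):
#
#   python coverage_maker.py coverage.json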
| 2.5 | 2 |
test/helpers/music/test_queue.py | alejandrodlsp/grogu-bot | 1 | 12770656 | import unittest
from frozendict import frozendict
from src.helpers.music.queue import Queue, QueueIsEmptyError, RemoveOutOfIndexError
class QueueTest(unittest.TestCase):
queue1 = [
frozendict({ 'name' : 'song1' }),
frozendict({ 'name' : 'song2' }),
frozendict({ 'name' : 'song3' })
]
queue2 = {
frozendict({ 'name' : 'song1' }),
frozendict({ 'name' : 'song2' }),
frozendict({ 'name' : 'song3' })
}
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.q1 = Queue(QueueTest.queue1)
self.q2 = Queue(QueueTest.queue2)
self.q3 = Queue()
def tearDown(self):
pass
def test_create_queue(self):
q = Queue()
self.assertEqual(q._queue, [])
def test_is_empty(self):
self.assertFalse(self.q1.is_empty)
self.assertTrue(self.q3.is_empty)
def test_first_track(self):
self.assertEqual(self.q1.first_track['name'], QueueTest.queue1[0]['name'])
with self.assertRaises(QueueIsEmptyError):
self.q3.first_track
def test_current_track(self):
self.assertEqual(self.q1.current_track['name'], QueueTest.queue1[0]['name'])
self.q1.get_next_track()
self.assertEqual(self.q1.current_track['name'], QueueTest.queue1[1]['name'])
self.assertNotEqual(self.q1.current_track['name'], QueueTest.queue1[0]['name'])
with self.assertRaises(QueueIsEmptyError):
self.q3.current_track
def test_upcoming(self):
self.assertEqual(self.q1.upcoming[0]['name'], QueueTest.queue1[1]['name'])
self.assertEqual(self.q1.upcoming[1]['name'], QueueTest.queue1[2]['name'])
self.q1.get_next_track()
self.assertEqual(self.q1.upcoming[0]['name'], QueueTest.queue1[2]['name'])
self.assertNotEqual(self.q1.upcoming[0]['name'], QueueTest.queue1[1]['name'])
with self.assertRaises(QueueIsEmptyError):
self.q3.upcoming
def test_empty(self):
self.q1.empty()
self.assertEqual(self.q1.length, 0)
self.assertEqual(self.q1.position, 0)
def test_length(self):
self.assertEqual(self.q1.length, 3)
self.assertEqual(self.q3.length, 0)
def test_repeat_mode_all(self):
self.q1.set_repeat_mode('ALL')
self.q1.position = len(self.q1._queue)
next_track = self.q1.get_next_track()
self.assertEqual(next_track['name'], QueueTest.queue1[0]['name'])
def test_get_next_track(self):
with self.assertRaises(QueueIsEmptyError):
self.q3.get_next_track()
next_track = self.q1.get_next_track()
self.assertEqual(self.q1.position, 1)
self.q1.position = len(self.q1._queue)
next_track = self.q1.get_next_track()
self.assertIsNone(next_track)
def test_remove(self):
with self.assertRaises(RemoveOutOfIndexError):
self.q1.remove(255)
with self.assertRaises(RemoveOutOfIndexError):
self.q1.remove(-1)
self.q1.remove(1)
self.assertEqual(self.q1._queue[1]['name'], QueueTest.queue1[2]['name']) | 2.890625 | 3 |
django/stock/repository_pattern.py | nah990/StockF | 0 | 12770657 | <reponame>nah990/StockF<filename>django/stock/repository_pattern.py
from .models import StockByDate, StockInfo, SourceInfo
from abc import ABCMeta, abstractmethod
from .db_manager import DBConfigManager
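# Legacy-style helper mirroring the old abc.abstractclassmethod recipe; the
# methods below stack @classmethod with @abstractmethod instead, so this
# class is currently unused in this module.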
class abstractclassmethod(classmethod):
__slots__ = ()
def __init__(self, function):
super(abstractclassmethod, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
class Repository:
__metaclass__ = ABCMeta
db_config_manager = DBConfigManager()
    # Connect to DB with user role (stub: currently just returns the user unchanged)
@staticmethod
def connect(user):
return user
# Create and add new object to DB
@classmethod
@abstractmethod
def create(cls, model):
pass
# Get object by pk
@classmethod
@abstractmethod
def read_by_pk(cls, pk):
pass
# Get object by filter
@classmethod
@abstractmethod
def read_filtered(cls, filter_dict):
pass
# Get all objects
@classmethod
@abstractmethod
def read_all(cls):
pass
    # Update object by pk
@classmethod
@abstractmethod
def update_by_pk(cls, pk, update_dict):
pass
    # Update objects matching filter
@classmethod
@abstractmethod
def update_filtered(cls, filter_dict, update_dict):
pass
    # Update all objects
@classmethod
@abstractmethod
def update_all(cls, update_dict):
pass
# Delete by primary key
@classmethod
@abstractmethod
def delete_by_pk(cls, pk):
pass
# Remove objects matching filter
@classmethod
@abstractmethod
def delete_filtered(cls, filter_dict):
pass
@classmethod
@abstractmethod
def read_join_filtered(cls, join_field, filter_dict):
pass | 2.296875 | 2 |
src/lab/dynamic_sql.py | john-james-sf/predict-fda | 0 | 12770658 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Drug Approval Analytics #
# Version : 0.1.0 #
# File : \src\lab\dynamic_sql.py #
# Language : Python 3.9.5 #
# -------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/drug-approval-analytics #
# -------------------------------------------------------------------------- #
# Created : Saturday, July 24th 2021, 7:52:10 pm #
# Modified : Friday, August 13th 2021, 2:09:21 am #
# Modifier : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2021 nov8.ai #
# =========================================================================== #
# Reference: https://stackoverflow.com/questions/56570952/creating-dynamically-typed-tables-using-psycopg2s-built-in-formatting
"""
I was also having so much trouble with this aspect. sql.Identifier is for
double-quoted, well, SQL Identifiers which the datatypes (INTEGER, TEXT, etc.)
are not. Looks like just making it plain SQL does the trick.
N.B. In your code, you should have pre-defined columns tuples and not
expose their definition to the front-end. This is also why tuples are
useful here as they are immutable.
"""
import psycopg2.sql as sql
def create(name, columns):
# name = "mytable"
# columns = (("col1", "TEXT"), ("col2", "INTEGER"), ...)
fields = []
for col in columns:
fields.append(sql.SQL("{} {}").format(
sql.Identifier(col[0]), sql.SQL(col[1])))
query = sql.SQL("CREATE TABLE {tbl_name} ( {fields} );").format(
tbl_name=sql.Identifier(name),
fields=sql.SQL(', ').join(fields)
)
# CREATE TABLE "mytable" ( "col1" TEXT, "col2" INTEGER );
# print(query.as_string(conn))
# Get cursor and execute...
# --------------------------------------------------------------------------- #
class User:
def __init__(self, email, first_name, last_name, id=None):
self.email = email
self.first_name = first_name
self.last_name = last_name
self.id = id
def __repr__(self):
return "<User {}>".format(self.email)
def save_to_db(self):
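        # NOTE: CursorFromConnectionPool is not defined or imported in this
        # file; the tutorial this snippet comes from provides it as a
        # connection-pool context-manager helper.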
# This is creating a new connection pool every time! Very expensive...
with CursorFromConnectionPool() as cursor:
cursor.execute('INSERT INTO users (email, first_name, last_name) VALUES (%s, %s, %s)',
(self.email, self.first_name, self.last_name))
@classmethod
def load_from_db_by_email(cls, email):
with CursorFromConnectionPool() as cursor:
# Note the (email,) to make it a tuple!
cursor.execute('SELECT * FROM users WHERE email=%s', (email,))
user_data = cursor.fetchone()
return cls(email=user_data[1], first_name=user_data[2], last_name=user_data[3], id=user_data[0])
"""
REPL transcript kept for reference (psycopg2 SQL composition):

>>> names = ['foo', 'bar', 'baz']
>>> q1 = sql.SQL("insert into table ({}) values ({})").format(
...     sql.SQL(', ').join(map(sql.Identifier, names)),
...     sql.SQL(', ').join(sql.Placeholder() * len(names)))
>>> print(q1.as_string(conn))
insert into table ("foo", "bar", "baz") values (%s, %s, %s)
>>> q2 = sql.SQL("insert into table ({}) values ({})").format(
...     sql.SQL(', ').join(map(sql.Identifier, names)),
...     sql.SQL(', ').join(map(sql.Placeholder, names)))
>>> print(q2.as_string(conn))
insert into table ("foo", "bar", "baz") values (%(foo)s, %(bar)s, %(baz)s)
"""
| 1.421875 | 1 |
Problemset/cong-shang-dao-xia-da-yin-er-cha-shu-iii-lcof/cong-shang-dao-xia-da-yin-er-cha-shu-iii-lcof.py | worldwonderer/algorithm | 1 | 12770659 | <gh_stars>1-10
# @Title: Print Binary Tree from Top to Bottom III (LCOF: 从上到下打印二叉树 III)
# @Author: 18015528893
# @Date: 2021-01-20 21:44:16
# @Runtime: 44 ms
# @Memory: 15.3 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
from typing import List
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
res = []
queue = deque()
queue.append(root)
while queue:
size = len(queue)
tmp = []
for _ in range(size):
node = queue.popleft()
if node:
tmp.append(node.val)
queue.append(node.left)
queue.append(node.right)
if tmp:
if len(res) % 2 == 0:
res.append(tmp)
else:
res.append(tmp[::-1])
return res
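# Worked example for the zigzag traversal above (standard sample tree):
#
#         3
#        / \
#       9  20
#          / \
#         15  7
#
# Level 0 is appended as-is -> [3]; level 1 is collected left-to-right as
# [9, 20], then reversed because len(res) is odd -> [20, 9]; level 2 stays
# [15, 7]. Result: [[3], [20, 9], [15, 7]]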
| 3.34375 | 3 |
build_data_scripts/load_report_histograms.py | thisismattmiller/swib-2020-resources | 1 | 12770660 | <gh_stars>1-10
import glob
from pathlib import Path
import xml.etree.ElementTree as ET
from datetime import datetime
import csv
reports_dir = f"{str(Path.home())}/data/swib_data/load_reports/"
viz_data_dir = f"{str(Path.home())}/data/swib_data/viz_data_source/"
all_reports = {}
for file in glob.glob(f"{reports_dir}*.xml"):
with open(file) as infile:
print(file)
xml = ET.fromstring(infile.read())
datestr = xml.attrib['start'].split(' ')[0]
date = datetime(int(datestr.split('-')[0]), int(datestr.split('-')[1]), int(datestr.split('-')[2]))
timestmp = int(date.timestamp())
all_reports[timestmp] = {
'newNAF' : 0,
'unlinkNAF' : 0,
'linkChangedNAF' : 0,
'labelChangedNAF' : 0,
'newLCSH' : 0,
'unlinkLCSH' : 0,
'linkChangedLCSH' : 0,
'labelChangedLCSH' : 0,
'date' : datestr,
'dateStamp' : timestmp
}
details_el = xml.find('{info:lc/lds-id/log}logDetails')
for el in details_el:
if el.attrib['action'] == 'partialUnlink':
if el.attrib['lccn'][0] == 'n':
all_reports[timestmp]['linkChangedNAF']+=1
elif el.attrib['lccn'][0] == 's':
all_reports[timestmp]['linkChangedLCSH']+=1
if el.attrib['action'] == 'unlink':
if el.attrib['lccn'][0] == 'n':
all_reports[timestmp]['unlinkNAF']+=1
elif el.attrib['lccn'][0] == 's':
all_reports[timestmp]['unlinkLCSH']+=1
if el.attrib['action'] == 'new':
if el.attrib['lccn'][0] == 'n':
all_reports[timestmp]['newNAF']+=1
elif el.attrib['lccn'][0] == 's':
all_reports[timestmp]['newLCSH']+=1
if el.attrib['action'] == 'labelChange':
if el.attrib['lccn'][0] == 'n':
all_reports[timestmp]['labelChangedNAF']+=1
elif el.attrib['lccn'][0] == 's':
all_reports[timestmp]['labelChangedLCSH']+=1
with open(f"{viz_data_dir}report_hisogram.csv",'w') as out:
writer = csv.writer(out)
writer.writerow(['Date','New NAF','Unlink NAF', 'Link Change NAF', 'Label Change NAF','New LCSH','Unlink LCSH', 'Link Change LCSH', 'Label Change LCSH'])
for t in sorted(list(all_reports.keys())):
writer.writerow([all_reports[t]['date'],all_reports[t]['newNAF'],all_reports[t]['unlinkNAF'], all_reports[t]['linkChangedNAF'], all_reports[t]['labelChangedNAF'],all_reports[t]['newLCSH'],all_reports[t]['unlinkLCSH'], all_reports[t]['linkChangedLCSH'], all_reports[t]['labelChangedLCSH']])
# json.dump(all_reports,open(f"{viz_data_dir}report_histogram.json",'w'),indent=2)
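# For reference, the parsing loop above assumes load-report XML roughly of
# this shape (structure inferred from the attribute accesses; element names
# other than logDetails are hypothetical):
#
#   <log xmlns="info:lc/lds-id/log" start="2020-01-01 00:00:00">
#     <logDetails>
#       <detail action="new" lccn="n1234"/>
#       <detail action="unlink" lccn="sh5678"/>
#     </logDetails>
#   </log>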
| 2.265625 | 2 |
pylabnet/launchers/servers/dio_breakout.py | wi11dey/pylabnet | 10 | 12770661 | from pyvisa import ResourceManager, VisaIOError
from pylabnet.hardware.awg.dio_breakout import Driver
from pylabnet.utils.helper_methods import get_ip, load_device_config
from pylabnet.network.client_server.dio_breakout import Service, Client
from pylabnet.network.core.generic_server import GenericServer
def launch(**kwargs):
""" Connects to DIO breakout and instantiates server
:param kwargs: (dict) containing relevant kwargs
:logger: instance of LogClient for logging purposes
:port: (int) port number for the DIO breakout server
        :config: (str) name of config file to use
"""
device_config = load_device_config('dio_breakout', kwargs['config'], logger=kwargs['logger'])
# Try to load settings
if 'resource_name' in device_config:
addr = device_config['resource_name']
else:
addr = device_config['device_id']
# Try to connect
try:
dio = Driver(address=addr, logger=kwargs['logger'])
    # If connecting fails, log the failing address and re-raise the error
except VisaIOError:
kwargs['logger'].error(f'Failed to connect to device at address {addr}')
raise
# Instantiate Service and server
dio_service = Service()
dio_service.assign_module(module=dio)
dio_service.assign_logger(logger=kwargs['logger'])
dio_server = GenericServer(
service=dio_service,
host=get_ip(),
port=kwargs['port']
)
dio_server.start()
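# A minimal sketch of calling launch() directly (hypothetical values; within
# pylabnet this is normally invoked by the launcher framework, which supplies
# the logger, port, and config name):
#
#   launch(logger=log_client, port=5678, config='dio_config_1')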
| 2.140625 | 2 |
elephunk/records/activity.py | pitluga/elephunk | 0 | 12770662 | from elephunk.database import Row
class Activity(Row):
@property
def formatted_xact_start(self):
        if self.xact_start is None:
return ""
return self.xact_start.isoformat()
| 2.15625 | 2 |
tests/breezometer/pollen/models/test_pollen_index_forecast.py | clintecker/supercell | 0 | 12770663 | # Standard Library
import datetime
# Third Party Code
from dateutil.tz import tzutc
# Supercell Code
from supercell.breezometer.pollen.models.pollen_index import PollenIndex
from supercell.breezometer.pollen.models.pollen_index_forecast import (
PollenIndexForecast,
)
from supercell.breezometer.pollen.models.pollen_type import PollenType
def test_model():
timestamp = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=tzutc())
assert (
'{"timestamp": "2020-01-01T00:00:00+00:00", "display_name": '
'"BreezoMeter Pollen Index", "short_name": "bpi", "pollen_type_count": 3, '
'"plant_count": 3}'
== str(
PollenIndexForecast(
timestamp=timestamp,
short_name="bpi",
display_name="BreezoMeter Pollen Index",
pollen_types=[
PollenType(
short_name="grass",
display_name="Grass",
in_season=True,
data_available=True,
index=PollenIndex(value=4, category="High", color="#FF8C00"),
timestamp=timestamp,
),
PollenType(
short_name="tree",
display_name="Tree",
in_season=True,
data_available=True,
index=PollenIndex(value=0, category="None", color=None),
timestamp=timestamp,
),
PollenType(
short_name="weed",
display_name="Weed",
in_season=True,
data_available=True,
index=PollenIndex(
value=3, category="Moderate", color="#FFFF00"
),
timestamp=timestamp,
),
],
plants=[
PollenType(
short_name="graminales",
display_name="Graminales",
in_season=True,
data_available=True,
index=PollenIndex(value=4, category="High", color="#FF8C00"),
timestamp=timestamp,
),
PollenType(
short_name="juniper",
display_name="Juniper",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="elm",
display_name="Elm",
in_season=True,
data_available=True,
index=PollenIndex(value=0, category="None", color=None),
timestamp=timestamp,
),
PollenType(
short_name="oak",
display_name="Oak",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="alder",
display_name="Alder",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="pine",
display_name="Pine",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="cottonwood",
display_name="Cottonwood",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="ragweed",
display_name="Ragweed",
in_season=True,
data_available=True,
index=PollenIndex(
value=3, category="Moderate", color="#FFFF00"
),
timestamp=timestamp,
),
PollenType(
short_name="birch",
display_name="Birch",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="ash",
display_name="Ash",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="maple",
display_name="Maple",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
],
)
)
)
def test_initialize_with_dictionary():
assert (
'{"timestamp": "2020-09-06T00:00:00+00:00", "display_name": '
'"BreezoMeter Pollen Index", "short_name": "bpi", "pollen_type_count": 3, '
'"plant_count": 3}'
== str(
PollenIndexForecast.initialize_from_dictionary(
response_dictionary={
"date": "2020-09-06",
"index_id": "bpi",
"index_display_name": "BreezoMeter Pollen Index",
"types": {
"grass": {
"display_name": "Grass",
"in_season": True,
"data_available": True,
"index": {
"value": 4,
"category": "High",
"color": "#FF8C00",
},
},
"tree": {
"display_name": "Tree",
"in_season": True,
"data_available": True,
"index": {"value": 0, "category": "None", "color": None},
},
"weed": {
"display_name": "Weed",
"in_season": True,
"data_available": True,
"index": {
"value": 3,
"category": "Moderate",
"color": "#FFFF00",
},
},
},
"plants": {
"graminales": {
"display_name": "Graminales",
"in_season": True,
"data_available": True,
"index": {
"value": 4,
"category": "High",
"color": "#FF8C00",
},
},
"juniper": {
"display_name": "Juniper",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"elm": {
"display_name": "Elm",
"in_season": True,
"data_available": True,
"index": {"value": 0, "category": "None", "color": None},
},
"oak": {
"display_name": "Oak",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"alder": {
"display_name": "Alder",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"pine": {
"display_name": "Pine",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"cottonwood": {
"display_name": "Cottonwood",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"ragweed": {
"display_name": "Ragweed",
"in_season": True,
"data_available": True,
"index": {
"value": 3,
"category": "Moderate",
"color": "#FFFF00",
},
},
"birch": {
"display_name": "Birch",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"ash": {
"display_name": "Ash",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"maple": {
"display_name": "Maple",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
},
}
)
)
)
| 2.015625 | 2 |
cloudeebus/cloudeebus.py | intel/cloudeebus | 5 | 12770664 | #!/usr/bin/env python
# Cloudeebus
#
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
import argparse, dbus, json, sys
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, WampCraServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
###############################################################################
from cloudeebusengine import VERSION, SERVICELIST, CloudeebusService, cache
import cloudeebusengine
OPENDOOR = False
CREDENTIALS = {}
WHITELIST = []
NETMASK = []
###############################################################################
def ipV4ToHex(mask):
    # Convert an IP address or mask (e.g. the 24 in ip/24, or 255.255.255.0) to a 32-bit integer value
maskHex = 0
byte = 0
if mask.rfind(".") == -1:
        if int(mask) <= 32:
maskHex = (2**(int(mask))-1)
maskHex = maskHex << (32-int(mask))
else:
raise Exception("Illegal mask (larger than 32 bits) " + mask)
else:
maskField = mask.split(".")
# Check if mask has four fields (byte)
if len(maskField) != 4:
raise Exception("Illegal ip address / mask (should be 4 bytes) " + mask)
for maskQuartet in maskField:
byte = int(maskQuartet)
# Check if each field is really a byte
if byte > 255:
raise Exception("Illegal ip address / mask (digit larger than a byte) " + mask)
maskHex += byte
maskHex = maskHex << 8
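        # Undo the extra left-shift applied after the final byte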
maskHex = maskHex >> 8
return maskHex
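# A few sanity checks for ipV4ToHex (values worked out by hand from the
# logic above):
#
#   ipV4ToHex("24")            == 0xFFFFFF00
#   ipV4ToHex("255.255.255.0") == 0xFFFFFF00
#   ipV4ToHex("127.0.0.1")     == 0x7F000001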
###############################################################################
class CloudeebusServerProtocol(WampCraServerProtocol):
'''
    Connection and session authentication management.
'''
def onSessionOpen(self):
# CRA authentication options
self.clientAuthTimeout = 0
self.clientAuthAllowAnonymous = OPENDOOR
# CRA authentication init
WampCraServerProtocol.onSessionOpen(self)
def getAuthPermissions(self, key, extra):
return {'permissions': extra.get("permissions", None),
'authextra': extra.get("authextra", None),
'services': extra.get("services", None)}
def getAuthSecret(self, key):
secret = CREDENTIALS.get(key, None)
if secret is None:
return None
# secret must be of str type to be hashed
return str(secret)
def onAuthenticated(self, key, permissions):
if not OPENDOOR:
# check net filter
if NETMASK != []:
ipAllowed = False
for netfilter in NETMASK:
ipHex=ipV4ToHex(self.peer.host)
ipAllowed = (ipHex & netfilter['mask']) == netfilter['ipAllowed'] & netfilter['mask']
if ipAllowed:
break
if not ipAllowed:
raise Exception("host " + self.peer.host + " is not allowed!")
# check authentication key
if key is None:
raise Exception("Authentication failed")
# check permissions, array.index throws exception
if (permissions['permissions'] != None):
for req in permissions['permissions']:
                    WHITELIST.index(req)
# check allowed service creation, array.index throws exception
if (permissions['services'] != None):
for req in permissions['services']:
                    SERVICELIST.index(req)
# create cloudeebus service instance
self.cloudeebusService = CloudeebusService(permissions)
# register it for RPC
self.registerForRpc(self.cloudeebusService)
# register for Publish / Subscribe
self.registerForPubSub("", True)
def connectionLost(self, reason):
WampCraServerProtocol.connectionLost(self, reason)
if factory.getConnectionCount() == 0:
cache.reset()
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Javascript DBus bridge.')
parser.add_argument('-v', '--version', action='store_true',
help='print version and exit')
parser.add_argument('-d', '--debug', action='store_true',
help='log debug info on standard output')
parser.add_argument('-o', '--opendoor', action='store_true',
help='allow anonymous access to all services')
parser.add_argument('-p', '--port', default='9000',
help='port number')
parser.add_argument('-c', '--credentials',
help='path to credentials file')
parser.add_argument('-w', '--whitelist',
help='path to whitelist file (DBus services to use)')
parser.add_argument('-s', '--servicelist',
help='path to servicelist file (DBus services to export)')
parser.add_argument('-n', '--netmask',
help='netmask,IP filter (comma separated.) eg. : -n 127.0.0.1,192.168.2.0/24,10.12.16.0/255.255.255.0')
args = parser.parse_args(sys.argv[1:])
if args.version:
print("Cloudeebus version " + VERSION)
exit(0)
if args.debug:
log.startLogging(sys.stdout)
OPENDOOR = args.opendoor
if args.credentials:
jfile = open(args.credentials)
CREDENTIALS = json.load(jfile)
jfile.close()
if args.whitelist:
jfile = open(args.whitelist)
WHITELIST.extend(json.load(jfile))
jfile.close()
if args.servicelist:
jfile = open(args.servicelist)
SERVICELIST.extend(json.load(jfile))
jfile.close()
if args.netmask:
iplist = args.netmask.split(",")
for ip in iplist:
if ip.rfind("/") != -1:
ip=ip.split("/")
ipAllowed = ip[0]
mask = ip[1]
else:
ipAllowed = ip
mask = "255.255.255.255"
NETMASK.append( {'ipAllowed': ipV4ToHex(ipAllowed), 'mask' : ipV4ToHex(mask)} )
uri = "ws://localhost:" + args.port
factory = WampServerFactory(uri, debugWamp = args.debug)
factory.protocol = CloudeebusServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
# Configure cloudeebus engine for WAMP.
cloudeebusengine.factory = factory
cloudeebusengine.OPENDOOR = OPENDOOR
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
| 1.734375 | 2 |
core/urls.py | damiso15/location_api | 0 | 12770665 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
API_TITLE = 'Location API'
API_DESCRIPTION = 'A Web API for the list of available locations'
schema_view = get_schema_view(
openapi.Info(
title="Locaion API",
default_version='v1',
description="A Web API for list of available Locations"
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/', include('api.urls')),
path('api/auth/', include('authentication.urls')),
# path('doc', include_docs_urls(title=API_TITLE, description=API_DESCRIPTION, permission_classes=(permissions.AllowAny,))),
path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('redoc', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.867188 | 2 |
src/camera_controller.py | OscarGTH/communal-plant | 1 | 12770666 | from pathlib import Path
from datetime import datetime
import time
from subprocess import call
import os
from logzero import logger
import picamera
# Video and image file paths
VIDEO_PATH = str(Path().resolve()) + "/videos/"
IMAGE_PATH = str(Path().resolve()) + "/images/"
class CameraController:
def __init__(self) -> None:
self.camera = picamera.PiCamera()
self.camera.resolution = (1296, 730)
self.video_file_path = VIDEO_PATH + str(datetime.now().date())
self.image_file_path = IMAGE_PATH + str(time.time())
def start_record(self):
""" Starts to record video. """
try:
# Delete possible video that was taken earlier.
logger.info("Deleting previous video if exists.")
self.delete_previous_video()
logger.info("Starting to record video.")
# Start recording video.
self.camera.start_recording(self.video_file_path + ".h264")
except Exception as ex:
logger.warning("Error happened while recording video.")
logger.error(ex)
def stop_record(self):
""" Stops video recording and calls converter function. """
try:
logger.info("Stopping video recording.")
# Stop recording.
self.camera.stop_recording()
# Take a picture of the plant.
self.capture_image()
# Convert video to mp4 and return result.
return self.convert_recording_to_mp4()
except Exception as ex:
logger.warning("Error happened ending video recording.")
logger.error(ex)
def capture_image(self):
""" Captures single image for later use. """
logger.info("Capturing image.")
self.camera.start_preview()
# Camera warm-up time
time.sleep(2)
# Capture image.
self.camera.capture(self.image_file_path + ".png")
logger.info("Image captured.")
def convert_recording_to_mp4(self):
""" Converts .h264 to mp4 file. """
# Define file names of original and converted versions.
orig_file = self.video_file_path + ".h264"
converted_file = self.video_file_path + ".mp4"
# Try to convert video with shell command.
try:
command = "MP4Box -add " + orig_file + " " + converted_file
logger.info("Converting video to mp4.")
            # Execute command to convert h264 to mp4.
call([command], shell=True)
logger.info("Video successfully converted.")
self.delete_original_format()
return True
except:
logger.error("Error when converting video to mp4.")
return False
def delete_original_format(self):
""" Deletes the H246 format file after conversion to mp4. """
orig_file = Path(self.video_file_path + '.h264')
# Check if file exists.
if orig_file.is_file():
# Remove file.
os.remove(self.video_file_path + ".h264")
def delete_previous_video(self):
""" Deletes possibly existing mp4 video with the same date. """
converted_file = Path(self.video_file_path + '.mp4')
# Check if file exists.
if converted_file.is_file():
# Remove file.
os.remove(self.video_file_path + ".mp4")
| 2.96875 | 3 |
plugins/opsgenie/unit_test/test_create_alert.py | lukaszlaszuk/insightconnect-plugins | 0 | 12770667 | <reponame>lukaszlaszuk/insightconnect-plugins
import os
import sys
from parameterized import parameterized
sys.path.append(os.path.abspath("../"))
import logging
from unittest import TestCase
from icon_opsgenie.actions.create_alert import CreateAlert
from icon_opsgenie.actions.create_alert.schema import Output
from icon_opsgenie.connection.connection import Connection
from icon_opsgenie.connection.schema import Input
from insightconnect_plugin_runtime.exceptions import PluginException
from unit_test.mock import mock_request_202, mock_request_403, mock_request_404, mock_request_500, mocked_request
class TestCreateAlert(TestCase):
def setUp(self) -> None:
self.connection = Connection()
self.connection.logger = logging.getLogger("connection logger")
self.connection.connect({Input.API_KEY: {"secretKey": "1234567e-123c-123c-123c-1234567e9xAd"}})
self.action = CreateAlert()
self.action.connection = self.connection
self.action.logger = logging.getLogger("action logger")
self.params = {
"message": "An example message",
"user": "ExampleUser",
"source": "ExampleSource",
"note": "ExampleNote",
}
def test_create_alert_when_status_ok(self):
mocked_request(mock_request_202)
response = self.action.run(self.params)
expected_response = {
Output.RESULT: "Request will be processed",
Output.REQUESTID: "43a29c5c-3dbf-4fa4-9c26-f4f71023e120",
Output.ELAPSED_TIME: 0.302,
}
self.assertEqual(response, expected_response)
@parameterized.expand(
[
(mock_request_403, PluginException.Preset.UNAUTHORIZED),
(mock_request_404, PluginException.Preset.NOT_FOUND),
(mock_request_500, PluginException.Preset.UNKNOWN),
],
)
def test_create_alert_when_status_error(self, mock_request, exception):
mocked_request(mock_request)
with self.assertRaises(PluginException) as context:
self.action.run(self.params)
self.assertEqual(
context.exception.cause,
PluginException.causes[exception],
)
def test_create_alert_no_message(self):
mocked_request(mock_request_500)
with self.assertRaises(PluginException) as context:
self.action.run()
self.assertEqual(
context.exception.cause,
PluginException.causes[PluginException.Preset.UNKNOWN],
)
self.assertEqual(context.exception.data, "No required parameter has been entered")
def test_create_alert_message_over_130_characters(self):
mocked_request(mock_request_500)
payload = {"message": "LongMessage" * 131}
with self.assertRaises(PluginException) as context:
self.action.run(payload)
self.assertEqual(
context.exception.cause,
PluginException.causes[PluginException.Preset.UNKNOWN],
)
self.assertEqual(
context.exception.data,
'Limit of maximum input characters for parameter "message" has been exceeded (maximum characters 130)',
)
def test_create_alert_user_over_100_characters(self):
mocked_request(mock_request_500)
payload = {"message": "An example message", "user": "LongUsername" * 101}
with self.assertRaises(PluginException) as context:
self.action.run(payload)
self.assertEqual(
context.exception.cause,
PluginException.causes[PluginException.Preset.UNKNOWN],
)
self.assertEqual(
context.exception.data,
'Limit of maximum input characters for parameter "user" has been exceeded (maximum characters 100)',
)
def test_create_alert_actions_over_10_elements(self):
mocked_request(mock_request_500)
payload = {**self.params, "actions": [str(element) for element in range(1, 12)]}
with self.assertRaises(PluginException) as context:
self.action.run(payload)
self.assertEqual(
context.exception.cause,
PluginException.causes[PluginException.Preset.UNKNOWN],
)
self.assertEqual(
context.exception.data,
'Limit of maximum input characters for parameter "actions" has been exceeded (maximum elements 10)',
)
def test_create_alert_actions_over_50_characters(self):
mocked_request(mock_request_500)
payload = {**self.params, "actions": ["First", "LongActionName" * 51]}
with self.assertRaises(PluginException) as context:
self.action.run(payload)
self.assertEqual(
context.exception.cause,
PluginException.causes[PluginException.Preset.UNKNOWN],
)
self.assertEqual(
context.exception.data,
'Limit of maximum input characters for parameter "actions" has been exceeded (maximum characters 50)',
)
| 2.109375 | 2 |
snpdb/models_admin_forms.py | SACGF/variantgrid | 5 | 12770668 | <reponame>SACGF/variantgrid
import re
from django.contrib import admin
from unidecode import unidecode
from snpdb.admin_utils import ModelAdminBasics
from snpdb.models import Organization, Lab
def make_code_friendly(text: str) -> str:
"""
convert accented characters to non-accented counterparts
lower case, replace - and spaces with underscores
remove anything that's then not a-z or underscore
"""
text = unidecode(text) \
.lower() \
.replace('-', '_').replace(' ', '_')
return re.sub(r'[^a-z0-9_]', '', text)
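# Example of the transformation above (hypothetical input):
#
#   make_code_friendly("Åbc-Déf Lab")  # -> "abc_def_lab"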
class LabAdmin(ModelAdminBasics):
list_per_page = 200
list_display = ('name', 'group_name', 'organization', 'external', 'clinvar_key', 'upload_location', 'upload_auto_pattern', 'classification_config')
fieldsets = (
('Basic', {'fields': ('name', 'group_name', 'organization')}),
('Position', {'fields': ('city', 'state', 'country', 'lat', 'long')}),
('Style', {'fields': ('url', 'css_class')}),
('Submissions', {'fields': ('classification_config', 'upload_location', 'upload_auto_pattern', 'external', 'clinvar_key')})
)
def is_readonly_field(self, f) -> bool:
if f.name == 'clinvar_key' or f.name == 'organization':
return False
return super().is_readonly_field(f)
def get_form(self, request, obj=None, **kwargs):
return super(LabAdmin, self).get_form(request, obj, widgets={
'name': admin.widgets.AdminTextInputWidget(),
'institution': admin.widgets.AdminTextInputWidget(),
'group_name': admin.widgets.AdminTextInputWidget(),
'city': admin.widgets.AdminTextInputWidget(),
'state': admin.widgets.AdminTextInputWidget(),
'country': admin.widgets.AdminTextInputWidget(),
'lat': admin.widgets.AdminTextInputWidget(),
'long': admin.widgets.AdminTextInputWidget(),
'url': admin.widgets.AdminURLFieldWidget(),
'css_class': admin.widgets.AdminTextInputWidget(),
'upload_location': admin.widgets.AdminTextInputWidget(),
'upload_auto_pattern': admin.widgets.AdminTextInputWidget()
}, **kwargs)
def fix_group_name(self, request, queryset):
safety_reg = re.compile(r'^[a-z0-9_]*$')
fixed = 0
already_good = 0
lab: Lab
for lab in queryset:
org_group_name = lab.organization.group_name
if not lab.group_name or not safety_reg.match(lab.group_name) or not lab.group_name.startswith(org_group_name):
lab.group_name = org_group_name + '/' + make_code_friendly(lab.name)
lab.save()
fixed = fixed + 1
else:
already_good = already_good + 1
self.message_user(request, f"{fixed} updated, {already_good} no change required")
fix_group_name.short_description = 'Fix group name'
actions = [fix_group_name]
class OrganizationAdmin(ModelAdminBasics):
list_display = ('name', 'group_name', 'classification_config')
fieldsets = (
('Basic', {'fields': ('name', 'short_name', 'group_name', 'active')}),
('Submissions', {'fields': ('classification_config', )})
)
def fix_group_name(self, request, queryset):
org: Organization
safety_reg = re.compile(r'^[a-z0-9_]*$')
fixed = 0
already_good = 0
for org in queryset:
if not org.group_name or not safety_reg.match(org.group_name):
org.group_name = make_code_friendly(org.name)
org.save()
fixed = fixed + 1
else:
already_good = already_good + 1
self.message_user(request, f"{fixed} updated, {already_good} no change required")
fix_group_name.short_description = 'Fix group name'
actions = [fix_group_name]
def get_form(self, request, obj=None, **kwargs):
return super(OrganizationAdmin, self).get_form(request, obj, widgets={
'name': admin.widgets.AdminTextInputWidget(),
'short_name': admin.widgets.AdminTextInputWidget(),
'group_name': admin.widgets.AdminTextInputWidget()
}, **kwargs)
# The JSONEditor has a bug in it that stops patternProperties
# from being useful. Specifically if you try to add arbitrary properties
# you get locked in the UI and can't continue to edit.
# classification_config_schema_preferred = {
# 'type': 'object',
# 'patternProperties': {
# "[a-zA-Z0-9_]+": {
# "oneOf": [
# { "type": "boolean" },
# {
# "type": 'object',
# 'properties': {
# 'hide': {'type': 'boolean' },
# 'custom_options': {
# 'type': 'array', 'items': {
# 'type': 'string'
# }
# }
# }
# }
# ]
# }
# }
# }
| 1.96875 | 2 |
helpingnetwork/organization/forms.py | neopentane/Techathon_19 | 0 | 12770669 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Organization,OrganizationImages
from evelist.models import Event,EventImages
class OrganizationRegisterForm(UserCreationForm):
name=forms.CharField(required=True, label="Organization Name")
email = forms.EmailField()
vision = forms.CharField(max_length=200, widget=forms.TextInput({}),label="Vision")
mission = forms.CharField(max_length=200, widget=forms.TextInput({}),label="Mission")
link=forms.CharField(required=True, label="Link")
class Meta:
model = User
        fields = ['username','email','password1','password2','name','vision','mission','link']
class OrganizationUpdate(forms.ModelForm):
class Meta:
model=Organization
fields = ['name']
'''
class CreateEventForm(forms.Form):
name=forms.CharField(required=True, label="Event Name")
description = forms.CharField(max_length=200, widget=forms.TextInput({}),label="description")
venue=forms.CharField(required=True, label="Venue")
date=forms.DateField(widget=forms.SelectDateWidget())
'''
class CreateEventForm(forms.ModelForm):
class Meta:
model=Event
labels={"name":"Event Name","description":"Add Description","eventprofileImage":"Add Event Image","category":"category"}
fields=['name','description','venue','date','eventprofileImage','category']
exclude=['organizer']
class AddOrgImage(forms.ModelForm):
class Meta:
model=OrganizationImages
fields=['image']
exclude=['organization']
class AddImageForm(forms.ModelForm):
class Meta:
model=EventImages
fields=['i_event','image']
| 2.171875 | 2 |
tests/unit/test_readHelper.py | woshiange/python-lambda | 1,441 | 12770670 | <filename>tests/unit/test_readHelper.py
import os
import unittest
import yaml
from aws_lambda.helpers import read
class TestReadHelper(unittest.TestCase):
TEST_FILE = "readTmp.txt"
def setUp(self):
with open(TestReadHelper.TEST_FILE, "w") as tmp_file:
tmp_file.write("testYaml: testing")
def tearDown(self):
os.remove(TestReadHelper.TEST_FILE)
def test_read_no_loader_non_binary(self):
fileContents = read(TestReadHelper.TEST_FILE)
self.assertEqual(fileContents, "testYaml: testing")
def test_read_yaml_loader_non_binary(self):
testYaml = read(TestReadHelper.TEST_FILE, loader=yaml.full_load)
self.assertEqual(testYaml["testYaml"], "testing")
def test_read_no_loader_binary_mode(self):
fileContents = read(TestReadHelper.TEST_FILE, binary_file=True)
self.assertEqual(fileContents, b"testYaml: testing")
def test_read_yaml_loader_binary_mode(self):
testYaml = read(
TestReadHelper.TEST_FILE, loader=yaml.full_load, binary_file=True
)
self.assertEqual(testYaml["testYaml"], "testing")
| 2.78125 | 3 |
cards.py | ashbc/pybreak | 0 | 12770671 | <gh_stars>0
from enum import Enum
# TODO: turn to dict bleh
RunnerFactions = Enum('RunnerFactions', 'Neutral Shaper Anarch Criminal SunnyLebeau Adam Apex')
CorpFactions = Enum('CorpFactions', 'Neutral Jinteki HaasBioroid NBN Weyland CorpMiniFactionsWhenFFGPls')
# The Breaker and Ice classes represent "Generic" base behaviour for icebreakers and ICE respectively.
# Anything that "Pumps X for Y" and "Breaks Z for W" is in the former,
# and anything that has A strength and B subroutines that could be considered generic is in the latter.
# This is much more frequent in ICE, as it's generally the breakers that have the fun breaking-related text,
# whereas the "generic" subroutines are where all the fun is on the Corp side,
# and modelling that is outside of scope.
# Ice examples: almost anything you can name: Wall of Static, Enigma, Neural Katana.
# Icebreaker examples: Corroder, Cyber-Cypher, Femme Fatale (without bypass).
# Note that the current implementation does, to some extent, support fixed-strength breakers (think Mimic, Yog.0),
# as well as "on encounter" costs (Tollbooth), though neither of these are very well implemented.
# This is the generic icebreaker's actual profile ftsoc:
"""
Generic breaker
Program: Icebreaker - AI
0 Install cost
0 MU
0 Strength
1[c]: Break ice subroutine.
1[c]: +1 strength.
"""
class Breaker():
name = 'Generic breaker'
# card subtypes eg "Fracter"
subtype = ('Icebreaker', 'AI')
# ICE subtypes that this hits eg "Barrier"
# Note that some breakers (notably AIs) can break ANY ice
# so empty subtype represents "ANY ice", not "NO ice"!
target_subtype = ()
# for conditional breakers such as D4v1d,
# this will need to change
def can_break(self, ice):
# default behaviour: if the ice subtype is one we target,
# or if our target list is empty
if len(self.target_subtype) == 0:
            # An empty target list means the card reads "break ice subroutine" and can break any ice
return True
# if any subtype of target ice is in our range of breakable types
for itype in ice.subtype:
for btype in self.target_subtype:
if itype == btype:
return True
return False
# possibility of doing this part automatically??
rules_text = '1[c]: break ice subroutine.\n1[c]: +1 strength.'
flavour_text = ''
faction = RunnerFactions.Neutral
# regular influence cost - doesn't count for in-faction
influence_cost = 0
# universal influence - from MWL - costs regardless of in-faction-ness
universal_influence_cost = 0
# base strength
strength = 0
# creds
install_cost = 0
# MU requirement
memory_cost = 0
# cost to pump
boost_cost = 0
# strength gain per pump
boost_amount = 1
# cost to break subs
break_cost = 0
# amount of subs broken per use
break_amount = 1
# modifiable behaviour for weird cards like Paperclip
def break_ice(self, ice, ignore_optional_subs=False):
# print('Using {} to break {}.'.format(self.name, ice.name))
strength = get_or_call(self.strength, ice)
cost = 0
cost += get_or_call(ice.encounter_cost, self)
target_strength = get_or_call(ice.strength, self)
subs_remaining = get_or_call(ice.subroutine_count, self)
if not ignore_optional_subs:
            subs_remaining += get_or_call(ice.optional_subroutine_count, self)
# print('{} has {} strength and {} subroutines.'.format(ice.name, target_strength, subs_remaining))
# Pump
while strength < target_strength:
# fixed strength - can't boost :(
# TODO: D A T A S U C K E R ?
if get_or_call(self.boost_amount) == 0:
# Todo: More informative error
return -1
pumpcost = get_or_call(self.boost_cost, ice)
cost += pumpcost
strength += get_or_call(self.boost_amount, ice)
# print('Pumped to {} for {} creds (total {})'.format(strength, pumpcost, cost))
# Break
while subs_remaining > 0:
break_cost = get_or_call(self.break_cost, ice)
break_amount = get_or_call(self.break_amount, ice)
cost += break_cost
subs_remaining -= break_amount
# print('Broke {} subroutine(s) for {} creds (total {}).'.format(break_amount, break_cost, cost))
return cost
# If sth is a function, calls it with args, otherwise returns it
def get_or_call(sth, *args):
# function or lambda or callable class
if hasattr(sth, '__call__'):
return sth(*args)
else:
return sth
class Ice():
# Format for functions: attrname(self, breaker) -> int
name = 'Vanilla ICE'
flavour_text = ''
subtype = ()
strength = 0
rez_cost = 0
# I mean, implementing subroutine effects is somewhat out of scope.
# Problem: Some ICE have beneficial subroutines (Little Engine's "The runner gains 5[c]."),
# or ambiguous subroutines (Chetana's "Each player gains 2[c].").
# Initial solution: Divide subs into "standard" subs and "optional" subs.
# Additional problem: How to decide "optional" subs?
# Is, for example, a trace optional?
# For now, I guess the solution is to mark any subroutine that MAY do nothing as optional
# trace, psi games, conditionally targetted effects ("Trash 1 AI program."), conditional effects ("End the run if the runner is tagged.")
# are all examples of these
subroutine_count = 0
optional_subroutine_count = 0
# crude implementation of eg. Tollbooth
encounter_cost = 0
# eg Swordsman can use this to check for AIs.
# by default the ICE itself has no additional checks or rules
def can_be_broken_by(self, breaker):
return True
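
# Hypothetical subclass (added sketch, not an official card): shows the
# standard/optional subroutine split described above, with the trace
# subroutine counted as optional.
class ExampleTracer(Ice):
    name = 'Example Tracer'
    subtype = ('sentry', 'tracer')
    strength = 4
    rez_cost = 6
    subroutine_count = 1           # "End the run."
    optional_subroutine_count = 1  # "Trace[3] - give the Runner 1 tag."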
| 2.78125 | 3 |
PythonExercicios/ex042.py | Lucas-ns/Python-3-Curso-Em-Video | 0 | 12770672 | a = float(input('First segment: '))
b = float(input('Second segment: '))
c = float(input('Third segment: '))
if (a + b) > c and (a + c) > b and (b + c) > a:
    print('The segments above CAN FORM a triangle', end=' ')
    if a == b == c:
        print('EQUILATERAL!')
    elif a == b or a == c or c == b:
        print('ISOSCELES!')
    else:
        print('SCALENE!')
else:
    print('The segments above CANNOT FORM a triangle!')
| 4.0625 | 4 |
setup.py | treeverse/boto-s3-router | 14 | 12770673 | """
Boto S3 Router install script
"""
from setuptools import setup, find_packages
from pathlib import Path
import os
NAME = "boto-s3-router"
this_directory = Path(__file__).parent
LONG_DESCRIPTION = (this_directory / "README.md").read_text()
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"boto3",
"fnmatch2",
]
setup(
name=NAME,
version=os.getenv('VERSION', '0.0.1'),
description="Provides a Boto3-like client routing requests to multiple S3 clients",
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author="Treeverse",
author_email="<EMAIL>",
url="https://github.com/treeverse/boto-s3-router",
keywords=["boto", "boto3", "lakeFS", "minio", "AWS", "s3", "router"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude="tests"),
include_package_data=True,
)
| 2.25 | 2 |
Array/Gas_Station.py | shua2018ti/Google | 87 | 12770674 | <reponame>shua2018ti/Google<gh_stars>10-100
'''
There are N gas stations along a circular route, where the amount of gas at station i is gas[i].
You have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to its next station (i+1). You begin the journey with an empty tank at one of the gas stations.
Return the starting gas station's index if you can travel around the circuit once, otherwise return -1.
Note:
The solution is guaranteed to be unique.
'''
# Good Analysis: http://blog.csdn.net/kenden23/article/details/14106137
def canCompleteCircuit(self, gas, cost):
total = 0; sum = 0
n = len(gas)
j = -1
for i in xrange(n):
sum += gas[i] - cost[i]
total += gas[i] - cost[i]
if sum < 0:
j = i
sum = 0
if total < 0:
return -1
return j + 1
class Solution:
# @param {integer[]} gas
# @param {integer[]} cost
# @return {integer}
    # O(n) one-pass greedy solution
def canCompleteCircuit(self, gas, cost):
total = 0; sum = 0
n = len(gas)
j = 0
for i in xrange(n):
sum += gas[i] - cost[i]
total += gas[i] - cost[i]
if sum < 0:
j = i + 1
sum = 0
if total < 0:
return -1
return j
'''
1. Key points:
a. Starting from i, j is the pointer to the current station; sum += gas[j] - cost[j]
   (after refueling at station j, plus the fuel left over from driving i..j,
   this is how much fuel remains on arrival at station j+1).
b. If sum < 0, starting from i cannot work. Could some station between i and j
   work instead? Since i -> i+1 is feasible but i..j as a whole is not,
   i+1..j cannot be feasible either.
c. By the same reasoning, i+2..j, i+3..j, i+4..j, ... are all infeasible.
d. So whenever sum < 0, set the candidate index to j + 1 and reset sum to zero.
e. Finally, total tells whether completing a full circle is possible at all.
If gas[i] - cost[i] < 0 at station i, restart the candidate from i + 1.
If total(gas[i] - cost[i]) < 0, no valid starting point exists.
# Reference: http://www.cnblogs.com/yuzhangcmu/p/4179228.html
'''
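
# Worked example (added sketch, not from the original solution): with
# gas = [1, 2, 3, 4, 5] and cost = [3, 4, 5, 1, 2] the running surplus is
# negative through stations 0..2, so the candidate start keeps resetting;
# from index 3 on, the surplus (+3, +3) exactly covers the deficit (total = 0).
if __name__ == '__main__':
    print(Solution().canCompleteCircuit([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]))  # 3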
| 3.515625 | 4 |
python/destinations/slack-notifications/main.py | SteveQuixDemo/quix-library | 7 | 12770675 | <reponame>SteveQuixDemo/quix-library
from quixstreaming import QuixStreamingClient, StreamReader
from quixstreaming.app import App
from quix_function import QuixFunction
import os
# Quix injects credentials automatically to the client. Alternatively, you can always pass an SDK token manually as an argument.
client = QuixStreamingClient()
print("Opening input and output topics")
input_topic = client.open_input_topic(os.environ["input"], "default-consumer-group-5")
webhook_url = os.environ["webhook_url"]
# Callback called for each incoming stream
def read_stream(input_stream: StreamReader):
# Create a new stream to output data
# handle the data in a function to simplify the example
quix_function = QuixFunction(webhook_url, input_stream)
# React to new data received from input topic.
input_stream.parameters.on_read += quix_function.on_parameter_data_handler
input_stream.events.on_read += quix_function.on_event_data_handler
# Hook up events before initiating read to avoid losing out on any data
input_topic.on_stream_received += read_stream
# Hook up to termination signal (for docker image) and CTRL-C
print("Listening to streams. Press CTRL-C to exit.")
# Handle graceful exit of the model.
App.run() | 2.53125 | 3 |
aliceplex/schema/schema/base.py | aliceplex/schema | 0 | 12770676 | <reponame>aliceplex/schema<filename>aliceplex/schema/schema/base.py
from dataclasses import Field, asdict, fields, is_dataclass
from typing import Any, Dict, List, Optional
from marshmallow import Schema, post_load, pre_dump, pre_load
__all__ = ["DataClassSchema"]
class DataClassSchema(Schema):
@pre_dump
def convert(self, data) -> Dict[str, Any]:
"""
Convert dataclass object to dict.
:param data: Input data
:type data: Any
:return: Dictionary for dumping.
:rtype: Dict[str, Any]
"""
new_data = asdict(data) if is_dataclass(data) else {**data}
self.filter_data(new_data)
return new_data
    @pre_load
    def filter(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize raw input data before deserialization."""
        new_data = {**data}
        self.filter_data(new_data)
        return new_data
    def filter_data(self, data: Dict[str, Any]):
        """Drop empty strings and clean list fields based on the dataclass annotations."""
        data_class_fields = self._get_field()
for field in data_class_fields:
name = field.name
if name not in data:
continue
f_type = field.type
origin = getattr(f_type, "__origin__", None)
args = getattr(f_type, "__args__", ())
if list in (f_type, origin):
data[name] = self._filter_list(data[name])
elif self._is_str(f_type, origin, args) and data[name] == "":
# Replace empty string with None
data[name] = None
@staticmethod
def _is_str(f_type, origin, args) -> bool:
return str in (f_type, origin) or str in args
@staticmethod
def _filter_list(data: Optional[list]) -> list:
if data is None:
# Convert None to empty list for List field
return []
# Filter None and empty string in list
return [value for value in data if value is not None and value != ""]
def _get_field(self) -> List[Field]:
"""
Get fields of the dataclass
:return: Defined fields on dataclass
:rtype: List[Field]
:raises ValueError: if data_class is not a dataclass
"""
data_class = self.data_class
if not is_dataclass(data_class):
raise ValueError("You should use dataclass for DataClassSchema")
# noinspection PyDataclass
return fields(data_class)
@post_load
def post_load(self, data) -> Any:
"""
Convert dict to dataclass object
:param data: Input data
:type data: Dict[str, Any]
:return: Dataclass
:rtype: Any
"""
data_class = self.data_class
return data_class(**data)
@property
def data_class(self) -> type:
"""
Provide the dataclass of this schema.
:return: Dataclass
:rtype: type
"""
raise NotImplementedError()
| 2.25 | 2 |
lab3-Crawler2/code/crawler_sample_thread.py | MadCreeper/SJTU_ICE2602 | 0 | 12770677 | # SJTU EE208
import threading
import queue
import time
def get_page(page):
print('downloading page %s' % page)
time.sleep(0.5)
return g.get(page, [])
def get_all_links(content):
return content
def working():
while True:
print("getting","left:",q.qsize())
page = q.get()
# if varLock.acquire():
if page not in crawled:
# varLock.release()
# else:
# varLock.release()
content = get_page(page)
outlinks = get_all_links(content)
for link in outlinks:
q.put(link)
if varLock.acquire():
graph[page] = outlinks
crawled.append(page)
varLock.release()
print(q.qsize())
q.task_done()
g = {'A': ['B', 'C', 'D'],
'B': ['E', 'F'],
'C': ['1', '2'],
'1': ['3', '4'],
'D': ['G', 'H'],
'E': ['I', 'J'],
'G': ['K', 'L'],
}
start = time.time()
NUM = 4
crawled = []
graph = {}
varLock = threading.Lock()
q = queue.Queue()
q.put('A')
for i in range(NUM):
t = threading.Thread(target=working)
t.setDaemon(True)
t.start()
q.join()
end = time.time()
print(end - start)
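
# Rough expectation (added note, timings illustrative): the toy graph above
# yields 16 pages at 0.5 s each, so 4 workers should finish in roughly
# 16 / 4 * 0.5 = 2 s instead of ~8 s sequentially.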
| 2.796875 | 3 |
tests/unit/cfngin/hooks/awslambda/models/test_responses.py | ITProKyle/runway-hook-awslambda | 1 | 12770678 | """Test runway.cfngin.hooks.awslambda.models.responses."""
# pylint: disable=no-self-use,protected-access
from __future__ import annotations
import pytest
from pydantic import ValidationError
from awslambda.models.responses import AwsLambdaHookDeployResponse
class TestAwsLambdaHookDeployResponse:
"""Test AwsLambdaHookDeployResponse."""
def test_extra(self) -> None:
"""Test extra fields."""
with pytest.raises(ValidationError) as excinfo:
AwsLambdaHookDeployResponse(
bucket_name="test-bucket",
code_sha256="sha256",
invalid=True, # type: ignore
object_key="key",
runtime="test",
)
errors = excinfo.value.errors()
assert len(errors) == 1
assert errors[0]["loc"] == ("invalid",)
assert errors[0]["msg"] == "extra fields not permitted"
| 2.421875 | 2 |
sana_pchr/reporting/run_reporting.py | SanaMobile/sana.pchr.oss-web | 0 | 12770679 | from sana_pchr.reporting.recommender import *
from datetime import timedelta, date
from dateutil import rrule
import csv
calcs = [DIABETES_CALCULATOR, HYPERTENSION_CALCULATOR, DYSLIPIDEMIA_CALCULATOR]
clinics = [clinic for clinic in Clinic.objects.all() if "Test" not in clinic.name ]
start_date = date(2016,2,14)
end_date = date(2016,10,30)
#Parses the JSON-like format into rows that can be written to a CSV file
def parse_out(y, prefix):
out = {}
#recursive function that can take care of dicts or lists
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
for a in y:
flatten(a[1], prefix + '_' + a[0] + '_')
return out
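
# Illustrative behaviour (added sketch, hypothetical values): nested
# lists/dicts are flattened into 'prefix_key_subkey' columns, e.g.
#   parse_out([('low', {'count': 2, 'ids': ['a', 'b']})], 'All')
#   -> {'All_low_count': 2, 'All_low_ids_0': 'a', 'All_low_ids_1': 'b'}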
#runs the analysis
def run():
for clinic in clinics:
last_date = date(2016,2,7)
with open(clinic.name + ".csv", mode="w") as outfile:
row = 1
for dt in rrule.rrule(rrule.WEEKLY, dtstart=start_date, until=end_date):
summary_out = parse_out(RiskLevelCalculator.get_summary(clinic, last_date, dt), 'All')
calc_outs = [parse_out(calc.calculate(clinic, last_date, dt),calc.name) for calc in calcs]
combd = {}
for calc_out in calc_outs:
combd.update(calc_out)
combd.update(summary_out)
if row == 1:
fields = ["week_starting" , 'ASCVD Level_<10%', 'ASCVD Level_10-20%', 'ASCVD Level_20-30%', 'ASCVD Level_30-40%','ASCVD Level_>40%'] + sorted(combd)
writer = csv.DictWriter(outfile, fieldnames=fields)
writer.writeheader()
row = 2
#Need this workaround with ASCVD calc b/c grouping is only by ones present, need all for header
combd.update(parse_out(ASCVD_CALCULATOR.calculate(clinic, last_date, dt), 'ASCVD Level'))
combd.update({'week_starting': last_date.strftime("%Y-%m-%d")})
writer.writerow(combd)
last_date = dt
| 2.4375 | 2 |
boboleetcode/Play-Leetcode-master/0101-Symmetric-Tree/py-0101/Solution1.py | mcuallen/CodeLrn2019 | 2 | 12770680 | # Source : https://leetcode.com/problems/symmetric-tree/
# Author : penpenps
# Time : 2019-07-09
# Mirror the right subtree first, then compare it with the left subtree
# Time Complexity: O(n)
# Space Complexity: O(n)
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
        # Mirror the subtree rooted at node, in place
def revert(node):
if not node: return
node.left, node.right = node.right, node.left
revert(node.left)
revert(node.right)
def isEqual(left, right):
if not left and not right: return True
if not left or not right: return False
if left.val != right.val:
return False
return isEqual(left.left, right.left) and isEqual(left.right, right.right)
if not root:
return True
revert(root.right)
return isEqual(root.left, root.right)
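
# Quick demonstration (added sketch): a three-node symmetric tree 1 -> (2, 2).
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(2)
    print(Solution().isSymmetric(root))  # True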
| 4.0625 | 4 |
public/templates/_default.py | WinterTechForum/amazebot | 0 | 12770681 | from time import sleep
from org.jointheleague.ecolban.rpirobot import SimpleIRobot, Sonar
robot = SimpleIRobot()
sonar = Sonar()
def setup():
# Initialization code here
pass
def loop():
# Repeating code here
return True
def shutdown():
robot.reset()
robot.stop()
robot.closeConnection()
setup()
while loop():
pass
shutdown() | 2.234375 | 2 |
Workshop6/1b.py | Camiloasc1/OptimizationUNAL | 0 | 12770682 | from Workshop6.Graph import *
C = {}
makeDLinkW(C, '1', '4', 10)
makeDLinkW(C, '1', '2', 5)
makeDLinkW(C, '2', '5', 1)
makeDLinkW(C, '2', '3', 7)
makeDLinkW(C, '3', '6', 4)
makeDLinkW(C, '4', '7', 11)
makeDLinkW(C, '4', '5', 3)
makeDLinkW(C, '5', '8', 7)
makeDLinkW(C, '5', '6', 3)
makeDLinkW(C, '6', '9', 5)
makeDLinkW(C, '7', '10', 9)
makeDLinkW(C, '7', '8', 2)
makeDLinkW(C, '8', '11', 1)
makeDLinkW(C, '8', '9', 0)
makeDLinkW(C, '9', '12', 12)
makeDLinkW(C, '10', '11', 2)
makeDLinkW(C, '11', '12', 4)
print(Dijkstra(C, '1'))
print(FloydWarshall(C))
| 2.859375 | 3 |
Chapter07/0702.py | 0201shj/Python-OpenCV | 0 | 12770683 | # 0702.py
import cv2
import numpy as np
src = cv2.imread('./data/rect.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 100)
lines = cv2.HoughLines(edges, rho = 1, theta = np.pi/180.0, threshold = 100)
print('lines.shape = ', lines.shape)
for line in lines:
    rho, theta = line[0]
    c = np.cos(theta)
    s = np.sin(theta)
    # (x0, y0) is the point on the line closest to the origin.
    x0 = c * rho
    y0 = s * rho
    # Extend 1000 px both ways along the line's direction vector (-sin, cos).
    x1 = int(x0 + 1000 * (-s))
    y1 = int(y0 + 1000 * (c))
    x2 = int(x0 - 1000 * (-s))
    y2 = int(y0 - 1000 * (c))
    cv2.line(src, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('edges', edges)
cv2.imshow('src', src)
cv2.waitKey()
cv2.destroyAllWindows() | 2.921875 | 3 |
distribulator/engine/data/Environment.py | adace74/test | 0 | 12770684 | <filename>distribulator/engine/data/Environment.py
######################################################################
#
# $Id$
#
# (c) Copyright 2004 Orbitz, Inc. All Rights Reserved.
# Please see the accompanying LICENSE file for license information.
#
######################################################################
# Pydoc comments
"""This class holds data regarding a given server group."""
# Version tag
__version__= '$Revision$'[11:-2]
# Standard modules
import re
######################################################################
class Environment:
"""This class holds data regarding a given server group."""
def __init__(self):
"""Constructor."""
self._serverGroupCount = 0
self._serverGroupList = []
######################################################################
# Name.
######################################################################
def getName(self):
"""This is a typical accessor method."""
return self._name
######################################################################
def setName(self, PassedName):
"""This is a typical accessor method."""
self._name = PassedName
######################################################################
# Servers.
######################################################################
def getServerByName(self, PassedServerName):
"""This is a typical accessor method, including some search logic."""
PassedServerName = PassedServerName.strip()
for myServerGroup in self._serverGroupList:
if ( myServerGroup.getServerByName(PassedServerName) ):
return myServerGroup.getServerByName(PassedServerName)
return False
######################################################################
# ServerGroups.
######################################################################
def getServerGroupCount(self):
"""This is a typical accessor method."""
return self._serverGroupCount
######################################################################
def getServerGroupList(self):
"""This is a typical accessor method."""
return self._serverGroupList
######################################################################
def setServerGroupList(self, PassedServerGroupList):
"""This is a typical accessor method."""
self._serverGroupList = PassedServerGroupList
######################################################################
def getServerGroupByName(self, PassedServerGroupName):
"""This is a typical accessor method, including some search logic."""
PassedServerGroupName = PassedServerGroupName.strip()
# Silly hack to make handling attributes a little easier.
if (PassedServerGroupName.find('[') != -1):
PassedServerGroupName = PassedServerGroupName[:PassedServerGroupName.find('[')]
# Handle regex.
reggie = re.compile(r'(.*):r\'(.*)\'')
maggie = reggie.match(PassedServerGroupName)
if maggie != None:
for myServerGroup in self._serverGroupList:
if (maggie.group(1) == myServerGroup.getName()):
return myServerGroup
else:
for myServerGroup in self._serverGroupList:
if (PassedServerGroupName == myServerGroup.getName()):
return myServerGroup
return False
######################################################################
def getServerGroupName(self, PassedServerGroupName):
"""This is a typical accessor method, including some search logic."""
PassedServerGroupName = PassedServerGroupName.strip()
# Silly hack to make handling attributes a little easier.
if (PassedServerGroupName.find('[') != -1):
PassedServerGroupName = PassedServerGroupName[:PassedServerGroupName.find('[')]
return PassedServerGroupName
######################################################################
def addServerGroup(self, PassedServerGroup):
"""This is a typical accessor method."""
self._serverGroupCount = self._serverGroupCount + 1
self._serverGroupList.append(PassedServerGroup)
######################################################################
def getDefaultServerGroup(self):
"""This is a typical accessor method."""
return self._defaultServerGroup
######################################################################
def setDefaultServerGroup(self, PassedDefaultServerGroup):
"""This is a typical accessor method."""
self._defaultServerGroup = PassedDefaultServerGroup
######################################################################
| 2.328125 | 2 |
examples/cw/cw_users.py | neurospin/pycaravel | 0 | 12770685 | """
CubicWeb instance request (user mode)
=====================================
Credit: <NAME>
pycaravel is a Python package that enables you to parse various sources of data.
In this tutorial you will learn how to parse and search in a CubicWeb instance.
First checks
------------
In order to test if pycaravel package is installed on your machine, you can
check the package version.
"""
import caravel
print(caravel.__version__)
#############################################################################
# Now you can run the the configuration info function to see if all the
# dependencies are installed properly:
print(caravel.info())
#############################################################################
# Create a parser for your project
# --------------------------------
#
# The package provides a common interface to parse a CubicWeb instance. The
# parsing rules are defined by projects in the module, so we will need to
# specify the project name you are working on. For the moment it is not
# possible to specify these rules via the API.
parser = caravel.get_parser(
project="herby",
layoutdir="/neurospin/tmp/pycaravel/layout")
#############################################################################
# You can now list the available configurations for your project, and the
# available layout representations pre-generated. Note that these
# representations are sorted by dates, and that the latest one will be used.
from pprint import pprint
pprint(parser.conf)
pprint(parser.representation)
#############################################################################
# You can export the whole 'sourcedata' layout in a pandas DataFrame.
print(parser.export_layout("sourcedata"))
#############################################################################
# It is also possible to filter this dataset. You first need to list all the
# available filtering keys, then list all the available values for the
# filtering key(s) of interest, and finally filter your dataset.
print(parser.list_keys("sourcedata"))
print(parser.list_values("sourcedata", "modality"))
print(parser.list_values("sourcedata", "center"))
search1 = parser.filter_layout(
"sourcedata", modality="T1w|T2w", extension="NIFTI", session="V04",
center="igr.fr")
print(search1)
#############################################################################
# Finally you may want to ask the system to load the filtered data. Only a
# couple of file extensions are supported; if no loader is found, the
# filename is returned. Using the shopping cart mechanism you have downloaded
# your data to a custom folder. You need to specify this server-to-local
# machine mapping by setting the 'replace' parameter.
data1 = parser.load_data(
"sourcedata", search1,
replace=("/neurospin/radiomics_pub", "/neurospin/radiomics_pub"))
pprint(data1)
#############################################################################
# And for the phenotype
# ---------------------
#
# We can do the same for the phenotype
print(parser.list_keys("phenotype"))
print(parser.list_values("phenotype", "questionnaire"))
print(parser.list_values("phenotype", "subject"))
search2 = parser.filter_layout("phenotype", questionnaire="mcld",
subject="175643|278350")
print(search2)
data2 = parser.load_data("phenotype", search2)
pprint(data2)
| 2.828125 | 3 |
gatlin/core/flowPipeline.py | kokomal/GATLIN | 1 | 12770686 | # coding = utf-8
import copy
import json
import gatlin.infra.commonUtils as util
import gatlin.infra.print as pt
import gatlin.nodes.parserSelector as ps
# Load the full set of flows to be tested
def launch_flows_config(location):
flow_json_file = location
with open(flow_json_file) as fl:
flows_config = json.loads(fl.read())
return flows_config
def parse_one_flowX(flow_name, nodes, environ, init_param):
pt.print_green('*' * 45 + ('PARSING %s' % flow_name) + ' BEGIN' + '*' * 45)
context = {}
    context['environ'] = copy.deepcopy(environ)  # environ is hoisted to the global main flow
context['request'] = {}
context['response'] = {}
context['session'] = init_param
context['misc'] = {'canProceed': True}
for node in nodes:
util.inject_all(context['environ'], node)
context['environ']['skip'] = False
node_parser = ps.fetch_parser(node['nodeName'])(context)
node_parser.lock_and_load()
if not node_parser.can_proceed():
pt.print_yellow("DUE TO [==%s==] THE FLOW HAS TO STOP." % context['misc']['reason'])
pt.print_yellow('#' * 35 + "NODE %s CANNOT PROCEED" % node['nodeName'] + '#' * 35)
pt.print_red('*' * 45 + ('PARSING %s' % flow_name) + ' ABORTED' + '*' * 45)
break
        context['environ'] = copy.deepcopy(environ)  # reset environ each node to avoid cross-contamination; session is managed by the nodes
context['request'] = {}
context['response'] = {}
pt.print_green('*' * 45 + ('PARSING %s' % flow_name) + ' ENDED' + '*' * 45)
if __name__ == '__main__':
print(launch_flows_config("../input/flows.json"))
| 2.203125 | 2 |
fuzzware_pipeline/util/genconfig.py | mo-bl/fuzzware-pipeline | 0 | 12770687 | import os
import pathlib
import string
import subprocess
from elftools.elf.constants import SH_FLAGS
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from fuzzware_harness.util import bytes2int
from fuzzware_pipeline.logging_handler import logging_handler
logger = logging_handler().get_logger("pipeline")
PRINTABLE_ASCIIVALS = frozenset(map(ord, string.printable))
OBJCOPY_UTIL = "arm-none-eabi-objcopy"
DYNAMICALLY_ADDED_REGION_NAME_PREFIX = "dynamically_added_crash_region_"
# From cortexm_memory.yml
DEFAULT_MEM_MAP = {
"ram": {"base_addr": 0x20000000, "size": 0x00100000, "permissions": "rw-"},
"mmio": {"base_addr": 0x40000000, "size": 0x20000000, "permissions": "rw-"},
"nvic": {"base_addr": 0xe0000000, "size": 0x10000000, "permissions": "rw-"},
"irq_ret": {"base_addr": 0xfffff000, "size": 0x1000, "permissions": "--x"}
}
# Some padded size after raw ROM contents
DEFAULT_ADD_TEXT_SIZE = 0x10000
ELF_MAGIC = b"\x7f\x45\x4c\x46"
def is_elf(path):
with open(path, "rb") as f:
magic = f.read(len(ELF_MAGIC))
from binascii import hexlify
logger.info(f"looking at file contents: {hexlify(magic)} == {hexlify(ELF_MAGIC)}")
return magic == ELF_MAGIC
def extract_elf(in_path, out_path):
assert is_elf(in_path)
subprocess.check_call([OBJCOPY_UTIL, "-O", "binary", in_path, out_path])
def collect_pointers(binary_contents):
pointers = []
initial_sp, reset_vector_addr = bytes2int(binary_contents[:4]), bytes2int(binary_contents[4:8])
logger.info(f"Got reset vector: 0x{reset_vector_addr:08x}")
min_rom_ptr, max_rom_ptr = reset_vector_addr, reset_vector_addr
def is_rom_ptr(addr, curr_min, curr_max):
if addr < 8:
return False
# Check range
outer_edge_size = len(binary_contents)-(curr_max - curr_min)
return curr_min-outer_edge_size <= addr <= curr_max + outer_edge_size
for i in range(8, len(binary_contents), 4):
val = bytes2int(binary_contents[i:i+4])
if is_rom_ptr(val, min_rom_ptr, max_rom_ptr):
if val < min_rom_ptr:
min_rom_ptr = val
elif val > max_rom_ptr:
max_rom_ptr = val
pointers.append(val)
return initial_sp, reset_vector_addr, pointers
def has_ascii_at_offset(binary_contents, offset, min_len=8):
if len(binary_contents) < offset + min_len:
return False
res = all(map(lambda ind: binary_contents[offset+ind] in PRINTABLE_ASCIIVALS, range(min_len)))
return res
THUMB_OPC_PUSH = 0xB5
THUMB_OPC_STMFD1 = 0x2D
THUMB_OPC_STMFD2 = 0xE9
THUMB_OPC_INFLOOP = 0xE7FE
FN_PROLOGUE_OPCODES = (THUMB_OPC_PUSH, THUMB_OPC_STMFD1, THUMB_OPC_STMFD2)
def has_fn_prologue_at_offset(binary_contents, binary_offset):
if binary_offset & 1 != 1:
return False
if len(binary_contents) <= binary_offset:
return False
# Remove thumb bit
binary_offset &= ~1
res = binary_contents[binary_offset+1] in FN_PROLOGUE_OPCODES
if not res:
res = THUMB_OPC_INFLOOP == bytes2int(binary_contents[binary_offset:binary_offset+2])
if res:
logger.info(f"Found function prologue at offset {binary_offset:x}")
return res
def can_be_good_offset(binary_contents, ptr, base_offset):
binary_offset = ptr - base_offset
if binary_offset < 0 or binary_offset > len(binary_contents):
return False
# We are pointing inside the image, let's see now
# 1. Is string?
if has_ascii_at_offset(binary_contents, binary_offset):
logger.info(f"Found ascii! (ptr 0x{ptr:08x}, offset: {base_offset:x}")
return True
# 2. Is function pointer?
if has_fn_prologue_at_offset(binary_contents, binary_offset):
return True
return False
PAGE_SIZE = 0x1000
PAGE_MASK = PAGE_SIZE - 1
def find_text_mapping(binary_path):
# Find by raw binary
# We do this via FirmXRay's algorithm:
# 1. scan for pointer values
# 2. Guess values based on found pointers and check whether pointers point to functions/strings
# 3. Choose base address with most matches
with open(binary_path, "rb") as f:
binary_contents = f.read()
aligned_contents_len = len(binary_contents)
if aligned_contents_len & PAGE_MASK:
aligned_contents_len = (aligned_contents_len & (~PAGE_MASK)) + PAGE_SIZE
initial_sp, reset_vector, pointers = collect_pointers(binary_contents)
pointers = sorted(set(pointers))
min_ptr, max_ptr = pointers[0], pointers[-1]
_, _, aligned_reset_vector = min_ptr & (~PAGE_MASK), max_ptr & (~PAGE_MASK), reset_vector & (~PAGE_MASK)
first_offset_candidate = -aligned_contents_len # -min(aligned_contents_len, aligned_min_ptr)
#print("first oc {:x}".format(first_offset_candidate))
#print("reset_vector {:x}".format(reset_vector))
if (reset_vector - first_offset_candidate) < 0: #sanity check, necessary for certain boards
first_offset_candidate = 0
#print("first oc {:x}".format(first_offset_candidate))
last_offset_candidate = aligned_contents_len
matches_per_offset = {}
for offset_candidate in range(first_offset_candidate, last_offset_candidate, PAGE_SIZE):
logger.info(f"Checking offset candidate: {offset_candidate}")
matches_per_offset[offset_candidate] = sum(map(lambda ptr: can_be_good_offset(binary_contents, ptr-aligned_reset_vector, offset_candidate), pointers))
best_candidates = sorted(matches_per_offset.items(), key=lambda entry: matches_per_offset[entry[0]])
best_candidate_offset = best_candidates[-1][0]
base_addr = aligned_reset_vector + best_candidate_offset
logger.info(f"Got base address: 0x{base_addr:08x} with {matches_per_offset[best_candidate_offset]} plausible address matches (second best: {best_candidates[-2][1]}).")
return initial_sp, base_addr, os.stat(binary_path).st_size + DEFAULT_ADD_TEXT_SIZE
def merge_adjacent_regions(memregion_config):
"""
Merge scattered memory regions into consecutive regions
"""
region_ends = {
entry["base_addr"]+entry["size"]: region_name for region_name, entry in memregion_config.items()
}
removed_region_names = []
for region_name in memregion_config:
start, size = memregion_config[region_name]['base_addr'], memregion_config[region_name]['size']
# Is our region the start of another region?
if start in region_ends:
adjacent_region_name = region_ends.pop(start)
memregion_config[adjacent_region_name]["size"] += size
region_ends[start+size] = adjacent_region_name
# Remove now merged_in fragment
memregion_config[region_name] = None
removed_region_names.append(region_name)
for rname in removed_region_names:
del memregion_config[rname]
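
# Illustrative behaviour (added sketch, hypothetical addresses): two
# back-to-back regions collapse into a single one.
#   regions = {"a": {"base_addr": 0x0, "size": 0x1000},
#              "b": {"base_addr": 0x1000, "size": 0x1000}}
#   merge_adjacent_regions(regions)
#   regions  ->  {"a": {"base_addr": 0x0, "size": 0x2000}}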
def add_missing_regions(existing_mem_config, add_entries):
for rname, entry in add_entries.items():
start = entry['base_addr']
end = start + entry['size']
should_add = True
logger.info(f"Looking at region to add: {rname} ({start:x}-{end:x})")
consumed_region_names = set()
sorted_other_regions = sorted(existing_mem_config, key=lambda k: existing_mem_config[k]['base_addr'])
for i, other_rname in enumerate(sorted_other_regions):
if other_rname in consumed_region_names:
continue
other_entry = existing_mem_config[other_rname]
other_start = other_entry['base_addr']
other_end = other_start + other_entry['size']
print(f"comparing to {other_rname} ({other_start:x}-{other_end:x})")
# Need to extend next region backwards?
if start < other_start <= end:
logger.info(f"Setting start of region {other_rname} ({other_start:x}-{other_end:x}) to {start:x}")
prepend_size = other_start - start
other_start = start
other_entry['base_addr'] = other_start
other_entry['size'] += prepend_size
# Do we also need to extend other region forward?
if other_start <= start <= other_end < end:
# If we need to extend the other region forward, make sure not to clash with the region following that
if i+1 < len(sorted_other_regions):
next_region_name = sorted_other_regions[i+1]
next_start = existing_mem_config[next_region_name]['base_addr']
if end > next_start:
# We got a collision. Is it dynamically added?
if DYNAMICALLY_ADDED_REGION_NAME_PREFIX in next_region_name:
logger.warn(f"While extending forward, collided with dynamically added region {next_region_name}, consuming it")
consumed_region_names.add(next_region_name)
next_end = next_start + existing_mem_config[next_region_name]['size']
end = max(end, next_end)
del existing_mem_config[next_region_name]
else:
logger.warn("While extending forward, collided with next region, setting end to other region's start")
end = next_start
append_size = end - other_end
other_end = end
logger.info(f"Extending end of region {other_rname} ({other_start:x}-{other_start+other_entry['size']:x}) to {other_end:x}")
other_entry['size'] += append_size
# Fully contained? Then we added it or it was already included
if other_start <= start <= other_end and other_start <= end <= other_end:
logger.info(f"Region {rname} ({start:x}-{end:x}) fully contained in region {other_rname}")
should_add = False
break
# We did not find an overlap, so add the section
if should_add:
logger.info(f"Adding memory region {rname} ({start:#10x}-{end:#10x}) to config")
while rname in existing_mem_config:
rname = "_" + rname
existing_mem_config[rname] = {**entry}
def align_mem_map_to_pages(mem_config):
"""
Given an already non-colliding memory map, we make
sure that two regions are not on the same page boundary.
"""
sorted_region_names = sorted(mem_config, key=lambda reg_name: mem_config[reg_name]['base_addr'])
region_indices_to_eliminate = []
for i, region_name in enumerate(sorted_region_names):
if i == len(sorted_region_names):
break
if i in region_indices_to_eliminate:
continue
cur_start = mem_config[region_name]['base_addr']
cur_size = mem_config[region_name]['size']
cur_end = cur_start + cur_size
logger.info(f"[align_mem_map_to_pages] looking at region '{region_name}', base: {cur_start:#010x}, size: {cur_size:#x}")
# If we are aligned, there is no need to shift anything
if cur_end & PAGE_MASK == 0:
continue
next_start = mem_config[sorted_region_names[i+1]]['base_addr']
if cur_end & ~PAGE_MASK == next_start & ~PAGE_MASK:
logger.warning(f"Regions {region_name} and {sorted_region_names[i+1]} end/start on the same page, unaligned.")
cur_size += PAGE_SIZE - (cur_end % PAGE_SIZE)
next_shrink_size = PAGE_SIZE - (next_start % PAGE_SIZE)
next_start += next_shrink_size
logger.warning(f"Adjusting {region_name} size to {cur_size:08x}.")
logger.warning(f"Adjusting {sorted_region_names[i+1]} start to {next_start:#010x}.")
mem_config[sorted_region_names[i+1]]['base_addr'] = next_start
if next_shrink_size <= mem_config[sorted_region_names[i+1]]['size']:
mem_config[sorted_region_names[i+1]]['size'] -= next_shrink_size
else:
logger.warning(f"Fully removing region {sorted_region_names[i+1]} which spanned less than a page")
region_indices_to_eliminate.append(i+1)
mem_config[region_name]['size'] = cur_size
# TODO: We might have different permissions here. But if they differed,
# that would not have worked on most architectures anyways.
# What we could do instead is create a single-page region with merged permissions
for i in region_indices_to_eliminate:
del mem_config[sorted_region_names[i]]
def collect_and_merge_elf_segments(elf_path):
res = load_elf_segment_mem_regions(elf_path)
merge_adjacent_regions(res)
return res
def add_mem_map(config_basedir, config_map, binary_path, elf_path, ivt_offset):
if "memory_map" not in config_map:
config_map["memory_map"] = {}
mem_cfg = config_map["memory_map"]
# Fill from default memory map
for memregion_name, memregion_config in DEFAULT_MEM_MAP.items():
if memregion_name not in mem_cfg:
mem_cfg[memregion_name] = memregion_config
binary_already_mapped = False
abs_binpath = os.path.abspath(binary_path)
for region_config in mem_cfg.values():
f = region_config.get("file")
if f and f == abs_binpath:
binary_already_mapped = True
break
if not binary_already_mapped:
# We will register the binary image as "text", make sure it is not taken
assert "text" not in mem_cfg
_, text_base, text_size = find_text_mapping(binary_path)
mem_cfg["text"] = {
"base_addr": text_base,
"size": text_size,
"ivt_offset" : ivt_offset,
# get the relative path
"file": str(pathlib.Path(binary_path).relative_to(pathlib.Path(config_basedir))),
"permissions": "r-x"
}
if elf_path:
elf_memory_regions = collect_and_merge_elf_segments(elf_path)
logger.info(f"collected ELF memory regions: {elf_memory_regions}")
add_missing_regions(config_map['memory_map'], elf_memory_regions)
align_mem_map_to_pages(config_map['memory_map'])
def gen_syms(elf_path):
# Based on https://github.com/eliben/pyelftools/blob/master/scripts/readelf.py
res = {}
with open(elf_path, "rb") as f:
elffile = ELFFile(f)
symbol_tables = [(idx, s) for idx, s in enumerate(elffile.iter_sections())
if isinstance(s, SymbolTableSection)]
if not symbol_tables and elffile.num_sections() == 0:
logger.warning("No symbol sections...")
return res
for _, section in symbol_tables:
if section['sh_entsize'] == 0:
logger.warning("section['sh_entsize'] == 0")
# Symbol table has no entries
continue
for _, symbol in enumerate(section.iter_symbols()):
if symbol.name and "$" not in symbol.name:
res[symbol['st_value']] = symbol.name
return res
def load_elf_segment_mem_regions(elf_path):
# Based on https://github.com/eliben/pyelftools/blob/master/scripts/readelf.py
res = {}
with open(elf_path, "rb") as f:
elffile = ELFFile(f)
if elffile.num_sections() == 0:
return res
for section in elffile.iter_sections():
if (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) == 0:
logger.debug(f"Section {section.name} does not have alloc flag set, skipping")
continue
if section['sh_size'] == 0:
logger.debug(f"Section {section.name} has 0 size, skipping")
continue
res[section.name] = {
'base_addr': section['sh_addr'],
'size': section['sh_size'],
'permissions': ('r'
+ ("w" if section['sh_flags'] & SH_FLAGS.SHF_WRITE else "-")
+ ("x" if section['sh_flags'] & SH_FLAGS.SHF_EXECINSTR else "-")
)
}
return res
def gen_configs(config_basedir, config_map, binary_path, elf_path, ivt_offset=0, ti_flag=False):
#check for proprietary header in binary file
#check_for_header(binary_path)
add_mem_map(config_basedir, config_map, binary_path, elf_path, ivt_offset)
if elf_path and 'symbols' not in config_map:
logger.info("Generating symbols")
config_map['symbols'] = gen_syms(elf_path)
if 'interrupt_triggers' not in config_map:
config_map['interrupt_triggers'] = {
"trigger": {
"fuzz_mode": "round_robin",
"every_nth_tick": 1000
}
}
#necessary actions for some texas instruments samples
if ti_flag:
#add rom region
config_map['memory_map']['ti_rom'] = {
"base_addr": 0x10000000,
"file": ti_flag,
"size": 0x20000,
"permissions": "r-x"
}
        logger.debug(config_map)
#change ram size to 0x400000
config_map['memory_map']['ram']['size'] = 0x400000
#add is_entry = True to text
config_map['memory_map']['text']['is_entry'] = True
NUM_CRASH_MAPPED_AROUND_PAGES = 5
def add_region_for_crashing_addr(config_map, crash_addr):
page_start = crash_addr & ~PAGE_MASK
mapping_distance = NUM_CRASH_MAPPED_AROUND_PAGES * PAGE_SIZE
new_region_entry = {
f'{DYNAMICALLY_ADDED_REGION_NAME_PREFIX}{crash_addr:08x}': {
'base_addr': max(page_start - mapping_distance, 0),
'size': 2 * mapping_distance,
'permissions': 'rw-'
}
}
logger.info(f"Adding region for crash address 0x{crash_addr:x}: {new_region_entry}")
logger.warning("If you suspect this region to be an mmio-region, manually preface it with 'mmio' to make sure that it is detected by fuzzware")
add_missing_regions(config_map['memory_map'], new_region_entry)
align_mem_map_to_pages(config_map['memory_map']) | 2.03125 | 2 |
python/scroll.py | robotlightsyou/test | 2 | 12770688 | from bibliopixel.animation import BaseAnimation
from bibliopixel import colors
# This is using the default 32x32 simpixel display -
# so this is a list of 16 * 64 = 1024 colors.
BASE = [
colors.Black, colors.Black, colors.Black, colors.Black,
colors.Green, colors.Green, colors.Green, colors.Green,
colors.Blue, colors.Blue, colors.Blue, colors.Blue,
colors.White, colors.White, colors.White, colors.White,
] * 64
class Scroll(BaseAnimation):
def pre_run(self):
self.layout.set_colors(BASE)
def step(self, amt=1):
# Pop a color off the end, and insert it at the start.
self.layout._colors.insert(0, self.layout._colors.pop())
| 3.265625 | 3 |
0x0A-python-inheritance/1-my_list.py | BennettDixon/holbertonschool-higher_level_programming | 1 | 12770689 | #!/usr/bin/python3
class MyList(list):
"""extended version of list
"""
def print_sorted(self):
"""prints the list in ascending order
"""
copy = self[:]
copy.sort()
print(copy)
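

if __name__ == "__main__":
    # Quick demonstration (added sketch): sorting is done on a copy, so the
    # original list keeps its insertion order.
    my_list = MyList([3, 1, 2])
    my_list.print_sorted()  # [1, 2, 3]
    print(my_list)          # [3, 1, 2]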
| 3.609375 | 4 |
tests/http_provider_hosted_test.py | ilblackdragon/studio | 397 | 12770690 | import unittest
import os
import tempfile
import uuid
from studio import model
from model_test import get_test_experiment
# We are not currently working with HTTP providers.
@unittest.skip
class HTTPProviderHostedTest(unittest.TestCase):
def get_db_provider(self, config_name):
config_file = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
config_name)
return model.get_db_provider(model.get_config(config_file))
def test_add_get_delete_experiment(self):
with self.get_db_provider('test_config_http_client.yaml') as hp:
experiment_tuple = get_test_experiment()
hp.add_experiment(experiment_tuple[0])
experiment = hp.get_experiment(experiment_tuple[0].key)
self.assertEquals(experiment.key, experiment_tuple[0].key)
self.assertEquals(
experiment.filename,
experiment_tuple[0].filename)
self.assertEquals(experiment.args, experiment_tuple[0].args)
hp.delete_experiment(experiment_tuple[1])
self.assertTrue(hp.get_experiment(experiment_tuple[1]) is None)
def test_start_experiment(self):
with self.get_db_provider('test_config_http_client.yaml') as hp:
experiment_tuple = get_test_experiment()
hp.add_experiment(experiment_tuple[0])
hp.start_experiment(experiment_tuple[0])
experiment = hp.get_experiment(experiment_tuple[1])
self.assertTrue(experiment.status == 'running')
self.assertEquals(experiment.key, experiment_tuple[0].key)
self.assertEquals(
experiment.filename,
experiment_tuple[0].filename)
self.assertEquals(experiment.args, experiment_tuple[0].args)
hp.finish_experiment(experiment_tuple[0])
hp.delete_experiment(experiment_tuple[1])
def test_add_get_experiment_artifacts(self):
experiment_tuple = get_test_experiment()
e_experiment = experiment_tuple[0]
e_artifacts = e_experiment.artifacts
a1_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
a2_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
with open(a1_filename, 'w') as f:
f.write('hello world')
e_artifacts['a1'] = {
'local': a1_filename,
'mutable': False
}
e_artifacts['a2'] = {
'local': a2_filename,
'mutable': True
}
with self.get_db_provider('test_config_http_client.yaml') as db:
db.add_experiment(e_experiment)
experiment = db.get_experiment(e_experiment.key)
self.assertEquals(experiment.key, e_experiment.key)
self.assertEquals(experiment.filename, e_experiment.filename)
self.assertEquals(experiment.args, e_experiment.args)
db.delete_experiment(e_experiment.key)
os.remove(a1_filename)
if __name__ == '__main__':
unittest.main()
| 2.609375 | 3 |
Ex011.py | leonardoDelefrate/Curso-de-Python | 0 | 12770691 | <reponame>leonardoDelefrate/Curso-de-Python
altura = float(input('How tall is the wall? '))
largura = float(input('How wide is the wall? '))
area = largura * altura
litros = area / 2
print('Your wall has an area of {}, so you will need {} liters of paint to paint it'.format(area, litros)) | 3.625 | 4 |
nornir_f5/plugins/tasks/bigip/__init__.py | kpressouyre/nornir_f5 | 1 | 12770692 | <reponame>kpressouyre/nornir_f5
"""Nornir F5 BIG-IP tasks."""
| 0.914063 | 1 |
wrappers/arlexecute/calibration/pointing.py | ska-telescope/algorithm-reference-library | 22 | 12770693 | """ Functions for calibration, including creation of pointingtables, application of pointingtables, and
merging pointingtables.
"""
from processing_components.calibration.pointing import create_pointingtable_from_blockvisibility
from processing_components.calibration.pointing import create_pointingtable_from_rows
from processing_components.calibration.pointing import qa_pointingtable
| 1.601563 | 2 |
keyword_preprocess.py | w-garcia/BugClustering | 3 | 12770694 | from util import cwd
import util
from jira import *
import DBModel
def get_full_description(j, line):
begin_key = line.find('[') + 1
end_key = line.find(']') - 1
issue_id = line[begin_key: end_key + 1]
try:
issue = j.issue(issue_id)
except JIRAError:
print JIRAError.message
return "", ""
desc = issue.fields.description
if desc is None:
return "", ""
paragraph_key = desc.find('\n') - 1
return desc[:paragraph_key], desc
# noinspection PyTypeChecker
def process_system(jira, system):
f = open(util.cwd + '/raw/' + system + '.txt', 'r')
if f is None:
print "Couldn't find " + system + ". Aborting."
return
list_classifications = []
classifications_cache = {}
terse_descriptions_cache = []
full_descriptions_cache = []
descriptions_processed_count = -1
for line in f:
if line.find('[') == 0:
if descriptions_processed_count != -1:
classifications_cache[descriptions_processed_count] = u' '.join(list_classifications).encode('utf-8')
list_classifications = []
ters_description, full_description = get_full_description(jira, line)
line = line.replace(']\n', ' ')
# Append found description to the existing stub in the line, before the bracket.
terse_descriptions_cache.append(u' '.join( (line, ters_description, ']\n') ).encode('utf-8'))
full_descriptions_cache.append(u' '.join( (line, full_description, ']\n') ).encode('utf-8'))
descriptions_processed_count += 1
print "Processed bug description: " + str(descriptions_processed_count)
elif line[0].isalpha():
line = line.replace('\n', '')
list_classifications.append(line.encode('utf-8'))
#if descriptions_processed_count == 10:
# break
# Reached eof, append last set of classifications
classifications_cache[descriptions_processed_count] = u' '.join(list_classifications).encode('utf-8')
populate_tables(classifications_cache, full_descriptions_cache, terse_descriptions_cache, system)
print "Processed " + system + "."
def populate_tables(classifications_cache, full_descriptions_cache, terse_descriptions_cache, system):
list_of_full_dicts = []
list_of_ters_dicts = []
for i in range(len(full_descriptions_cache)):
list_of_full_dicts.append({'system': system,
'description': full_descriptions_cache[i],
'classification': classifications_cache[i]})
list_of_ters_dicts.append({'system': system,
'description': terse_descriptions_cache[i],
'classification': classifications_cache[i]})
DBModel.Full_PreProcessed_Keyword.get_db_ref_by_system(system).overwrite_system_rows(system, list_of_full_dicts)
DBModel.Terse_PreProcessed_Keyword.get_db_ref_by_system(system).overwrite_system_rows(system, list_of_ters_dicts)
| 2.421875 | 2 |
deep_qa/contrib/layers/__init__.py | mrbot-ai/deep_qa | 2 | 12770695 |
from .knowledge_backed_lstm import KnowledgeBackedLSTM
from .tree_composition_lstm import TreeCompositionLSTM
| 1.046875 | 1 |
davarocr/davarocr/davar_common/datasets/builder.py | CuteyThyme/MultiModal_IE | 0 | 12770696 | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : builder.py
# Abstract :
# Current Version: 1.0.0
# Date : 2020-05-31
##################################################################################################
"""
import copy
import platform
from functools import partial
from torch.utils.data import DataLoader
from mmcv.utils import Registry
from mmcv.utils import build_from_cfg
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmdet.datasets import DATASETS
from mmdet.models.builder import build
from mmdet.datasets.builder import worker_init_fn
from mmdet.datasets.samplers import DistributedGroupSampler, GroupSampler, DistributedSampler
from .davar_dataset_wrappers import DavarConcatDataset
from .davar_multi_dataset import DavarMultiDataset
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
SAMPLER = Registry('sampler')
def build_sampler(cfg):
"""Build sampler
Args:
cfg(mmcv.Config): Sample cfg
Returns:
obj: sampler
"""
return build(cfg, SAMPLER)
def davar_build_dataloader(dataset,
samples_per_gpu=1,
workers_per_gpu=1,
sampler_type=None,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
**kwargs):
"""
Args:
dataset (Dataset): dataset
samples_per_gpu (int): image numbers on each gpu
workers_per_gpu (int): workers each gpu
sampler_type (optional | dict): sampler parameter
num_gpus (int): numbers of gpu
dist (boolean): whether to use distributed mode
shuffle (boolean): whether to shuffle the dataset
seed (int): seed number
        **kwargs: extra keyword arguments forwarded to DataLoader
Returns:
the training data loader
"""
rank, world_size = get_dist_info()
if sampler_type is not None:
sampler = sampler_type
else:
sampler = kwargs.pop('sampler', None)
# if choose distributed sampler
if dist:
# whether to shuffle data
if shuffle:
if sampler is None:
# Distributed Group Sampler
sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank,)
else:
sampler['dataset'] = dataset
sampler['samples_per_gpu'] = samples_per_gpu
# build distributed sampler
sampler = build_sampler(sampler)
else:
# distributed sampler
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
if shuffle:
if sampler is None:
# Group Sampler
sampler = GroupSampler(dataset, samples_per_gpu)
else:
sampler['dataset'] = dataset
sampler['samples_per_gpu'] = samples_per_gpu
# build non-distributed sampler
sampler = build_sampler(sampler)
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
# combine the training image to mini-batch tensor
init_fn = partial(worker_init_fn,
num_workers=num_workers,
rank=rank,
seed=seed) if seed is not None else None
# build data loader
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs)
return data_loader
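
# Illustrative config fragment (added sketch, hypothetical paths and type
# names) showing the '|'-separated multi-dataset form consumed below by
# parameter_align() and the DavarMultiDataset branch:
#
#   train = dict(
#       type='DavarMultiDataset',
#       batch_ratios='0.5|0.5',
#       dataset=dict(
#           ann_file='train_a.json|train_b.json',
#           img_prefix='imgs_a/|imgs_b/',
#           pipeline=[...]))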
def _concat_dataset(cfg, default_args=None):
"""
Args:
cfg (cfg): model config file
        default_args (dict, optional): default arguments forwarded to each dataset constructor
    Returns:
        ConcatDataset: the concatenation of all datasets listed in the config file
"""
# dataset information, pipeline information, batch setting information
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
data_types = cfg.get('data_type', None)
pipeline = cfg.get('pipeline', None)
batch_ratios = cfg.get('batch_ratios', None)
# update the parameter of the config
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
if isinstance(data_types, (list, tuple)):
data_cfg['data_type'] = data_types[i]
if isinstance(pipeline, (list, tuple)):
if isinstance(pipeline[0], (list, tuple)):
data_cfg['pipeline'] = pipeline[i]
if isinstance(batch_ratios, (list, tuple)):
data_cfg['batch_ratios'] = batch_ratios[i]
# build the dataset
datasets.append(davar_build_dataset(data_cfg, default_args))
return DavarConcatDataset(datasets)
def davar_build_dataset(cfg, default_args=None):
"""
Args:
cfg (cfg): model config file
        default_args (dict, optional): default arguments forwarded to each dataset constructor
    Returns:
        Dataset: the dataset built for training
"""
from mmdet.datasets.dataset_wrappers import (ConcatDataset, RepeatDataset,
ClassBalancedDataset)
from mmdet.datasets import build_dataset
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
dataset = ConcatDataset(
[build_dataset(c, default_args) for c in cfg['datasets']],
cfg.get('separate_eval', True))
elif cfg['type'] == 'DavarMultiDataset':
align_parameters = parameter_align(cfg)
dataset = DavarMultiDataset(cfg["batch_ratios"],
[davar_build_dataset(c, default_args) for c in align_parameters])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def parameter_align(cfg):
""" pipeline parameter alignment
Args:
cfg (config): model pipeline config
Returns:
"""
align_para = list()
if isinstance(cfg["batch_ratios"], (float, int)):
batch_ratios = [cfg["batch_ratios"]]
elif isinstance(cfg["batch_ratios"], (tuple, list)):
batch_ratios = cfg["batch_ratios"]
else:
batch_ratios = list(map(float, cfg["batch_ratios"].split('|')))
if isinstance(cfg["dataset"]["ann_file"], str):
cfg["dataset"]["ann_file"] = cfg["dataset"]["ann_file"].split('|')
if isinstance(cfg["dataset"]["img_prefix"], str):
cfg["dataset"]["img_prefix"] = cfg["dataset"]["img_prefix"].split('|')
dataset_num = len(batch_ratios)
for key, item in cfg["dataset"].items():
if isinstance(item, list) and isinstance(item[0], list) and len(item) < dataset_num:
for _ in range(dataset_num - len(item)):
cfg["dataset"][key].append(item)
elif isinstance(item, list) and isinstance(item[0], dict):
temp = []
for _ in range(dataset_num):
temp.append(item)
cfg["dataset"][key] = temp
elif isinstance(item, list) and len(item) == dataset_num:
continue
elif isinstance(item, (int, float)):
temp = []
for _ in range(dataset_num):
temp.append(item)
cfg["dataset"][key] = temp
elif isinstance(item, str):
temp_ = []
for _ in range(dataset_num):
temp_.append(item)
cfg["dataset"][key] = temp_
else:
raise TypeError("parameter type error")
for i in range(dataset_num):
temp_dict = dict()
for key, item in cfg["dataset"].items():
temp_dict[key] = item[i]
align_para.append(temp_dict)
return align_para
| 1.976563 | 2 |
Ex087.py | dpsaraiva/Exercicios_Python | 0 | 12770697 | #Challenge: Improve the previous exercise by showing, at the end:
#  A) The sum of all even values entered.
#  B) The sum of the values in the third column.
#  C) The largest value in the second row.
matriz = [[], [], []]
par = soma = maior = 0
for n in range(0, 3):
    for i in range(0, 3):
        num = int(input(f'Enter a value for [{n}, {i}]: '))
        if num % 2 == 0:
            par += num
        if i == 2:
            soma += num
        if n == 1 and num > maior:
            maior = num
        matriz[n].append(num)
print('=-' * 25)
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
    print('')
print('=-' * 25)
print(f'The sum of the even values is {par}')
print(f'The sum of the values in the third column is {soma}')
print(f'The largest value in the second row is {maior}')
| 3.953125 | 4 |
saticl/preproc/utils.py | edornd/multimodal-icl | 6 | 12770698 | import os
from typing import Dict, Union
import numpy as np
def lenient_makedirs(path: str) -> None:
"""Simple wrapper around makedirs that first checks for existence.
Args:
path (str): path to be created
"""
if not os.path.exists(path):
os.makedirs(path)
def tile_overlapped(image: np.ndarray,
tile_size: Union[tuple, int] = 256,
channels_first: bool = False,
tile_rows: int = None,
                    tile_cols: int = None) -> np.ndarray:
    """Split an image into a grid of fixed-size tiles, distributing any
    remainder as overlap between neighbouring tiles.
    """
if len(image.shape) == 2:
axis = 0 if channels_first else -1
image = np.expand_dims(image, axis=axis)
if channels_first:
image = np.moveaxis(image, 0, -1)
# assume height, width, channels from now on
height, width, channels = image.shape
tile_h, tile_w = tile_size if isinstance(tile_size, tuple) else (tile_size, tile_size)
if height <= tile_h and width <= tile_w:
raise ValueError("Image is smaller than the required tile size")
# number of expected tiles, manually defined or inferred
exact = [height / float(tile_h), width / float(tile_w)]
outer = [int(np.ceil(v)) for v in exact]
# the required number of tiles is given by the ceiling
tile_count_h = tile_rows or outer[0]
tile_count_w = tile_cols or outer[1]
# compute total remainder for the expanded window
remainder_h = (tile_count_h * tile_h) - height
remainder_w = (tile_count_w * tile_w) - width
# divide remainders among tiles as overlap
overlap_h = int(np.floor(remainder_h / float(tile_count_h))) if tile_count_h > 1 else 0
overlap_w = int(np.floor(remainder_w / float(tile_count_w))) if tile_count_w > 1 else 0
# create the empty tensor to contain tiles
tiles = np.empty((tile_count_h, tile_count_w, tile_h, tile_w, channels), dtype=image.dtype)
stride_h = tile_h - overlap_h
stride_w = tile_w - overlap_w
# iterate over tiles and copy content from image windows
for row in range(tile_count_h):
for col in range(tile_count_w):
# get the starting indices, accounting for initial positions
# overlap is halved to distribute in left/right and top/bottom
x = max(row * stride_h - overlap_h // 2, 0)
y = max(col * stride_w - overlap_w // 2, 0)
# if it exceeds horizontally or vertically in the last rows or cols, increase overlap to fit
if (x + tile_h) >= height:
x -= abs(x + tile_h - height)
if (y + tile_w) >= width:
y -= abs(y + tile_w - width)
# assign tile to final tensor
tiles[row, col] = image[x:x + tile_h, y:y + tile_w, :]
return tiles
def convert_mask(image: np.ndarray, lut: Dict[tuple, int]) -> np.ndarray:
"""Converts a given RGB image containing labels in channels-last format (h, w, c)
into a greyscale mask where each index indicates a given class.
:param image: RGB input image with dimensions [height, width, channels]
:type image: np.ndarray
:param lut: look-up table containing the associations color -> index
:type lut: Dict[tuple, int]
:return: greyscale image with size [height, width] containing the mapped label indices
:rtype: np.ndarray
"""
result = np.zeros(image.shape[:2])
for color, index in lut.items():
result[np.all(image == color, axis=-1)] = index
return result.astype(np.uint8)
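

# Illustrative usage (added sketch, hypothetical shapes and colors):
if __name__ == "__main__":
    img = np.zeros((500, 700, 3), dtype=np.uint8)
    tiles = tile_overlapped(img, tile_size=256)
    print(tiles.shape)  # (2, 3, 256, 256, 3): a 2x3 grid of overlapping tiles
    lut = {(255, 0, 0): 1, (0, 255, 0): 2}  # hypothetical color -> index map
    print(convert_mask(np.zeros((4, 4, 3), dtype=np.uint8), lut).shape)  # (4, 4)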
| 2.953125 | 3 |
scripts/practice/FB/LowestCommonAncestor.py | bhimeshchauhan/competitive_programming | 0 | 12770699 | <reponame>bhimeshchauhan/competitive_programming
"""
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
self.ans = None
def recurse(node):
if not node:
return False
left = recurse(node.left)
right = recurse(node.right)
mid = node == p or node == q
if mid+left+right >= 2:
self.ans = node
return mid or left or right
recurse(root)
return self.ans
    # Alternative, more concise solution; defining it with the same name
    # shadows the counting version above, so this is the one that runs.
    def lowestCommonAncestor(self, root, p, q):
if root == None or root == p or root == q:
return root
# Find p/q in left subtree
l = self.lowestCommonAncestor(root.left, p, q)
# Find p/q in right subtree
r = self.lowestCommonAncestor(root.right, p, q)
# If p and q found in left and right subtree of this node, then this node is LCA
if l and r:
return root
# Else return the node which returned a node from it's subtree such that one of it's ancestor will be LCA
        return l if l else r
| 3.625 | 4 |
Examples/python_server/openapi_server/com/h21lab/TS29573_N32_Handshake/handler/__init__.py | H21lab/5GC_build | 12 | 12770700 |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.api_ie_mapping import ApiIeMapping
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.api_signature import ApiSignature
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.callback_name import CallbackName
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.failed_modification_info import FailedModificationInfo
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.failure_reason import FailureReason
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.http_method import HttpMethod
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.ie_info import IeInfo
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.ie_location import IeLocation
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.ie_type import IeType
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.invalid_param import InvalidParam
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.ipx_provider_sec_info import IpxProviderSecInfo
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.n32f_context_info import N32fContextInfo
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.n32f_error_detail import N32fErrorDetail
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.n32f_error_info import N32fErrorInfo
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.n32f_error_type import N32fErrorType
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.problem_details import ProblemDetails
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.protection_policy import ProtectionPolicy
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.sec_negotiate_req_data import SecNegotiateReqData
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.sec_negotiate_rsp_data import SecNegotiateRspData
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.sec_param_exch_req_data import SecParamExchReqData
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.sec_param_exch_rsp_data import SecParamExchRspData
from openapi_server.com.h21lab.TS29573_N32_Handshake.handler.security_capability import SecurityCapability
| 1.445313 | 1 |
app/recipe/tests/test_recipe_api.py | pprasha2/python-api | 0 | 12770701 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
"""return recipe url"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""create a sample tag and return"""
return Tag.objects.create(
user=user,
name=name
)
def sample_ingredient(user, name='Cinnamon'):
"""create and return sample ingredient and return"""
return Ingredient.objects.create(
user=user,
name=name
)
def sample_recipe(user, **param):
"""create and return a sample recipe"""
defaults = {
'title': 'sample recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(param)
return Recipe.objects.create(
user=user,
**defaults
)
class PublicRecipeApiTests(TestCase):
"""test unauthenticated rest API"""
def setUp(self) -> None:
self.client = APIClient()
def test_auth_required(self):
"""test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""test recipe can be retrieved from authenticated user"""
def setUp(self) -> None:
self.client = APIClient()
self.user = get_user_model().objects.create_user(
email="<EMAIL>",
password="<PASSWORD>"
)
self.client.force_authenticate(user=self.user)
    def test_retrieve_recipe(self):
        """test user is able to retrieve recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""test that recipes are limited to authenticated user"""
user2 = get_user_model().objects.create_user(
email="<EMAIL>",
password='<PASSWORD>'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""test viewing recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""test creating recipe"""
payload = {
'title': 'chocolate cake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(
id=res.data['id']
)
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""test adding a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
            'title': 'Avocado lime cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 20,
'price': 10.25
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(
id=res.data['id']
)
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""test adding a recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Prawns')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Thai prawns curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 10
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(
id=res.data['id']
)
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""test updating a recipe patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='curry')
payload = {
'title': 'Paneer tikka',
'tags': [new_tag.id]
}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
"""test updating a recipe full"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'chai',
'time_minutes': 25,
'price': 15
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
| 2.578125 | 3 |
dataloader.py | Haiyang-W/DTI-GRAPH | 6 | 12770702 | import json
import torch
import numpy as np
import random
import torch.nn.functional as F
import functools
def cmp_time(a, b):
a_num = int(a.split('_')[1])
b_num = int(b.split('_')[1])
return a_num - b_num
def pad_tensor(vec, pad):
"""
pad tensor to fixed length
:parameter
vec: tensor to pad
pad: the size to pad to
:return
a new tensor padded to 'pad'
"""
    # keep the hardcoded 20-feature width, but match the input dtype
    padded = torch.cat([vec, torch.zeros((pad - len(vec), 20), dtype=vec.dtype)], dim=0).data.numpy()
return padded
def padding_all(vec, max_len):
"""
vec: [n, len, feat]
"""
n = vec.shape[0]
vec_len = vec.shape[1]
    # match the input dtype so torch.cat does not fail on a float/double mismatch
    padded = torch.cat([vec, torch.zeros((n, max_len - vec_len, 20), dtype=vec.dtype)], dim=1).data
return padded
def load_info_data(path):
ori_data = np.load(path)
protein_tensor = torch.tensor(ori_data['pssm_arr'], dtype =torch.float) # [n_p ,220]
drug_tensor = torch.tensor(ori_data['drug_arr'], dtype =torch.float) # [n_d, 881]
protein_num = protein_tensor.shape[0]
drug_num = drug_tensor.shape[0]
node_num = protein_num + drug_num
return protein_tensor, drug_tensor, node_num, protein_num
def load_pre_process(preprocess_path):
with open(preprocess_path, 'r') as f:
a = json.load(f)
adj = torch.FloatTensor(a['adj'])
dti_inter_mat = torch.FloatTensor(a['dti_inter_mat'])
train_interact_pos = torch.tensor(a['train_interact_pos'])
val_interact_pos = torch.tensor(a['val_interact_pos'])
return adj, dti_inter_mat, train_interact_pos, val_interact_pos
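# --- usage sketch (illustrative, not part of the original module) ----------
# A hedged demo of the padding helpers; the shapes mimic 20-dim PSSM features
# and the commented file paths are assumptions, not files in this repository.
if __name__ == "__main__":
    seq = torch.rand(5, 20)                      # a sequence of length 5
    print(pad_tensor(seq, pad=8).shape)          # (8, 20) numpy array
    batch = torch.rand(3, 5, 20)
    print(padding_all(batch, max_len=8).shape)   # torch.Size([3, 8, 20])
    # protein_t, drug_t, n_nodes, n_prot = load_info_data("data/info.npz")
    # adj, dti, train_pos, val_pos = load_pre_process("data/preprocess.json")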
| 2.3125 | 2 |
backend/api/tests/routes/test_auth.py | damo-da/bars-and-grills | 45 | 12770703 | # Test cases for /login and /signup
import unittest
from rest_framework.test import APIClient
from api.models import User, Group
class LoginTestCase(unittest.TestCase):
def setUp(self) -> None:
self.username = 'testuser'
self.password = '<PASSWORD>'
self.client = APIClient()
self.response = None
self.user = None
def create_user(self):
self.user, _ = User.objects.get_or_create(username=self.username)
self.user.set_password(self.password)
self.user.save()
def test_login(self):
"""Users should be able to login without authentication header."""
self.create_user()
self.response = self.client.post('/login', {
'username': self.username,
            'password': self.password,
})
self.assertEqual(self.response.status_code, 200)
def test_signup(self, do_assert=True):
"""Anyone should be able to create new user."""
self.create_user()
self.user.delete()
self.response = self.client.post('/signup', {
'username': self.username,
            'password': self.password,
})
if do_assert:
self.assertEqual(self.response.status_code, 201)
def test_short_password(self):
"""Password length must be greater than or equal to 6."""
self.password = "<PASSWORD>"
self.test_signup(do_assert=False)
self.assertEqual(self.response.status_code, 400)
def test_signup_group(self):
"""Signed up user must be in the Regular group."""
self.test_signup()
self.assertEqual(self.response.status_code, 201)
self.user = User.objects.get(username=self.username)
self.assertTrue(self.user.groups.filter(name='Regular').exists())
if __name__ == '__main__':
unittest.main()
| 3.25 | 3 |
UnityEngine/CameraType/__init__.py | Grim-es/udon-pie-auto-completion | 0 | 12770704 |
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class CameraType:
def __new__(cls, arg1=None):
'''
:returns: CameraType
:rtype: UnityEngine.CameraType
'''
pass
| 1.757813 | 2 |
ipycode/A00-fileSplit.py | wangzhongtian/ConTractMngToolSrc | 0 | 12770705 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import fileSplit
fileSplit.合并()  # 合并 = "merge"
| 1.367188 | 1 |
conans/client/conan_api.py | zomeck/conan | 0 | 12770706 | import hashlib
import os
import sys
from collections import defaultdict, OrderedDict
import requests
import conans
from conans import __version__ as CLIENT_VERSION, tools
from conans.client.client_cache import ClientCache
from conans.client.conf import MIN_SERVER_COMPATIBLE_VERSION, ConanClientConfigParser
from conans.client.detect import detect_defaults_settings
from conans.client.manager import ConanManager
from conans.client.migrations import ClientMigrator
from conans.client.output import ConanOutput, ScopedOutput
from conans.client.profile_loader import read_profile, get_profile_path
from conans.client.remote_manager import RemoteManager
from conans.client.remote_registry import RemoteRegistry
from conans.client.rest.auth_manager import ConanApiAuthManager
from conans.client.rest.rest_client import RestApiClient
from conans.client.rest.version_checker import VersionCheckerRequester
from conans.client.runner import ConanRunner
from conans.client.store.localdb import LocalDB
from conans.client.userio import UserIO
from conans.errors import ConanException
from conans.model.env_info import EnvValues
from conans.model.options import OptionsValues
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference, is_a_reference
from conans.model.scope import Scopes
from conans.model.version import Version
from conans.paths import CONANFILE, get_conan_user_home
from conans.search.search import DiskSearchManager, DiskSearchAdapter
from conans.util.env_reader import get_env
from conans.util.files import rmdir, save_files, exception_message_safe, save
from conans.util.log import configure_logger
from conans.util.tracer import log_command, log_exception
from conans.client.loader_parse import load_conanfile_class
from conans.client import settings_preprocessor
default_manifest_folder = '.conan_manifests'
def get_basic_requester(client_cache):
requester = requests.Session()
requester.proxies = client_cache.conan_config.proxies
return requester
def api_method(f):
def wrapper(*args, **kwargs):
the_self = args[0]
try:
log_command(f.__name__, kwargs)
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
return f(*args, **kwargs)
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except:
pass
raise
return wrapper
def prepare_cwd(cwd):
if cwd:
if os.path.isabs(cwd):
return cwd
else:
return os.path.abspath(cwd)
else:
return os.getcwd()
class ConanAPIV1(object):
@staticmethod
def factory():
"""Factory"""
def instance_remote_manager(client_cache):
requester = get_basic_requester(client_cache)
# Verify client version against remotes
version_checker_requester = VersionCheckerRequester(requester, Version(CLIENT_VERSION),
Version(MIN_SERVER_COMPATIBLE_VERSION),
out)
# To handle remote connections
put_headers = client_cache.read_put_headers()
rest_api_client = RestApiClient(out, requester=version_checker_requester, put_headers=put_headers)
# To store user and token
localdb = LocalDB(client_cache.localdb)
# Wraps RestApiClient to add authentication support (same interface)
auth_manager = ConanApiAuthManager(rest_api_client, user_io, localdb)
# Handle remote connections
remote_manager = RemoteManager(client_cache, auth_manager, out)
return remote_manager
use_color = get_env("CONAN_COLOR_DISPLAY", 1)
if use_color and hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
import colorama
colorama.init()
color = True
else:
color = False
out = ConanOutput(sys.stdout, color)
user_io = UserIO(out=out)
try:
client_cache = migrate_and_get_client_cache(get_conan_user_home(), out)
except Exception as e:
out.error(str(e))
raise
with tools.environment_append(client_cache.conan_config.env_vars):
            # Adjust CONAN_LOGGING_LEVEL with the env value read
conans.util.log.logger = configure_logger()
# Get the new command instance after migrations have been done
remote_manager = instance_remote_manager(client_cache)
# Get a search manager
search_adapter = DiskSearchAdapter()
search_manager = DiskSearchManager(client_cache, search_adapter)
# Settings preprocessor
conan = Conan(client_cache, user_io, get_conan_runner(), remote_manager, search_manager,
settings_preprocessor)
return conan
def __init__(self, client_cache, user_io, runner, remote_manager, search_manager,
settings_preprocessor):
assert isinstance(user_io, UserIO)
assert isinstance(client_cache, ClientCache)
self._client_cache = client_cache
self._user_io = user_io
self._runner = runner
self._manager = ConanManager(client_cache, user_io, runner, remote_manager, search_manager,
settings_preprocessor)
# Patch the tools module with a good requester and user_io
tools._global_requester = get_basic_requester(self._client_cache)
tools._global_output = self._user_io.out
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False, cwd=None,
visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None, osx_clang_versions=None,
shared=None, upload_url=None, gitignore=None, gitlab_gcc_versions=None, gitlab_clang_versions=None):
from conans.client.new import get_files
cwd = prepare_cwd(cwd)
files = get_files(name, header=header, pure_c=pure_c, test=test,
exports_sources=exports_sources, bare=bare,
visual_versions=visual_versions,
linux_gcc_versions=linux_gcc_versions,
linux_clang_versions=linux_clang_versions,
osx_clang_versions=osx_clang_versions, shared=shared,
upload_url=upload_url, gitignore=gitignore,
gitlab_gcc_versions=gitlab_gcc_versions,
gitlab_clang_versions=gitlab_clang_versions)
save_files(cwd, files)
for f in sorted(files):
self._user_io.out.success("File saved: %s" % f)
@api_method
def test_package(self, profile_name=None, settings=None, options=None, env=None,
scope=None, test_folder=None, not_export=False, build=None, keep_source=False,
verify=default_manifest_folder, manifests=default_manifest_folder,
manifests_interactive=default_manifest_folder,
remote=None, update=False, cwd=None, user=None, channel=None, name=None,
version=None):
settings = settings or []
options = options or []
env = env or []
cwd = prepare_cwd(cwd)
if name and version:
package_name = name
package_version = version
else:
conanfile_path = os.path.join(cwd, "conanfile.py")
conanfile = load_conanfile_class(conanfile_path)
package_name = getattr(conanfile, "name", None)
package_version = getattr(conanfile, "version", None)
if not package_name or not package_version:
raise ConanException("conanfile.py doesn't declare package name or version")
test_folders = [test_folder] if test_folder else ["test_package", "test"]
for test_folder_name in test_folders:
test_folder = os.path.join(cwd, test_folder_name)
test_conanfile_path = os.path.join(test_folder, "conanfile.py")
if os.path.exists(test_conanfile_path):
break
else:
raise ConanException("test folder '%s' not available, "
"or it doesn't have a conanfile.py" % test_folder_name)
sha = hashlib.sha1("".join(options + settings).encode()).hexdigest()
build_folder = os.path.join(test_folder, "build", sha)
rmdir(build_folder)
# shutil.copytree(test_folder, build_folder)
profile = profile_from_args(profile_name, settings, options, env, scope, cwd,
self._client_cache.profiles_path)
loader = self._manager.get_loader(profile)
test_conanfile = loader.load_conan(test_conanfile_path, self._user_io.out, consumer=True)
try:
if hasattr(test_conanfile, "requirements"):
test_conanfile.requirements()
except Exception as e:
raise ConanException("Error in test_package/conanfile.py requirements(). %s" % str(e))
requirement = test_conanfile.requires.get(package_name)
if requirement:
if requirement.conan_reference.version != package_version:
raise ConanException("package version is '%s', but test_package/conanfile "
"is requiring version '%s'\n"
"You can remove this requirement and use "
"'conan test_package user/channel' instead"
% (package_version, requirement.conan_reference.version))
user = user or requirement.conan_reference.user
channel = channel or requirement.conan_reference.channel
if not user or not channel:
raise ConanException("Please specify user and channel")
conanfile_reference = ConanFileReference(package_name, package_version, user, channel)
# Forcing an export!
if not not_export:
self._user_io.out.info("Exporting package recipe")
self._manager.export(user, channel, cwd, keep_source=keep_source)
if build is None: # Not specified, force build the tested library
build = [package_name]
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
self._manager.install(inject_require=conanfile_reference,
reference=test_folder,
current_path=build_folder,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
build_modes=build,
update=update,
generators=["txt"]
)
test_conanfile = os.path.join(test_folder, CONANFILE)
self._manager.build(test_conanfile, test_folder, build_folder, package_folder=None,
test=str(conanfile_reference))
@api_method
def create(self, profile_name=None, settings=None,
options=None, env=None, scope=None, test_folder=None, not_export=False, build=None,
keep_source=False, verify=default_manifest_folder,
manifests=default_manifest_folder, manifests_interactive=default_manifest_folder,
remote=None, update=False, cwd=None,
user=None, channel=None, name=None, version=None):
settings = settings or []
options = options or []
env = env or []
cwd = prepare_cwd(cwd)
if not name or not version:
conanfile_path = os.path.join(cwd, "conanfile.py")
conanfile = load_conanfile_class(conanfile_path)
name, version = conanfile.name, conanfile.version
if not name or not version:
raise ConanException("conanfile.py doesn't declare package name or version")
reference = ConanFileReference(name, version, user, channel)
scoped_output = ScopedOutput(str(reference), self._user_io.out)
# Forcing an export!
if not not_export:
scoped_output.highlight("Exporting package recipe")
self._manager.export(user, channel, cwd, keep_source=keep_source, name=name,
version=version)
if build is None: # Not specified, force build the tested library
build = [name]
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, scope,
cwd, self._client_cache.profiles_path)
self._manager.install(reference=reference,
current_path=cwd,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
build_modes=build,
update=update
)
test_folders = [test_folder] if test_folder else ["test_package", "test"]
for test_folder_name in test_folders:
test_folder = os.path.join(cwd, test_folder_name)
test_conanfile_path = os.path.join(test_folder, "conanfile.py")
if os.path.exists(test_conanfile_path):
break
else:
self._user_io.out.warn("test package folder not available, or it doesn't have "
"a conanfile.py\nIt is recommended to set a 'test_package' "
"while creating packages")
return
scoped_output.highlight("Testing with 'test_package'")
sha = hashlib.sha1("".join(options + settings).encode()).hexdigest()
build_folder = os.path.join(test_folder, "build", sha)
rmdir(build_folder)
test_conanfile = os.path.join(test_folder, CONANFILE)
self._manager.install(inject_require=reference,
reference=test_folder,
current_path=build_folder,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
update=update,
generators=["txt"]
)
self._manager.build(test_conanfile, test_folder, build_folder, package_folder=None,
test=str(reference))
@api_method
def package_files(self, reference, source_folder=None, build_folder=None, package_folder=None,
profile_name=None, force=False, settings=None, options=None, cwd=None):
cwd = prepare_cwd(cwd)
reference = ConanFileReference.loads(reference)
profile = profile_from_args(profile_name, settings, options, env=None, scope=None, cwd=cwd,
default_folder=self._client_cache.profiles_path)
package_folder = package_folder or cwd
if not source_folder and build_folder:
source_folder = build_folder
if not os.path.isabs(package_folder):
package_folder = os.path.join(cwd, package_folder)
if source_folder and not os.path.isabs(source_folder):
source_folder = os.path.normpath(os.path.join(cwd, source_folder))
if build_folder and not os.path.isabs(build_folder):
build_folder = os.path.normpath(os.path.join(cwd, build_folder))
self._manager.package_files(reference=reference, source_folder=source_folder,
build_folder=build_folder, package_folder=package_folder,
profile=profile, force=force)
@api_method
def install(self, reference="", package=None, settings=None, options=None, env=None, scope=None, all=False,
remote=None, werror=False, verify=default_manifest_folder, manifests=default_manifest_folder,
manifests_interactive=default_manifest_folder, build=None, profile_name=None,
update=False, generator=None, no_imports=False, filename=None, cwd=None):
self._user_io.out.werror_active = werror
cwd = prepare_cwd(cwd)
try:
ref = ConanFileReference.loads(reference)
except:
ref = os.path.normpath(os.path.join(cwd, reference))
if all or package: # Install packages without settings (fixed ids or all)
if all:
package = []
if not reference or not isinstance(ref, ConanFileReference):
raise ConanException("Invalid package recipe reference. "
"e.g., MyPackage/1.2@user/channel")
self._manager.download(ref, package, remote=remote)
else: # Classic install, package chosen with settings and options
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, scope, cwd,
self._client_cache.profiles_path)
self._manager.install(reference=ref,
current_path=cwd,
remote=remote,
profile=profile,
build_modes=build,
filename=filename,
update=update,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generator,
no_imports=no_imports)
@api_method
def config_get(self, item):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
self._user_io.out.info(config_parser.get_item(item))
return config_parser.get_item(item)
@api_method
def config_set(self, item, value):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
config_parser.set_item(item, value)
@api_method
def config_rm(self, item):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
config_parser.rm_item(item)
@api_method
def info_build_order(self, reference, settings=None, options=None, env=None, scope=None, profile_name=None,
filename=None, remote=None, build_order=None, check_updates=None, cwd=None):
current_path = prepare_cwd(cwd)
try:
reference = ConanFileReference.loads(reference)
except:
reference = os.path.normpath(os.path.join(current_path, reference))
profile = profile_from_args(profile_name, settings, options, env, scope, cwd, self._client_cache.profiles_path)
graph = self._manager.info_build_order(reference, profile, filename, build_order, remote, check_updates, cwd=cwd)
return graph
@api_method
def info_nodes_to_build(self, reference, build_modes, settings=None, options=None, env=None, scope=None,
profile_name=None, filename=None, remote=None, check_updates=None, cwd=None):
current_path = prepare_cwd(cwd)
try:
reference = ConanFileReference.loads(reference)
except:
reference = os.path.normpath(os.path.join(current_path, reference))
profile = profile_from_args(profile_name, settings, options, env, scope, cwd, self._client_cache.profiles_path)
ret = self._manager.info_nodes_to_build(reference, profile, filename, build_modes, remote, check_updates, cwd)
ref_list, project_reference = ret
return ref_list, project_reference
@api_method
def info_get_graph(self, reference, remote=None, settings=None, options=None, env=None, scope=None,
profile_name=None, update=False, filename=None, cwd=None):
current_path = prepare_cwd(cwd)
try:
reference = ConanFileReference.loads(reference)
except:
reference = os.path.normpath(os.path.join(current_path, reference))
profile = profile_from_args(profile_name, settings, options, env, scope, current_path,
self._client_cache.profiles_path)
ret = self._manager.info_get_graph(reference=reference, current_path=current_path, remote=remote,
profile=profile, check_updates=update, filename=filename)
deps_graph, graph_updates_info, project_reference = ret
return deps_graph, graph_updates_info, project_reference
@api_method
def build(self, path="", source_folder=None, package_folder=None, filename=None, cwd=None):
current_path = prepare_cwd(cwd)
if path:
root_path = os.path.abspath(path)
else:
root_path = current_path
build_folder = current_path
source_folder = source_folder or root_path
if not os.path.isabs(source_folder):
source_folder = os.path.normpath(os.path.join(current_path, source_folder))
if package_folder and not os.path.isabs(package_folder):
package_folder = os.path.normpath(os.path.join(current_path, package_folder))
if filename and filename.endswith(".txt"):
raise ConanException("A conanfile.py is needed to call 'conan build'")
conanfile_path = os.path.join(root_path, filename or CONANFILE)
self._manager.build(conanfile_path, source_folder, build_folder, package_folder)
@api_method
def package(self, reference="", package_id=None, build_folder=None, source_folder=None,
cwd=None):
try:
ref = ConanFileReference.loads(reference)
except:
if "@" in reference:
raise
ref = None
if ref: # cache packaging
# TODO: other args are unused. Either raise, or split API in two methods
self._manager.package(ref, package_id)
else: # local packaging
current_path = prepare_cwd(cwd)
recipe_folder = reference
if not os.path.isabs(recipe_folder):
recipe_folder = os.path.join(current_path, recipe_folder)
recipe_folder = os.path.normpath(recipe_folder)
build_folder = build_folder or recipe_folder
if not os.path.isabs(build_folder):
build_folder = os.path.join(current_path, build_folder)
build_folder = os.path.normpath(build_folder)
package_folder = current_path
source_folder = source_folder or recipe_folder
self._manager.local_package(package_folder, recipe_folder, build_folder, source_folder)
@api_method
def source(self, reference, force=False, cwd=None):
cwd = prepare_cwd(cwd)
current_path, reference = _get_reference(reference, cwd)
self._manager.source(current_path, reference, force)
@api_method
def imports(self, reference, undo=False, dest=None, filename=None, cwd=None):
cwd = prepare_cwd(cwd)
if undo:
if not os.path.isabs(reference):
current_path = os.path.normpath(os.path.join(cwd, reference))
else:
current_path = reference
self._manager.imports_undo(current_path)
else:
cwd = prepare_cwd(cwd)
current_path, reference = _get_reference(reference, cwd)
self._manager.imports(current_path, reference, filename, dest)
@api_method
def export(self, user, channel, path=None, keep_source=False, filename=None, cwd=None,
name=None, version=None):
cwd = prepare_cwd(cwd)
current_path = os.path.abspath(path or cwd)
self._manager.export(user, channel, current_path, keep_source, filename=filename, name=name,
version=version)
@api_method
def remove(self, pattern, query=None, packages=None, builds=None, src=False, force=False,
remote=None, outdated=False):
self._manager.remove(pattern, package_ids_filter=packages, build_ids=builds,
src=src, force=force, remote=remote, packages_query=query,
outdated=outdated)
@api_method
def copy(self, reference="", user_channel="", force=False, all=False, package=None):
reference = ConanFileReference.loads(reference)
new_ref = ConanFileReference.loads("%s/%s@%s" % (reference.name,
reference.version,
user_channel))
if all:
package = []
self._manager.copy(reference, package, new_ref.user, new_ref.channel, force)
@api_method
    def user(self, name=None, clean=False, remote=None, password=None):
if clean:
localdb = LocalDB(self._client_cache.localdb)
localdb.init(clean=True)
self._user_io.out.success("Deleted user data")
return
self._manager.user(remote, name, password)
@api_method
def search_recipes(self, pattern, remote=None, case_sensitive=False):
refs = self._manager.search_recipes(pattern, remote, ignorecase=not case_sensitive)
return refs
@api_method
def search_packages(self, reference, query=None, remote=None, outdated=False):
ret = self._manager.search_packages(reference, remote, packages_query=query,
outdated=outdated)
return ret
@api_method
def upload(self, pattern, package=None, remote=None, all=False, force=False, confirm=False,
retry=2, retry_wait=5, skip_upload=False, integrity_check=False):
""" Uploads a package recipe and the generated binary packages to a specified remote
"""
if package and not is_a_reference(pattern):
raise ConanException("-p parameter only allowed with a valid recipe reference, "
"not with a pattern")
self._manager.upload(pattern, package, remote, all_packages=all, force=force,
confirm=confirm, retry=retry, retry_wait=retry_wait,
skip_upload=skip_upload, integrity_check=integrity_check)
@api_method
def remote_list(self):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.remotes
@api_method
def remote_add(self, remote, url, verify_ssl=True, insert=None):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.add(remote, url, verify_ssl, insert)
@api_method
def remote_remove(self, remote):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.remove(remote)
@api_method
def remote_update(self, remote, url, verify_ssl=True, insert=None):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.update(remote, url, verify_ssl, insert)
@api_method
def remote_list_ref(self):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.refs
@api_method
def remote_add_ref(self, reference, remote):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.add_ref(reference, remote)
@api_method
def remote_remove_ref(self, reference):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.remove_ref(reference)
@api_method
def remote_update_ref(self, reference, remote):
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return registry.update_ref(reference, remote)
@api_method
def profile_list(self):
folder = self._client_cache.profiles_path
if os.path.exists(folder):
            return [name for name in os.listdir(folder)
                    if not os.path.isdir(os.path.join(folder, name))]
else:
self._user_io.out.info("No profiles defined")
return []
@api_method
def create_profile(self, profile_name, detect=False):
profile_path = get_profile_path(profile_name, self._client_cache.profiles_path, os.getcwd())
if os.path.exists(profile_path):
raise ConanException("Profile already exists")
profile = Profile()
if detect:
settings = detect_defaults_settings(self._user_io.out)
for name, value in settings:
profile.settings[name] = value
contents = profile.dumps()
save(profile_path, contents)
self._user_io.out.info("Empty profile created: %s" % profile_path)
return profile_path
@staticmethod
def _get_profile_keys(key):
# settings.compiler.version => settings, compiler.version
tmp = key.split(".")
first_key = tmp[0]
rest_key = ".".join(tmp[1:]) if len(tmp) > 1 else None
if first_key not in ("build_requires", "settings", "options", "scopes", "env"):
raise ConanException("Invalid specified key: %s" % key)
return first_key, rest_key
@api_method
def update_profile(self, profile_name, key, value):
first_key, rest_key = self._get_profile_keys(key)
profile, _ = read_profile(profile_name, os.getcwd(), self._client_cache.profiles_path)
if first_key == "settings":
profile.settings[rest_key] = value
elif first_key == "options":
tmp = OptionsValues([(rest_key, value)])
profile.options.update(tmp)
elif first_key == "env":
profile.env_values.update(EnvValues.loads("%s=%s" % (rest_key, value)))
elif first_key == "scopes":
profile.update_scopes(Scopes.from_list(["%s=%s" % (rest_key, value)]))
elif first_key == "build_requires":
raise ConanException("Edit the profile manually to change the build_requires")
contents = profile.dumps()
profile_path = get_profile_path(profile_name, self._client_cache.profiles_path, os.getcwd())
save(profile_path, contents)
@api_method
def delete_profile_key(self, profile_name, key):
first_key, rest_key = self._get_profile_keys(key)
profile, _ = read_profile(profile_name, os.getcwd(), self._client_cache.profiles_path)
# For options, scopes, env vars
try:
package, name = rest_key.split(":")
except ValueError:
package = None
name = rest_key
try:
if first_key == "settings":
del profile.settings[rest_key]
elif first_key == "options":
profile.options.remove(name, package)
elif first_key == "env":
profile.env_values.remove(name, package)
elif first_key == "scopes":
profile.scopes.remove(name, package)
elif first_key == "build_requires":
raise ConanException("Edit the profile manually to delete a build_require")
except KeyError:
raise ConanException("Profile key '%s' doesn't exist" % key)
contents = profile.dumps()
profile_path = get_profile_path(profile_name, self._client_cache.profiles_path, os.getcwd())
save(profile_path, contents)
@api_method
def read_profile(self, profile=None):
p, _ = read_profile(profile, os.getcwd(), self._client_cache.profiles_path)
return p
@api_method
def get_path(self, reference, package_id=None, path=None, remote=None):
reference = ConanFileReference.loads(str(reference))
return self._manager.get_path(reference, package_id, path, remote)
@api_method
def export_alias(self, reference, target_reference):
reference = ConanFileReference.loads(str(reference))
target_reference = ConanFileReference.loads(str(target_reference))
return self._manager.export_alias(reference, target_reference)
Conan = ConanAPIV1
def _check_query_parameter_and_get_reference(query, pattern):
reference = None
if pattern:
try:
reference = ConanFileReference.loads(pattern)
except ConanException:
if query is not None:
raise ConanException("-q parameter only allowed with a valid recipe "
"reference as search pattern. e.j conan search "
"MyPackage/1.2@user/channel -q \"os=Windows\"")
return reference
def _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd):
if manifests and manifests_interactive:
raise ConanException("Do not specify both manifests and "
"manifests-interactive arguments")
if verify and (manifests or manifests_interactive):
raise ConanException("Do not specify both 'verify' and "
"'manifests' or 'manifests-interactive' arguments")
manifest_folder = verify or manifests or manifests_interactive
if manifest_folder:
if not os.path.isabs(manifest_folder):
manifest_folder = os.path.join(cwd, manifest_folder)
manifest_verify = verify is not None
manifest_interactive = manifests_interactive is not None
else:
manifest_verify = manifest_interactive = False
return manifest_folder, manifest_interactive, manifest_verify
def get_conan_runner():
print_commands_to_output = get_env("CONAN_PRINT_RUN_COMMANDS", False)
generate_run_log_file = get_env("CONAN_LOG_RUN_TO_FILE", False)
log_run_to_output = get_env("CONAN_LOG_RUN_TO_OUTPUT", True)
runner = ConanRunner(print_commands_to_output, generate_run_log_file, log_run_to_output)
return runner
def _get_reference(ref, cwd=None):
try:
reference = ConanFileReference.loads(ref)
except:
if "@" in ref:
raise
if not os.path.isabs(ref):
reference = os.path.normpath(os.path.join(cwd, ref))
else:
reference = ref
return cwd, reference
def migrate_and_get_client_cache(base_folder, out, storage_folder=None):
# Init paths
client_cache = ClientCache(base_folder, storage_folder, out)
# Migration system
migrator = ClientMigrator(client_cache, Version(CLIENT_VERSION), out)
migrator.migrate()
return client_cache
# Profile helpers
def profile_from_args(profile, settings, options, env, scope, cwd, default_folder):
""" Return a Profile object, as the result of merging a potentially existing Profile
file and the args command-line arguments
"""
file_profile, _ = read_profile(profile, cwd, default_folder)
args_profile = _profile_parse_args(settings, options, env, scope)
if file_profile:
file_profile.update(args_profile)
return file_profile
else:
return args_profile
def _profile_parse_args(settings, options, envs, scopes):
""" return a Profile object result of parsing raw data
"""
def _get_tuples_list_from_extender_arg(items):
if not items:
return []
# Validate the pairs
for item in items:
chunks = item.split("=", 1)
if len(chunks) != 2:
raise ConanException("Invalid input '%s', use 'name=value'" % item)
return [(item[0], item[1]) for item in [item.split("=", 1) for item in items]]
def _get_simple_and_package_tuples(items):
"""Parse items like "thing:item=value or item2=value2 and returns a tuple list for
the simple items (name, value) and a dict for the package items
{package: [(item, value)...)], ...}
"""
simple_items = []
package_items = defaultdict(list)
tuples = _get_tuples_list_from_extender_arg(items)
for name, value in tuples:
if ":" in name: # Scoped items
tmp = name.split(":", 1)
ref_name = tmp[0]
name = tmp[1]
package_items[ref_name].append((name, value))
else:
simple_items.append((name, value))
return simple_items, package_items
def _get_env_values(env, package_env):
env_values = EnvValues()
for name, value in env:
env_values.add(name, EnvValues.load_value(value))
for package, data in package_env.items():
for name, value in data:
env_values.add(name, EnvValues.load_value(value), package)
return env_values
result = Profile()
options = _get_tuples_list_from_extender_arg(options)
result.options = OptionsValues(options)
env, package_env = _get_simple_and_package_tuples(envs)
env_values = _get_env_values(env, package_env)
result.env_values = env_values
settings, package_settings = _get_simple_and_package_tuples(settings)
result.settings = OrderedDict(settings)
for pkg, values in package_settings.items():
result.package_settings[pkg] = OrderedDict(values)
result.scopes = Scopes.from_list(scopes) if scopes else Scopes()
return result
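# --- usage sketch (illustrative, not part of the original module) ----------
# A hedged example of driving this API programmatically; the search pattern
# is made up, and factory() needs a writable Conan home to initialize.
#
#     from conans.client.conan_api import Conan
#
#     conan = Conan.factory()                    # cache, IO and managers wired up
#     for ref in conan.search_recipes("zlib*"):  # query recipes in the local cache
#         print(ref)
#     print(conan.remote_list())                 # registry of configured remotes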
| 1.523438 | 2 |
flixbus/flixbus/models.py | kamotos/flixbus-homework | 0 | 12770707 | from django.db import models
class Segment(models.Model):
from_stop = models.IntegerField()
to_stop = models.IntegerField()
distance = models.DecimalField(max_digits=6, decimal_places=2)
class Route(models.Model):
segments = models.ManyToManyField(Segment, through='RouteSegment')
class RouteSegment(models.Model):
    # on_delete is required from Django 2.0 onwards; CASCADE assumed here
    route = models.ForeignKey(Route, on_delete=models.CASCADE)
    segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
sequence = models.PositiveSmallIntegerField(db_index=True)
class Meta:
ordering = ('route', 'sequence', )
class Ride(models.Model):
from_stop = models.IntegerField()
to_stop = models.IntegerField()
    route = models.ForeignKey(Route, on_delete=models.CASCADE)
class Ticket(models.Model):
    ride = models.ForeignKey(Ride, on_delete=models.CASCADE)
from_stop = models.IntegerField()
to_stop = models.IntegerField()
date = models.DateField()
description = models.TextField()
    transaction_hash = models.CharField(max_length=64)
price = models.DecimalField(max_digits=5, decimal_places=2)
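# --- usage sketch (illustrative, not part of the original module) ----------
# A hedged example of composing a Route from ordered Segments via the
# RouteSegment through-model; stop ids and distances are made up.
#
#     s1 = Segment.objects.create(from_stop=1, to_stop=2, distance="12.50")
#     s2 = Segment.objects.create(from_stop=2, to_stop=3, distance="8.75")
#     route = Route.objects.create()
#     RouteSegment.objects.create(route=route, segment=s1, sequence=0)
#     RouteSegment.objects.create(route=route, segment=s2, sequence=1)
#     ride = Ride.objects.create(from_stop=1, to_stop=3, route=route)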
| 2.109375 | 2 |
hw1/test/tree/__init__.py | LePotatoChef/CS583-Machine-Learning | 1 | 12770708 | from .dtnode import *
from .dtree import *
| 0.960938 | 1 |
core/objs/xml_modelo_106.py | aanacleto/erp- | 0 | 12770709 |
# !/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = '<NAME>'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "<NAME>"
__status__ = "Development"
__model_name__ = 'xml_modelo_106.XMLModelo106'
import auth, base_models
from orm import *
from form import *
import erp_config
try:
from my_area_fiscal import AreaFiscal
except:
from area_fiscal import AreaFiscal
from xml_anexo_cliente_m106 import XMLAnexoClienteM106
from xml_linha_anexo_cliente_m106 import XMLLinhaAnexoClienteM106
from xml_anexo_fornecedor_m106 import XMLAnexoFornecedorM106
from xml_linha_anexo_fornecedor_m106 import XMLLinhaAnexoFornecedorM106
try:
from my_factura_cli import FacturaCliente
except:
from factura_cli import FacturaCliente
try:
from my_factura_forn import FacturaFornecedor
except:
from factura_forn import FacturaFornecedor
try:
from my_terceiro import Terceiro
except:
from terceiro import Terceiro
try:
from my_codigo_pais import CodigoPais
except:
from codigo_pais import CodigoPais
try:
from my_linha_factura_cli import LinhaFacturaCliente
except:
from linha_factura_cli import LinhaFacturaCliente
try:
from my_linha_factura_forn import LinhaFacturaFornecedor
except:
from linha_factura_forn import LinhaFacturaFornecedor
from xml_anexo_reg_fornecedor_m106 import XMLAnexoRegFornecedorM106
from xml_linha_anexo_reg_fornecedor_m106 import LinhaAnexoRegFornecedor
from xml_anexo_reg_cliente_m106 import XMLAnexoRegClienteM106
from xml_linha_anexo_reg_cliente_m106 import XMLLinhaAnexoRegClienteM106
class XMLModelo106(Model, View):
def __init__(self, **kargs):
        # TODO: later filter here between dates and only for one journal or period, etc.
Model.__init__(self, **kargs)
self.__name__ = 'xml_modelo_106'
self.__title__ = 'Modelo 106'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__get_options__ = ['nome']
self.__workflow__ = (
'estado', {
'Rascunho':['Verificar'],
'Verificado':['Rascunho', 'Confirmar'],
'Confirmado':['Gerar XML Mod106','Exportar_Anexo_Cliente', 'Exportar_xml_anexo_fornecedor_m106'],
'Gerado':['Exportar_XML_M106','Exportar_Anexo_Cliente', 'Exportar_xml_anexo_fornecedor_m106']}
)
self.__workflow_auth__ = {
'Exportar_Anexo_Cliente':['All'],
'Exportar_xml_anexo_fornecedor_m106':['All'],
'Exportar_XML_M106':['All'],
'Gerar XML Mod106':['All'],
'Confirmar':['All'],
'Verificar':['All'],
'Rascunho':['All'],
'full_access':['Gestor']
}
self.__auth__ = {
'read':['All'],
'write':['All'],
'create':['All'],
'delete':['Gestor'],
'full_access':['Gestor']
}
self.__tabs__ = [
('Anexo Cliente', ['xml_anexo_cliente_m106']),
('Anexo Fornecedor', ['xml_anexo_fornecedor_m106']),
('Regularização Cliente', ['xml_anexo_reg_cliente_m106']),
('Regularização Fornecedor', ['xml_anexo_reg_fornecedor_m106',]),
('Campos do Modelo',['cp01','cp02','cp03','cp04','cp05','cp06','cp07','cp08','cp09','cp10','cp11','cp12','cp13','cp14','cp15','cp16','cp17','cp18','cp19','cp20','cp21','cp22','cp23','cp24','cp25','cp26','cp27','cp28','cp29','cp30','cp31','cp32','cp33','cp34','cp35','cp36','cp37','cp38','cp39','cp40','cp41','cp42','cp43','cp44','cp45','cp46','cp47','cp48','cp49','cp50']),
('XML',['xml'])]
self.nome = string_field(view_order = 1, name = 'Nome', size = 70, default='modelo106_{data}'.format(data=datetime.date.today()))
self.tipo_declaracao = combo_field(view_order = 2, name = 'Tipo Declaração', size = 65, default = '1', options = [('1','No Prazo'), ('2','Fora de Prazo'),('4', 'Substituição')], onlist = False)
self.nif=string_field(view_order=3, name='Nif', size=55, default=erp_config.nif)
self.ano = combo_field(view_order = 4, name ='Ano', args = 'required', size = 50, default = datetime.date.today().year, options='model.getAno()')
self.mes = combo_field(view_order = 5, name ='Mês',args = 'required', default=datetime.date.today().strftime("%m"), options =[('01','Janeiro'),('02','Fevereiro'),('03','Março'),('04','Abril'),('05','Maio'),('06','Junho'),('07','Julho'),('08','Agosto'),('09','Setembro'),('10','Outubro'),('11','Novembro'),('12','Dezembro')])
self.anexo_cli = combo_field(view_order = 6, name = 'Anexos Cliente',onlist=False, size=75, args='required',default='1',options=[('0','NÃO'),('1','SIM')])
self.anexo_forn = combo_field(view_order = 7, name = 'Anexos Fornecedor',onlist=False, size=65,args='required',default='1',options=[('0','NÃO'),('1','SIM')])
self.anexo_reg_cli = combo_field(view_order = 8, name = 'Regularização Cliente',onlist=False,size=50,args='required',default='0',options=[('0','NÃO'),('1','SIM')])
self.anexo_reg_forn = combo_field(view_order = 9, name = 'Regularização Fornecedor',onlist=False,size=50,args='required',default='0',options=[('0','NÃO'),('1','SIM')])
self.area_fiscal = combo_field(view_order = 10, name = 'Área Fiscal', size = 50, model = 'area_fiscal',args = 'required', onlist = False, search = True, column = 'codigo', default='223', options = "model.get_opts('AreaFiscal().get_options_buyable()')")
self.operacoes = combo_field(view_order = 11, name = 'Tipo de Operacoes', args='required', size = 75, default = '1', options = [('0','Activas e/ou Passivas'), ('1','Inexistência de operações'),('2', 'Unica Operação 1ª vez')], onlist = False)
self.nif_tecnico = string_field(view_order = 12, name = 'Nif do Técnico O.C.', onlist=False,size = 65)
self.num_reg_tec_ordem = string_field(view_order = 13, name = 'Nº Técnico O.C.',onlist=False, size = 55)
self.data_apresentacao = date_field(view_order=14, size=62, name ='Data de Apresentação', args='required ', default=datetime.date.today())
self.data_recepcao = date_field(view_order=15, size=60, name ='Data de Recepção', default=datetime.date.today())
self.local_apresentacao = combo_field(view_order = 16, name = 'Local de Apresentação', size = 75, model = 'area_fiscal', args = 'required', onlist = False, search = False, column = 'local', default='Praia', options = "model.get_opts('AreaFiscal().get_options()')")
self.observacoes = text_field(view_order=17, name='Observações', size = 235,onlist=False, args="rows=1")
self.cp01 = currency_field(view_order= 18, name ='[01] Base Tributavel Taxa Normal', args='required', size=70,onlist=False)
self.cp02 = currency_field(view_order= 19, name ='[02] Imp. Favor Estado Taxa Normal', args='required',size=70,onlist=False)
self.cp03 = currency_field(view_order= 20, name ='[03] Base Tributavel Taxa Especial', args='required',size=70,onlist=False)
self.cp04 = currency_field(view_order= 21, name ='[04] Imp. Favor Estado Taxa Especial', args='required',size=70,onlist=False)
self.cp05 = currency_field(view_order= 22, name ='[05] Base Trib. IVA Liq. Operaçoes(Dec. - Lei nº 16/2004)', args='required',size=90,onlist=False)
self.cp06 = currency_field(view_order= 23, name ='[06] IVA Favor Estado', args ='required',size=90,onlist=False)
self.cp07 = currency_field(view_order= 24, name ='[07] IVA Liquidado pelo Contratante', args='required',size=90,onlist=False)
self.cp08 = currency_field(view_order= 25, name ='[08] Bens/Serv Isentas Com Direito a Dedução', args='required',size=90,onlist=False)
self.cp09 = currency_field(view_order= 26, name ='[09] Bens/Serv Isentas Sem Direito a Dedução', args='required',size=90,onlist=False)
self.cp10 = currency_field(view_order= 27, name ='[10] Bens/Serv Não tributados (art. 6º, nº 7 do RIVA)', args='required',size=90,onlist=False)
self.cp11 = currency_field(view_order= 28, name ='[11] Base Tributavel - Prestador Serviço Estrageiro', args='required',size=90,onlist=False)
self.cp12 = currency_field(view_order= 29, name ='[12] Imp. Liq. Favor Sujeito - Prestador Serviço Estrageiro', args='required',size=90,onlist=False)
self.cp13 = currency_field(view_order= 30, name ='[13] Imp. Liq. Favor Estado - Prestador Serviço Estrageiro', args='required',size=90,onlist=False)
self.cp14 = currency_field(view_order= 31, name ='[14] Base Tributavel - Serviços Costrução. Civil', args='required',size=90,onlist=False)
self.cp15 = currency_field(view_order= 32, name ='[15] IVA Favor Sujeito - Serviços Costrução. Civil', args='required',size=90,onlist=False)
self.cp16 = currency_field(view_order= 33, name ='[16] IVA Favor Estado - Serviços Costrução. Civil', args='required',size=90,onlist=False)
self.cp17 = currency_field(view_order= 34, name ='[17] Base Trib. Bens/Serv - Investimentos', args='required',size=90,onlist=False)
self.cp18 = currency_field(view_order= 35, name ='[18] Imposto Favor Sujeito - Investimentos', args='required',size=120,onlist=False)
self.cp19 = currency_field(view_order= 36, name ='[19] Base Trib. Bens/Serv - Inventários', args='required',size=90,onlist=False)
self.cp20 = currency_field(view_order= 37, name ='[20] Imposto Favor Sujeito - Inventários', args='required',size=120,onlist=False)
self.cp21 = currency_field(view_order= 38, name ='[21] Base Trib. Bens/Serv - O.Bens de Consumo', args='required',size=90,onlist=False)
self.cp22 = currency_field(view_order= 39, name ='[22] Imposto Favor Sujeito - O.Bens de Consumo', args='required',size=120,onlist=False)
self.cp23 = currency_field(view_order= 40, name ='[23] Base Trib. Bens/Serv - Serviços', args='required',size=90,onlist=False)
self.cp24 = currency_field(view_order= 41, name ='[24] Imposto Favor Sujeito - Serviços', args='required',size=120,onlist=False)
self.cp25 = currency_field(view_order= 42, name ='[25] Base Trib. Import. Bens efectuados pelo SP', args='required',size=90,onlist=False)
self.cp26 = currency_field(view_order= 43, name ='[26] Imp. Favor Sujeito- Import. Bens efectuados pelo SP', args='required',size=120,onlist=False)
self.cp27 = currency_field(view_order= 44, name ='[27] Regularização F.Sujeito- Comunicada pela Adm.Fiscal.', args='required',size=120,onlist=False)
self.cp28 = currency_field(view_order= 45, name ='[28] Regularização F.Estado- Comunicada pela Adm.Fiscal', args='required',size=120,onlist=False)
self.cp29 = currency_field(view_order= 46, name ='[29] Regularização F.Sujeito-Comunicada pelo Contribuinte', args='required',size=120,onlist=False)
self.cp30 = currency_field(view_order= 47, name ='[30] Regularização F.Estado-Comunicada pelo Contribuinte', args='required',size=120,onlist=False)
self.cp31 = currency_field(view_order= 48, name ='[31] Percentagem Estimada ( dedução parcial pro rata )', args='required',size=270,onlist=False)
self.cp32 = currency_field(view_order= 49, name ='[32] Soma B.T.(01+03+05+07+08+09+10+11+14+17+19+21+23+25)', args='readonly',size=120,onlist=False)
self.cp33 = currency_field(view_order= 50, name ='[33] Soma I.F.S.(12+15+18+20+22+24+26+27+29)', args='readonly',size=90,onlist=False)
self.cp34 = currency_field(view_order= 51, name ='[34] Soma I.F.E.(02+04+06+13+16+28+30)', args='readonly',size=70,onlist=False)
self.cp35 = currency_field(view_order= 52, name ='[35] Apuramento F. Estado(34-33)', args='readonly',size=70,onlist=False)
self.cp36 = currency_field(view_order= 53, name ='[36] Apuramento F. Sujeito(33-34)', args='readonly',size=70,onlist=False)
self.cp37 = currency_field(view_order= 54, name ='[37] Excesso a Reportar P.Anteriores', args='required',size=70,onlist=False)
self.cp38 = currency_field(view_order= 55, name ='[38] Imp. a Pagar ao Estado', args='readonly',size=70,onlist=False)
self.cp39 = currency_field(view_order= 56, name ='[39] Crédito de Imposto', args='required',size=70,onlist=False)
self.cp40 = currency_field(view_order= 57, name ='[40] Reporte p/ Periodo Seg.', args='required',size=70,onlist=False)
self.cp41 = currency_field(view_order= 58, name ='[41] <NAME>', args='required',size=70,onlist=False)
self.cp42 = currency_field(view_order= 59, name ='[42] Adiant./trans.bens e serv tributadas', args='required',size=70,onlist=False)
self.cp43 = currency_field(view_order= 60, name ='[43] Amostras/ofertas além limite legal', args='required',size=70,onlist=False)
self.cp44 = currency_field(view_order= 61, name ='[44] Op. sujeitas a tributação da margem', args='required',size=70,onlist=False)
self.cp45 = currency_field(view_order= 62, name ='[45] Outras Operações- art. 3º e 4º RIVA', args='required',size=70,onlist=False)
self.cp46 = currency_field(view_order= 63, name ='[46] Op. Destinadas a Exportação', args='required',size=70,onlist=False)
self.cp47 = currency_field(view_order= 64, name ='[47] Oper.efetuadas-Decreto-Lei 88/2005', args='required',size=70,onlist=False)
self.cp48 = currency_field(view_order= 65, name ='[48] Bens da Lista Anexa', args='required',size=70,onlist=False)
self.cp49 = currency_field(view_order= 66, name ='[49] Faturas de prest.serviços emitadas', args='required',size=70,onlist=False)
self.cp50 = currency_field(view_order= 67, name ='[50] Recibos de prest.serviços faturados', args='required',size=70,onlist=False)
self.estado = info_field(view_order = 68, name ='Estado', hidden = True, default='Rascunho')
self.xml_anexo_cliente_m106 = list_field(view_order = 69, name= '', nolabel =True, args ='readonly', condition = "xml_modelo_106='{id}'", model_name = 'xml_anexo_cliente_m106.XMLAnexoClienteM106', list_edit_mode = 'edit', onlist = False)
self.xml_anexo_fornecedor_m106 = list_field(view_order = 70, name= '', nolabel =True, args ='readonly', condition = "xml_modelo_106='{id}'", model_name = 'xml_anexo_fornecedor_m106.XMLAnexoFornecedorM106', list_edit_mode = 'edit', onlist = False)
self.xml_anexo_reg_fornecedor_m106 = list_field(view_order = 71, name= '', nolabel =True, args ='readonly', condition = "xml_modelo_106='{id}'", model_name = 'xml_anexo_reg_fornecedor_m106.XMLAnexoRegFornecedorM106', list_edit_mode = 'edit', onlist = False)
self.xml_anexo_reg_cliente_m106 = list_field(view_order = 72, name= '', nolabel =True, args ='readonly', condition = "xml_modelo_106='{id}'", model_name = 'xml_anexo_reg_cliente_m106.XMLAnexoRegClienteM106', list_edit_mode = 'edit', onlist = False)
self.xml = text_field(view_order = 73, name="XML", size = 320, onlist = False, args='rows=20')
def get_opts(self, get_str):
# NOTE: get_str is evaluated as Python; it must come from trusted field
# definitions, never from user input.
return eval(get_str)
def getAno(self):
options = []
opts = range(2014,2051)
for option in opts:
options.append((str(option), str(option)))
return options
def Exportar_XML_M106(self, key, window_id):
modelo = self.get(key=key)
if modelo:
modelo=modelo[0]
return data_to_xml(modelo['xml'], 'Download', "Modelo106_{ano}-{mes}".format(ano=modelo['ano'],mes=modelo['mes']))
def Exportar_xml_anexo_fornecedor_m106(self, key, window_id):
modelo = self.get(key=key)
if modelo:
modelo=modelo[0]
if modelo['anexo_forn']=='1':
anexo = XMLAnexoFornecedorM106(where="xml_modelo_106='{id}'".format(id=modelo['id'])).get()
if anexo:
return data_to_xml(anexo[0]['xml_gerado'], 'Download', "M106 Anexo Fornecedor_{ano}-{mes}".format(ano=modelo['ano'],mes=modelo['mes']))
else:
return form_edit(window_id = window_id).show()
elif modelo['anexo_reg_forn']=='1':
anexo = XMLAnexoRegFornecedorM106(where="xml_modelo_106='{id}'".format(id=modelo['id'])).get()
if anexo:
return data_to_xml(anexo[0]['xml_gerado'], 'Download', "M106 Anexo Reg Fornecedor_{ano}-{mes}".format(ano=modelo['ano'],mes=modelo['mes']))
else:
return form_edit(window_id = window_id).show()
else:
return form_edit(window_id = window_id).show()
else:
return form_edit(window_id = window_id).show()
def Exportar_Anexo_Cliente(self, key, window_id):
modelo = self.get(key=key)
if modelo:
modelo=modelo[0]
if modelo['anexo_cli']=='1':
anexo = XMLAnexoClienteM106(where="xml_modelo_106='{id}'".format(id=modelo['id'])).get()
if anexo:
return data_to_xml(anexo[0]['xml_gerado'], 'Download', "M106 Anexo Cliente_{ano}-{mes}".format(ano=modelo['ano'],mes=modelo['mes']))
else:
return form_edit(window_id = window_id).show()
elif modelo['anexo_reg_cli']=='1':
anexo = XMLAnexoRegClienteM106(where="xml_modelo_106='{id}'".format(id=modelo['id'])).get()
if anexo:
return data_to_xml(anexo[0]['xml_gerado'], 'Download', "M106 Anexo Reg Cliente_{ano}-{mes}".format(ano=modelo['ano'],mes=modelo['mes']))
else:
return form_edit(window_id = window_id).show()
else:
return form_edit(window_id = window_id).show()
else:
return form_edit(window_id = window_id).show()
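# Record lifecycle, as implemented by the methods below:
#   'Rascunho' --Verificar()--> 'Verificado' --Confirmar()--> 'Confirmado'
#   --Gerar_XML_Mod106()--> 'Gerado'; Rascunho() resets a verified record
#   back to 'Rascunho', zeroing cp01..cp50.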
def Verificar(self, key, window_id, internal = False):
"""
Metodo que faz a verificao e geração dos anexos do modelo 106
"""
self.kargs = get_model_record(model = self, key = key)
if self.kargs['estado'] == 'Rascunho':
# generate the selected annexes
if self.kargs['anexo_cli']=='1':
self.Gerar_Anexo_Cliente(key=key)
if self.kargs['anexo_forn']=='1':
self.Gerar_xml_anexo_fornecedor_m106(key=key)
if self.kargs['anexo_reg_cli']=='1':
self.Gerar_Anexo_Reg_Cliente(key=key)
if self.kargs['anexo_reg_forn']=='1':
self.Gerar_Anexo_Reg_Fornecedor(key=key)
# compute the totals
self.put()
self.kargs['cp32'] = str(int(self.get_cp32(key=key)))
self.kargs['cp33'] = str(int(self.get_cp33(key=key)))
self.kargs['cp34'] = str(int(self.get_cp34(key=key)))
# these sums must be persisted first so that cp35 and cp36 can be derived from them
self.put()
self.kargs['cp35'] = str(int(self.get_cp35(key=key)))
self.kargs['cp36'] = str(int(self.get_cp36(key=key)))
# mark the record as verified so that Confirmar()/Rascunho() can act on it
self.kargs['estado'] = 'Verificado'
self.put()
ctx_dict = get_context(window_id)
ctx_dict['main_key'] = self.kargs['id']
set_context(window_id, ctx_dict)
result = form_edit(window_id = window_id).show()
if not internal:
return result
def Confirmar(self, key, window_id, internal = False):
"""
Metodo para a confirmacao do modelo
"""
self.kargs = get_model_record(model = self, key = key)
if self.kargs['estado'] == 'Verificado':
self.kargs['cp38'] = str(int(self.get_cp38(key=key)))
self.kargs['estado'] = 'Confirmado'
self.put()
ctx_dict = get_context(window_id)
ctx_dict['main_key'] = self.kargs['id']
set_context(window_id, ctx_dict)
result = form_edit(window_id = window_id).show()
if not internal:
return result
def Rascunho(self, key, window_id, internal = False):
self.kargs = get_model_record(model=self, key=key)
if self.kargs['estado'] == 'Verificado':
for x in range(1,51):
if x<10:
campo="cp0{num}".format(num=x)
else:
campo="cp{num}".format(num=x)
try:
self.kargs[campo]=to_decimal(0)
except Exception:
pass
self.kargs['estado']='Rascunho'
self.put()
ctx_dict = get_context(window_id)
ctx_dict['main_key'] = self.kargs['id']
set_context(window_id, ctx_dict)
result = form_edit(window_id = window_id).show()
if not internal:
return result
def get_record_info(self, key):
def get_results():
record = XMLModelo106(where="id = '{id}'".format(id=key)).get()
return record[0]
return erp_cache.get(key=self.__model_name__ + str(key), createfunc=get_results)
def get_record_info_to3536(self, key):
def get_results():
record = XMLModelo106(where="id = '{id}'".format(id=key)).get()
return record[0]
return erp_cache.get(key=self.__model_name__ +'3536'+ str(key), createfunc=get_results)
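# Two cache entries are kept deliberately: Verificar() persists the sums
# cp32..cp34 with put() and only then derives cp35/cp36, so the '3536'
# entry must be created after that first put() in order to read the
# freshly stored totals.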
def get_cp32(self, key):
record = self.get_record_info(key=key)
soma=to_decimal(0)
try:
for x in ('01','03','05','07','08','09','10','11','14','17','19','21','23','25'):
campo = "cp{x}".format(x=x)
if record[campo] not in(None,'','None',0):
soma += to_decimal(int(record[campo]))
return int(soma)
except Exception:
return int(to_decimal(0))
def get_cp33(self, key):
record = self.get_record_info(key=key)
soma=to_decimal(0)
try:
for x in ('12','15','18','20','22','24','26','27','29'):
campo = "cp{x}".format(x=x)
if record[campo] not in(None,'','None',0):
soma += to_decimal(int(record[campo]))
return int(soma)
except Exception:
return int(to_decimal(0))
def get_cp34(self, key):
record = self.get_record_info(key=key)
soma=to_decimal(0)
try:
for x in ('02','04','06','13','16','28','30'):
campo = "cp{x}".format(x=x)
if record[campo] not in(None,'','None',0):
soma += to_decimal(int(record[campo]))
return int(soma)
except Exception:
return int(to_decimal(0))
def get_cp35(self, key):
record = self.get_record_info_to3536(key=key)
diferenca =to_decimal(0)
if to_decimal(record['cp34']) > to_decimal(record['cp33']):
diferenca = to_decimal(record['cp34']) - to_decimal(record['cp33'])
return int(diferenca)
def get_cp36(self, key):
record = self.get_record_info_to3536(key=key)
diferenca =to_decimal(0)
if to_decimal(record['cp34']) < to_decimal(record['cp33']):
diferenca = to_decimal(record['cp33']) - to_decimal(record['cp34'])
return int(diferenca)
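# cp35 and cp36 are mutually exclusive: at most one of them is non-zero.
# Worked example (hypothetical amounts): cp33 = 120 (tax in favour of the
# taxable person) and cp34 = 200 (tax in favour of the State) yield
# cp35 = 200 - 120 = 80 and cp36 = 0.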
def get_cp38(self, key):
self.kargs = get_model_record(model=self,key=key)
diferenca = to_decimal(0)
if self.kargs['tipo_declaracao']=='1':
if to_decimal(self.kargs['cp35']) >= to_decimal(self.kargs['cp37']):
diferenca = to_decimal(self.kargs['cp35']) - to_decimal(self.kargs['cp37'])
return int(diferenca)
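# Worked example for cp38 (hypothetical amounts, assuming tipo_declaracao
# '1' selects this computation): cp35 = 80 with a carried-over excess
# cp37 = 30 gives cp38 = 80 - 30 = 50 payable; if cp37 exceeded cp35,
# cp38 stays 0.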
# maps the invoice line's product type to the regularization-annex typology
def getTipologia(self, tipo_produto):
"""
Returns the typology code of a regularization-annex line for the
given product type.
"""
if tipo_produto in ('servico','','None',None):
# service
return 'SRV'
elif tipo_produto == 'consumivel':
# other consumables
return 'OBC'
elif tipo_produto == 'imobilizado':
# investment / fixed assets
return 'IMO'
elif tipo_produto in ('armazenavel','produzido'):
# inventory
return 'INV'
# any other product type implicitly returns None
"""Metodo para gerar o anexo fornecedor"""
def Gerar_xml_anexo_fornecedor_m106(self, key):
informacoes = self.get_info_anexo_forn(key=key)
if len(informacoes) > 0:
self.guardar_anexo_forn(key=key, info_anexo=informacoes[0]['info_anexo'], info_linhas=informacoes[0]['info_linhas'])
""" Metodo para gerar o anexo cliente """
def Gerar_Anexo_Cliente(self, key):
informacoes = self.get_info_anexo_cli(key)
if len(informacoes) > 0:
self.guardar_anexo_cli(key=key, info_anexo=informacoes[0]['info_anexo'], info_linhas=informacoes[0]['info_linhas'])
def Gerar_Anexo_Reg_Fornecedor(self, key):
self.guardar_regularizacao_forn(key=key)
# generate the XML
self.gerar_xml_reg_forn(key=key)
def Gerar_Anexo_Reg_Cliente(self, key):
self.guardar_regularizacao_cli(key=key)
# generate the XML
self.gerar_xml_reg_cli(key=key)
""" Metodo para gerar o arquivo xml do modelo 106 """
def Gerar_XML_Mod106(self, key, window_id):
self.kargs = get_model_record(model=self, key=key)
areaFiscal = AreaFiscal(where="id='{id}'".format(id=self.kargs['local_apresentacao'])).get()
nome_local_apresentacao = areaFiscal[0]['local']
import xml.dom.minidom
doc = xml.dom.minidom.Document()
tag_modelo106 = doc.createElement('modelo106')
tag_tp_dec_anx = doc.createElement('tp_dec_anx')
tag_nif = doc.createElement('nif')
tag_periodo = doc.createElement('periodo')
tag_cd_af = doc.createElement('cd_af')
tag_exist_oper = doc.createElement('exist_oper')
tag_dt_apresentacao = doc.createElement('dt_apresentacao')
tag_loc_apresentacao = doc.createElement('loc_apresentacao')
tag_nif_toc = doc.createElement('nif_toc')
tag_num_ordem_toc = doc.createElement('num_ordem_toc')
tag_dt_recepcao = doc.createElement('dt_recepcao')
tag_obs = doc.createElement('obs')
tag_tp_dec_anx.setAttribute('dec', str(self.kargs['tipo_declaracao']))
tag_tp_dec_anx.setAttribute('cli', str(self.kargs['anexo_cli']))
tag_tp_dec_anx.setAttribute('for', str(self.kargs['anexo_forn']))
tag_tp_dec_anx.setAttribute('cli_reg', str(self.kargs['anexo_reg_cli']))
tag_tp_dec_anx.setAttribute('for_reg', str(self.kargs['anexo_reg_forn']))
tag_periodo.setAttribute('ano', str(self.kargs['ano']))
tag_periodo.setAttribute('mes', str(self.kargs['mes']))
# build the element tree
doc.appendChild(tag_modelo106)
tag_modelo106.appendChild(tag_tp_dec_anx)
tag_modelo106.appendChild(tag_nif)
tag_modelo106.appendChild(tag_periodo)
tag_modelo106.appendChild(tag_cd_af)
tag_modelo106.appendChild(tag_exist_oper)
for x in range(1,51):
if x<10:
campo="cp0{num}".format(num=x)
else:
campo="cp{num}".format(num=x)
try:
if self.kargs[campo] not in (None,'','None',0,'0'):
tags = doc.createElement(campo)
tag_modelo106.appendChild(tags)
tags.appendChild(doc.createTextNode(str(int(to_decimal(self.kargs[campo])))))
except Exception:
pass
tag_modelo106.appendChild(tag_dt_apresentacao)
tag_modelo106.appendChild(tag_loc_apresentacao)
tag_modelo106.appendChild(tag_nif_toc)
tag_modelo106.appendChild(tag_num_ordem_toc)
tag_modelo106.appendChild(tag_dt_recepcao)
tag_modelo106.appendChild(tag_obs)
tag_nif.appendChild(doc.createTextNode(str(self.kargs['nif'])))
tag_cd_af.appendChild(doc.createTextNode(str(self.kargs['area_fiscal'])))
tag_exist_oper.appendChild(doc.createTextNode(str(self.kargs['operacoes'])))
tag_dt_apresentacao.appendChild(doc.createTextNode(str(self.kargs['data_apresentacao'])))
tag_loc_apresentacao.appendChild(doc.createTextNode(str(nome_local_apresentacao)))
tag_dt_recepcao.appendChild(doc.createTextNode(str(self.kargs['data_recepcao'])))
tag_nif_toc.appendChild(doc.createTextNode(str(self.kargs['nif_tecnico'])))
tag_num_ordem_toc.appendChild(doc.createTextNode(str(self.kargs['num_reg_tec_ordem'])))
tag_obs.appendChild(doc.createTextNode(str(self.kargs['observacoes'])))
conteudoXmlCriado= doc.toprettyxml()
conteudoFinalXml=conteudoXmlCriado.replace('<?xml version="1.0" ?>','<?xml version="1.0" encoding="utf-8"?>')
self.kargs['xml']=str(conteudoFinalXml)
self.kargs['estado']='Gerado'
self.put()
return form_edit(window_id=window_id).show()
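# Illustrative shape of the document built above (hypothetical values):
#   <?xml version="1.0" encoding="utf-8"?>
#   <modelo106>
#     <tp_dec_anx dec="1" cli="1" for="1" cli_reg="0" for_reg="0"/>
#     <nif>123456789</nif>
#     <periodo ano="2016" mes="03"/>
#     <cd_af>...</cd_af>
#     <exist_oper>...</exist_oper>
#     <cp01>1000</cp01>
#     <dt_apresentacao>...</dt_apresentacao>
#     ...
#     <obs>...</obs>
#   </modelo106>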
def guardar_anexo_cli(self, key, info_anexo, info_linhas):
# persist the data
self.kargs = get_model_record(model=self, key=key)
if len(info_linhas)!=0:
conteudoFinalXml = self.gerar_XML_anexo_cli(info_anexo=info_anexo, info_linhas=info_linhas)
content ={
'user': '{user}'.format(user=bottle.request.session['user']),
'ano': str(info_anexo['anx_cli_ano']),
'mes': str(info_anexo['anx_cli_mes']),
'area_fiscal': str(info_anexo['anx_cli_area_fiscal']),
'nif_contribuinte': str(info_anexo['anx_cli_nif_contr']),
'data_entrega': str(info_anexo['anx_cli_dt_entrega']),
'xml_modelo_106':str(info_anexo['anx_cli_modelo106']),
'nome':str(info_anexo['anx_cli_nome']),
'estado':'Gerado',
'total_factura':str(info_anexo['anx_cli_total_fact']),
'total_base_incidencia':str(info_anexo['anx_cli_total_bs_incid']),
'total_liquidado':str(info_anexo['anx_cli_total_liq']),
'xml_gerado':str(conteudoFinalXml)
}
id_anxCli = XMLAnexoClienteM106(**content).put()
# persist the annex lines
for line in info_linhas:
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'factura_cliente':str(line['ln_anx_cli_factura']),
'designacao':str(line['ln_anx_cli_designacao']),
'nif_cliente':str(line['ln_anx_cli_nif']),
'origem':str(line['ln_anx_cli_origem']),
'serie':str(line['ln_anx_cli_serie']),
'tipo_doc':str(line['ln_anx_cli_tipoDoc']),
'numero_doc':str(line['ln_anx_cli_num_doc']),
'data':str(line['ln_anx_cli_data']),
'valor_factura':str(int(to_decimal(line['ln_anx_cli_vl_fatura']))),
'valor_base_incidencia':str(int(line['ln_anx_cli_Incidencia'])),
'taxa_iva':str(line['ln_anx_cli_taxa_iva']),
'iva_liquidado':str(int(line['ln_anx_cli_total_Liq'])),
'nao_liq_imposto':str(line['ln_anx_cli_nao_liq_imp']),
'linha_mod106':str(line['ln_anx_cli_linha_MOD106']),
'xml_anexo_cliente_m106':str(id_anxCli)
}
XMLLinhaAnexoClienteM106(**content).put()
# roll the line amounts into the Modelo 106 fields
if str(line['ln_anx_cli_linha_MOD106'])=='01':
self.kargs['cp01']= to_decimal(to_decimal(self.kargs['cp01']) + to_decimal(line['ln_anx_cli_Incidencia']))
self.kargs['cp02']= to_decimal(to_decimal(self.kargs['cp02']) + to_decimal(line['ln_anx_cli_total_Liq']))
elif str(line['ln_anx_cli_linha_MOD106'])=='03':
self.kargs['cp03']= to_decimal(to_decimal(self.kargs['cp03']) + to_decimal(line['ln_anx_cli_Incidencia']))
self.kargs['cp04']= to_decimal(to_decimal(self.kargs['cp04']) + to_decimal(line['ln_anx_cli_total_Liq']))
elif str(line['ln_anx_cli_linha_MOD106'])=='05':
self.kargs['cp05']= to_decimal(to_decimal(self.kargs['cp05']) + to_decimal(line['ln_anx_cli_Incidencia']))
self.kargs['cp06']= to_decimal(to_decimal(self.kargs['cp06']) + to_decimal(line['ln_anx_cli_total_Liq']))
elif str(line['ln_anx_cli_linha_MOD106'])=='07':
self.kargs['cp07']= to_decimal(to_decimal(self.kargs['cp07']) + to_decimal(line['ln_anx_cli_Incidencia']))
elif str(line['ln_anx_cli_linha_MOD106'])=='08':
self.kargs['cp08']= to_decimal(to_decimal(self.kargs['cp08']) + to_decimal(line['ln_anx_cli_Incidencia']))
elif str(line['ln_anx_cli_linha_MOD106'])=='09':
self.kargs['cp09']= to_decimal(to_decimal(self.kargs['cp09']) + to_decimal(line['ln_anx_cli_Incidencia']))
elif str(line['ln_anx_cli_linha_MOD106']) =='10':
self.kargs['cp10']= to_decimal(to_decimal(self.kargs['cp10']) + to_decimal(line['ln_anx_cli_Incidencia']))
# persist the updated model
self.put()
def gerar_XML_anexo_cli(self, info_anexo, info_linhas):
conteudoFinalXml=''
if len(info_linhas)!=0:
# build the XML document
import xml.dom.minidom
doc = xml.dom.minidom.Document()
# create the elements
tag_anexo_cli = doc.createElement('anexo_cli')
tag_header = doc.createElement('header')
tag_linhas = doc.createElement('linhas')
tag_dt_entrega = doc.createElement('dt_entrega')
tag_total_fatura = doc.createElement('total_fatura')
tag_total_base_incid = doc.createElement('total_base_incid')
tag_total_liquidado = doc.createElement('total_liquidado')
# the header attributes mirror the Modelo 106 identification data
tag_header.setAttribute('ano', str(info_anexo['anx_cli_ano']))
tag_header.setAttribute('mes', str(info_anexo['anx_cli_mes']))
tag_header.setAttribute('cd_af', str(info_anexo['anx_cli_area_fiscal']))
tag_header.setAttribute('nif', str(info_anexo['anx_cli_nif_contr']))
# build the element tree
doc.appendChild(tag_anexo_cli)
tag_anexo_cli.appendChild(tag_header)
tag_anexo_cli.appendChild(tag_linhas)
tag_anexo_cli.appendChild(tag_dt_entrega)
tag_anexo_cli.appendChild(tag_total_fatura)
tag_anexo_cli.appendChild(tag_total_base_incid)
tag_anexo_cli.appendChild(tag_total_liquidado)
# fill in the values
for line in info_linhas:
# create the line tag
tag_linha = doc.createElement('linha')
# set the line values
tag_linha.setAttribute('designacao',str(line['ln_anx_cli_designacao']))
tag_linha.setAttribute('nif',str(line['ln_anx_cli_nif']))
tag_linha.setAttribute('origem',str(line['ln_anx_cli_origem']))
tag_linha.setAttribute('serie',str(line['ln_anx_cli_serie']))
tag_linha.setAttribute('tp_doc',str(line['ln_anx_cli_tipoDoc']))
tag_linha.setAttribute('num_doc',str(line['ln_anx_cli_num_doc']))
tag_linha.setAttribute('data',str(line['ln_anx_cli_data']))
tag_linha.setAttribute('vl_fatura',str(int(to_decimal(line['ln_anx_cli_vl_fatura']))))
tag_linha.setAttribute('vl_base_incid',str(int(line['ln_anx_cli_Incidencia'])))
tag_linha.setAttribute('tx_iva',str(line['ln_anx_cli_taxa_iva']))
tag_linha.setAttribute('iva_liq',str(int(line['ln_anx_cli_total_Liq'])))
tag_linha.setAttribute('nao_liq_imp',str(line['ln_anx_cli_nao_liq_imp']))
tag_linha.setAttribute('linha_dest_mod',str(line['ln_anx_cli_linha_MOD106']))
# append the line to the lines container
tag_linhas.appendChild(tag_linha)
tag_dt_entrega.appendChild(doc.createTextNode(str(info_anexo['anx_cli_dt_entrega'])))
tag_total_fatura.appendChild(doc.createTextNode(str(info_anexo['anx_cli_total_fact'])))
tag_total_base_incid.appendChild(doc.createTextNode(str(info_anexo['anx_cli_total_bs_incid'])))
tag_total_liquidado.appendChild(doc.createTextNode(str(info_anexo['anx_cli_total_liq'])))
# serialize the XML
conteudoXmlCriado= doc.toprettyxml()
# set the encoding declaration
conteudoFinalXml=conteudoXmlCriado.replace('<?xml version="1.0" ?>','<?xml version="1.0" encoding="utf-8"?>')
return conteudoFinalXml
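# Illustrative shape of the client annex built above (hypothetical values):
#   <anexo_cli>
#     <header ano="2016" mes="03" cd_af="..." nif="..."/>
#     <linhas>
#       <linha designacao="..." nif="..." tp_doc="FT" num_doc="12"
#              vl_fatura="1155" vl_base_incid="1000" tx_iva="15.5"
#              iva_liq="155" linha_dest_mod="01" .../>
#     </linhas>
#     <dt_entrega>...</dt_entrega>
#     <total_fatura>...</total_fatura>
#     <total_base_incid>...</total_base_incid>
#     <total_liquidado>...</total_liquidado>
#   </anexo_cli>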
def get_info_anexo_cli(self, key):
self.kargs = get_model_record(model=self, key=key)
informacoes=[]
# initialise the totals
anx_cli_total_fact = 0
anx_cli_total_bs_incid = 0
anx_cli_total_liq = 0
# annex line buffer
info_linhas =[]
# fetch the period's client invoices
facturas_clientes = FacturaCliente(where = "retencao= 'NAO' AND (estado='Confirmado' OR estado='Pago') and to_char(data,'yyyy')='{ano}' and to_char(data,'mm')='{mes}'".format(ano=str(self.kargs['ano']), mes =str(self.kargs['mes']))).get()
if len(facturas_clientes) > 0:
for facturaCli in facturas_clientes:
terceiro = Terceiro(where="id = '{id}'".format(id=facturaCli['cliente'])).get()
# build the line attributes
if terceiro:
ln_anx_cli_origem=terceiro[0]['origem']
ln_anx_cli_designacao=terceiro[0]['nome']
ln_anx_cli_nif = "000000000"
if terceiro[0]['nif'] not in ('', None,'None'):
ln_anx_cli_nif= terceiro[0]['nif']
ln_anx_cli_serie = facturaCli['serie']
ln_anx_cli_tipoDoc = 'FT'
ln_anx_cli_num_doc = facturaCli['numero']
ln_anx_cli_data = facturaCli['data']
ln_anx_cli_vl_fatura = int(to_decimal(facturaCli['total']))
ln_anx_cli_factura = facturaCli['id']
# for each invoice, collect the distinct VAT rates it contains
sql="""SELECT DISTINCT l.iva, p.deducao, p.nao_liq_imposto FROM linha_factura_cli l, Produto p
where (l.active = True OR l.active is NULL)
AND (p.active = True OR p.active is NULL)
AND l.produto = p.id
AND l.factura_cli = '{idFactura}'""".format(idFactura = facturaCli['id'])
taxas = run_sql(sql)
for taxa in taxas:
ln_anx_cli_linha_MOD106 = self.getLinhaM106AnexCli(taxa=taxa)
if taxa['nao_liq_imposto'] not in (None,'None'):
sql="""SELECT l.* FROM linha_factura_cli l, Produto p
where (l.active = True OR l.active is NULL)
AND (p.active = True OR p.active is NULL)
AND l.produto = p.id
AND l.iva='{esteIva}'
AND p.deducao = '{esteDeducao}'
AND p.nao_liq_imposto = '{naoLiqImp}'
AND l.factura_cli = '{idFactura}'""".format(esteIva=taxa['iva'],esteDeducao=taxa['deducao'],naoLiqImp=str(taxa['nao_liq_imposto']),idFactura = facturaCli['id'])
else:
sql="""SELECT l.* FROM linha_factura_cli l, Produto p
where (l.active = True OR l.active is NULL)
AND (p.active = True OR p.active is NULL)
AND l.produto = p.id
AND l.iva='{esteIva}'
AND p.deducao = '{esteDeducao}'
AND p.nao_liq_imposto is NULL
AND l.factura_cli = '{idFactura}'""".format(esteIva=taxa['iva'],esteDeducao=taxa['deducao'],idFactura = facturaCli['id'])
linhas_fact_cli = run_sql(sql)
# recompute the taxable base and the VAT total for this rate
ln_anx_cli_Incidencia = FacturaCliente().get_total_incidencia_por_taxa(record_lines=linhas_fact_cli)
ln_anx_cli_total_Liq = FacturaCliente().get_total_iva_por_taxa(record_lines=linhas_fact_cli)
if taxa['nao_liq_imposto']:
ln_anx_cli_nao_liq_imp = taxa['nao_liq_imposto']
else:
ln_anx_cli_nao_liq_imp = ''
# accumulate each line's amounts into the annex totals
anx_cli_total_fact += int(to_decimal(facturaCli['total']))
anx_cli_total_liq += int(ln_anx_cli_total_Liq)
anx_cli_total_bs_incid += int(ln_anx_cli_Incidencia)
ln_anx_cli_taxa_iva =""
if str(taxa['iva']) in ('None', None,'',0):
ln_anx_cli_taxa_iva=str(0)
elif '.5' in str(taxa['iva']):
ln_anx_cli_taxa_iva= str(round(to_decimal(taxa['iva']),1))
else:
ln_anx_cli_taxa_iva = str(int(taxa['iva']))
# collect the line data
linha = {
'ln_anx_cli_Incidencia':ln_anx_cli_Incidencia,
'ln_anx_cli_nif':ln_anx_cli_nif,
'ln_anx_cli_data':ln_anx_cli_data,
'ln_anx_cli_serie':ln_anx_cli_serie,
'ln_anx_cli_tipoDoc':ln_anx_cli_tipoDoc,
'ln_anx_cli_designacao':ln_anx_cli_designacao,
'ln_anx_cli_origem':ln_anx_cli_origem,
'ln_anx_cli_taxa_iva':ln_anx_cli_taxa_iva,
'ln_anx_cli_total_Liq':ln_anx_cli_total_Liq,
'ln_anx_cli_vl_fatura':ln_anx_cli_vl_fatura,
'ln_anx_cli_num_doc':ln_anx_cli_num_doc,
'ln_anx_cli_linha_MOD106':ln_anx_cli_linha_MOD106,
'ln_anx_cli_nao_liq_imp':ln_anx_cli_nao_liq_imp,
'ln_anx_cli_factura':ln_anx_cli_factura
}
info_linhas.append(linha)
# collect the client-annex header data
info_anexo={
'anx_cli_ano':self.kargs['ano'],
'anx_cli_mes':self.kargs['mes'],
'anx_cli_area_fiscal': self.kargs['area_fiscal'],
'anx_cli_nif_contr': self.kargs['nif'],
'anx_cli_dt_entrega':self.kargs['data_apresentacao'],
'anx_cli_modelo106': key,
'anx_cli_nome': 'Anexo clientes_{ano}-{mes}'.format(ano=self.kargs['ano'], mes=self.kargs['mes']),
'anx_cli_estado': 'Gerado',
'anx_cli_total_fact':anx_cli_total_fact,
'anx_cli_total_bs_incid':anx_cli_total_bs_incid,
'anx_cli_total_liq':anx_cli_total_liq
}
informacoes.append({'info_anexo':info_anexo, 'info_linhas':info_linhas})
return informacoes
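# Shape of the value returned above (a single entry when invoices exist):
#   [{'info_anexo': {...annex header fields and running totals...},
#     'info_linhas': [{...one dict per (invoice, VAT-rate) combination...}]}]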
def getLinhaM106AnexCli(self, taxa):
"""
determina a linha do modelo106 que entra a informação do anexo de cliente
obs: ficou por determinar a situaçao da linhha 5 (Operações em que liquidou o
IVA nos termos do Decreto - Lei nº 16/2004 de 20 de Maio (valor recebido))
"""
if str(to_decimal(taxa['iva'])) == str(to_decimal(15.5)):
# standard rate in force
return '01'
if str(to_decimal(taxa['iva'])) in (str(to_decimal(0)),'',None,'None'):
# VAT exempt
if taxa['deducao'] in ('0',0):
# without right to deduction
return '09'
elif taxa['deducao'] in ('50',50,'100',100):
# with right to deduction
return '08'
else:
# not taxed
return '10'
else:
# special rate
return '03'
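# Example mapping (hypothetical lines): a 15.5% line maps to field 01
# (base) and 02 (tax); an exempt line whose product has deducao '100'
# maps to field 08; any other non-zero rate maps to fields 03/04.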
def get_info_anexo_forn(self, key):
# collects all the supplier-annex information
informacoes=[]
self.kargs = get_model_record(model=self, key=key)
# initialise the totals
anx_forn_total_fact = 0
anx_forn_total_bs_incid = 0
anx_forn_total_ded = 0
anx_forn_total_sup = 0
# annex line buffer
info_linhas = []
# fetch the period's supplier invoices
facturas_fornecedores = FacturaFornecedor(where = "retencao= 'NAO' AND estado='Confirmado' AND to_char(data,'yyyy')='{ano}' and to_char(data,'mm')='{mes}'".format(ano=str(self.kargs['ano']), mes =str(self.kargs['mes']))).get()
if len(facturas_fornecedores) > 0:
for facturaForn in facturas_fornecedores:
terceiro = Terceiro(where="id = '{id}'".format(id=facturaForn['fornecedor'])).get()
# build the line attributes
if terceiro:
ln_anx_forn_origem=terceiro[0]['origem']
ln_anx_forn_designacao=terceiro[0]['nome']
ln_anx_forn_nif= terceiro[0]['nif']
ln_anx_forn_tipoDoc = 'FT'
ln_anx_forn_num_doc = facturaForn['numero']
ln_anx_forn_data = facturaForn['data']
ln_anx_forn_vl_fatura = facturaForn['total']
ln_anx_forn_factura = facturaForn['id']
# for each invoice, collect the distinct VAT rate / deduction / product-type combinations
sql ="""SELECT DISTINCT l.iva, l.direito_deducao AS deducao, p.tipo
FROM linha_factura_forn l, produto p
WHERE (l.active=True OR l.active IS NULL)
AND (p.active=True OR p.active IS NULL)
AND l.produto = p.id
AND l.factura_forn = '{idFactura}'""".format(idFactura = facturaForn['id'])
taxas = run_sql(sql)
for taxa in taxas:
# resolve the typology
ln_anx_forn_tipologia = self.getTipologia(taxa['tipo'])
# fetch the invoice lines matching this VAT rate, deduction right and product type
sql ="""SELECT l.* FROM linha_factura_forn l, produto p
WHERE (l.active=True OR l.active IS NULL)
AND (p.active=True OR p.active IS NULL)
AND l.produto = p.id
AND l.iva = '{esteIva}'
AND l.direito_deducao ='{esteDeducao}'
AND p.tipo = '{esteTipo}'
AND l.factura_forn = '{idFactura}'""".format(esteIva=taxa['iva'], esteDeducao=taxa['deducao'],esteTipo=taxa['tipo'],idFactura = facturaForn['id'])
linhas_fact_forn = run_sql(sql)
# recompute the taxable base, the VAT borne and the deductible VAT for this combination
ln_anx_forn_Incidencia = FacturaFornecedor().get_total_incidencia_por_taxa(record_lines=linhas_fact_forn)
ln_anx_forn_total_sup = round(to_decimal(to_decimal(ln_anx_forn_Incidencia)*to_decimal(taxa['iva'])/100),0)
ln_anx_forn_total_ded = FacturaFornecedor().get_total_dedutivel_por_taxa(record_lines=linhas_fact_forn)
# accumulate each line's amounts into the annex totals
anx_forn_total_fact += int(to_decimal(facturaForn['total']))
anx_forn_total_sup += int(ln_anx_forn_total_sup)
anx_forn_total_bs_incid += int(ln_anx_forn_Incidencia)
anx_forn_total_ded += int(ln_anx_forn_total_ded)
ln_anx_forn_taxa_iva =""
if str(taxa['iva']) in ('None', None,''):
ln_anx_forn_taxa_iva=0
elif '.5' in str(taxa['iva']):
ln_anx_forn_taxa_iva= round(to_decimal(taxa['iva']),1)
else:
ln_anx_forn_taxa_iva = int(taxa['iva'])
ln_anx_forn_direito_ded = str(taxa['deducao'])
# resolve the Modelo 106 destination field
ln_anx_forn_linha_MOD106 = self.getLinhaM106AnexForn(tipologia = ln_anx_forn_tipologia, origem = ln_anx_forn_origem)
# collect the line data
linha = {
'ln_anx_forn_Incidencia':ln_anx_forn_Incidencia,
'ln_anx_forn_nif':ln_anx_forn_nif,
'ln_anx_forn_data':ln_anx_forn_data,
'ln_anx_forn_tipoDoc':ln_anx_forn_tipoDoc,
'ln_anx_forn_designacao':ln_anx_forn_designacao,
'ln_anx_forn_origem':ln_anx_forn_origem,
'ln_anx_forn_taxa_iva':ln_anx_forn_taxa_iva,
'ln_anx_forn_total_sup':ln_anx_forn_total_sup,
'ln_anx_forn_direito_ded':ln_anx_forn_direito_ded,
'ln_anx_forn_total_ded':ln_anx_forn_total_ded,
'ln_anx_forn_vl_fatura':ln_anx_forn_vl_fatura,
'ln_anx_forn_num_doc':ln_anx_forn_num_doc,
'ln_anx_forn_linha_MOD106':ln_anx_forn_linha_MOD106,
'ln_anx_forn_tipologia':ln_anx_forn_tipologia,
'ln_anx_forn_factura':ln_anx_forn_factura
}
info_linhas.append(linha)
# collect the supplier-annex header data
info_anexo ={
'anx_forn_ano': self.kargs['ano'],
'anx_forn_mes': self.kargs['mes'],
'anx_forn_area_fiscal': self.kargs['area_fiscal'],
'anx_forn_nif_entidade': self.kargs['nif'],
'anx_forn_dt_entrega': self.kargs['data_apresentacao'],
'anx_forn_modelo106': key,
'anx_forn_nome' :'Anexo fornecedor_{ano}-{mes}'.format(ano=self.kargs['ano'], mes=self.kargs['mes']),
'anx_forn_estado':'Gerado',
'anx_forn_total_fact' : anx_forn_total_fact,
'anx_forn_total_bs_incid': anx_forn_total_bs_incid,
'anx_forn_total_ded': anx_forn_total_ded,
'anx_forn_total_sup': anx_forn_total_sup
}
informacoes.append({'info_anexo':info_anexo,'info_linhas':info_linhas})
return informacoes
def getLinhaM106AnexForn(self, tipologia, origem):
"""
determina a linha do modelo106 que entra a informação.
obs: ficou por determinar a linha 25(Imposto Dedutível nas importações de bens efetuadas pelo SP)
"""
if origem=='CV':
# supplier with a domestic (CV) head office
if tipologia=='IMO':
return '17'
elif tipologia=='INV':
return '19'
elif tipologia=='OBC':
return '21'
else:
return '23'
else:
# supplier with a foreign head office
return '11'
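# Example mapping (hypothetical lines): a domestic ('CV') invoice line of
# typology 'INV' maps to fields 19/20, while any line from a foreign
# supplier maps to fields 11/12 regardless of typology.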
def gerar_XML_anexo_forn(self, info_anexo, info_linhas):
conteudoFinalXml=''
if len(info_linhas)!=0:
# build the XML document
import xml.dom.minidom
doc = xml.dom.minidom.Document()
# create the elements
tag_anexo_for = doc.createElement('anexo_for')
tag_header = doc.createElement('header')
tag_linhas = doc.createElement('linhas')
tag_dt_entrega = doc.createElement('dt_entrega')
tag_total_fatura = doc.createElement('total_fatura')
tag_total_base_incid = doc.createElement('total_base_incid')
tag_total_suportado = doc.createElement('total_suportado')
tag_total_dedutivel = doc.createElement('total_dedutivel')
# set the header attributes
tag_header.setAttribute('ano', str(info_anexo['anx_forn_ano']))
tag_header.setAttribute('mes', str(info_anexo['anx_forn_mes']))
tag_header.setAttribute('cd_af', str(info_anexo['anx_forn_area_fiscal']))
tag_header.setAttribute('nif', str(info_anexo['anx_forn_nif_entidade']))
# build the element tree
doc.appendChild(tag_anexo_for)
tag_anexo_for.appendChild(tag_header)
tag_anexo_for.appendChild(tag_linhas)
tag_anexo_for.appendChild(tag_dt_entrega)
tag_anexo_for.appendChild(tag_total_fatura)
tag_anexo_for.appendChild(tag_total_base_incid)
tag_anexo_for.appendChild(tag_total_suportado)
tag_anexo_for.appendChild(tag_total_dedutivel)
# fill in the values
for line in info_linhas:
# create the line tag
tag_linha = doc.createElement('linha')
# set the line values
tag_linha.setAttribute('designacao',str(line['ln_anx_forn_designacao']))
tag_linha.setAttribute('nif',str(line['ln_anx_forn_nif']))
tag_linha.setAttribute('origem',str(line['ln_anx_forn_origem']))
tag_linha.setAttribute('tp_doc',str(line['ln_anx_forn_tipoDoc']))
tag_linha.setAttribute('num_doc',str(line['ln_anx_forn_num_doc']))
tag_linha.setAttribute('data',str(line['ln_anx_forn_data']))
tag_linha.setAttribute('vl_fatura',str(int(to_decimal(line['ln_anx_forn_vl_fatura']))))
tag_linha.setAttribute('vl_base_incid',str(int(line['ln_anx_forn_Incidencia'])))
tag_linha.setAttribute('tx_iva',str(line['ln_anx_forn_taxa_iva']))
tag_linha.setAttribute('iva_sup',str(int(line['ln_anx_forn_total_sup'])))
tag_linha.setAttribute('direito_ded',str(int(to_decimal(line['ln_anx_forn_direito_ded']))))
tag_linha.setAttribute('iva_ded',str(line['ln_anx_forn_total_ded']))
tag_linha.setAttribute('tipologia',str(line['ln_anx_forn_tipologia']))
tag_linha.setAttribute('linha_dest_mod',str(line['ln_anx_forn_linha_MOD106']))
# append the line to the lines container
tag_linhas.appendChild(tag_linha)
# append the totals
tag_dt_entrega.appendChild(doc.createTextNode(str(info_anexo['anx_forn_dt_entrega'])))
tag_total_fatura.appendChild(doc.createTextNode(str(info_anexo['anx_forn_total_fact'])))
tag_total_base_incid.appendChild(doc.createTextNode(str(info_anexo['anx_forn_total_bs_incid'])))
tag_total_suportado.appendChild(doc.createTextNode(str(info_anexo['anx_forn_total_sup'])))
tag_total_dedutivel.appendChild(doc.createTextNode(str(info_anexo['anx_forn_total_ded'])))
# serialize the XML
conteudoXmlCriado = doc.toprettyxml()
# set the encoding declaration
conteudoFinalXml=conteudoXmlCriado.replace('<?xml version="1.0" ?>','<?xml version="1.0" encoding="utf-8"?>')
return conteudoFinalXml
def guardar_anexo_forn(self, key, info_anexo, info_linhas):
# generate the XML
self.kargs= get_model_record(model=self,key=key)
conteudoFinalXml = self.gerar_XML_anexo_forn(info_anexo, info_linhas)
# persist the data
content ={
'user': self.kargs['user'],
'ano': str(info_anexo['anx_forn_ano']),
'mes': str(info_anexo['anx_forn_mes']),
'area_fiscal': str(info_anexo['anx_forn_area_fiscal']),
'nif_entidade': str(info_anexo['anx_forn_nif_entidade']),
'data_entrega': str(info_anexo['anx_forn_dt_entrega']),
'xml_modelo_106':str(info_anexo['anx_forn_modelo106']),
'nome':str(info_anexo['anx_forn_nome']),
'estado':'Gerado',
'total_factura':str(info_anexo['anx_forn_total_fact']),
'total_base_incidencia':str(info_anexo['anx_forn_total_bs_incid']),
'total_suportado':str(info_anexo['anx_forn_total_sup']),
'total_dedutivel':str(info_anexo['anx_forn_total_ded']),
'xml_gerado':str(conteudoFinalXml)
}
id_anxForn = XMLAnexoFornecedorM106(**content).put()
# persist the annex lines
for line in info_linhas:
content={
'user': self.kargs['user'],
'factura_fornecedor':str(line['ln_anx_forn_factura']),
'xml_anexo_fornecedor_m106':str(id_anxForn),
'designacao':str(line['ln_anx_forn_designacao']),
'nif_fornecedor':str(line['ln_anx_forn_nif']),
'origem':str(line['ln_anx_forn_origem']),
'tipologia':str(line['ln_anx_forn_tipologia']),
'tipo_doc':str(line['ln_anx_forn_tipoDoc']),
'numero_doc':str(line['ln_anx_forn_num_doc']),
'data':str(line['ln_anx_forn_data']),
'valor_factura':str(int(to_decimal(line['ln_anx_forn_vl_fatura']))),
'valor_base_incid':str(int(line['ln_anx_forn_Incidencia'])),
'taxa_iva':str(line['ln_anx_forn_taxa_iva']),
'iva_suportado':str(int(line['ln_anx_forn_total_sup'])),
'direito_ded':str(int(to_decimal(line['ln_anx_forn_direito_ded']))),
'iva_dedutivel':str(line['ln_anx_forn_total_ded']),
'linha_mod106':str(line['ln_anx_forn_linha_MOD106'])
}
XMLLinhaAnexoFornecedorM106(**content).put()
# roll the line amounts into the Modelo 106 fields
if str(line['ln_anx_forn_linha_MOD106'])=='11':
self.kargs['cp11']= to_decimal(to_decimal(self.kargs['cp11']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp12']= to_decimal(to_decimal(self.kargs['cp12']) + to_decimal(line['ln_anx_forn_total_ded']))
elif str(line['ln_anx_forn_linha_MOD106'])=='14':
self.kargs['cp14']= to_decimal(to_decimal(self.kargs['cp14']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp15']= to_decimal(to_decimal(self.kargs['cp15']) + to_decimal(line['ln_anx_forn_total_ded']))
elif str(line['ln_anx_forn_linha_MOD106'])=='17':
self.kargs['cp17']= to_decimal(to_decimal(self.kargs['cp17']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp18']= to_decimal(to_decimal(self.kargs['cp18']) + to_decimal(line['ln_anx_forn_total_ded']))
elif str(line['ln_anx_forn_linha_MOD106'])=='19':
self.kargs['cp19']= to_decimal(to_decimal(self.kargs['cp19']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp20']= to_decimal(to_decimal(self.kargs['cp20']) + to_decimal(line['ln_anx_forn_total_ded']))
elif str(line['ln_anx_forn_linha_MOD106'])=='21':
self.kargs['cp21']= to_decimal(to_decimal(self.kargs['cp21']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp22']= to_decimal(to_decimal(self.kargs['cp22']) + to_decimal(line['ln_anx_forn_total_ded']))
elif str(line['ln_anx_forn_linha_MOD106'])=='23':
self.kargs['cp23']= to_decimal(to_decimal(self.kargs['cp23']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp24']= to_decimal(to_decimal(self.kargs['cp24']) + to_decimal(line['ln_anx_forn_total_ded']))
elif str(line['ln_anx_forn_linha_MOD106']) =='25':
self.kargs['cp25']= to_decimal(to_decimal(self.kargs['cp25']) + to_decimal(line['ln_anx_forn_Incidencia']))
self.kargs['cp26']= to_decimal(to_decimal(self.kargs['cp26']) + to_decimal(line['ln_anx_forn_total_ded']))
# persist the updated model
self.put()
def get_alteracoes_forn(self, key):
"""
metodo que verifica a existencia de alteracoes nas facturas de fornecdor, e retorna um array de dados
informando a accao a tomar e os dados (informacao das linhas de anexo actuais e anteriores) a utilizar na acao.
"""
self.kargs = get_model_record(model=self, key=key)
# previously stored annex lines
anterior =[]
info_anexo = XMLAnexoFornecedorM106(where="ano='{ano}' AND mes='{mes}'".format(ano=self.kargs['ano'],mes =self.kargs['mes'])).get()
if len(info_anexo)!=0:
info_linhas = XMLLinhaAnexoFornecedorM106(where="xml_anexo_fornecedor_m106='{id}'".format(id=info_anexo[0]['id'])).get()
if len(info_linhas)!=0:
anterior.append({'info_linhas':info_linhas})
# lines computed from the current invoices
actual = self.get_info_anexo_forn(key)
# decide on the available information (current vs previous)
if (len(actual)==0) & (len(anterior)==0):
# no invoices on either side, hence no changes
return []
elif (len(actual)> 0) & (len(anterior)==0):
# every current invoice is new (regularization annex adds them all)
alteracoes = []
alteracoes.append({'accao':'Adicionar','info_linhas_act':actual[0]['info_linhas']})
return alteracoes
elif (len(actual)==0) & (len(anterior)>0):
# every previously declared invoice was removed, so mark them all for deletion
alteracoes = []
alteracoes.append({'accao':'Eliminar','info_linhas_ant':anterior[0]['info_linhas']})
return alteracoes
elif (len(actual)>0) & (len(anterior)>0):
# both sides have invoices: compare them one by one to find additions, deletions and corrections
act = actual[0]
ant = anterior[0]
linhasAlterar = []
linhasAdicionar=[]
linhasEliminar=[]
for line_act in act['info_linhas']:
existe = False
for line_ant in ant['info_linhas']:
# detect unchanged lines
if ((str(line_act['ln_anx_forn_factura']) == line_ant['factura_fornecedor'])
& (str(line_act['ln_anx_forn_taxa_iva']) == line_ant['taxa_iva'])
& (str(line_act['ln_anx_forn_direito_ded']) == line_ant['direito_ded'])
& (str(int(line_act['ln_anx_forn_total_sup'])) == line_ant['iva_suportado'])
& (str(int(line_act['ln_anx_forn_total_ded'])) == line_ant['iva_dedutivel'])
& (str(int(line_act['ln_anx_forn_vl_fatura'])) == line_ant['valor_factura'])
& (str(line_act['ln_anx_forn_num_doc']) == line_ant['numero_doc'])):
ant['info_linhas'].remove(line_ant)
existe = True
break
# detect modified lines that need correction
elif ((str(line_act['ln_anx_forn_factura']) == line_ant['factura_fornecedor'])
& (str(line_act['ln_anx_forn_num_doc']) == line_ant['numero_doc'])):
linhasAlterar.append({'info_linhas_act':line_act,'info_linhas_ant':line_ant})
# drop this line from ant['info_linhas']
ant['info_linhas'].remove(line_ant)
existe = True
break
# a current line absent from the previous annex is an addition
if not existe:
linhasAdicionar.append(line_act)
# whatever remains of the previous lines must be deleted
linhasEliminar = ant['info_linhas']
alteracoes = []
# any entries in linhasAlterar belong to the 'Corrigir' action
if len(linhasAlterar)>0:
alteracoes.append({'accao':'Corrigir', 'info_linhas':linhasAlterar})
# remaining entries in linhasAdicionar belong to the 'Adicionar' action
if len(linhasAdicionar)>0:
alteracoes.append({'accao':'Adicionar', 'info_linhas_act':linhasAdicionar})
# remaining entries in linhasEliminar belong to the 'Eliminar' action
if len(linhasEliminar)>0:
alteracoes.append({'accao':'Eliminar', 'info_linhas_ant':linhasEliminar})
return alteracoes
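# Sketch of the diff above (hypothetical invoices): an FT whose deductible
# VAT changed from 155 to 130 is paired under 'Corrigir'; a brand-new FT
# appears under 'Adicionar'; a previously declared FT that no longer
# exists appears under 'Eliminar'.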
def get_alteracoes_cli(self, key):
"""
metodo que verifica a existencia de alteracoes nas facturas de clientes, e retorna um array de dados
informando a accao a tomar e os dados (informacao das linhas de anexo actuais e anteriores) a utilizar na acao.
"""
self.kargs = get_model_record(model=self, key= key)
# previously stored annex lines
anterior =[]
info_anexo = XMLAnexoClienteM106(where="ano='{ano}' AND mes='{mes}'".format(ano=self.kargs['ano'],mes =self.kargs['mes'])).get()
if len(info_anexo)!=0:
info_linhas = XMLLinhaAnexoClienteM106(where="xml_anexo_cliente_m106='{id}'".format(id=info_anexo[0]['id'])).get()
if len(info_linhas)!=0:
anterior.append({'info_linhas':info_linhas})
# lines computed from the current invoices
actual = self.get_info_anexo_cli(key)
# decide on the available information (current vs previous)
if (len(actual)==0) & (len(anterior)==0):
# no invoices on either side, hence no changes
return []
elif (len(actual)> 0) & (len(anterior)==0):
# every current invoice is new (regularization annex adds them all)
alteracoes = []
alteracoes.append({'accao':'Adicionar','info_linhas_act':actual[0]['info_linhas']})
return alteracoes
elif (len(actual)==0) & (len(anterior)>0):
# every previously declared invoice was removed, so mark them all for deletion
alteracoes = []
alteracoes.append({'accao':'Eliminar','info_linhas_ant':anterior[0]['info_linhas']})
return alteracoes
elif (len(actual)>0) & (len(anterior)>0):
# both sides have invoices: compare them one by one to find additions, deletions and corrections
act = actual[0]
ant = anterior[0]
linhasAlterar = []
linhasAdicionar=[]
linhasEliminar=[]
for line_act in act['info_linhas']:
existe = False
for line_ant in ant['info_linhas']:
# detect unchanged lines
if ((str(line_act['ln_anx_cli_factura']) == str(line_ant['factura_cliente']))
& (str(line_act['ln_anx_cli_taxa_iva']) == str(line_ant['taxa_iva']))
& (str(int(line_act['ln_anx_cli_Incidencia'])) == str(line_ant['valor_base_incidencia']))
& (str(int(line_act['ln_anx_cli_total_Liq'])) == str(line_ant['iva_liquidado']))
& (str(int(line_act['ln_anx_cli_vl_fatura'])) == str(line_ant['valor_factura']))):
ant['info_linhas'].remove(line_ant)
existe = True
break
# detect modified lines that need correction
elif ((str(line_act['ln_anx_cli_factura']) == line_ant['factura_cliente'])
& (str(line_act['ln_anx_cli_num_doc']) == line_ant['numero_doc'])):
linhasAlterar.append({'info_linhas_act':line_act,'info_linhas_ant':line_ant})
# drop this line from ant['info_linhas']
ant['info_linhas'].remove(line_ant)
existe = True
break
# a current line absent from the previous annex is an addition
if not existe:
linhasAdicionar.append(line_act)
# whatever remains of the previous lines must be deleted
linhasEliminar = ant['info_linhas']
alteracoes = []
# any entries in linhasAlterar belong to the 'Corrigir' action
if len(linhasAlterar)>0:
alteracoes.append({'accao':'Corrigir', 'info_linhas':linhasAlterar})
# remaining entries in linhasAdicionar belong to the 'Adicionar' action
if len(linhasAdicionar)>0:
alteracoes.append({'accao':'Adicionar', 'info_linhas_act':linhasAdicionar})
# remaining entries in linhasEliminar belong to the 'Eliminar' action
if len(linhasEliminar)>0:
alteracoes.append({'accao':'Eliminar', 'info_linhas_ant':linhasEliminar})
return alteracoes
def gerar_xml_reg_forn(self, key):
"""
gera o xml de anexo regularizacao fornecedor com as
infomaçoes das linhas ja armazenadas na base dados
"""
self.kargs = get_model_record(model=self, key=key)
anexo = XMLAnexoRegFornecedorM106(where="xml_modelo_106='{modelo}'".format(modelo=key)).get()
if len(anexo)!=0:
info_anexo =anexo[0]
linhasAnexo = LinhaAnexoRegFornecedor(orderby="posicao",where="xml_anexo_reg_fornecedor_m106='{anexo_reg}'".format(anexo_reg=info_anexo['id'])).get()
if len(linhasAnexo)!=0:
# build the XML document
import xml.dom.minidom
doc = xml.dom.minidom.Document()
# create the elements
tag_anexo_for = doc.createElement('anexo_for_reg')
tag_header = doc.createElement('header')
tag_linhas = doc.createElement('linhas')
tag_linha = doc.createElement('linha')
tag_dt_entrega = doc.createElement('dt_entrega')
tag_total_fatura = doc.createElement('total_fatura')
tag_total_base_incid = doc.createElement('total_base_incid')
tag_total_suportado = doc.createElement('total_suportado')
tag_total_dedutivel = doc.createElement('total_dedutivel')
# set the header attributes
tag_header.setAttribute('ano', info_anexo['ano'])
tag_header.setAttribute('mes', info_anexo['mes'])
tag_header.setAttribute('cd_af', info_anexo['area_fiscal'])
tag_header.setAttribute('nif', info_anexo['nif_entidade'])
# build the element tree
doc.appendChild(tag_anexo_for)
tag_anexo_for.appendChild(tag_header)
tag_anexo_for.appendChild(tag_linhas)
tag_anexo_for.appendChild(tag_dt_entrega)
tag_anexo_for.appendChild(tag_total_fatura)
tag_anexo_for.appendChild(tag_total_base_incid)
tag_anexo_for.appendChild(tag_total_suportado)
tag_anexo_for.appendChild(tag_total_dedutivel)
# insert the stored annex lines; records come in pairs, so a "linha" tag
# is closed and appended after every two records
x=1
for linhA in linhasAnexo:
# note: only records whose accao is 'Adicionar' are serialized here
if linhA['accao'] == 'Adicionar':
# both 'regularizacao' and 'decl_anterior' records carry the same
# attribute set, so a single branch covers them
tag_reg_or_ant = doc.createElement(linhA['tipo'])
tag_reg_or_ant.setAttribute('num_doc', linhA['numero_doc'])
tag_reg_or_ant.setAttribute('origem', linhA['origem'])
tag_reg_or_ant.setAttribute('nif', linhA['nif_fornecedor'])
tag_reg_or_ant.setAttribute('iniciativa', linhA['iniciativa'])
tag_reg_or_ant.setAttribute('tp_doc', linhA['tipo_doc'])
tag_reg_or_ant.setAttribute('data', linhA['data'])
tag_reg_or_ant.setAttribute('vl_fatura', linhA['valor_factura'])
tag_reg_or_ant.setAttribute('vl_base_incid', linhA['valor_base_incid'])
tag_reg_or_ant.setAttribute('tx_iva', linhA['taxa_iva'])
tag_reg_or_ant.setAttribute('iva_sup', linhA['iva_suportado'])
tag_reg_or_ant.setAttribute('direito_ded', linhA['direito_ded'])
tag_reg_or_ant.setAttribute('iva_ded', linhA['iva_dedutivel'])
tag_reg_or_ant.setAttribute('tipologia', linhA['tipologia'])
tag_reg_or_ant.setAttribute('linha_dest_mod', linhA['linha_mod106'])
tag_reg_or_ant.setAttribute('periodo_ref', linhA['periodo_referencia'])
tag_linha.appendChild(tag_reg_or_ant)
if x==2:
x=0
tag_linhas.appendChild(tag_linha)
tag_linha = doc.createElement('linha')
x+=1
# set the totals
tag_dt_entrega.appendChild(doc.createTextNode(info_anexo['data_entrega']))
tag_total_fatura.appendChild(doc.createTextNode(info_anexo['total_factura']))
tag_total_base_incid.appendChild(doc.createTextNode(info_anexo['total_base_incidencia']))
tag_total_suportado.appendChild(doc.createTextNode(info_anexo['total_suportado']))
tag_total_dedutivel.appendChild(doc.createTextNode(info_anexo['total_dedutivel']))
# serialize the XML
conteudoXmlCriado= doc.toprettyxml()
# set the encoding declaration
conteudoFinalXml=conteudoXmlCriado.replace('<?xml version="1.0" ?>','<?xml version="1.0" encoding="utf-8"?>')
# store the XML
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'id':info_anexo['id'],
'xml_gerado':conteudoFinalXml
}
XMLAnexoRegFornecedorM106(**content).put()
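# Illustrative shape of the regularization annex built above (hypothetical
# values): each <linha> pairs the correcting record with the previously
# declared one:
#   <linha>
#     <regularizacao num_doc="12" iniciativa="CT" linha_dest_mod="29" .../>
#     <decl_anterior num_doc="12" linha_dest_mod="30" .../>
#   </linha>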
def gerar_xml_reg_cli(self, key):
"""
gera o xml de anexo regularizacao cliente com as
infomaçoes do anexo ja armazenadas na base dados
"""
self.kargs = get_model_record(model=self, key=key)
anexo = XMLAnexoRegClienteM106(where="xml_modelo_106='{modelo}'".format(modelo=key)).get()
if len(anexo)!=0:
info_anexo =anexo[0]
linhasAnexo = XMLLinhaAnexoRegClienteM106(orderby="posicao",where="xml_anexo_reg_cliente_m106='{anexo_reg}'".format(anexo_reg=info_anexo['id'])).get()
if len(linhasAnexo)!=0:
# build the XML document
import xml.dom.minidom
doc = xml.dom.minidom.Document()
# create the elements
tag_anexo_cli = doc.createElement('anexo_for_cli')
tag_header = doc.createElement('header')
tag_linhas = doc.createElement('linhas')
tag_linha = doc.createElement('linha')
tag_dt_entrega = doc.createElement('dt_entrega')
tag_total_fatura = doc.createElement('total_fatura')
tag_total_base_incid = doc.createElement('total_base_incid')
tag_total_liquidado = doc.createElement('total_liquidado')
# set the header attributes
tag_header.setAttribute('ano', info_anexo['ano'])
tag_header.setAttribute('mes', info_anexo['mes'])
tag_header.setAttribute('cd_af', info_anexo['area_fiscal'])
tag_header.setAttribute('nif', info_anexo['nif_entidade'])
# build the element tree
doc.appendChild(tag_anexo_cli)
tag_anexo_cli.appendChild(tag_header)
tag_anexo_cli.appendChild(tag_linhas)
tag_anexo_cli.appendChild(tag_dt_entrega)
tag_anexo_cli.appendChild(tag_total_fatura)
tag_anexo_cli.appendChild(tag_total_base_incid)
tag_anexo_cli.appendChild(tag_total_liquidado)
# insert the stored annex lines; records come in pairs, so a "linha" tag
# is closed and appended after every two records
x=1
for linhA in linhasAnexo:
# note: only records whose accao is 'Adicionar' are serialized here
if linhA['accao'] == 'Adicionar':
# both 'regularizacao' and 'decl_anterior' records carry the same
# attribute set, so a single branch covers them
tag_reg_or_ant = doc.createElement(linhA['tipo'])
tag_reg_or_ant.setAttribute('num_doc', linhA['numero_doc'])
tag_reg_or_ant.setAttribute('origem', linhA['origem'])
tag_reg_or_ant.setAttribute('nif', linhA['nif_cliente'])
tag_reg_or_ant.setAttribute('iniciativa', linhA['iniciativa'])
tag_reg_or_ant.setAttribute('tp_doc', linhA['tipo_doc'])
tag_reg_or_ant.setAttribute('data', linhA['data'])
tag_reg_or_ant.setAttribute('vl_fatura', linhA['valor_factura'])
tag_reg_or_ant.setAttribute('vl_base_incid', linhA['valor_base_incid'])
tag_reg_or_ant.setAttribute('tx_iva', linhA['taxa_iva'])
tag_reg_or_ant.setAttribute('iva_liq', linhA['iva_liquidado'])
tag_reg_or_ant.setAttribute('linha_dest_mod', linhA['linha_mod106'])
tag_reg_or_ant.setAttribute('periodo_ref', linhA['periodo_referencia'])
tag_linha.appendChild(tag_reg_or_ant)
if x==2:
x=0
tag_linhas.appendChild(tag_linha)
tag_linha = doc.createElement('linha')
x+=1
# set the totals
tag_dt_entrega.appendChild(doc.createTextNode(info_anexo['data_entrega']))
tag_total_fatura.appendChild(doc.createTextNode(info_anexo['total_factura']))
tag_total_base_incid.appendChild(doc.createTextNode(info_anexo['total_base_incidencia']))
tag_total_liquidado.appendChild(doc.createTextNode(info_anexo['total_liquidado']))
# serialize the XML
conteudoXmlCriado= doc.toprettyxml()
# set the encoding declaration
conteudoFinalXml=conteudoXmlCriado.replace('<?xml version="1.0" ?>','<?xml version="1.0" encoding="utf-8"?>')
# store the XML
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'id':info_anexo['id'],
'xml_gerado':conteudoFinalXml
}
XMLAnexoRegClienteM106(**content).put()
def guardar_regularizacao_forn(self, key):
"""
guarda as informaçoes de anexo de regularizacao de fornecedor na base dados
"""
# detect the changes
alteracoes = self.get_alteracoes_forn(key=key)
self.kargs = get_model_record(model=self,key=key)
if len(alteracoes)>0:
# store the regularization-annex header
tot_factura=0
tot_incidencia=0
tot_suportado=0
tot_dedutivel=0
content ={
'user': '{user}'.format(user=bottle.request.session['user']),
'nome': 'anexo_reg_for_{ano}-{mes}'.format(ano=self.kargs['ano'], mes=self.kargs['mes']),
'xml_modelo_106':str(key),
'nif_entidade':str(self.kargs['nif']),
'ano':str(self.kargs['ano']),
'mes':str(self.kargs['mes']),
'area_fiscal':str(self.kargs['area_fiscal']),
'data_entrega':str(self.kargs['data_apresentacao']),
'total_factura':str(0),
'total_base_incidencia':str(0),
'total_suportado':str(0),
'total_dedutivel':str(0),
'estado':'Gerado'
}
id_reg_forn = XMLAnexoRegFornecedorM106(**content).put()
# store the lines; 'pos' keeps each regularized line and its regularizing
# counterpart paired under the same position
pos=1
for line in alteracoes:
if line['accao'] =='Adicionar':
# per the technical specification, for the 'Adicionar' action only the
# 'regularizacao' record is needed; the 'decl_anterior' record stays empty
info_linhas = line['info_linhas_act']
# store the annex lines
for linha in info_linhas:
tot_factura+=int(to_decimal(linha['ln_anx_forn_vl_fatura']))
tot_incidencia+=int(linha['ln_anx_forn_Incidencia'])
tot_suportado+=int(linha['ln_anx_forn_total_sup'])
tot_dedutivel+=int(linha['ln_anx_forn_total_ded'])
# 'regularizacao' record
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_fornecedor_m106':str(id_reg_forn),
'tipo':'regularizacao',
'accao':str(line['accao']),
'factura':str(linha['ln_anx_forn_factura']),
'origem':str(linha['ln_anx_forn_origem']),
'nif_fornecedor':str(linha['ln_anx_forn_nif']),
'tipo_doc':str(linha['ln_anx_forn_tipoDoc']),
'numero_doc':str(linha['ln_anx_forn_num_doc']),
'data':str(linha['ln_anx_forn_data']),
'valor_factura':str(linha['ln_anx_forn_vl_fatura']),
'valor_base_incid':str(linha['ln_anx_forn_Incidencia']),
'taxa_iva':str(linha['ln_anx_forn_taxa_iva']),
'direito_ded':str(linha['ln_anx_forn_direito_ded']),
'iva_suportado':str(linha['ln_anx_forn_total_sup']),
'iva_dedutivel':str(linha['ln_anx_forn_total_ded']),
'tipologia':str(linha['ln_anx_forn_tipologia']),
'linha_mod106':str(29),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
LinhaAnexoRegFornecedor(**content).put()
# add the amount to the Modelo 106 field
self.kargs['cp29']= to_decimal(to_decimal(self.kargs['cp29']) + to_decimal(linha['ln_anx_forn_total_ded']))
# 'decl_anterior' record (left empty for additions)
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_fornecedor_m106':str(id_reg_forn),
'tipo':'decl_anterior',
'accao':str(line['accao']),
'factura':'',
'origem':'',
'nif_fornecedor':'',
'tipo_doc':'',
'numero_doc':'',
'data':'',
'valor_factura':'',
'valor_base_incid':'',
'taxa_iva':'',
'direito_ded':'',
'iva_suportado':'',
'iva_dedutivel':'',
'tipologia':'',
'linha_mod106':'',
'periodo_referencia':'',
'iniciativa':'',
'posicao':str(pos)
}
LinhaAnexoRegFornecedor(**content).put()
pos+=1
elif line['accao']=='Eliminar':
# per the technical specification, for the 'Eliminar' action the
# 'regularizacao' record carries zeroed totals, while the 'decl_anterior'
# record repeats exactly what was sent before
info_linhas = line['info_linhas_ant']
# store the annex lines
for linha in info_linhas:
tot_factura+=int(linha['valor_factura'])
tot_incidencia+=int(linha['valor_base_incid'])
tot_suportado+=int(linha['iva_suportado'])
tot_dedutivel+=int(linha['iva_dedutivel'])
# 'regularizacao' record (totals zeroed)
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_fornecedor_m106':str(id_reg_forn),
'tipo':'regularizacao',
'accao':str(line['accao']),
'factura':str(linha['factura_fornecedor']),
'origem':str(linha['origem']),
'nif_fornecedor':str(linha['nif_fornecedor']),
'tipo_doc':str(linha['tipo_doc']),
'numero_doc':str(linha['numero_doc']),
'data':str(linha['data']),
'valor_factura':str(0),
'valor_base_incid':str(0),
'taxa_iva':str(linha['taxa_iva']),
'direito_ded':str(linha['direito_ded']),
'iva_suportado':str(0),
'iva_dedutivel':str(0),
'tipologia':str(linha['tipologia']),
'linha_mod106':str(linha['linha_mod106']),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
LinhaAnexoRegFornecedor(**content).put()
# 'decl_anterior' record (as previously declared)
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_fornecedor_m106':str(id_reg_forn),
'tipo':'decl_anterior',
'accao':str(line['accao']),
'factura':str(linha['factura_fornecedor']),
'origem':str(linha['origem']),
'nif_fornecedor':str(linha['nif_fornecedor']),
'tipo_doc':str(linha['tipo_doc']),
'numero_doc':str(linha['numero_doc']),
'data':str(linha['data']),
'valor_factura':str(linha['valor_factura']),
'valor_base_incid':str(linha['valor_base_incid']),
'taxa_iva':str(linha['taxa_iva']),
'direito_ded':str(linha['direito_ded']),
'iva_suportado':str(linha['iva_suportado']),
'iva_dedutivel':str(linha['iva_dedutivel']),
'tipologia':str(linha['tipologia']),
'linha_mod106':str(30),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
LinhaAnexoRegFornecedor(**content).put()
                                                # add the information to modelo106
self.kargs['cp30']= to_decimal(to_decimal(self.kargs['cp30']) + to_decimal(linha['iva_dedutivel']))
pos+=1
elif line['accao']=='Corrigir':
                                        # saving the annex lines
for linha in line['info_linhas']:
linha_act = linha['info_linhas_act']
linha_ant = linha['info_linhas_ant']
                                                # add the totals of the current line
tot_factura+=int(linha_act['ln_anx_forn_vl_fatura'])
tot_incidencia+=int(linha_act['ln_anx_forn_Incidencia'])
tot_suportado+=int(linha_act['ln_anx_forn_total_sup'])
tot_dedutivel+=int(linha_act['ln_anx_forn_total_ded'])
                                                # add the totals of the previous line
tot_factura+=int(linha_ant['valor_factura'])
tot_incidencia+=int(linha_ant['valor_base_incid'])
tot_suportado+=int(linha_ant['iva_suportado'])
tot_dedutivel+=int(linha_ant['iva_dedutivel'])
num_linhaM106 = self.get_linhaM106_correcao_forn(linha_act, linha_ant)
#linha "regularizacao"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_fornecedor_m106':str(id_reg_forn),
'tipo':'regularizacao',
'accao':str(line['accao']),
'factura':str(linha_act['ln_anx_forn_factura']),
'origem':str(linha_act['ln_anx_forn_origem']),
'nif_fornecedor':str(linha_act['ln_anx_forn_nif']),
'tipo_doc':str(linha_act['ln_anx_forn_tipoDoc']),
'numero_doc':str(linha_act['ln_anx_forn_num_doc']),
'data':str(linha_act['ln_anx_forn_data']),
'valor_factura':str(linha_act['ln_anx_forn_vl_fatura']),
'valor_base_incid':str(linha_act['ln_anx_forn_Incidencia']),
'taxa_iva':str(linha_act['ln_anx_forn_taxa_iva']),
'direito_ded':str(linha_act['ln_anx_forn_direito_ded']),
'iva_suportado':str(linha_act['ln_anx_forn_total_sup']),
'iva_dedutivel':str(linha_act['ln_anx_forn_total_ded']),
'tipologia':str(linha_act['ln_anx_forn_tipologia']),
                                                        # may be either 29 or 30, depending on the deductible total
'linha_mod106':str(num_linhaM106),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos),
}
LinhaAnexoRegFornecedor(**content).put()
                                                # add the information to modelo106
campo = "cp{num}".format(num=str(num_linhaM106))
self.kargs[campo] = int(to_decimal(self.kargs[campo]) + to_decimal(linha_act['ln_anx_forn_total_ded']))
#linha "decl_anterior"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_fornecedor_m106':str(id_reg_forn),
'tipo':'decl_anterior',
'accao':str(line['accao']),
'factura':str(linha_ant['factura_fornecedor']),
'origem':str(linha_ant['origem']),
'nif_fornecedor':str(linha_ant['nif_fornecedor']),
'tipo_doc':str(linha_ant['tipo_doc']),
'numero_doc':str(linha_ant['numero_doc']),
'data':str(linha_ant['data']),
'valor_factura':str(linha_ant['valor_factura']),
'valor_base_incid':str(linha_ant['valor_base_incid']),
'taxa_iva':str(linha_ant['taxa_iva']),
'direito_ded':str(linha_ant['direito_ded']),
'iva_suportado':str(linha_ant['iva_suportado']),
'iva_dedutivel':str(linha_ant['iva_dedutivel']),
'tipologia':str(linha_ant['tipologia']),
'linha_mod106':str(linha_ant['linha_mod106']),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
LinhaAnexoRegFornecedor(**content).put()
                                                # save the modelo106 changes
campo = "cp{num}".format(num=str(linha_ant['linha_mod106']))
self.kargs[campo] = int(to_decimal(self.kargs[campo]) + to_decimal(linha_ant['iva_dedutivel']))
pos+=1
                        # after inserting the lines, the annex totals must be updated
content ={
'user': '{user}'.format(user=bottle.request.session['user']),
'id':str(id_reg_forn),
'total_factura':str(tot_factura),
'total_base_incidencia':str(tot_incidencia),
'total_suportado':str(tot_suportado),
'total_dedutivel':str(tot_dedutivel),
}
XMLAnexoRegFornecedorM106(**content).put()
self.put()
def get_linhaM106_correcao_forn(self, linha_actual, linha_anterior):
"""
                Determines which modelo 106 line a supplier-regularization-annex correction belongs to.
"""
if int(linha_actual['ln_anx_forn_total_ded']) > int(linha_anterior['iva_dedutivel']):
return str(29)
else:
return str(30)
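
        # Illustrative sketch (hypothetical values): a correction whose new
        # deductible total exceeds the previously declared one lands on line 29,
        # otherwise on line 30:
        #   self.get_linhaM106_correcao_forn(
        #       {'ln_anx_forn_total_ded': '120'}, {'iva_dedutivel': '100'})  # -> '29'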
def guardar_regularizacao_cli(self, key):
"""
                Saves all client-regularization-annex information to the database.
"""
                # get the changes
alteracoes = self.get_alteracoes_cli(key=key)
self.kargs = get_model_record(model=self,key=key)
if len(alteracoes)>0:
                        # saving the regularization annex
tot_factura=0
tot_incidencia=0
tot_liquidado=0
content ={
'user': '{user}'.format(user=bottle.request.session['user']),
'nome': 'anexo_reg_cli_{ano}-{mes}'.format(ano=self.kargs['ano'], mes=self.kargs['mes']),
'xml_modelo_106':str(key),
'nif_entidade':str(self.kargs['nif']),
'ano':str(self.kargs['ano']),
'mes':str(self.kargs['mes']),
'area_fiscal':str(self.kargs['area_fiscal']),
'data_entrega':str(self.kargs['data_apresentacao']),
'total_factura':str(0),
'total_base_incidencia':str(0),
'total_liquidado':str(0),
'estado':'Gerado'
}
id_reg_cli = XMLAnexoRegClienteM106(**content).put()
                        # saving the lines
                        # the "pos" variable serves to guide the placement of the regularized line and the regularizing line
pos=1
for line in alteracoes:
if line['accao'] =='Adicionar':
                                        # according to the technical specifications, for the add action only the "regularizacao" line is required,
                                        # while the "decl_anterior" line is ignored, i.e. left empty
info_linhas = line['info_linhas_act']
                                        # saving the annex lines
for linha in info_linhas:
tot_factura+=int(to_decimal(linha['ln_anx_cli_vl_fatura']))
tot_incidencia+=int(linha['ln_anx_cli_Incidencia'])
tot_liquidado+=int(linha['ln_anx_cli_total_Liq'])
#linha "regularizacao"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_cliente_m106':str(id_reg_cli),
'tipo':'regularizacao',
'accao':str(line['accao']),
'factura':str(linha['ln_anx_cli_factura']),
'origem':str(linha['ln_anx_cli_origem']),
'nif_cliente':str(linha['ln_anx_cli_nif']),
'tipo_doc':str(linha['ln_anx_cli_tipoDoc']),
'numero_doc':str(linha['ln_anx_cli_num_doc']),
'data':str(linha['ln_anx_cli_data']),
'valor_factura':str(int(to_decimal(linha['ln_anx_cli_vl_fatura']))),
'valor_base_incid':str(linha['ln_anx_cli_Incidencia']),
'taxa_iva':str(linha['ln_anx_cli_taxa_iva']),
'iva_liquidado':str(linha['ln_anx_cli_total_Liq']),
                                                        # line 30 because the VAT is in favor of the state
'linha_mod106':'30',
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
XMLLinhaAnexoRegClienteM106(**content).put()
                                                # add the information to modelo106
self.kargs['cp30']= to_decimal(to_decimal(self.kargs['cp30']) + to_decimal(linha['ln_anx_cli_total_Liq']))
#linha "decl_anterior"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_cliente_m106':str(id_reg_cli),
'tipo':'decl_anterior',
'accao':str(line['accao']),
'factura':'',
'origem':'',
'nif_cliente':'',
'tipo_doc':'',
'numero_doc':'',
'data':'',
'valor_factura':'',
'valor_base_incid':'',
'taxa_iva':'',
'iva_liquidado':'',
'linha_mod106':'',
'periodo_referencia':'',
'iniciativa':'CT',
'posicao':str(pos)
}
XMLLinhaAnexoRegClienteM106(**content).put()
pos+=1
elif line['accao']=='Eliminar':
                                        # according to the technical specifications, for the delete ("Eliminar") action the "regularizacao" line is filled with the totals set to zero,
                                        # while the "decl_anterior" line is filled exactly as it was previously submitted
info_linhas = line['info_linhas_ant']
                                        # saving the annex lines
for linha in info_linhas:
tot_factura+=int(linha['valor_factura'])
tot_incidencia+=int(linha['valor_base_incidencia'])
tot_liquidado+=int(linha['iva_liquidado'])
#linha "regularizacao"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_cliente_m106':str(id_reg_cli),
'tipo':'regularizacao',
'accao':str(line['accao']),
'factura':str(linha['factura_cliente']),
'origem':str(linha['origem']),
'nif_cliente':str(linha['nif_cliente']),
'tipo_doc':str(linha['tipo_doc']),
'numero_doc':str(linha['numero_doc']),
'data':str(linha['data']),
'valor_factura':str(0),
'valor_base_incid':str(0),
'taxa_iva':str(0),
'iva_liquidado':str(0),
'linha_mod106':str(linha['linha_mod106']),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
XMLLinhaAnexoRegClienteM106(**content).put()
#linha "decl_anterior"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_cliente_m106':str(id_reg_cli),
'tipo':'decl_anterior',
'accao':str(line['accao']),
'factura':str(linha['factura_cliente']),
'origem':str(linha['origem']),
'nif_cliente':str(linha['nif_cliente']),
'tipo_doc':str(linha['tipo_doc']),
'numero_doc':str(linha['numero_doc']),
'data':str(linha['data']),
'valor_factura':str(linha['valor_factura']),
'valor_base_incid':str(linha['valor_base_incidencia']),
'taxa_iva':str(linha['taxa_iva']),
'iva_liquidado':str(linha['iva_liquidado']),
'linha_mod106':str(linha['linha_mod106']),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT',
'posicao':str(pos)
}
XMLLinhaAnexoRegClienteM106(**content).put()
                                                # add the information to modelo106
self.kargs['cp29']= to_decimal(to_decimal(self.kargs['cp29']) + to_decimal(linha['iva_liquidado']))
pos+=1
elif line['accao']=='Corrigir':
                                        # saving the annex lines
for linha in line['info_linhas']:
linha_act = linha['info_linhas_act']
linha_ant = linha['info_linhas_ant']
                                                # add the totals of the current line
tot_factura+=int(to_decimal(linha_act['ln_anx_cli_vl_fatura']))
tot_incidencia+=int(linha_act['ln_anx_cli_Incidencia'])
tot_liquidado+=int(linha_act['ln_anx_cli_total_Liq'])
                                                # add the totals of the previous line
tot_factura+=int(linha_ant['valor_factura'])
tot_incidencia+=int(linha_ant['valor_base_incidencia'])
tot_liquidado+=int(linha_ant['iva_liquidado'])
                                                # determine which modelo 106 line this change falls under
num_linhaM106 = self.get_linhaM106_correcao_cli(linha_act=linha_act, linha_ant=linha_ant)
#linha "regularizacao"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'tipo':'regularizacao',
'accao':str(line['accao']),
'posicao':str(pos),
'xml_anexo_reg_cliente_m106':str(id_reg_cli),
'factura':str(linha_act['ln_anx_cli_factura']),
'origem':str(linha_act['ln_anx_cli_origem']),
'nif_cliente':str(linha_act['ln_anx_cli_nif']),
'tipo_doc':str(linha_act['ln_anx_cli_tipoDoc']),
'numero_doc':str(linha_act['ln_anx_cli_num_doc']),
'data':str(linha_act['ln_anx_cli_data']),
'valor_factura':str(to_decimal(linha_act['ln_anx_cli_vl_fatura'])),
'valor_base_incid':str(linha_act['ln_anx_cli_Incidencia']),
'taxa_iva':str(linha_act['ln_anx_cli_taxa_iva']),
'iva_liquidado':str(linha_act['ln_anx_cli_total_Liq']),
'linha_mod106':str(num_linhaM106),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':'CT'
}
XMLLinhaAnexoRegClienteM106(**content).put()
                                                # add the information to modelo106
campo = "cp{num}".format(num=str(num_linhaM106))
self.kargs[campo] = int(to_decimal(self.kargs[campo]) + to_decimal(linha_act['ln_anx_cli_total_Liq']))
#linha "decl_anterior"
content={
'user': '{user}'.format(user=bottle.request.session['user']),
'xml_anexo_reg_cliente_m106':str(id_reg_cli),
'tipo':'decl_anterior',
'accao':str(line['accao']),
'factura':str(linha_ant['factura_cliente']),
'origem':str(linha_ant['origem']),
'nif_cliente':str(linha_ant['nif_cliente']),
'tipo_doc':str(linha_ant['tipo_doc']),
'numero_doc':str(linha_ant['numero_doc']),
'data':str(linha_ant['data']),
'valor_factura':str(linha_ant['valor_factura']),
'valor_base_incid':str(linha_ant['valor_base_incidencia']),
'taxa_iva':str(linha_ant['taxa_iva']),
'iva_liquidado':str(linha_ant['iva_liquidado']),
'linha_mod106':str(linha_ant['linha_mod106']),
'periodo_referencia':'{ano}-{mes}'.format(ano=str(self.kargs['ano']),mes=str(self.kargs['mes'])),
'iniciativa':str('CT'),
'posicao':str(pos),
}
XMLLinhaAnexoRegClienteM106(**content).put()
                                                # add the information to modelo106
campo = "cp{num}".format(num=str(linha_ant['linha_mod106']))
self.kargs[campo] = int(to_decimal(self.kargs[campo]) + to_decimal(linha_ant['iva_liquidado']))
pos+=1
                        # after inserting the lines, the annex totals must be updated
content ={
'user': '{user}'.format(user=bottle.request.session['user']),
'id':str(id_reg_cli),
'total_factura':str(tot_factura),
'total_base_incidencia':str(tot_incidencia),
'total_liquidado':str(tot_liquidado),
}
XMLAnexoRegClienteM106(**content).put()
                        # save the modelo 106 changes
self.put()
def get_linhaM106_correcao_cli(self, linha_act, linha_ant):
"""
                Determines which modelo 106 line a client-regularization-annex correction belongs to.
"""
if int(linha_act['ln_anx_cli_total_Liq']) > int(linha_ant['iva_liquidado']):
return str(30)
else:
return str(29) | 1.796875 | 2 |
export_resized_android_app_icons.py | Tubbebubbe/gimp-plugins | 4 | 12770710 | <filename>export_resized_android_app_icons.py
#!/usr/bin/env python
"""
export_resized_android_app_icons
Gimp plugin to export app icons for an Android app
Author:
-------
<NAME>, Techne Development AB <<EMAIL>>
Installation:
-------------
(Mac OS X)
Run make:
> make install
or copy this file to ~/Library/Application Support/GIMP/x.x/plug-ins and
make it executable (chmod 755)
Usage:
------
1. Create your image at a resolution of 1024 x 1024 @ 144 dpi
2. Run the plug-in (from the File menu) and select the output
directory.
License:
--------
Released under the MIT License
Copyright (c) 2013-2017 Techne Development AB
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from gimpfu import *
import os
def gprint(text):
pdb.gimp_message(text)
return
def resize_and_save_image(timg, tdrawable, size, dpi, dir, filename):
img = timg.duplicate()
fullpath = os.path.join(dir, filename)
pdb.gimp_image_merge_visible_layers(img, CLIP_TO_IMAGE)
pdb.gimp_image_scale(img, size, size)
pdb.gimp_image_set_resolution(img, dpi, dpi)
pdb.file_png_save(img, img.layers[0], fullpath, filename, 0, 9, 1, 1, 1, 1, 1)
def plugin_main(img, drawable, dir):
resize_and_save_image(img, drawable, 192, 72, dir, "ic_launcher-192x192.png")
resize_and_save_image(img, drawable, 144, 72, dir, "ic_launcher-144x144.png")
resize_and_save_image(img, drawable, 96, 72, dir, "ic_launcher-96x96.png")
resize_and_save_image(img, drawable, 72, 72, dir, "ic_launcher-72x72.png")
resize_and_save_image(img, drawable, 48, 72, dir, "ic_launcher-48x48.png")
gprint("Images exported to:\n %s" % (dir))
register(
"export_resized_android_app_icons",
"Exports app icons for Android apps",
"Exports app icons for Android apps",
"Techne Development AB",
"Copyright (c) 2013-2016 Techne Development AB. Released under MIT License.",
"2013-2016",
"<Image>/File/Export Android app icons...",
"RGB*, GRAY*",
[
(PF_DIRNAME, "dir", "Output directory", os.path.expanduser("~")),
],
[],
plugin_main)
main()
| 2.25 | 2 |
tests/results/040_medium_complex_multiple_hook_test2.py | CowboyTim/python-storable | 8 | 12770711 | <reponame>CowboyTim/python-storable
result = [
{0: 0, 1: 'var 1'},
{0: 0, 1: 'var 2'}
]
| 1.632813 | 2 |
EspFitting/ProbePlacers/Water.py | jacob-litman/fittingScripts | 0 | 12770712 | import sys
import numpy as np
import math
from JMLUtils import dist2, eprint
from StructureXYZ import StructXYZ
from typing import Sequence
TRIANGLE_TOL = 1E-4
Y_DENOM = 1.0 / math.sqrt(3)
def water(infile: str = 'QM_REF.xyz', delta=4.0):
delta = float(delta)
xyzfi = StructXYZ(infile)
assert len(xyzfi.probe_indices) == 0
assert xyzfi.n_atoms == 3
assert xyzfi.atom_names[0].startswith("O")
place_triangle(xyzfi, delta)
def place_triangle(xyzfi: StructXYZ, delta: float = 4.0, outname: str = "WATER_PROBE.xyz", center: int = 0,
flank1: int = 1, flank2: int = 2):
if center >= xyzfi.n_atoms or center < 0:
raise ValueError(f"Central atom index {center} out-of-bounds 0-{xyzfi.n_atoms}")
if flank1 >= xyzfi.n_atoms or flank1 < 0:
raise ValueError(f"Flank1 atom index {flank1} out-of-bounds 0-{xyzfi.n_atoms}")
if flank2 >= xyzfi.n_atoms or flank2 < 0:
raise ValueError(f"Flank2 atom index {flank2} out-of-bounds 0-{xyzfi.n_atoms}")
if center == flank1 or center == flank2 or flank1 == flank2:
raise ValueError(f"All three atoms must have distinct indices: received {center},{flank1},{flank2}")
triangle_center = xyzfi.coords[center] + xyzfi.coords[flank1] + xyzfi.coords[flank2]
triangle_center *= 0.5
place_vec = triangle_center - xyzfi.coords[center]
mag_pv = math.sqrt(np.dot(place_vec, place_vec))
bisector_vector = 0.5 * (xyzfi.coords[flank2] - xyzfi.coords[flank1])
# Only used as square, so don't bother w/ square root
half_bisector = np.dot(bisector_vector, bisector_vector)
from_bisector = math.sqrt((delta * delta) - half_bisector)
out_xyz = (place_vec * (from_bisector / mag_pv)) + triangle_center
eprint(f"Placing probe at {out_xyz}")
xyzfi.append_atom(xyzfi.get_default_probetype()[0], out_xyz)
xyzfi.write_out(outname)
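
# Hypothetical command-line entry point (not part of the original module); a
# minimal sketch assuming the input file holds a three-atom water geometry
# with the oxygen listed first:
if __name__ == '__main__':
    water(*sys.argv[1:])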
| 2.734375 | 3 |
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/imports/test_import_absolute_error_circular/main2.py | Vladpetr/NewsPortal | 0 | 12770713 | <filename>.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/imports/test_import_absolute_error_circular/main2.py
import main
x = 1040
| 1.328125 | 1 |
muz/main.py | Akaricchi/muz | 9 | 12770714 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os, sys, logging, argparse
from itertools import ifilter as filter
import muz
import muz.frontend
import muz.vfs as vfs
import muz.beatmap as beatmap
import muz.game as game
import muz.util
from muz import _config as config
NAME = u"μz"
VERSION = "0.01-prepreprealpha"
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
userdir = os.path.abspath(os.path.join(os.path.expanduser("~"), ".muz"))
globalArgs = None
frontend = None
log = logging.getLogger(__name__)
def initUserDir():
if not os.path.exists(userdir):
os.makedirs(userdir)
def initvfs():
vfs.root.clear()
if globalArgs.no_vfs:
return
vfs.applySettings()
def initroot(root=vfs.root):
root.loadDataDirs(basedir, userdir, *globalArgs.extradirs)
for pack in globalArgs.extrapacks:
root.loadPack(pack)
return root
vfs.root = vfs.LazyNode(initroot)
def initArgParser(desc=None, prog=None):
if desc is None:
desc = "%s: a mania-style rhythm game" % NAME
if prog is None:
if os.path.split(sys.argv[0])[-1] == "__main__.py":
prog = "muz"
return argparse.ArgumentParser(description=desc, prog=prog, add_help=False, conflict_handler='resolve')
def handleGeneralArgs(parser, argv, namespace):
global globalArgs, userdir, basedir
g = parser.add_argument_group(title="general options")
g.add_argument('--basedir', action='store', default=basedir,
help="set the location of base game assets (default: %(default)s)")
g.add_argument('--userdir', action="store", default=userdir,
help="set the location of user-supplied game data (e.g. beatmaps) (default: %(default)s)")
g.add_argument('--no-vfs', action='store_true', default=False,
help="do not initialize the virtual filesystem")
g.add_argument('-d', '--dir', metavar='DIR', dest='extradirs', action='append', default=[],
help="add a directory to search for game data in (including beatmaps), can be specified multiple times")
g.add_argument('-p', '--pack', metavar='PACK', dest='extrapacks', action='append', default=[],
help="add a pack to search for game data in (including beatmaps), can be specified multiple times")
g.add_argument('-c', '--config', action="store", type=argparse.FileType('r'), default=None,
help="load an alternative configuration file (default: $userdir/config.json)")
g.add_argument('-l', '--list-beatmaps', dest="listbeatmaps", action="count", default=False,
help="list all beatmaps found in the virtual filesystem, specify twice to also list their 'nicer' names parsed from metadata (slow)")
g.add_argument('-L', '--list-vfs', dest="listvfspath", metavar="PATH", action="store", nargs='?', const='', default=None,
help="list the contents of a path in the virtual filesystem and exit")
g.add_argument('--log-level', dest="loglevel", metavar="LEVEL", choices=["critical", "error", "warning", "info", "debug"], default=None,
help="set the output verbosity level, overrides the config setting (default: warning)")
g.add_argument('--frontend', choices=tuple(muz.frontend.iter()), default="pygame",
help="set the subsystem used to render and display the game, handle input, play audio, etc. (default: %(default)s)")
g.add_argument('-v', '--version', action="version", version="%s %s" % (NAME, VERSION),
help="print the game version and exit")
g.add_argument('-h', '--help', action='store_true', #action="help",
help="print this rather unhelpful (I'm sorry) help message and exit")
n, a = parser.parse_known_args(argv, namespace=namespace)
globalArgs = n
basedir = os.path.abspath(n.basedir)
userdir = os.path.abspath(n.userdir)
if globalArgs.loglevel is not None:
muz.log.setLevel(muz.util.logLevelByName(globalArgs.loglevel))
if n.listvfspath is not None:
init()
l = vfs.locate(n.listvfspath)
for key in sorted(l.keys()):
print("%s%s" % (key, vfs.VPATH_SEP if l[key].isDir else ""))
exit(0)
if n.listbeatmaps:
init()
def getname(s):
if n.listbeatmaps < 2:
return s
try:
b = muz.beatmap.load(s, bare=True)
except Exception as e:
log.exception("failed to load beatmap %s: %s", s, e)
return s
else:
return "%s: %s" %(s, b.name)
for s in sorted(filter(None, (muz.beatmap.nameFromPath(path+obj) for path, obj, _ in vfs.root.walk()))):
print(getname(s))
exit(0)
return (n, a)
def handleGameArgs(parser, argv, namespace, beatmapOption=True):
g = parser.add_argument_group(title="game options")
if beatmapOption:
g.add_argument('beatmap', type=str, nargs=1,
help='run the game with the specified beatmap')
g.add_argument('--importer-options', action='store', default=None,
help='pass an option string to the beatmap importer')
g.add_argument('--start-from', dest='startfrom', metavar='TIME', type=int, action='store', default=0,
help='start playing from an arbitrary position, in milliseconds (default: 0)')
g.add_argument('--loop', metavar='TIME', type=int, action='store', default=0,
help='if >0, the song will automatically restart after being played for this much milliseconds (default: 0)')
g.add_argument('-o', '--beatmap-offset', metavar='TIME', type=int, action='store', default=None,
help='offset timing of all notes on the beatmap by this value, in milliseconds (overrides the config setting)')
g.add_argument('-f', '--fc-run', dest='fcrun', action='store_true', default=False,
help='automatically restart the game when the combo is broken')
g.add_argument('-p', '--perfect-run', dest='perfectrun', action='store_true', default=False,
help='automatically restart the game when anything less than Perfect is scored, implies --fc-run')
g.add_argument('-r', '--random', action='store_true', default=False,
help='randomize note positions on the beatmap')
g.add_argument('--shuffle-bands', action='store_true', default=False,
help='shuffle band positions')
g.add_argument('--mirror-bands', action='store_true', default=False,
help='mirror band positions')
g.add_argument('--no-holds', action='store_true', default=False,
help='replace each hold note with two hit notes')
g.add_argument('--holdify', action='store_true', default=False,
help='group all notes into holds where possible')
g.add_argument('-i', '--insane', action='store_true', default=False,
help='add lots of extra notes')
g.add_argument('-a', '--autoplay', action='store_true', default=False,
help='play automatically without user interaction (overrides the config setting)')
g.add_argument('-b', '--num-bands', action='store', type=int, default=0,
help='forces a specific amount of bands instead of reading it from the beatmap')
if beatmapOption and len(argv) < 1:
parser.print_help()
exit(1)
n, a = parser.parse_known_args(argv, namespace=namespace)
return (n, a)
def handleRemainingArgs(parser, argv, namespace):
if namespace.help:
parser.print_help()
exit(1)
if argv:
sys.stderr.write("error: unhandled arguments: %s\n\n" % ', '.join(repr(a) for a in argv))
parser.print_usage()
sys.stderr.write("\ntry %s -h for help\n" % parser.prog)
exit(1)
return (namespace, argv)
def loadConfig(requireLogLevel=logging.CRITICAL):
if globalArgs.config is not None:
        cfg = globalArgs.config.name
else:
cfg = os.path.join(userdir, "config.json")
defCfg = os.path.join(userdir, "config.default.json")
try:
with open(defCfg, 'w') as f:
muz.config.dump(f)
except Exception:
log.exception("couldn't write the default configuration file")
else:
log.info("wrote the default configuration to %s", repr(defCfg))
try:
with open(cfg) as f:
muz.config.load(f)
except Exception:
log.exception("couldn't load the configuration file %s", repr(cfg))
else:
log.info("loaded configuration from %s", repr(cfg))
if globalArgs.loglevel is None:
muz.log.setLevel(min(requireLogLevel, muz.util.logLevelByName(muz._config["log"]["level"])))
def playBeatmap(bmap):
frontend.gameLoop(game.Game(bmap, frontend))
def initFrontend(args, namespace):
global frontend
frontend = muz.frontend.get(globalArgs.frontend, frontendArgs=args, frontendArgsNamespace=namespace)
def init(requireLogLevel=logging.CRITICAL):
reload(sys)
sys.setdefaultencoding("utf-8")
initUserDir()
loadConfig(requireLogLevel=requireLogLevel)
initvfs()
if frontend is not None:
frontend.postInit()
def bareInit(argv=None, requireFrontend=False):
p = initArgParser()
n = None
if argv is None:
argv = []
n, argv = handleGeneralArgs(p, argv, n)
n, argv = handleGameArgs(p, argv, n, beatmapOption=False)
if requireFrontend:
initFrontend(argv, n)
n, argv = handleRemainingArgs(p, argv, n)
init()
@muz.util.entrypoint
def run(*argv):
argv = argv[1:]
p = initArgParser()
n = None
n, argv = handleGeneralArgs(p, argv, n)
n, argv = handleGameArgs(p, argv, n)
initFrontend(argv, n)
n, argv = handleRemainingArgs(p, argv, n)
init()
try:
playBeatmap(beatmap.load(n.beatmap[0], options=n.importer_options))
finally:
frontend.shutdown()
@muz.util.entrypoint
def runUI(*argv):
argv = argv[1:]
p = initArgParser()
n = None
n, argv = handleGeneralArgs(p, argv, n)
initFrontend(argv, n)
init()
try:
frontend.main()
finally:
frontend.shutdown()
if __name__ == "__main__":
run(*sys.argv)
| 2.15625 | 2 |
components/model/bert/bert_pipeline/kubeflow_dag_runner.py | googleforgames/clean-chat | 9 | 12770715 | <filename>components/model/bert/bert_pipeline/kubeflow_dag_runner.py
################################################################################################################
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################################################
import os
from absl import logging
from tfx.orchestration.experimental import KubeflowDagRunner
from tfx import v1 as tfx
import pipeline
def run_pipeline():
''' Creates a Kubeflow Pipeline '''
metadata_config = tfx.orchestration.experimental.get_default_kubeflow_metadata_config()
    tfx_image = 'gcr.io/' + os.getenv('TF_VAR_GCP_PROJECT_ID') + '/tfx-pipeline'
runner_config = tfx.orchestration.experimental.KubeflowDagRunnerConfig(
kubeflow_metadata_config=metadata_config,
tfx_image=tfx_image
)
KubeflowDagRunner(config=runner_config).run_pipeline(
pipeline.create_pipeline(
pipeline_name=os.getenv('TF_VAR_ML_PIPELINE_NAME'),
pipeline_root=os.getenv('TF_VAR_ML_PIPELINE_ROOT'),
data_path=os.getenv('TRAINING_DATA_PATH'),
preprocessing_fn=os.path.join(os.getcwd(), 'preprocessing.py'),
run_fn='model.run_fn',
            train_steps=tfx.proto.TrainArgs(num_steps=int(os.getenv('TRAINING_STEPS'))),
            eval_steps=tfx.proto.EvalArgs(num_steps=int(os.getenv('EVAL_STEPS'))),
serving_model_dir=os.path.join(os.getenv('TF_VAR_ML_PIPELINE_ROOT'), 'serving_model')
)
)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
    run_pipeline()
| 1.609375 | 2 |
CODES/20. Loop for/loops.py | eltechno/python_course | 4 | 12770716 | <reponame>eltechno/python_course
result = 0
"""
i = 0
while i < 4:
nr = int(input("Please give me the number: "))
result += nr
i += 1
print("The result of adding numbers is: ", result)
"""
for i in range(1000):
if (i%2 == 0):
print(i, " is even number")
print("The result of adding numbers is: ", result)
| 3.84375 | 4 |
chamfer_pytorch/test_chamfer.py | jiyeonkim127/PSI | 138 | 12770717 | <filename>chamfer_pytorch/test_chamfer.py
import torch
import dist_chamfer as ext
distChamfer = ext.chamferDist()
from torch.autograd import Variable
def pairwise_dist(x, y):
xx, yy, zz = torch.mm(x, x.t()), torch.mm(y, y.t()), torch.mm(x, y.t())
rx = xx.diag().unsqueeze(0).expand_as(xx)
ry = yy.diag().unsqueeze(0).expand_as(yy)
P = rx.t() + ry - 2 * zz
return P
def NN_loss(x, y, dim=0):
dist = pairwise_dist(x, y)
values, indices = dist.min(dim=dim)
return values.mean()
def mydistChamfer(a, b):
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
diag_ind = torch.arange(0, num_points).type(torch.cuda.LongTensor)
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
P = rx.transpose(2, 1) + ry - 2 * zz
return torch.min(P, 2)[0], torch.min(P, 1)[0]
def test_chamfer():
distChamfer = ext.chamferDist()
p1 = torch.rand(4, 100, 3).cuda()
p2 = torch.rand(4, 100, 3).cuda()
points1 = Variable(p1, requires_grad=True)
points2 = Variable(p2)
dist1, dist2, = distChamfer(points1, points2)
loss = torch.sum(dist1)
print(loss)
loss.backward()
print(points1.grad, points2.grad)
mydist1, mydist2 = mydistChamfer(points1, points2)
d1 = (dist1 - mydist1) ** 2
d2 = (dist2 - mydist2) ** 2
print(d1, d2)
assert (
torch.sum(d1) + torch.sum(d2) < 0.00000001
), "chamfer cuda and chamfer normal are not giving the same results"
test_chamfer()
| 2.34375 | 2 |
annotations/gtf.py | don4apaev/anfisa | 0 | 12770718 | import bisect
from annotations.db_connect import Connection
class GTF(Connection):
TABLE = "GTF"
GENE_BUCKET_SIZE = 1000000
COLUMNS = [
"chromosome",
"source",
"feature",
"start",
"end",
"score",
"strand",
"frame",
"attribute",
"gene",
"biotype",
"exon",
"transcript"
]
def __init__(self, host = "anfisa.forome.org:ip-172-31-24-96:MishaMBP3.mmcentre.org", *args, **kvargs):
if (not "database" in kvargs):
kvargs["database"] = "ensembl"
if (not "user" in kvargs):
kvargs["user"] = "hgmd"
if (not "password" in kvargs):
kvargs["password"] = "<PASSWORD>"
Connection.__init__(self, host, connect_now=True, *args, **kvargs)
self.table = "{}.{}".format(self.database, self.TABLE)
def prepare_lookup(self, chromosome=None, gene=None, transcript=None, feature="exon"):
if (chromosome == None and gene == None and transcript == None):
raise Exception("At least one of: chromosome, gene or transcript is required")
select = "SELECT {}, {}, {} from {}".format(self.quote("start"), self.quote("end"), "feature", self.table)
conditions = []
if (chromosome != None):
conditions.append("chromosome")
if (gene != None):
conditions.append("gene")
if (transcript != None):
conditions.append("transcript")
ccc = ["{} = {}".format(c, self.parameter()) for c in conditions]
if (feature != None):
ccc.append("feature = '{}'".format(feature))
condition = " AND ".join(ccc)
order = "{}, {}".format(self.quote("start"), self.quote("end"))
sql = "{} WHERE {} ORDER BY {}".format(select, condition, order)
return Lookup(self.connection, sql, conditions)
def get_gene(self, chromosome, pos):
sql = "SELECT gene FROM {}_gene WHERE chromosome = {} AND bucket = {} AND {} between {} and {}" \
.format(self.table, self.parameter(), self.parameter(), self.parameter(), self.quote("start"),
self.quote("end"))
bucket = (pos / self.GENE_BUCKET_SIZE) * self.GENE_BUCKET_SIZE
cursor = self.connection.cursor()
cursor.execute(sql, (chromosome, bucket, pos))
rows = cursor.fetchall()
if (rows):
return rows[0][0]
class Lookup:
def __init__(self, connection, sql, conditions):
self.sql = sql
self.conditions = conditions
self.connection = connection
self.verbose = False
def get_rows(self, args):
if (len(set(self.conditions) & set(args.keys())) != len(args)):
e = ",".join(self.conditions)
a = ",".join(args.keys())
raise Exception("Incorrect Arguments: Expected: {}, Actual: {}".format(e, a))
paramaters = tuple([args[c] for c in self.conditions])
cursor = self.connection.cursor()
cursor.execute(self.sql, paramaters)
rows = cursor.fetchall()
return rows
def lookup(self, pos, args):
rows = self.get_rows(args)
n = len(rows)
if (n == 0):
return None
inf = rows[0][0]
if (pos < inf):
return (inf-pos), "upstream"
sup = rows[-1][1]
if (pos > sup):
return (pos-sup), "downstream"
a = []
for row in rows:
a.append(row[0])
a.append(row[1])
i = bisect.bisect(a, pos)
if (pos == inf or pos == sup):
d = 0
else:
try:
d = min(pos - a[i-1], a[i] - pos)
except:
raise
if ((i%2) == 1):
index = (i+1)/2
region = "exon"
else:
index = i/2
region = "intron"
if (self.verbose):
return d, region, "{}/{}".format(index, n), inf, sup
else:
return d, region, index, n
if __name__ == '__main__':
with GTF() as gtf:
lookup = gtf.prepare_lookup(gene=True, transcript=True)
lookup.verbose = True
print lookup.lookup(pos=6484880, args={"gene":"HES2", "transcript":"ENST00000377834"})
print lookup.lookup(pos=6484880, args={"gene":"ESPN", "transcript":"ENST00000377828"})
print lookup.lookup(pos=6500660, args={"gene":"ESPN", "transcript":"ENST00000377828"})
print lookup.lookup(pos=6501044, args={"gene":"ESPN", "transcript":"ENST00000377828"})
print lookup.lookup(pos=6520312, args={"gene":"ESPN", "transcript":"ENST00000377828"})
print lookup.lookup(pos=12058802, args={"gene":"MFN2", "transcript":"ENST00000235329"})
print lookup.lookup(pos=12062017, args={"gene":"MFN2", "transcript":"ENST00000235329"})
print lookup.lookup(pos=12065841, args={"gene":"MFN2", "transcript":"ENST00000235329"})
print lookup.lookup(pos=16360550, args={"gene":"CLCNKA", "transcript":"ENST00000331433"})
print lookup.lookup(pos=16370215, args={"gene":"CLCNKB", "transcript":"ENST00000375679"})
print lookup.lookup(pos=16371067, args={"gene":"CLCNKB", "transcript":"ENST00000375679"})
print gtf.get_gene(5, 70818177)
| 2.71875 | 3 |
hpat/io/pio.py | AlexanderKalistratov/hpat | 1 | 12770719 | <gh_stars>1-10
from __future__ import print_function, division, absolute_import
import types as pytypes # avoid confusion with numba.types
import numba
from numba import ir, analysis, types, config, numpy_support
from numba.ir_utils import (mk_unique_var, replace_vars_inner, find_topo_order,
dprint_func_ir, remove_dead, mk_alloc,
find_callname, guard, require, get_definition,
build_definitions, find_const, compile_to_numba_ir,
replace_arg_nodes)
import numpy as np
import hpat
from hpat import utils
import hpat.io
from hpat.io import pio_api, pio_lower
from hpat.utils import find_str_const, debug_prints
def remove_h5(rhs, lives, call_list):
# the call is dead if the read array is dead
if call_list == ['h5read', 'io', pio_api] and rhs.args[6].name not in lives:
return True
if call_list == ['h5size', 'io', pio_api]:
return True
return False
numba.ir_utils.remove_call_handlers.append(remove_h5)
class PIO(object):
"""analyze and transform hdf5 calls"""
def __init__(self, func_ir, _locals, reverse_copies):
self.func_ir = func_ir
self.locals = _locals
self.reverse_copies = reverse_copies
def handle_possible_h5_read(self, assign, lhs, rhs):
tp = self._get_h5_type(lhs, rhs)
if tp is not None:
dtype_str = str(tp.dtype)
func_text = "def _h5_read_impl(dset, index):\n"
# TODO: index arg?
func_text += " arr = hpat.io.pio_api.h5_read_dummy(dset, {}, '{}', index)\n".format(tp.ndim, dtype_str)
loc_vars = {}
exec(func_text, {}, loc_vars)
_h5_read_impl = loc_vars['_h5_read_impl']
f_block = compile_to_numba_ir(_h5_read_impl, {'hpat': hpat}).blocks.popitem()[1]
index_var = rhs.index if rhs.op == 'getitem' else rhs.index_var
replace_arg_nodes(f_block, [rhs.value, index_var])
nodes = f_block.body[:-3] # remove none return
nodes[-1].target = assign.target
return nodes
return None
def _get_h5_type(self, lhs, rhs):
tp = self._get_h5_type_locals(lhs)
if tp is not None:
return tp
return guard(self._infer_h5_typ, rhs)
def _infer_h5_typ(self, rhs):
# infer the type if it is of the from f['A']['B'][:] or f['A'][b,:]
# with constant filename
# TODO: static_getitem has index_var for sure?
# make sure it's slice, TODO: support non-slice like integer
require(rhs.op in ('getitem', 'static_getitem'))
# XXX can't know the type of index here especially if it is bool arr
# make sure it is not string (we're not in the middle a select chain)
index_var = rhs.index if rhs.op == 'getitem' else rhs.index_var
index_val = guard(find_const, self.func_ir, index_var)
require(not isinstance(index_val, str))
# index_def = get_definition(self.func_ir, index_var)
# require(isinstance(index_def, ir.Expr) and index_def.op == 'call')
# require(find_callname(self.func_ir, index_def) == ('slice', 'builtins'))
# collect object names until the call
val_def = rhs
obj_name_list = []
while True:
val_def = get_definition(self.func_ir, val_def.value)
require(isinstance(val_def, ir.Expr))
if val_def.op == 'call':
return self._get_h5_type_file(val_def, obj_name_list)
# object_name should be constant str
require(val_def.op in ('getitem', 'static_getitem'))
val_index_var = val_def.index if val_def.op == 'getitem' else val_def.index_var
obj_name = find_str_const(self.func_ir, val_index_var)
obj_name_list.append(obj_name)
def _get_h5_type_file(self, val_def, obj_name_list):
require(len(obj_name_list) > 0)
require(find_callname(self.func_ir, val_def) == ('File', 'h5py'))
require(len(val_def.args) > 0)
f_name = find_str_const(self.func_ir, val_def.args[0])
obj_name_list.reverse()
import h5py
f = h5py.File(f_name, 'r')
obj = f
for obj_name in obj_name_list:
obj = obj[obj_name]
require(isinstance(obj, h5py.Dataset))
ndims = len(obj.shape)
numba_dtype = numba.numpy_support.from_dtype(obj.dtype)
f.close()
return types.Array(numba_dtype, ndims, 'C')
def _get_h5_type_locals(self, varname):
# TODO: can we do this without reverse_copies?
# TODO: if copy propagation is done, varname itself should be checked
new_name = self.reverse_copies.get(varname, None)
typ = self.locals.pop(new_name, None)
if typ is None and new_name is not None:
typ = self.locals.pop(new_name + ":h5_types", None)
return typ
def _handle_h5_File_call(self, assign, lhs, rhs):
"""
Handle h5py.File calls like:
f = h5py.File(file_name, mode)
"""
# parallel arg = False for this stage
loc = lhs.loc
scope = lhs.scope
parallel_var = ir.Var(scope, mk_unique_var("$const_parallel"), loc)
parallel_assign = ir.Assign(ir.Const(0, loc), parallel_var, loc)
rhs.args.append(parallel_var)
return [parallel_assign, assign]
| 2.328125 | 2 |
py/buck/zip/append_with_copy.py | thelvis4/buck | 1 | 12770720 | #!/usr/bin/env python
# Copyright 2017-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Appends a file to a zip archive with copying the resulting zip to a new place """
import argparse
import shutil
import sys
from zipfile import ZipFile
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input")
parser.add_argument("--output")
parser.add_argument(
"files-to-append",
nargs="+",
help="Pairs of new zip entry name and file with entry content",
)
options = parser.parse_args()
copy_and_append_to_zip_file(options.input, options.output, options.files_to_append)
def copy_and_append_to_zip_file(input_zip_file, output_zip_file, files_to_append):
shutil.copy(input_zip_file, output_zip_file)
append_files_to_zip(output_zip_file, files_to_append)
def append_files_to_zip(zip_file_name, files_to_append):
with ZipFile(zip_file_name, "a") as zip_file:
for i in range(0, len(files_to_append) - 1, 2):
entry_name = files_to_append[i]
entry_content = __read_file(files_to_append[i + 1])
zip_file.writestr(entry_name, entry_content)
def __read_file(file_path):
with open(file_path) as new_file:
return new_file.read()
if __name__ == "__main__":
copy_and_append_to_zip_file(sys.argv[1], sys.argv[2], sys.argv[3:])
| 2.34375 | 2 |
python/graphs/shortest_path.py | tachyonsoftware/algorithms | 17 | 12770721 | def get_shortest_path(in_nodes, start, dest):
shortest_paths = {start: (0, [start])}
def _get_shortest(dest, shortest_paths=shortest_paths):
if dest not in shortest_paths:
incoming_edges = in_nodes[dest]
shortest_path, shortest_cost = None, None
for in_node, cost in incoming_edges:
# incoming shortest path cost and path
in_cost, in_short = _get_shortest(in_node)
if shortest_cost is None or in_cost + cost < shortest_cost:
shortest_path = in_short
shortest_cost = in_cost + cost
shortest_paths[dest] = (shortest_cost, shortest_path + [dest])
return shortest_paths[dest]
return _get_shortest(dest)
def get_incoming_edges(adj_lst):
res = {}
for node_from, edges in adj_lst.items():
for node_to, cost in edges:
res[node_to] = res.get(node_to, []) + [((node_from, cost))]
return res
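
# A quick worked example of the transform:
#   get_incoming_edges({'a': [('b', 2), ('c', 3)]})
#   -> {'b': [('a', 2)], 'c': [('a', 3)]}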
if __name__ == "__main__":
out_nodes = {'a': [('c', 3), ('b', 2)],
'b': [('d', 4)],
'c': [('e', 6), ('d', 5)],
'd': [('f', 2), ('g', 3)],
'e': [('f', 1)],
'f': [('h', 15)],
'g': [('h', 10)],
'h': []}
in_nodes = get_incoming_edges(out_nodes)
print get_shortest_path(in_nodes, 'a', 'h')
| 3.890625 | 4 |
2017/round_1a/alphabet_cake.py | laichunpongben/CodeJam | 0 | 12770722 | #!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2017
# Round 1A 2017
# Problem A. Alphabet Cake
# Solve all test sets
from __future__ import print_function
def make_cake(r, c, cake):
assert isinstance(cake, list)
filled_cake = []
for row in cake:
first_cell = '?'
last_cell = '?'
new_row = ''
for cell in row:
if cell != '?':
last_cell = cell
if first_cell == '?':
first_cell = cell
new_row += last_cell
new_row = new_row.replace('?', first_cell)
filled_cake.append(new_row)
filled_cake2 = []
last_row = '?' * c
first_row = '?' * c
for row in filled_cake:
if not '?' in row:
last_row = row
if '?' in first_row:
first_row = row
new_row = last_row
filled_cake2.append(new_row)
filled_cake2 = [first_row if '?' in row else row for row in filled_cake2]
return filled_cake2
if __name__ == '__main__':
import os
samples = [
(3, 3, ['G??', '?C?', '??J']),
(3, 4, ['CODE', '????', '?JAM']),
(2, 2, ['CA', 'KE'])
]
for sample in samples:
print(make_cake(*sample))
data_files = ['A-small-practice', 'A-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
test_cases = []
j = 0
for _ in range(input_count):
cake = []
r, c = tuple([int(_) for _ in inputs[j].split(' ')])
j += 1
for _ in range(r):
row = inputs[j]
cake.append(row)
j += 1
test_cases.append((r, c, cake))
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for test_case in test_cases:
cake = make_cake(*test_case)
output_file.write('Case #{0}:\n'.format(i))
for row in cake:
output_file.write(row)
output_file.write('\n')
i += 1
| 3.015625 | 3 |
venv/lib/python2.7/site-packages/boxsdk/util/compat.py | LockScreen/Backend | 1 | 12770723 | <filename>venv/lib/python2.7/site-packages/boxsdk/util/compat.py<gh_stars>1-10
# coding: utf-8
from __future__ import division, unicode_literals
from datetime import timedelta
if not hasattr(timedelta, 'total_seconds'):
def total_seconds(delta):
"""
Return the total number of seconds represented by this timedelta.
Python 2.6 does not define this method.
"""
return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
else:
def total_seconds(delta):
return delta.total_seconds()
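
# Quick sanity check valid for either branch of the shim:
#   total_seconds(timedelta(minutes=1, microseconds=500000))  # -> 60.5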
| 2.703125 | 3 |
unit_test/model/sklearn_like_model/AE/test_AE.py | demetoir/MLtools | 0 | 12770724 | from script.data_handler.DatasetPackLoader import DatasetPackLoader
from script.model.sklearn_like_model.AE.AE import AE
data_pack = DatasetPackLoader().load_dataset("MNIST")
dataset = data_pack['train']
Xs, Ys = dataset.full_batch(['Xs', 'Ys'])
sample_X = Xs[:2]
sample_Y = Ys[:2]
def AE_total_execute(model):
model.train(Xs, epoch=1)
metric = model.metric(sample_X)
print(metric)
code = model.code(sample_X)
print(code)
recon = model.recon(sample_X)
print(recon)
path = model.save()
class_ = model.__class__
del model
model = class_().load_meta(path)
print('model reloaded')
for i in range(2):
model.train(Xs, epoch=1)
metric = model.metric(sample_X)
print(metric)
metric = model.metric(sample_X)
print(metric)
code = model.code(sample_X)
print(code)
recon = model.recon(sample_X)
print(recon)
model.save()
def test_AE():
model = AE()
AE_total_execute(model)
def test_AE_with_noise():
model = AE(with_noise=True)
AE_total_execute(model)
| 2.5 | 2 |
src/zsl/application/modules/task_router.py | AtteqCom/zsl | 2 | 12770725 | <filename>src/zsl/application/modules/task_router.py
"""
:mod:`zsl.application.modules.task_router_module`
-------------------------------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
from injector import ClassProvider, Module, provides, singleton
from zsl import Config, inject
from zsl.router.task import TASK_CONFIGURATION_NAME, TaskConfiguration, TaskRouter
class TaskRouterModule(Module):
"""Adds task router to current configuration."""
task_provider = ClassProvider(TaskRouter)
@provides(interface=TaskConfiguration, scope=singleton)
@inject(config=Config)
def provide_task_configuration(self, config):
default_config = self._create_default_configuration()
return config.get(TASK_CONFIGURATION_NAME, default_config)
def configure(self, binder):
binder.bind(TaskRouter, to=self.task_provider, scope=singleton)
def _create_default_configuration(self):
return TaskConfiguration().create_namespace('task').add_packages(['zsl.tasks']).get_configuration()
| 2.296875 | 2 |
wordmarker/data/formatter.py | lostblackknight/wordmarker | 2 | 12770726 | <reponame>lostblackknight/wordmarker
from abc import ABCMeta, abstractmethod
class Formatter(metaclass=ABCMeta):
"""
::
        The abstract formatter class
"""
@abstractmethod
def format(self, *args):
"""
.. note::
            Format the data.
        :param args: the data
        :return: - the formatted data
"""
pass
class SqlFormatter(Formatter):
"""
::
        Formats SQL statements
"""
def format(self, sql):
"""
.. note::
            Formats the SQL statement entered by the user.
            For example:
            .. code-block:: sql
                :linenos:
                -- input --
                select * from t_user where username=? and password=?
                -- output --
                select * from t_user where username=:a and password=:b
        .. caution::
            The appended ``:a``, ``:b``, ... are drawn from the 26 letters of the alphabet, which means a single query may not contain more than 26 ``?`` placeholders.
        :param sql: the SQL statement
        :return: - the formatted SQL statement
"""
sql_list = list(sql)
index = 0
n = 97
for i in sql_list:
if i == '?':
sql_list[index] = ':' + chr(n)
n += 1
index += 1
return ''.join(sql_list)
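
# Minimal usage sketch, mirroring the docstring example:
#   SqlFormatter().format("select * from t_user where username=? and password=?")
#   -> "select * from t_user where username=:a and password=:b"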
| 2.984375 | 3 |
examples/strategies/double_ma.py | mcFore/ctpbee | 461 | 12770727 | <filename>examples/strategies/double_ma.py
from ctpbee import CtpbeeApi, CtpBee
from ctpbee.constant import Offset, TradeData, Direction
from ctpbee.indicator.ta_lib import ArrayManager
class DoubleMaStrategy(CtpbeeApi):
def __init__(self, name):
super().__init__(name)
self.manager = ArrayManager(100)
        self.instrument_set = ["rb2101.SHFE"]  # if the quote-splitting option is enabled, incoming data is checked and only quotes for these instruments are delivered; the list can also be used to subscribe to the specified quotes
self.buy = 0
self.sell = 0
self.slow = 60
self.fast = 30
def on_trade(self, trade: TradeData):
if trade.offset == Offset.OPEN:
if trade.direction == Direction.LONG:
self.buy += trade.volume
else:
self.sell += trade.volume
else:
if trade.direction == Direction.LONG:
self.sell -= trade.volume
else:
self.buy -= trade.volume
def on_bar(self, bar):
""" """
self.manager.add_data(bar)
if not self.manager.inited:
return
fast_avg = self.manager.sma(self.fast, array=True)
slow_avg = self.manager.sma(self.slow, array=True)
if slow_avg[-2] < fast_avg[-2] and slow_avg[-1] >= fast_avg[-1]:
self.action.cover(bar.close_price, self.buy, bar)
self.action.sell(bar.close_price, 3, bar)
if fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]:
self.action.sell(bar.close_price, self.sell, bar)
self.action.buy(bar.close_price, 3, bar)
def on_tick(self, tick):
pass
def on_init(self, init: bool):
print("初始化成功了, 这里可能会触发两次哦")
if __name__ == '__main__':
app = CtpBee("doublema", __name__, refresh=True)
app.config.from_mapping({
"CONNECT_INFO": {
"userid": "089131",
"password": "<PASSWORD>",
"brokerid": "9999",
"md_address": "tcp://172.16.17.32:10112",
"td_address": "tcp://172.16.17.32:10102",
"product_info": "",
"appid": "simnow_client_test",
"auth_code": "0000000000000000"
},
"INTERFACE": "ctp", # 接口声明
"TD_FUNC": True, # 开启交易功能
"MD_FUNC": True,
"XMIN": [1]
})
strategy = DoubleMaStrategy("doublema")
app.add_extension(strategy)
app.start()
| 2.65625 | 3 |
shhicparking/ui/PictureViewerDlg.py | sqf-ice/ParkTerminalUnitDevelopManage | 0 | 12770728 | #encoding:utf-8
'''
Created on 2015-8-27
Picture viewer window
@author: user
'''
from PyQt4 import QtGui, QtCore, uic
from PyQt4.Qt import pyqtSlot
from PyQt4.QtGui import QMessageBox
from shhicparking.server import TSStub
from shhicparking.util import dateutil
import base64
class PictureViewerDlg(QtGui.QDialog):
def __init__(self,parent):
super( PictureViewerDlg, self ).__init__(parent=parent)
uic.loadUi( "shhicparking/ui/uires/pictureViewerDlg.ui", self )
def show(self,photo,title=None):
QtGui.QDialog.show(self)
self.label.setPixmap(photo)
self.setWindowTitle(u"图片查看:"+title if title is not None else "")
@pyqtSlot()
def on_saveBtn_clicked(self):
        fn = QtGui.QFileDialog.getSaveFileName(self, u"Save picture", "/", u"Image files (*.jpg);;All files (*.*)")
if fn != '':
if self.label.pixmap().save(fn):
                QMessageBox.information(self, u"Save picture", u"Picture saved successfully")
else:
                QMessageBox.warning(self, u"Save picture", u"Failed to save the picture")
@pyqtSlot()
def on_closeBtn_clicked(self):
self.label.clear()
self.hide()
| 2 | 2 |
doc2vec_model.py | woctezuma/steam-descriptions | 1 | 12770729 | # Objective: learn a Doc2Vec model
import logging
import multiprocessing
import random
from time import time
import numpy as np
from gensim.models import doc2vec
from benchmark_utils import load_benchmarked_app_ids, print_ranking
from sentence_models import print_most_similar_sentences
from universal_sentence_encoder import perform_knn_search_with_vectors_as_input
from universal_sentence_encoder import prepare_knn_search, transform_matches_to_app_ids
from utils import load_tokens, load_game_names, get_doc_model_file_name
from word_model import compute_similarity_using_word2vec_model
def get_tag_prefix():
return 'appID_'
def read_corpus(steam_tokens, game_tags=None, include_app_ids=True):
for app_id, tokens in steam_tokens.items():
doc_tag = []
if include_app_ids:
doc_tag += [get_tag_prefix() + str(app_id)]
try:
# Reference: https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e
doc_tag += game_tags[app_id]
except KeyError:
print('AppID = {} cannot be found in tag dictionary.'.format(app_id))
except TypeError:
pass
yield doc2vec.TaggedDocument(tokens, doc_tag)
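
# Minimal usage sketch (hypothetical tokens):
#   docs = list(read_corpus({'620': ['portal', 'puzzle']}))
#   docs[0].words  # -> ['portal', 'puzzle']
#   docs[0].tags   # -> ['appID_620']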
def reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples, game_names=None):
if game_names is None:
game_names, _ = load_game_names()
dummy_app_ids = []
similarity_scores = dict()
for app_id, similarity_value in similarity_scores_as_tuples:
if app_id.startswith(get_tag_prefix()):
app_id = app_id[len(get_tag_prefix()):]
similarity_scores[str(app_id)] = similarity_value
if str(app_id) not in game_names:
dummy_app_ids.append(app_id)
if len(dummy_app_ids) > 0:
print('Dummy appIDs: {}'.format(dummy_app_ids))
return similarity_scores
def train_doc_model_on_steam_tokens(model=None, steam_tokens=None, num_epochs=10):
    # You do not want to perform training this way, because training already happened when initializing the model
# with Doc2Vec(documents). Moreover, calling train() several times messes with decay of learning rate alpha!
if steam_tokens is None:
steam_tokens = load_tokens()
documents = list(read_corpus(steam_tokens))
if model is None:
model = doc2vec.Doc2Vec(documents) # training happens with 5 epochs (default) here
start = time()
model.train(documents, total_examples=len(documents), epochs=num_epochs)
    print('Elapsed time: %.2f' % (time() - start))
model.save(get_doc_model_file_name())
return model
def compute_similarity_using_doc2vec_model(query_app_id, steam_tokens=None, model=None,
verbose=False,
enforce_training=False, avoid_inference=False, num_items_displayed=10):
if steam_tokens is None:
steam_tokens = load_tokens()
if model is None:
try:
print('Loading Doc2Vec model.')
model = doc2vec.Doc2Vec.load(get_doc_model_file_name())
if enforce_training:
model = train_doc_model_on_steam_tokens(model=model, steam_tokens=steam_tokens)
except FileNotFoundError:
print('Training Doc2Vec model from scratch.')
model = train_doc_model_on_steam_tokens(model=None, steam_tokens=steam_tokens)
if avoid_inference:
if verbose:
print('Finding most similar documents based on the query appID.')
# For games which are part of the training corpus, we do not need to call model.infer_vector()
similarity_scores_as_tuples = model.docvecs.most_similar(positive=get_tag_prefix() + str(query_app_id),
topn=num_items_displayed)
else:
if verbose:
print('Finding most similar documents based on an inferred vector, which represents the query document.')
query = steam_tokens[query_app_id]
# Caveat: « Subsequent calls to this function may infer different representations for the same document. »
# Reference: https://radimrehurek.com/gensim/models/doc2vec.html#gensim.models.doc2vec.Doc2Vec.infer_vector
inferred_vector = model.infer_vector(query)
similarity_scores_as_tuples = model.docvecs.most_similar([inferred_vector])
similarity_scores = reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples)
print_most_similar_sentences(similarity_scores, num_items_displayed=num_items_displayed)
return similarity_scores
def check_analogy(model, pos, neg, num_items_displayed=10):
similarity_scores_as_tuples = model.docvecs.most_similar(positive=[get_tag_prefix() + p for p in pos],
negative=[get_tag_prefix() + n for n in neg],
topn=num_items_displayed)
similarity_scores = reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples)
print_most_similar_sentences(similarity_scores, num_items_displayed)
return
def apply_pipeline(train_from_scratch=True, avoid_inference=False, shuffle_corpus=True,
include_genres=False, include_categories=True, include_app_ids=True,
verbose=False):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
game_names, game_tags = load_game_names(include_genres, include_categories)
steam_tokens = load_tokens()
documents = list(read_corpus(steam_tokens, game_tags, include_app_ids))
if shuffle_corpus:
# « Only if the training data has some existing clumping – like all the examples with certain words/topics are
# stuck together at the top or bottom of the ordering – is native ordering likely to cause training problems.
# And in that case, a single shuffle, before any training, should be enough to remove the clumping. »
# Reference: https://stackoverflow.com/a/48080869
random.shuffle(documents)
if train_from_scratch:
print('Creating a new Doc2Vec model from scratch.')
model = doc2vec.Doc2Vec(documents,
vector_size=100,
window=5,
min_count=5,
epochs=20,
workers=multiprocessing.cpu_count())
# NB: Do not follow the piece of advice given in https://rare-technologies.com/doc2vec-tutorial/
# « I have obtained better results by iterating over the data several times and either:
# 1. randomizing the order of input sentences, or
# 2. manually controlling the learning rate over the course of several iterations. »
# Indeed, in my experience, this leads to buggy results. Moreover, this approach is not recommended according to
# https://stackoverflow.com/a/48080869
model.save(get_doc_model_file_name())
else:
print('Loading previous Doc2Vec model.')
model = doc2vec.Doc2Vec.load(get_doc_model_file_name())
# Test doc2vec
if verbose:
try:
# Spelunky + (Slay the Spire) - (Dream Quest)
check_analogy(model, pos=['239350', '646570'], neg=['557410'])
except TypeError:
pass
try:
# Half-Life + (Witcher 2) - (Witcher)
check_analogy(model, pos=['70', '20920'], neg=['20900'])
except TypeError:
pass
query_app_ids = ['620', '364470', '504230', '583950', '646570', '863550', '794600']
for query_app_id in query_app_ids:
print('Query appID: {} ({})'.format(query_app_id, game_names[query_app_id]))
compute_similarity_using_doc2vec_model(query_app_id, steam_tokens, model,
avoid_inference=avoid_inference,
num_items_displayed=10)
# Check the relevance of the corresponding word2vec
for query_word in ['anime', 'fun', 'violent']:
compute_similarity_using_word2vec_model(query_word, steam_tokens, model)
entity = get_doc_model_entity(model)
    tag_entity = set(tag for tag in entity if get_tag_prefix() not in tag)
print(tag_entity)
query_tags = ['In-App Purchases', 'Free to Play', 'Violent', 'Early Access']
for query_tag in tag_entity.intersection(query_tags):
for query_app_id in query_app_ids:
try:
sim = model.docvecs.similarity(get_tag_prefix() + query_app_id, query_tag)
print('Similarity = {:.0%} for tag {} vs. appID {} ({})'.format(sim, query_tag, query_app_id,
game_names[query_app_id]))
except KeyError:
pass
num_items_displayed = 3
for query_tag in tag_entity:
print('\nTag: {}'.format(query_tag))
similarity_scores_as_tuples = model.docvecs.most_similar(positive=query_tag, topn=num_items_displayed)
similarity_scores = reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples)
print_most_similar_sentences(similarity_scores, num_items_displayed=num_items_displayed)
# Top 100
query_app_ids = load_benchmarked_app_ids(append_hard_coded_app_ids=True)
num_neighbors = 10
only_print_banners = True
use_cosine_similarity = True
label_database = np.array(model.docvecs.vectors_docs)
doc_tags = list(model.docvecs.doctags.keys())
init_indices = np.array(range(len(doc_tags)))
bool_indices_to_remove = list(map(lambda x: not x.startswith(get_tag_prefix()), doc_tags))
indices_to_remove = init_indices[bool_indices_to_remove]
label_database = np.delete(label_database, indices_to_remove, axis=0)
app_ids = [int(doc_tag[len(get_tag_prefix()):]) for doc_tag in doc_tags
if doc_tag.startswith(get_tag_prefix())]
knn = prepare_knn_search(label_database, use_cosine_similarity=use_cosine_similarity)
query_des = None
for query_app_id in query_app_ids:
if avoid_inference:
inferred_vector = label_database[app_ids.index(query_app_id)]
else:
# From query appID to query feature vector
query = steam_tokens[str(query_app_id)]
# Caveat: « Subsequent calls to this function may infer different representations for the same document. »
# Reference: https://radimrehurek.com/gensim/models/doc2vec.html#gensim.models.doc2vec.Doc2Vec.infer_vector
inferred_vector = model.infer_vector(query)
if query_des is None:
query_des = inferred_vector
else:
query_des = np.vstack((query_des, inferred_vector))
# Matching of feature vectors
matches = perform_knn_search_with_vectors_as_input(query_des, knn, num_neighbors)
# From feature matches to appID matches
matches_as_app_ids = transform_matches_to_app_ids(matches, app_ids)
print_ranking(query_app_ids,
matches_as_app_ids,
num_elements_displayed=num_neighbors,
only_print_banners=only_print_banners)
return
def get_doc_model_entity(model):
# The equivalent of a vocabulary for a word model
index2entity_set = set(model.docvecs.index2entity)
return index2entity_set
if __name__ == '__main__':
apply_pipeline(train_from_scratch=True, avoid_inference=False, shuffle_corpus=True,
include_genres=False, include_categories=False, include_app_ids=True)
| 2.65625 | 3 |
desafios/Mundo 2/Ex052.py | duartecgustavo/Python---Estudos- | 6 | 12770730 | <filename>desafios/Mundo 2/Ex052.py
# Challenge 52 - Lesson 13: program that reads a number and says whether it is PRIME.
conta = 0
primo = int(input('Tell me a number: '))
for c in range(1, primo + 1):
    if primo % c == 0:
        print('\033[34m', end='')
        conta += 1
    else:
        print('\033[m', end='')
    print(f'{c}', end=' ')
if conta == 2:
    print(f'\n\033[mThe number {primo} is prime!')
else:
    print(f'\n\033[mThe number {primo} is not prime!')
automlk/doc.py | pierre-chaville/automlk | 16 | 12770731 | import os
import sys
import glob
import zipfile
import pandas as pd
import numpy as np
from .context import get_dataset_folder
from .results import *
from automlk.worker import get_search_rounds
from .print import *
import jinja2
import subprocess
jinja_globals = {'print_list': print_list,
'print_score': print_score,
'print_score_std': print_score_std,
'print_value': print_value,
'print_duration': print_duration,
'print_params': print_params,
'print_other_metrics': print_other_metrics,
'print_title': print_title,
}
def render(template, fileout, **kwargs):
"""
generates output from template into the fileout file
:param template: jinja2 template to be used (in folder /template)
:param fileout: file to store the results
:param kwargs: args to render the template
:return:
"""
t = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath="../automlk/templates/")).get_template(template)
with open(fileout, 'w') as f:
f.write(t.render({**kwargs, **jinja_globals}))
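# Example call (illustrative; the 'dataset.rst' template ships with automlk
# under automlk/templates/):
#   render('dataset.rst', '/tmp/dataset.rst', dataset=my_dataset, n_searches1=0)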
def gener_doc(dataset):
"""
generate the documentation of this dataset
:param dataset: dataset object
:return:
"""
# check or create doc folder
folder = get_dataset_folder(dataset.dataset_id) + '/docs'
if not os.path.exists(folder):
os.makedirs(folder)
os.makedirs(folder + '/_build')
os.makedirs(folder + '/_static')
os.makedirs(folder + '/_templates')
# generate conf.py
render('conf.txt', folder + '/conf.py', dataset=dataset)
render('make.bat', folder + '/make.bat', dataset=dataset)
render('makefile.txt', folder + '/Makefile', dataset=dataset)
# generate index
render('index.rst', folder + '/index.rst', dataset=dataset)
# dataset data and features
search = get_search_rounds(dataset.dataset_id)
if len(search) > 0:
best = get_best_models(dataset.dataset_id)
best_pp = get_best_pp(dataset.dataset_id)
# separate models (level 0) from ensembles (level 1)
best1 = [b for b in best if b['level'] == 1]
best2 = [b for b in best if b['level'] == 2]
print(len(best1), len(best2))
print(best1[:2])
render('dataset.rst', folder + '/dataset.rst', dataset=dataset, best1=best1, best2=best2, best_pp=best_pp,
n_searches1=len(search[search.level == 1]),
n_searches2=len(search[search.level == 2]))
# then for the best rounds
N_ROUNDS = 5
for round_id in list([b['round_id'] for b in best1[:N_ROUNDS]]) + list([b['round_id'] for b in best2[:N_ROUNDS]]):
round = search[search.round_id == int(round_id)].to_dict(orient='records')[0]
pipeline = [s for s in round['pipeline'] if s[0] not in ['NO-SCALE', 'PASS']]
params = get_round_params(search, round_id)
features = get_feature_importance(dataset.dataset_id, round_id)
render('round.rst', folder + '/round_%s.rst' % round_id, dataset=dataset, round=round,
pipeline=pipeline, features=features, params=params, cols=params.keys())
else:
# return render_template('dataset.html', dataset=dataset, n_searches1=0)
render('dataset.rst', folder + '/dataset.rst', dataset=dataset, n_searches1=0)
# then generate html and pdf with make
if sys.platform == 'linux':
subprocess.call(['sh', '../scripts/gen_doc.sh', os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs')])
else:
os.system('call ../scripts/gen_doc ' + os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs'))
# generate zip file of the html site
with zipfile.ZipFile(get_dataset_folder(dataset.dataset_id) + '/doc.zip', 'w') as z:
root = get_dataset_folder(dataset.dataset_id) + '/docs/_build/html/'
for dir in ['', '_static/', '_images/', '_sources/']:
for f in glob.glob(root + dir + '*.*'):
z.write(f, dataset.dataset_id + '/' + dir + os.path.basename(f))
| 2.359375 | 2 |
dq0/sdk/pipeline/pipeline.py | gradientzero/dq0-sdk | 2 | 12770732 | # -*- coding: utf-8 -*-
"""
Copyright 2021, Gradient Zero
All rights reserved
"""
import logging
from dq0.sdk.errors.errors import fatal_error
from dq0.sdk.pipeline import pipeline_config
import pandas as pd
from sklearn import pipeline
logger = logging.getLogger(__name__)
class Pipeline():
def __init__(self, steps=None, config_path=None, transformers_root_dir='.', log_key_string='', **kwargs):
"""
        Initialize with steps directly (standalone mode) or with a config file. Both cannot be given.
        params:
        steps: List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained.
        config_path: path to the config file where the pipeline steps are given.
"""
self.log_key_string = log_key_string
if (steps is not None) and (config_path is None):
self.pipeline = pipeline.Pipeline(steps)
elif (steps is None) and (config_path is not None):
pp_config = pipeline_config.PipelineConfig(config_path=config_path)
steps = pp_config.get_steps_from_config(root_dir=transformers_root_dir, log_key_string=self.log_key_string)
self.pipeline = pipeline.Pipeline(steps)
else:
fatal_error("Both steps and config_path are given. Only one should be given.")
def fit(self, X, y=None, **fit_params):
if hasattr(X, 'columns'):
self.col_names = X.columns
else:
self.col_names = None
self.pipeline = self.pipeline.fit(X=X, y=y, **fit_params)
def fit_transform(self, X, y=None, **fit_params):
if hasattr(X, 'columns'):
self.col_names = X.columns
else:
self.col_names = None
X_t = self.pipeline.fit_transform(X=X, y=y, **fit_params)
if self.col_names is not None:
X_t = pd.DataFrame(X_t, columns=self.col_names)
return X_t
def get_params(self, deep=True):
return self.pipeline.get_params(deep=deep)
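# Minimal standalone usage sketch (assumes a scikit-learn compatible
# transformer such as sklearn.preprocessing.StandardScaler):
#   from sklearn.preprocessing import StandardScaler
#   pipe = Pipeline(steps=[('scale', StandardScaler())])
#   X_t = pipe.fit_transform(X)  # returns a DataFrame when X has .columns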
| 2.625 | 3 |
source/sam_spot_bot_create_job/iam_helper.py | liangfu/spot-tagging-bot-for-digital-assets | 19 | 12770733 | import json
import boto3
class IamHelper:
def __init__(self):
self.client = boto3.client("iam")
self.ssm_client = boto3.client('ssm')
def create_or_get_ecs_role(self) -> str:
self.role_name = "spot-bot-ecs-service-role"
print("Check role exist")
try:
response = self.client.get_role(
RoleName=self.role_name,
)
print("<<< Role exist and role ARN is " + response["Role"]["Arn"])
return response["Role"]["Arn"]
except Exception as e:
print(type(e).__name__) # NoSuchEntityException for the first time call.
if type(e).__name__ == "NoSuchEntityException":
return self._create_ecs_role()
def _create_ecs_role(self) -> str:
assume_role_policy_document = json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
})
response = self.client.create_role(
RoleName=self.role_name,
AssumeRolePolicyDocument=assume_role_policy_document
)
print("<<< created ecs role: " + str(response))
return response["Role"]["Arn"]
@staticmethod
def get_account_id():
return boto3.client('sts').get_caller_identity().get('Account')
@staticmethod
def get_region():
region_name = boto3.session.Session().region_name
print("Current Region is - ", region_name)
return region_name
@staticmethod
def get_partition():
"""
Auto switch the region partition for arn.
"""
if boto3.session.Session().region_name in ("cn-northwest-1", "cn-north-1"):
print("China region")
return "aws-cn"
else:
return "aws"
| 2.171875 | 2 |
tests/nn/test_inference.py | Tyelab/sleap | 0 | 12770734 | <filename>tests/nn/test_inference.py
import pytest
import numpy as np
import tensorflow as tf
import sleap
from numpy.testing import assert_array_equal, assert_allclose
from sleap.nn.data.confidence_maps import (
make_confmaps,
make_grid_vectors,
make_multi_confmaps,
)
from sleap.nn.inference import (
InferenceLayer,
InferenceModel,
get_model_output_stride,
find_head,
SingleInstanceInferenceLayer,
SingleInstanceInferenceModel,
SingleInstancePredictor,
CentroidCropGroundTruth,
CentroidCrop,
FindInstancePeaksGroundTruth,
FindInstancePeaks,
TopDownInferenceModel,
TopDownPredictor,
BottomUpPredictor,
BottomUpMultiClassPredictor,
TopDownMultiClassPredictor,
load_model,
)
sleap.nn.system.use_cpu_only()
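# The fixtures below build a tiny synthetic dataset: an 8-frame 12x12 video
# with a two-node skeleton, where frames 0-2 hold one instance and frames
# 3-7 hold two, plus a batched pipeline that crops around the "a" node.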
@pytest.fixture
def test_labels():
skel = sleap.Skeleton()
skel.add_node("a")
skel.add_node("b")
vid = sleap.Video.from_numpy(np.zeros((8, 12, 12, 1), dtype="uint8"))
labels = sleap.Labels()
for fidx in range(len(vid)):
insts = []
insts.append(
sleap.Instance.from_pointsarray(
points=np.array([[1, 2], [3, 4]]) + fidx, skeleton=skel
)
)
if fidx >= 3:
insts.append(
sleap.Instance.from_pointsarray(
points=np.array([[5, 6], [7, 8]]) + fidx, skeleton=skel
)
)
lf = sleap.LabeledFrame(video=vid, frame_idx=fidx, instances=insts)
labels.append(lf)
return labels
@pytest.fixture
def test_pipeline(test_labels):
p = sleap.nn.data.pipelines.Pipeline(
sleap.nn.data.pipelines.LabelsReader(labels=test_labels)
)
p += sleap.nn.data.pipelines.InstanceCentroidFinder(
center_on_anchor_part=True,
anchor_part_names="a",
skeletons=test_labels.skeleton,
)
p += sleap.nn.data.pipelines.Batcher(batch_size=4, unrag=False)
return p
def test_centroid_crop_gt_layer(test_labels, test_pipeline):
ex = test_pipeline.peek()
crop_layer = CentroidCropGroundTruth(crop_size=6)
out = crop_layer(ex)
assert tuple(out["crops"].shape) == (4, None, 6, 6, 1)
assert tuple(out["crop_offsets"].shape) == (4, None, 2)
assert tuple(out["centroids"].shape) == (4, None, 2)
assert tuple(out["centroid_vals"].shape) == (4, None)
assert tuple(out["crops"].bounding_shape()) == (4, 2, 6, 6, 1)
assert tuple(out["crop_offsets"].bounding_shape()) == (4, 2, 2)
assert tuple(out["centroids"].bounding_shape()) == (4, 2, 2)
assert tuple(out["centroid_vals"].bounding_shape()) == (4, 2)
assert out["crops"].dtype == tf.uint8
assert out["crop_offsets"].dtype == tf.float32
assert out["centroids"].dtype == tf.float32
assert out["centroid_vals"].dtype == tf.float32
assert (out["centroids"][0][0].numpy() == test_labels[0][0].numpy()[0]).all()
assert (out["centroids"][1][0].numpy() == test_labels[1][0].numpy()[0]).all()
def test_instance_peaks_gt_layer(test_labels, test_pipeline):
crop_layer = CentroidCropGroundTruth(crop_size=6)
instance_peaks_layer = FindInstancePeaksGroundTruth()
ex = test_pipeline.peek()
crop_output = crop_layer(ex)
out = instance_peaks_layer(ex, crop_output)
assert tuple(out["centroids"].shape) == (4, None, 2)
assert tuple(out["centroid_vals"].shape) == (4, None)
assert tuple(out["instance_peaks"].shape) == (4, None, None, 2)
assert tuple(out["instance_peak_vals"].shape) == (4, None, None)
assert out["centroids"][0].shape == (1, 2)
assert out["centroids"][1].shape == (1, 2)
assert out["centroids"][2].shape == (1, 2)
assert out["centroids"][3].shape == (2, 2)
assert tuple(out["centroids"].bounding_shape()) == (4, 2, 2)
assert tuple(out["centroid_vals"].bounding_shape()) == (4, 2)
assert tuple(out["instance_peaks"].bounding_shape()) == (4, 2, 2, 2)
assert tuple(out["instance_peak_vals"].bounding_shape()) == (4, 2, 2)
assert out["centroids"].dtype == tf.float32
assert out["centroid_vals"].dtype == tf.float32
assert out["instance_peaks"].dtype == tf.float32
assert out["instance_peak_vals"].dtype == tf.float32
assert (out["instance_peaks"][0][0].numpy() == test_labels[0][0].numpy()).all()
assert (out["instance_peaks"][1][0].numpy() == test_labels[1][0].numpy()).all()
def test_instance_peaks_gt_layer_nans():
# Covers nasty edge case when evaluating centroid models and
# GT instances have NaNs
flat_values = tf.cast(
[
[0, 0],
[0, 0],
[1, 1],
[1, 1],
[0, 0],
[np.nan, np.nan],
[1, 1],
[np.nan, np.nan],
],
tf.float32,
)
nested_value_rowids = (
tf.cast([0, 0, 1, 1], tf.int64),
tf.cast([0, 0, 1, 1, 2, 2, 3, 3], tf.int64),
)
instances = tf.RaggedTensor.from_nested_value_rowids(
flat_values, nested_value_rowids
)
flat_values = tf.cast([[0, 0], [1, 1], [0, 0], [1, 1]], tf.float32)
nested_value_rowids = (tf.cast([0, 0, 1, 1], tf.int32),)
centroids = tf.RaggedTensor.from_nested_value_rowids(
flat_values, nested_value_rowids
)
flat_values = tf.cast([1, 1, 1, 1], tf.float32)
nested_value_rowids = (tf.cast([0, 0, 1, 1], tf.int32),)
centroid_vals = tf.RaggedTensor.from_nested_value_rowids(
flat_values, nested_value_rowids
)
example_gt = {"instances": instances}
crop_output = {"centroids": centroids, "centroid_vals": centroid_vals}
layer = FindInstancePeaksGroundTruth()
peaks_gt = layer(example_gt, crop_output)
assert tuple(peaks_gt["instance_peaks"].bounding_shape()) == (2, 2, 2, 2)
def test_centroid_crop_layer():
xv, yv = make_grid_vectors(image_height=12, image_width=12, output_stride=1)
points = tf.cast([[[1.75, 2.75]], [[3.75, 4.75]], [[5.75, 6.75]]], tf.float32)
cms = tf.expand_dims(make_multi_confmaps(points, xv, yv, sigma=1.5), axis=0)
x_in = tf.keras.layers.Input([12, 12, 1])
x_out = tf.keras.layers.Lambda(lambda x: x, name="CentroidConfmapsHead")(x_in)
model = tf.keras.Model(inputs=x_in, outputs=x_out)
layer = CentroidCrop(
keras_model=model,
input_scale=1.0,
crop_size=3,
pad_to_stride=1,
output_stride=None,
refinement="local",
integral_patch_size=5,
peak_threshold=0.2,
return_confmaps=False,
)
out = layer(cms)
assert tuple(out["centroids"].shape) == (1, None, 2)
assert tuple(out["centroid_vals"].shape) == (1, None)
assert tuple(out["crops"].shape) == (1, None, 3, 3, 1)
assert tuple(out["crop_offsets"].shape) == (1, None, 2)
assert tuple(out["centroids"].bounding_shape()) == (1, 3, 2)
assert tuple(out["centroid_vals"].bounding_shape()) == (1, 3)
assert tuple(out["crops"].bounding_shape()) == (1, 3, 3, 3, 1)
assert tuple(out["crop_offsets"].bounding_shape()) == (1, 3, 2)
assert_allclose(out["centroids"][0].numpy(), points.numpy().squeeze(axis=1))
assert_allclose(out["centroid_vals"][0].numpy(), [1, 1, 1], atol=0.1)
def test_instance_peaks_layer():
xv, yv = make_grid_vectors(image_height=12, image_width=12, output_stride=1)
points = tf.cast([[1.5, 2.5], [3.5, 4.5], [5.5, 6.5]], tf.float32)
cms = tf.stack(
[
make_confmaps(points, xv, yv, sigma=1.0),
make_confmaps(points + 1, xv, yv, sigma=1.0),
],
axis=0,
)
x_in = tf.keras.layers.Input([12, 12, 3])
x_out = tf.keras.layers.Lambda(lambda x: x, name="CenteredInstanceConfmapsHead")(
x_in
)
model = tf.keras.Model(inputs=x_in, outputs=x_out)
instance_peaks_layer = FindInstancePeaks(
keras_model=model,
input_scale=1.0,
peak_threshold=0.2,
return_confmaps=False,
refinement="integral",
)
# Raw tensor
out = instance_peaks_layer(cms)
assert tuple(out["instance_peaks"].shape) == (2, None, 3, 2)
assert tuple(out["instance_peak_vals"].shape) == (2, None, 3)
assert tuple(out["instance_peaks"].bounding_shape()) == (2, 1, 3, 2)
assert tuple(out["instance_peak_vals"].bounding_shape()) == (2, 1, 3)
assert_allclose(out["instance_peaks"][0][0].numpy(), points.numpy(), atol=0.1)
assert_allclose(out["instance_peak_vals"][0][0].numpy(), [1, 1, 1], atol=0.3)
assert_allclose(out["instance_peaks"][1][0].numpy(), points.numpy() + 1, atol=0.1)
assert_allclose(out["instance_peak_vals"][1][0].numpy(), [1, 1, 1], atol=0.3)
# Batched example
crops = tf.RaggedTensor.from_tensor(tf.expand_dims(cms, axis=1), lengths=[1, 1])
out = instance_peaks_layer({"crops": crops})
assert tuple(out["instance_peaks"].shape) == (2, None, 3, 2)
assert tuple(out["instance_peak_vals"].shape) == (2, None, 3)
assert tuple(out["instance_peaks"].bounding_shape()) == (2, 1, 3, 2)
assert tuple(out["instance_peak_vals"].bounding_shape()) == (2, 1, 3)
assert_allclose(out["instance_peaks"][0][0].numpy(), points.numpy(), atol=0.1)
assert_allclose(out["instance_peak_vals"][0][0].numpy(), [1, 1, 1], atol=0.3)
assert_allclose(out["instance_peaks"][1][0].numpy(), points.numpy() + 1, atol=0.1)
assert_allclose(out["instance_peak_vals"][1][0].numpy(), [1, 1, 1], atol=0.3)
# Batch size = 1, multi-instance example
crops = tf.RaggedTensor.from_tensor(tf.expand_dims(cms, axis=0), lengths=[2])
out = instance_peaks_layer({"crops": crops})
assert tuple(out["instance_peaks"].shape) == (1, None, 3, 2)
assert tuple(out["instance_peak_vals"].shape) == (1, None, 3)
assert tuple(out["instance_peaks"].bounding_shape()) == (1, 2, 3, 2)
assert tuple(out["instance_peak_vals"].bounding_shape()) == (1, 2, 3)
assert_allclose(out["instance_peaks"][0][0].numpy(), points.numpy(), atol=0.1)
assert_allclose(out["instance_peak_vals"][0][0].numpy(), [1, 1, 1], atol=0.3)
assert_allclose(out["instance_peaks"][0][1].numpy(), points.numpy() + 1, atol=0.1)
assert_allclose(out["instance_peak_vals"][0][1].numpy(), [1, 1, 1], atol=0.3)
# Offset adjustment and pass through centroids
instance_peaks_layer = FindInstancePeaks(
keras_model=model,
input_scale=1.0,
peak_threshold=0.2,
return_confmaps=True,
refinement="integral",
)
# (samples, h, w, c) -> (samples, ?, h, w, c)
crops = tf.RaggedTensor.from_tensor(tf.expand_dims(cms, axis=1), lengths=[1, 1])
# (samples, centroids, 2) -> (samples, ?, 2)
crop_offsets = tf.RaggedTensor.from_tensor(
tf.reshape(tf.cast([1, 2, 3, 4], tf.float32), [2, 1, 2]), lengths=[1, 1]
)
out = instance_peaks_layer(
{
"crops": crops,
"centroids": tf.zeros([]),
"centroid_vals": tf.zeros([]),
"crop_offsets": crop_offsets,
}
)
assert "centroids" in out
assert "centroid_vals" in out
assert_allclose(
out["instance_peaks"][0][0].numpy(),
points.numpy() + np.array([[1, 2]]),
atol=0.1,
)
assert_allclose(
out["instance_peaks"][1][0].numpy(),
points.numpy() + 1 + np.array([[3, 4]]),
atol=0.1,
)
# Input scaling
scale = 0.5
instance_peaks_layer = FindInstancePeaks(
keras_model=model,
input_scale=scale,
peak_threshold=0.2,
return_confmaps=False,
refinement="integral",
)
xv, yv = make_grid_vectors(
image_height=12 / scale, image_width=12 / scale, output_stride=1
)
points = tf.cast([[1.5, 2.5], [3.5, 4.5], [5.5, 6.5]], tf.float32)
cms = tf.stack(
[
make_confmaps(points / scale, xv, yv, sigma=1.0 / scale),
make_confmaps((points + 1) / scale, xv, yv, sigma=1.0 / scale),
],
axis=0,
)
out = instance_peaks_layer(cms)
assert_allclose(
out["instance_peaks"][0][0].numpy(), points.numpy() / scale, atol=0.15
)
assert_allclose(
out["instance_peaks"][1][0].numpy(), (points.numpy() + 1) / scale, atol=0.15
)
def test_topdown_model(test_pipeline):
model = TopDownInferenceModel(
centroid_crop=CentroidCropGroundTruth(crop_size=4),
instance_peaks=FindInstancePeaksGroundTruth(),
)
out = model.predict(test_pipeline.make_dataset())
assert tuple(out["centroids"].shape) == (8, 2, 2)
assert tuple(out["centroid_vals"].shape) == (8, 2)
assert tuple(out["instance_peaks"].shape) == (8, 2, 2, 2)
assert tuple(out["instance_peak_vals"].shape) == (8, 2, 2)
assert tuple(out["n_valid"].shape) == (8,)
assert (out["n_valid"] == [1, 1, 1, 2, 2, 2, 2, 2]).all()
def test_inference_layer():
# Convert to float
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = sleap.nn.inference.InferenceLayer(
keras_model=keras_model, input_scale=1.0, pad_to_stride=1, ensure_grayscale=None
)
data = tf.cast(tf.fill([1, 4, 4, 1], 255), tf.uint8)
out = layer(data)
assert out.dtype == tf.float32
assert tuple(out.shape) == (1, 4, 4, 1)
assert tf.reduce_all(out == 1.0)
# Convert from rgb to grayscale, infer ensure grayscale
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = sleap.nn.inference.InferenceLayer(
keras_model=keras_model, input_scale=1.0, pad_to_stride=1, ensure_grayscale=None
)
data = tf.cast(tf.fill([1, 4, 4, 3], 255), tf.uint8)
out = layer(data)
assert layer.ensure_grayscale
assert out.dtype == tf.float32
assert tuple(out.shape) == (1, 4, 4, 1)
assert tf.reduce_all(out == 1.0)
# Infer ensure rgb, convert from grayscale
x_in = tf.keras.layers.Input([4, 4, 3])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = sleap.nn.inference.InferenceLayer(
keras_model=keras_model, input_scale=1.0, pad_to_stride=1, ensure_grayscale=None
)
data = tf.cast(tf.fill([1, 4, 4, 1], 255), tf.uint8)
out = layer(data)
assert not layer.ensure_grayscale
assert out.dtype == tf.float32
assert tuple(out.shape) == (1, 4, 4, 3)
assert tf.reduce_all(out == 1.0)
# Input scaling
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = sleap.nn.inference.InferenceLayer(
keras_model=keras_model, input_scale=0.5, pad_to_stride=1, ensure_grayscale=None
)
data = tf.cast(tf.fill([1, 8, 8, 1], 255), tf.uint8)
out = layer(data)
assert out.dtype == tf.float32
assert tuple(out.shape) == (1, 4, 4, 1)
assert tf.reduce_all(out == 1.0)
# Stride padding
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = sleap.nn.inference.InferenceLayer(
keras_model=keras_model, input_scale=1, pad_to_stride=2, ensure_grayscale=None
)
data = tf.cast(tf.fill([1, 3, 3, 1], 255), tf.uint8)
out = layer(data)
assert out.dtype == tf.float32
assert tuple(out.shape) == (1, 4, 4, 1)
# Scaling and stride padding
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = sleap.nn.inference.InferenceLayer(
keras_model=keras_model, input_scale=0.5, pad_to_stride=2, ensure_grayscale=None
)
data = tf.cast(tf.fill([1, 6, 6, 1], 255), tf.uint8)
out = layer(data)
assert out.dtype == tf.float32
assert tuple(out.shape) == (1, 4, 4, 1)
def test_get_model_output_stride():
# Single input/output
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x)(x_in)
model = tf.keras.Model(x_in, x)
assert get_model_output_stride(model) == 1
# Single input/output, downsampled
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.MaxPool2D(strides=2, padding="same")(x_in)
model = tf.keras.Model(x_in, x)
assert get_model_output_stride(model) == 2
# Single input/output, downsampled, uneven
x_in = tf.keras.layers.Input([5, 5, 1])
x = tf.keras.layers.MaxPool2D(strides=2, padding="same")(x_in)
model = tf.keras.Model(x_in, x)
assert model.output.shape[1] == 3
with pytest.warns(UserWarning):
stride = get_model_output_stride(model)
assert stride == 1
# Multi input/output
x_in = [tf.keras.layers.Input([4, 4, 1]), tf.keras.layers.Input([8, 8, 1])]
x = [tf.keras.layers.MaxPool2D(strides=2, padding="same")(x) for x in x_in]
model = tf.keras.Model(x_in, x)
assert get_model_output_stride(model) == 1
assert get_model_output_stride(model, input_ind=0, output_ind=0) == 2
assert get_model_output_stride(model, input_ind=0, output_ind=1) == 1
assert get_model_output_stride(model, input_ind=1, output_ind=0) == 4
assert get_model_output_stride(model, input_ind=1, output_ind=1) == 2
def test_find_head():
x_in = tf.keras.layers.Input([4, 4, 1])
x = tf.keras.layers.Lambda(lambda x: x, name="A_0")(x_in)
model = tf.keras.Model(x_in, x)
assert find_head(model, "A") == 0
assert find_head(model, "B") is None
def test_single_instance_inference():
xv, yv = make_grid_vectors(image_height=12, image_width=12, output_stride=1)
points = tf.cast([[1.75, 2.75], [3.75, 4.75], [5.75, 6.75]], tf.float32)
points = np.stack([points, points + 1], axis=0)
cms = tf.stack(
[
make_confmaps(points[0], xv, yv, sigma=1.0),
make_confmaps(points[1], xv, yv, sigma=1.0),
],
axis=0,
)
x_in = tf.keras.layers.Input([12, 12, 3])
x = tf.keras.layers.Lambda(lambda x: x, name="SingleInstanceConfmapsHead")(x_in)
keras_model = tf.keras.Model(x_in, x)
layer = SingleInstanceInferenceLayer(keras_model=keras_model, refinement="local")
assert layer.output_stride == 1
out = layer(cms)
assert tuple(out["instance_peaks"].shape) == (2, 1, 3, 2)
out["instance_peaks"] = tf.squeeze(out["instance_peaks"], axis=1)
assert tuple(out["instance_peak_vals"].shape) == (2, 1, 3)
out["instance_peak_vals"] = tf.squeeze(out["instance_peak_vals"], axis=1)
assert_array_equal(out["instance_peaks"], points)
assert_allclose(out["instance_peak_vals"], 1.0, atol=0.1)
assert "confmaps" not in out
out = layer({"image": cms})
assert tuple(out["instance_peaks"].shape) == (2, 1, 3, 2)
out["instance_peaks"] = tf.squeeze(out["instance_peaks"], axis=1)
assert_array_equal(out["instance_peaks"], points)
layer = SingleInstanceInferenceLayer(
keras_model=keras_model, refinement="local", return_confmaps=True
)
out = layer(cms)
assert "confmaps" in out
assert_array_equal(out["confmaps"], cms)
model = SingleInstanceInferenceModel(layer)
preds = model.predict(cms)
assert preds["instance_peaks"].shape == (2, 1, 3, 2)
preds["instance_peaks"] = preds["instance_peaks"].squeeze(axis=1)
assert_array_equal(preds["instance_peaks"], points)
assert "instance_peak_vals" in preds
assert "confmaps" in preds
def test_single_instance_predictor(
min_labels_robot, min_single_instance_robot_model_path
):
predictor = SingleInstancePredictor.from_trained_models(
min_single_instance_robot_model_path
)
predictor.verbosity = "none"
assert predictor.is_grayscale == False
labels_pr = predictor.predict(min_labels_robot)
assert len(labels_pr) == 2
assert len(labels_pr[0].instances) == 1
points_gt = np.concatenate(
[min_labels_robot[0][0].numpy(), min_labels_robot[1][0].numpy()], axis=0
)
points_pr = np.concatenate(
[labels_pr[0][0].numpy(), labels_pr[1][0].numpy()], axis=0
)
assert_allclose(points_gt, points_pr, atol=10.0)
def test_single_instance_predictor_high_peak_thresh(
min_labels_robot, min_single_instance_robot_model_path
):
predictor = SingleInstancePredictor.from_trained_models(
min_single_instance_robot_model_path, peak_threshold=1.5
)
predictor.verbosity = "none"
labels_pr = predictor.predict(min_labels_robot)
assert len(labels_pr) == 2
assert labels_pr[0][0].n_visible_points == 0
assert labels_pr[1][0].n_visible_points == 0
def test_topdown_predictor_centroid(min_labels, min_centroid_model_path):
predictor = TopDownPredictor.from_trained_models(
centroid_model_path=min_centroid_model_path
)
predictor.verbosity = "none"
labels_pr = predictor.predict(min_labels)
assert len(labels_pr) == 1
assert len(labels_pr[0].instances) == 2
assert predictor.is_grayscale == True
points_gt = np.concatenate(
[min_labels[0][0].numpy(), min_labels[0][1].numpy()], axis=0
)
points_pr = np.concatenate(
[labels_pr[0][0].numpy(), labels_pr[0][1].numpy()], axis=0
)
inds1, inds2 = sleap.nn.utils.match_points(points_gt, points_pr)
assert_allclose(points_gt[inds1.numpy()], points_pr[inds2.numpy()], atol=1.5)
def test_topdown_predictor_centered_instance(
min_labels, min_centered_instance_model_path
):
predictor = TopDownPredictor.from_trained_models(
confmap_model_path=min_centered_instance_model_path
)
predictor.verbosity = "none"
labels_pr = predictor.predict(min_labels)
assert len(labels_pr) == 1
assert len(labels_pr[0].instances) == 2
assert predictor.is_grayscale == True
points_gt = np.concatenate(
[min_labels[0][0].numpy(), min_labels[0][1].numpy()], axis=0
)
points_pr = np.concatenate(
[labels_pr[0][0].numpy(), labels_pr[0][1].numpy()], axis=0
)
inds1, inds2 = sleap.nn.utils.match_points(points_gt, points_pr)
assert_allclose(points_gt[inds1.numpy()], points_pr[inds2.numpy()], atol=1.5)
def test_bottomup_predictor(min_labels, min_bottomup_model_path):
predictor = BottomUpPredictor.from_trained_models(
model_path=min_bottomup_model_path
)
predictor.verbosity = "none"
labels_pr = predictor.predict(min_labels)
assert len(labels_pr) == 1
assert len(labels_pr[0].instances) == 2
assert predictor.is_grayscale == True
points_gt = np.concatenate(
[min_labels[0][0].numpy(), min_labels[0][1].numpy()], axis=0
)
points_pr = np.concatenate(
[labels_pr[0][0].numpy(), labels_pr[0][1].numpy()], axis=0
)
inds1, inds2 = sleap.nn.utils.match_points(points_gt, points_pr)
assert_allclose(points_gt[inds1.numpy()], points_pr[inds2.numpy()], atol=1.75)
# Test inference with score threshold too high
predictor = BottomUpPredictor.from_trained_models(
model_path=min_bottomup_model_path,
min_line_scores=1.1,
)
predictor.verbosity = "none"
labels_pr = predictor.predict(min_labels)
assert len(labels_pr[0]) == 0
def test_bottomup_multiclass_predictor(
min_tracks_2node_labels, min_bottomup_multiclass_model_path
):
labels_gt = sleap.Labels(min_tracks_2node_labels[[0]])
predictor = BottomUpMultiClassPredictor.from_trained_models(
model_path=min_bottomup_multiclass_model_path,
peak_threshold=0.7,
integral_refinement=False,
)
labels_pr = predictor.predict(labels_gt)
assert len(labels_pr) == 1
assert len(labels_pr[0].instances) == 2
inds1 = np.argsort([x.track.name for x in labels_gt[0]])
inds2 = np.argsort([x.track.name for x in labels_pr[0]])
assert labels_gt[0][inds1[0]].track == labels_pr[0][inds2[0]].track
assert labels_gt[0][inds1[1]].track == labels_pr[0][inds2[1]].track
assert_allclose(
labels_gt[0][inds1[0]].numpy(), labels_pr[0][inds2[0]].numpy(), rtol=0.02
)
assert_allclose(
labels_gt[0][inds1[1]].numpy(), labels_pr[0][inds2[1]].numpy(), rtol=0.02
)
labels_pr = predictor.predict(
sleap.nn.data.pipelines.VideoReader(labels_gt.video, example_indices=[0])
)
    assert labels_pr[0][0].track.name == "female"
    assert labels_pr[0][1].track.name == "male"
def test_topdown_multiclass_predictor(
min_tracks_2node_labels, min_topdown_multiclass_model_path
):
labels_gt = sleap.Labels(min_tracks_2node_labels[[0]])
predictor = TopDownMultiClassPredictor.from_trained_models(
confmap_model_path=min_topdown_multiclass_model_path,
peak_threshold=0.7,
integral_refinement=False,
)
labels_pr = predictor.predict(labels_gt)
assert len(labels_pr) == 1
assert len(labels_pr[0].instances) == 2
inds1 = np.argsort([x.track.name for x in labels_gt[0]])
inds2 = np.argsort([x.track.name for x in labels_pr[0]])
assert labels_gt[0][inds1[0]].track == labels_pr[0][inds2[0]].track
assert labels_gt[0][inds1[1]].track == labels_pr[0][inds2[1]].track
assert_allclose(
labels_gt[0][inds1[0]].numpy(), labels_pr[0][inds2[0]].numpy(), rtol=0.02
)
assert_allclose(
labels_gt[0][inds1[1]].numpy(), labels_pr[0][inds2[1]].numpy(), rtol=0.02
)
def test_load_model(
min_single_instance_robot_model_path,
min_centroid_model_path,
min_centered_instance_model_path,
min_bottomup_model_path,
min_topdown_multiclass_model_path,
min_bottomup_multiclass_model_path,
):
predictor = load_model(min_single_instance_robot_model_path)
assert isinstance(predictor, SingleInstancePredictor)
predictor = load_model([min_centroid_model_path, min_centered_instance_model_path])
assert isinstance(predictor, TopDownPredictor)
predictor = load_model(min_bottomup_model_path)
assert isinstance(predictor, BottomUpPredictor)
predictor = load_model([min_centroid_model_path, min_topdown_multiclass_model_path])
assert isinstance(predictor, TopDownMultiClassPredictor)
predictor = load_model(min_bottomup_multiclass_model_path)
assert isinstance(predictor, BottomUpMultiClassPredictor)
def test_ensure_numpy(
min_centroid_model_path, min_centered_instance_model_path, min_labels_slp
):
model = load_model([min_centroid_model_path, min_centered_instance_model_path])
# each frame has same number of instances
same_shape = min_labels_slp.video[:4]
out = model.inference_model.predict(same_shape, numpy=False)
assert type(out["instance_peaks"]) == tf.RaggedTensor
assert type(out["instance_peak_vals"]) == tf.RaggedTensor
assert type(out["centroids"]) == tf.RaggedTensor
assert type(out["centroid_vals"]) == tf.RaggedTensor
out = model.inference_model.predict(same_shape, numpy=True)
assert type(out["instance_peaks"]) == np.ndarray
assert type(out["instance_peak_vals"]) == np.ndarray
assert type(out["centroids"]) == np.ndarray
assert type(out["centroid_vals"]) == np.ndarray
assert type(out["n_valid"]) == np.ndarray
out = model.inference_model.predict_on_batch(same_shape, numpy=False)
assert type(out["instance_peaks"]) == tf.RaggedTensor
assert type(out["instance_peak_vals"]) == tf.RaggedTensor
assert type(out["centroids"]) == tf.RaggedTensor
assert type(out["centroid_vals"]) == tf.RaggedTensor
out = model.inference_model.predict_on_batch(same_shape, numpy=True)
assert type(out["instance_peaks"]) == np.ndarray
assert type(out["instance_peak_vals"]) == np.ndarray
assert type(out["centroids"]) == np.ndarray
assert type(out["centroid_vals"]) == np.ndarray
assert type(out["n_valid"]) == np.ndarray
# variable number of instances
diff_shape = min_labels_slp.video[4:8]
out = model.inference_model.predict(diff_shape, numpy=False)
assert type(out["instance_peaks"]) == tf.RaggedTensor
assert type(out["instance_peak_vals"]) == tf.RaggedTensor
assert type(out["centroids"]) == tf.RaggedTensor
assert type(out["centroid_vals"]) == tf.RaggedTensor
out = model.inference_model.predict(diff_shape, numpy=True)
assert type(out["instance_peaks"]) == np.ndarray
assert type(out["instance_peak_vals"]) == np.ndarray
assert type(out["centroids"]) == np.ndarray
assert type(out["centroid_vals"]) == np.ndarray
assert type(out["n_valid"]) == np.ndarray
out = model.inference_model.predict_on_batch(diff_shape, numpy=False)
assert type(out["instance_peaks"]) == tf.RaggedTensor
assert type(out["instance_peak_vals"]) == tf.RaggedTensor
assert type(out["centroids"]) == tf.RaggedTensor
assert type(out["centroid_vals"]) == tf.RaggedTensor
out = model.inference_model.predict_on_batch(diff_shape, numpy=True)
assert type(out["instance_peaks"]) == np.ndarray
assert type(out["instance_peak_vals"]) == np.ndarray
assert type(out["centroids"]) == np.ndarray
assert type(out["centroid_vals"]) == np.ndarray
assert type(out["n_valid"]) == np.ndarray
| 2.03125 | 2 |
week4/w4e1.py | melphick/pybasics | 0 | 12770735 | #!/usr/bin/env python
import sys
valid = False
reason = []
while valid == False:
    valid = True
    reason = []  # reset reasons from the previous attempt so they do not accumulate
ip_addr = raw_input("please enter an IP: ")
print "The IP is now: %s" % ip_addr
ip_list = ip_addr.split('.')
    if len(ip_list) != 4:
valid = False
reason.append('IP Address does not have 4 octets')
if (int(ip_list[0]) < 1) or (int(ip_list[0]) > 223):
valid = False
reason.append('IP Address first octet is not between 1 and 223')
if (int(ip_list[0]) == 127):
valid = False
reason.append('IP Address first octet is 127')
if (int(ip_list[0]) == 169) and (int(ip_list[1]) == 254):
valid = False
reason.append('IP Address first and second octet is 169 and 254')
if (valid == True) and ((int(ip_list[1]) >= 255) or (int(ip_list[2]) >= 255) or (int(ip_list[3]) >= 255)):
valid = False
reason.append('IP Address second third or fourth octet is over 255')
if (valid == True) and ((int(ip_list[1]) < 0) or (int(ip_list[2]) < 0) or (int(ip_list[3]) < 0)):
valid = False
reason.append('IP Address second third or fourth octet is less than 0')
print valid
print reason
| 3.765625 | 4 |
utils/status.py | loribonna/CSSL | 3 | 12770736 | # Copyright 2021-present, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import sys
from time import time
from typing import Union
def progress_bar(i: int, max_iter: int, epoch: Union[int, str],
task_number: int, loss: float) -> None:
"""
Prints out the progress bar on the stderr file.
:param i: the current iteration
    :param max_iter: the maximum number of iterations
:param epoch: the epoch
:param task_number: the task index
:param loss: the current value of the loss function
"""
if not (i + 1) % 10 or (i + 1) == max_iter:
progress = min(float((i + 1) / max_iter), 1)
progress_bar = ('█' * int(50 * progress)) + ('┈' * (50 - int(50 * progress)))
print('\r[ {} ] Task {} | epoch {}: |{}| loss: {}'.format(
datetime.now().strftime("%m-%d | %H:%M"),
task_number + 1 if isinstance(task_number, int) else task_number,
epoch,
progress_bar,
round(loss / (i + 1), 8)
), file=sys.stderr, end='', flush=True)
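# Typical call inside a training loop (illustrative; note that progress_bar
# averages the passed loss over i + 1, so pass a running sum):
#   running_loss = 0.0
#   for i in range(max_iter):
#       running_loss += train_step()
#       progress_bar(i, max_iter, epoch=1, task_number=0, loss=running_loss)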
class ProgressBar():
def __init__(self):
self.old_time = 0
self.running_sum = 0
def __call__(self, i: int, max_iter: int, epoch: Union[int, str],
task_number: int, loss: float) -> None:
"""
Prints out the progress bar on the stderr file.
:param i: the current iteration
        :param max_iter: the maximum number of iterations
:param epoch: the epoch
:param task_number: the task index
:param loss: the current value of the loss function
"""
if i == 0:
self.old_time = time()
self.running_sum = 0
else:
self.running_sum = self.running_sum + (time() - self.old_time)
self.old_time = time()
if i:
progress = min(float((i + 1) / max_iter), 1)
progress_bar = ('█' * int(50 * progress)) + ('┈' * (50 - int(50 * progress)))
print('\r[ {} ] Task {} | epoch {}: |{}| {} ep/h | loss: {} |'.format(
datetime.now().strftime("%m-%d | %H:%M"),
task_number + 1 if isinstance(task_number, int) else task_number,
epoch,
progress_bar,
round(3600 / (self.running_sum / i * max_iter), 2),
round(loss, 8)
), file=sys.stderr, end='', flush=True)
| 3.015625 | 3 |
src/chathamhouse/chathamhousedata.py | OCHA-DAP/hdx-scraper-chathamhouse | 0 | 12770737 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Chatham House Data
------------------
Collects input data for Chatham House.
"""
import logging
from hdx.data.dataset import Dataset
from hdx.data.resource import Resource
from hdx.data.showcase import Showcase
from hdx.utilities.dictandlist import integer_value_convert
from hdx.location.country import Country
from slugify import slugify
logger = logging.getLogger(__name__)
def append_value(countrydict, iso3, tier_or_type, name, value):
tiers_or_types = countrydict.get(iso3)
if tiers_or_types is None:
tiers_or_types = dict()
countrydict[iso3] = tiers_or_types
camps = tiers_or_types.get(tier_or_type)
if camps is None:
camps = dict()
tiers_or_types[tier_or_type] = camps
existing_pop = camps.get(name)
if existing_pop is None:
existing_pop = 0
camps[name] = existing_pop + value
def check_name_dispersed(name):
lowername = name.lower()
if 'dispersed' in lowername and ('country' in name.lower() or 'territory' in name.lower()):
return True
return False
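# e.g. check_name_dispersed('Dispersed in the country / urban') -> True,
# while a named camp such as 'Dadaab' -> False.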
def get_iso3(name):
iso3, match = Country.get_iso3_country_code_fuzzy(name, exception=ValueError)
if not match:
logger.info('Country %s matched to ISO3: %s!' % (name, iso3))
return iso3
def get_camp_non_camp_populations(noncamp_types, camp_types, camp_overrides, datasets, downloader):
noncamp_types = noncamp_types.split(',')
camp_types = camp_types.split(',')
dataset_unhcr = None
latest_date = None
for dataset in datasets:
if 'displacement' in dataset['title'].lower():
date = dataset.get_dataset_date_as_datetime()
if latest_date is None or date > latest_date:
dataset_unhcr = dataset
latest_date = date
if dataset_unhcr is None:
raise ValueError('No UNHCR dataset found!')
url = dataset_unhcr.get_resources()[0]['url']
country_ind = 0 # assume first column contains country
iso3 = None
row = None
prev_row = None
all_camps_per_country = dict()
unhcr_non_camp = dict()
unhcr_camp = dict()
unhcr_camp_excluded = dict()
rowiter = downloader.get_tabular_rows(url, sheet='Tab15')
for row in rowiter:
country = row[country_ind]
iso3 = Country.get_iso3_country_code(country)
if iso3 is not None:
break
prev_row = row
accommodation_ind = None
location_ind = None
population_ind = None
population = None
for i, text in enumerate(prev_row):
header = text.lower()
value = row[i]
if 'accommodation' in header:
accommodation_ind = i
elif 'location' in header and len(value) > 1:
location_ind = i
else:
try:
population = int(value)
population_ind = i
break
except ValueError:
pass
campname = row[location_ind]
def get_accommodation_type(name):
accom_type = camp_overrides['Accommodation Type'].get(name)
if accom_type is None:
accom_type = row[accommodation_ind]
else:
logger.info('Overriding accommodation type to %s for %s' % (accom_type, name))
return accom_type.lower()
accommodation_type = get_accommodation_type(campname)
def match_camp_types(name, accom_type, pop, iso):
if check_name_dispersed(name):
accom_type = noncamp_types[0]
found_camp_type = None
for camp_type in camp_types:
if camp_type in accom_type:
found_camp_type = camp_type
unhcr_camp[name] = pop, iso, found_camp_type
break
for noncamp_type in noncamp_types:
if noncamp_type in accom_type:
found_camp_type = noncamp_type
append_value(unhcr_non_camp, iso, found_camp_type, name, pop)
break
if found_camp_type is None:
append_value(unhcr_camp_excluded, iso, accom_type, name, pop)
append_value(all_camps_per_country, iso, accom_type, name, pop)
else:
append_value(all_camps_per_country, iso, found_camp_type, name, pop)
match_camp_types(campname, accommodation_type, population, iso3)
for row in rowiter:
country = row[country_ind]
if not country:
continue
if 'NOTES' in country.upper():
break
iso3, match = Country.get_iso3_country_code_fuzzy(country)
if iso3 is None:
logger.warning('Country %s could not be matched to ISO3 code!' % country)
continue
else:
if match is False:
logger.info('Matched %s to ISO3: %s!' % (country, iso3))
campname = row[location_ind]
accommodation_type = get_accommodation_type(campname)
population = int(row[population_ind])
match_camp_types(campname, accommodation_type, population, iso3)
for campname in sorted(camp_overrides['Population']):
if campname in unhcr_camp:
continue
iso3 = camp_overrides['Country'][campname]
accommodation_type = camp_overrides['Accommodation Type'][campname].lower()
population = camp_overrides['Population'][campname]
logger.info('Adding camp from override: %s (%s, %s): %d' % (campname, iso3, accommodation_type, population))
match_camp_types(campname, accommodation_type, population, iso3)
return all_camps_per_country, unhcr_non_camp, unhcr_camp, unhcr_camp_excluded
def get_camptypes(url, downloader):
camptypes = downloader.download_tabular_rows_as_dicts(url)
for key in camptypes:
camptypes[key] = integer_value_convert(camptypes[key])
return camptypes
def get_camptypes_fallbacks(url, downloader, keyfn=lambda x: x):
camptypes = downloader.download_tabular_rows_as_dicts(url)
camptypes_offgrid = dict()
camptypes_solid = dict()
for key in camptypes:
new_key = keyfn(key)
camptypes_offgrid[new_key] = dict()
camptypes_solid[new_key] = dict()
for tier in camptypes[key]:
try:
typeval = int(camptypes[key][tier])
if 'Lighting OffGrid' in tier:
camptypes_offgrid[new_key][tier.replace('Lighting OffGrid ', '')] = typeval
else:
camptypes_solid[new_key][tier.replace('Cooking Solid ', '')] = typeval
except ValueError:
pass
return camptypes_offgrid, camptypes_solid
def get_worldbank_series(json_url, downloader):
response = downloader.download(json_url)
json = response.json()
data = dict()
for countrydata in json[1]:
iso3 = Country.get_iso3_from_iso2(countrydata['country']['id'])
if iso3 is not None:
value = countrydata.get('value')
if value:
data[iso3] = float(value) / 100.0
return data
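# Example (illustrative World Bank API v2 indicator URL; values come back as
# fractions keyed by ISO3 country code):
#   json_url = ('http://api.worldbank.org/v2/country/all/indicator/'
#               'EG.ELC.ACCS.ZS?format=json&per_page=20000&mrnev=1')
#   elec_access = get_worldbank_series(json_url, downloader)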
def get_slumratios(url, downloader):
stream = downloader.get_tabular_stream(url, headers=1, format='csv', compression='zip')
years = set()
for header in stream.headers:
try:
int(header)
years.add(header)
except ValueError:
pass
years = sorted(years, reverse=True)
slumratios = dict()
for row in stream.iter(keyed=True):
if not row:
break
iso3 = Country.get_iso3_from_m49(int(row['CountryCode']))
if iso3 is None:
continue
for year in years:
value = row.get(year)
if value and value != ' ':
slumratios[iso3] = float(value) / 100.0
return slumratios
def generate_dataset_resources_and_showcase(pop_types, today):
title = 'Energy consumption of refugees and displaced people'
slugified_name = slugify(title.lower())
dataset = Dataset({
'name': slugified_name,
'title': title,
})
dataset.set_maintainer('196196be-6037-4488-8b71-d786adf4c081')
dataset.set_organization('0c6bf79f-504c-4ba5-9fdf-c8cc893c8b2f')
dataset.set_dataset_date_from_datetime(today)
dataset.set_expected_update_frequency('Every month')
dataset.add_other_location('world')
tags = ['HXL', 'energy', 'refugees', 'internally displaced persons - idp']
dataset.add_tags(tags)
resources = list()
for pop_type in pop_types:
resource_data = {
'name': '%s_consumption.csv' % pop_type.lower().replace(' ', '_'),
'description': '%s %s' % (pop_type, title.lower()),
'format': 'csv'
}
resources.append(Resource(resource_data))
resource_data = {
'name': 'population.csv',
'description': 'UNHCR displaced population totals',
'format': 'csv'
}
resources.append(Resource(resource_data))
resource_data = {
'name': 'keyfigures_disagg.csv',
'description': 'Disaggregated MEI Key Figures',
'format': 'csv'
}
resources.append(Resource(resource_data))
resource_data = {
'name': 'keyfigures.csv',
'description': 'MEI Key Figures',
'format': 'csv'
}
resources.append(Resource(resource_data))
showcase = Showcase({
'name': '%s-showcase' % slugified_name,
'title': 'Energy services for refugees and displaced people',
'notes': 'Click the image on the right to go to the energy services model',
'url': 'http://www.sciencedirect.com/science/article/pii/S2211467X16300396',
'image_url': 'https://ars.els-cdn.com/content/image/X2211467X.jpg'
})
showcase.add_tags(tags)
return dataset, resources, showcase
| 2.40625 | 2 |
impact/elements/element.py | jacquelinegarrahan/lume-impact | 1 | 12770738 | <reponame>jacquelinegarrahan/lume-impact
from impact import parsers
class ImpactEle:
    """Base class for an Impact lattice element (minimal stub)."""
    def __init__(self, L=0, type='drift'):
        # Store the element length and type instead of discarding them;
        # 'drift' is the default element type.
        self.L = L
        self.type = type
| 1.5 | 2 |
lecture_11_advanced_topics/additional_materials/code_samples_from_lecture/lazy_property.py | DKudrik/epam_python_autumn_2020 | 37 | 12770739 | # https://github.com/lord63-forks/python-patterns/blob/patch-3/lazy_evaluation.py
def lazy_property(fn):
"""Decorator that makes a property lazy-evaluated."""
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
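# First access computes fn(self) once and caches the result on the instance
# under '_lazy_<name>'; every later access returns the cached attribute.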
class Person(object):
def __init__(self, name, occupation):
self.name = name
self.occupation = occupation
@lazy_property
def relatives(self):
# Get all relatives, let's assume that it costs much time.
relatives = "Many relatives."
return relatives
def main():
Jhon = Person('Jhon', 'Coder')
print("Name: {0} Occupation: {1}".format(Jhon.name, Jhon.occupation))
print("Before we access `relatives`:")
print(Jhon.__dict__)
print("Jhon's relatives: {0}".format(Jhon.relatives))
print("After we've accessed `relatives`:")
print(Jhon.__dict__)
if __name__ == '__main__':
main() | 3.5 | 4 |
cas.py | anawwy/tornado-cas-oauth2 | 0 | 12770740 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
from tornado import gen
from http_client import AAsyncHTTPClient, get_url
class CASHelper(AAsyncHTTPClient):
CA_CERT_PATH = 'XXXXX'
@classmethod
def _base_url(cls):
'''cas server url prefix'''
return 'https://cas.test.change.it:8443'
@classmethod
def get_login_url(cls, redirect_uri, **kwargs):
kwargs['redirect_uri'] = redirect_uri
return get_url(cls._base_url(), '/oauth2/authorize', data=kwargs)
@classmethod
def get_logout_url(cls, ):
return get_url(cls._base_url(), '/logout')
@classmethod
@gen.coroutine
def async_access_token(cls, redirect_uri, code):
result = yield cls.async_get('/oauth2/accessToken',
ca_certs=cls.CA_CERT_PATH,
data={'code': code, 'redirect_uri': redirect_uri})
raise gen.Return(result)
@classmethod
def unpack_access_token(cls, result):
access_token, expires = None, None
if result:
result_array = result.split('&')
if len(result_array) == 2:
access_token_array = result_array[0].split('=')
expires_array = result_array[1].split('=')
if len(access_token_array) == 2 and access_token_array[0] == 'access_token' \
and len(expires_array) == 2 and expires_array[0] == 'expires':
access_token = access_token_array[1]
expires = float(expires_array[1])
return access_token, expires
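# unpack_access_token parses the classic urlencoded CAS response, e.g.
# 'access_token=AT-1-abc&expires=7200' -> ('AT-1-abc', 7200.0).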
@classmethod
@gen.coroutine
def async_profile(cls, access_token):
result = yield cls.async_get('/oauth2/profile',
ca_certs=cls.CA_CERT_PATH,
data={'access_token': access_token})
raise gen.Return(result)
@classmethod
def unpack_profile(cls, profile):
result = json.loads(profile)
profile_id = result.get('id')
return profile_id
| 2.03125 | 2 |
nicos_mlz/mira/setups/memograph.py | ebadkamil/nicos | 0 | 12770741 | <gh_stars>0
description = 'memograph readout'
group = 'optional'
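# Each device below reads one channel of the memograph cooling-water monitor
# for MIRA (group 1 on memograph02.care.frm2). warnlimits entries use sentinel
# values where only one bound matters (see the inline comments).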
devices = dict(
t_in_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'T_in MIRA',
description = 'inlet temperature memograph',
fmtstr = '%.2F',
        warnlimits = (-1, 17.5),  # -1: no effective lower bound
unit = 'degC',
),
t_out_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'T_out MIRA',
description = 'outlet temperature memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
unit = 'degC',
),
p_in_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'P_in MIRA',
description = 'inlet pressure memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
unit = 'bar',
),
p_out_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'P_out MIRA',
description = 'outlet pressure memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
unit = 'bar',
),
flow_in_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'FLOW_in MIRA',
description = 'inlet flow memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
        warnlimits = (0.2, 100),  # 100: no effective upper bound
unit = 'l/min',
),
flow_out_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'FLOW_out MIRA',
description = 'outlet flow memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
unit = 'l/min',
),
leak_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'Leak MIRA',
description = 'leakage memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
        warnlimits = (-1, 1),  # -1: no effective lower bound
unit = 'l/min',
),
cooling_fak40 = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph02.care.frm2',
group = 1,
valuename = 'Cooling MIRA',
description = 'cooling memograph',
pollinterval = 30,
maxage = 60,
fmtstr = '%.2F',
unit = 'kW',
),
)
| 1.476563 | 1 |
15A-175/HI/imaging/match_and_split.py | Astroua/LocalGroup-VLA | 1 | 12770742 | <gh_stars>1-10
'''
Match and split the re-weighted HI MSs
The 14B frequency range includes 2000 channels. That same range in the 17B
data is 2006 channels. So we first regrid and split the data over the same
velocity range. The original channels start at roughly -206 m/s.
Regrid to something common like -210 m/s.
UPDATE: The issue with this regridding is that there is a beat pattern
when changing the channel widths by the such a small amount. CASA 5.3
*seemed* to actually be using linear interpolation, not the FFT shift.
When using the FFT shift in CASA 5.4 on other data, it caused horrific
residuals in the spectral dimension. Unsure why. However, the two
data sets have the same frequency channel width (0.977 kHz) and have a
frequency offset of 0.2% of the channel width. I'm just going to
match frequency bins to the nearest velocity instead of regridding.
For larger channel sizes, I'll round down to the nearest integer.
The individual channels are then split out for imaging. A separate
folder structure is made for each choice of channel width.
The input given is the channel width in km/s. It is assumed that
the start and end points will be the same (or rounded up by 1),
'''
import numpy as np
import sys
from glob import glob
import os
from tasks import mstransform, partition, split, concat
# Use astropy's spectral conversion
# Needs to be installed separately
from astropy import units as u
# This is here for local runs to avoid needing to make an MMS
# Mostly for storage reasons.
use_parallel = False
use_contsub = True if sys.argv[-5] == "True" else False
# All in km/s
chan_width = float(sys.argv[-4])
# Capture about half of M31's velocity range in these
start_vel = -320
end_vel = -6
part = int(sys.argv[-3])
total_parts = int(sys.argv[-2])
out_path = str(sys.argv[-1])
chan_width_label = "{}kms".format(chan_width).replace(".", "_")
chan_width_quant = chan_width * u.km / u.s
# Common fields in B and C
myfields = 'M31*'
# ~262 for 1.2 km/s
# ~762 for 0.4 km/s
# n_chan = int(np.ceil((end_vel - start_vel) / chan_width))
fourteenA_ms = "M31_14A-235_15Afields_HI_spw_0_LSRK_freqmatch.ms"
fifteenA_B_ms = "15A-175_Btracks_HI_spw_0_LSRK.ms"
fifteenA_C_ms = "15A-175_Ctracks_HI_spw_0_LSRK.ms"
fourteenA_mms = "{0}.{1}.regrid".format(fourteenA_ms, chan_width_label)
fifteenA_B_mms = "{0}.{1}.regrid".format(fifteenA_B_ms, chan_width_label)
fifteenA_C_mms = "{0}.{1}.regrid".format(fifteenA_C_ms, chan_width_label)
concat_vis = '14A_15A_HI_LSRK.ms'
if use_contsub:
fourteenA_ms += ".contsub"
fifteenA_B_ms += ".contsub"
fifteenA_C_ms += ".contsub"
concat_vis += ".contsub"
# Create an MMS prior to splitting to that the split can be run in parallel
all_ms = [fourteenA_ms, fifteenA_B_ms, fifteenA_C_ms]
if use_parallel:
all_mms = [fourteenA_mms, fifteenA_B_mms, fifteenA_C_mms]
for myms, mymms in zip(all_ms, all_mms):
if os.path.exists(mymms):
casalog.post("Found {}. Skipping mstransform.".format(mymms))
continue
partition(vis=myms,
outputvis=mymms,
createmms=True,
flagbackup=False,
numsubms=31) # Assuming this is run on a 32-core node.
else:
all_mms = all_ms
def vel_to_freq(vel_or_freq, rest_freq=1.42040575177 * u.GHz,
unit=u.Hz):
'''
Using radio velocity here.
'''
equiv = u.doppler_radio(rest_freq)
return vel_or_freq.to(unit, equiv)
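# Radio convention: v = c * (f0 - f) / f0, i.e. f = f0 * (1 - v / c). For the
# HI rest frequency, vel_to_freq(-210 * u.km / u.s) is therefore ~1.4214 GHz
# (an illustrative worked example only; the actual values are computed below).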
def closest_channel(freqs, targ_freq):
return np.argmin(np.abs(freqs - targ_freq))
# Get the HI SPW freqs
tb.open(os.path.join(fourteenA_ms, 'SPECTRAL_WINDOW'))
chanfreqs_14A = tb.getcol('CHAN_FREQ').squeeze()
tb.close()
delta_freq_14A = np.abs(np.diff(chanfreqs_14A))[0]
tb.open(os.path.join(fifteenA_B_ms, 'SPECTRAL_WINDOW'))
chanfreqs_15A_B = tb.getcol('CHAN_FREQ').squeeze()
tb.close()
delta_freq_15A_B = np.abs(np.diff(chanfreqs_15A_B))[0]
tb.open(os.path.join(fifteenA_C_ms, 'SPECTRAL_WINDOW'))
chanfreqs_15A_C = tb.getcol('CHAN_FREQ').squeeze()
tb.close()
delta_freq_15A_C = np.abs(np.diff(chanfreqs_15A_C))[0]
# They should be really close
# Within 0.33 kHz, 10^-5 of the channel width.
assert abs(delta_freq_15A_B - delta_freq_15A_C) < 0.5
assert abs(delta_freq_14A - delta_freq_15A_C) < 0.5
# Find the number of channels to get closest to the requested velocity width
vunit = u.km / u.s
vel_width = \
np.abs(vel_to_freq(chanfreqs_14A[len(chanfreqs_14A) // 2] * u.Hz, unit=vunit) -
vel_to_freq(chanfreqs_14A[len(chanfreqs_14A) // 2 - 1] * u.Hz, unit=vunit))
navg_channel = int(round((chan_width_quant / vel_width).value))
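# Worked example (illustrative numbers only, not used directly): the native
# 0.977 kHz channel width corresponds to ~0.206 km/s at 1.42 GHz, so a
# requested chan_width of 1.2 km/s gives navg_channel = round(1.2 / 0.206) = 6
# original channels per output channel.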
start_freq = vel_to_freq(start_vel * u.km / u.s)
end_freq = vel_to_freq(end_vel * u.km / u.s)
# Find the start and finish channels in each MS
start_14A_chan = closest_channel(chanfreqs_14A * u.Hz, start_freq)
end_14A_chan = closest_channel(chanfreqs_14A * u.Hz, end_freq)
if start_14A_chan > end_14A_chan:
start_14A_chan, end_14A_chan = end_14A_chan, start_14A_chan
start_15A_B_chan = closest_channel(chanfreqs_15A_B * u.Hz, start_freq)
end_15A_B_chan = closest_channel(chanfreqs_15A_B * u.Hz, end_freq)
if start_15A_B_chan > end_15A_B_chan:
start_15A_B_chan, end_15A_B_chan = end_15A_B_chan, start_15A_B_chan
# NOTE: Due to rounding <<chan_width, we need to +1 to end_15A_B_chan
# Then the max offset between channels always remains much smaller than the
# original channel width
end_15A_B_chan += 1
start_15A_C_chan = closest_channel(chanfreqs_15A_C * u.Hz, start_freq)
end_15A_C_chan = closest_channel(chanfreqs_15A_C * u.Hz, end_freq)
if start_15A_C_chan > end_15A_C_chan:
start_15A_C_chan, end_15A_C_chan = end_15A_C_chan, start_15A_C_chan
# Channel number in terms of original channel widths
nchan_14A = end_14A_chan - start_14A_chan
nchan_15A_B = end_15A_B_chan - start_15A_B_chan
nchan_15A_C = end_15A_C_chan - start_15A_C_chan
# These need to be the same. Catch possible rounding errors
assert nchan_14A == nchan_15A_C
assert nchan_14A == nchan_15A_B
# Now convert to the number of channels at the expected velocity resolution
nchan = nchan_14A // navg_channel
# Pad number to reach integer factor of navg_channel
if nchan_14A % navg_channel != 0:
nchan += 1
# Now split out individual channels for imaging.
chan_path = "HI_{0}_{1}".format("contsub" if use_contsub else "nocontsub",
chan_width_label)
if not os.path.exists(chan_path):
os.mkdir(chan_path)
nchan_part = int(np.ceil(nchan / total_parts))
start = part * nchan_part
end = min((part + 1) * nchan_part, nchan)
start_chans = [start_14A_chan, start_15A_B_chan, start_15A_C_chan]
for chan in range(start, end):
casalog.post("On splitting channel {}".format(chan))
# Parallel is to be run on the cedar cluster
# First check to see if this channel is already saved.
#
ind_chan_path = os.path.join(chan_path,
"channel_{}".format(chan))
if not os.path.exists(ind_chan_path):
os.mkdir(ind_chan_path)
concat_vis_name = '{0}_chan_{1}'.format(concat_vis, chan)
concat_ms = os.path.join(ind_chan_path,
concat_vis_name)
# Does it exist already? If yes, skip
if os.path.exists(concat_ms):
casalog.post("Channel {} already has an concatenated MS. Skipping.".format(chan))
continue
if use_parallel:
out_channel = os.path.join(out_path, "channel_{}".format(chan))
if not os.path.exists(out_channel):
os.mkdir(out_channel)
else:
# Does the MS already exist there? If so, skip it here.
scratch_ms = os.path.join(out_channel, concat_vis_name)
if os.path.exists(scratch_ms):
casalog.post("Found the split + concat MS for {} in scratch. "
"Skipping.".format(chan))
continue
chan_mss = []
# Loop through splitting for the 3 MSs
for my_mms, my_ms, start_chan_obs in zip(all_mms, all_ms, start_chans):
chan_msname = "{0}_channel_{1}.ms".format(my_ms, chan)
if use_parallel:
chan_mmsname = "{0}_channel_{1}.mms".format(my_mms, chan)
else:
chan_mmsname = chan_msname
starter = chan * navg_channel + start_chan_obs
ender = (chan + 1) * navg_channel + start_chan_obs - 1
if navg_channel == 1:
# These should equal when not averaging channels
assert starter == ender
spw_selec = "0:{0}".format(starter)
else:
spw_selec = '0:{0}~{1}'.format(starter, ender)
mstransform(vis=my_mms,
outputvis=os.path.join(ind_chan_path, chan_mmsname),
datacolumn='data',
mode='channel',
field=myfields,
spw=spw_selec,
chanaverage=True if navg_channel > 1 else False,
chanbin=navg_channel)
local_split_ms = os.path.join(ind_chan_path, chan_msname)
local_split_mms = os.path.join(ind_chan_path, chan_mmsname)
if use_parallel:
# Convert the final MMS to an MS b/c an MMS uses a lot of files and
# clusters don't like that.
split(vis=local_split_mms,
outputvis=local_split_ms,
keepmms=False, datacolumn='DATA')
# Remove the split MMS
os.system("rm -rf {}".format(local_split_mms))
chan_mss.append(local_split_ms)
    # If the concat MS already exists, delete it; otherwise more data
    # would be appended onto it.
if os.path.exists(concat_ms):
os.system("rm -rf {}".format(concat_ms))
concat(vis=chan_mss,
concatvis=concat_ms)
# Remove the non-concatenated MSs
for chan_ms in chan_mss:
os.system("rm -rf {}".format(chan_ms))
| 2.0625 | 2 |
docs/userguide/processing/smoothing.py | spectrochempy/spectrochempy | 44 | 12770743 | <filename>docs/userguide/processing/smoothing.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: title,-all
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Smoothing
#
# In this tutorial, we show how to smooth spectra along one dimension (another tutorial will be devoted to 2-D
# smoothing) and give information on the algorithms used in Spectrochempy.
#
# We first import spectrochempy, the other libraries used in this tutorial, and a sample dataset (
# nh4y-activation.spg) from which we extract a noisy part:
# %%
import spectrochempy as scp
import numpy as np
# %%
X = scp.read_omnic("irdata//nh4y-activation.spg") # import spectra
X = X[
    0:5, 3600.0:2800.0
]  # select a noisy part (the first 5 spectra in the 3600-2800 cm-1 range)
# %%
prefs = X.preferences
prefs.figure.figsize = (7, 3)
ax = X.plot() # plot
# %% [markdown]
# Two methods implemented in spectrochempy can be used to smooth spectra along one dimension.
# In this tutorial we will apply smoothing of the spectra along the wavelength dimension.
# These methods are based on window functions, whose prototype is the *moving average*.
# %% [markdown]
# ## The `smooth()` method
# %% [markdown]
# The `smooth()` method is adapted from the ["Smoothing of a 1D signal" code](
# https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html) of the [Scipy cookbook](
# https://scipy-cookbook.readthedocs.io/). It is a (weighted) moving average method and consists in the convolution of
# a window of a given length with the spectrum.
#
# In its simplest form - *i.e.* unweighted moving average - each absorbance at a given wavenumber of the smoothed
# spectrum is the average of the absorbances at the considered wavenumber and at the N neighboring
# wavenumbers (*i.e.* N/2 before and N/2 after), hence the conventional use of an odd number of N+1 points to define
# the window length. For the points located at both ends of the spectrum, the extremities of the spectrum are mirrored
# beyond the initial limits to minimize boundary effects.
#
# When passed as is, i.e. `X.smooth()`, the method uses a moving average of 5 points:
# %%
ax = X.smooth().plot()
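# %% [markdown]
# To make the convolution explicit, here is a minimal NumPy sketch of an unweighted 5-point moving
# average with mirrored extremities (an illustration of the principle described above, not the actual
# `smooth()` implementation; it assumes `.data` exposes the underlying NumPy array of the NDDataset):

# %%
y = X[0].data.squeeze()  # absorbances of the first (noisy) spectrum
half = 2  # half-window for a 5-point window
y_mirror = np.r_[y[half:0:-1], y, y[-2:-half - 2:-1]]  # mirror both ends
y_avg = np.convolve(y_mirror, np.ones(2 * half + 1) / (2 * half + 1), mode="valid")
y.shape, y_avg.shape  # the smoothed array keeps the original length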
# %% [markdown]
# ### Window length
#
# The following code compares the influence of the window size on the smoothing of the first spectrum of the
# NDDataset `X[0]`.
# %% [markdown]
# Loop over window lengths.
# `i` index will run from 0 to 6.
# %%
lspectra = [
X[0],
]
llabels = [
"Initial",
]
for i, length in enumerate([5, 11, 27, 51, 101, 201, 501]):
s = X[0].smooth(window_length=length) # smooth
s += 0.1 * (
1 + i
) # shift the absorbance by +0.1 a.u. with respect to previous iteration
lspectra.append(s)
llabels.append(f"length: {length}")
ax = scp.plot_multiple(
figsize=(7, 6), method="pen", datasets=lspectra, labels=llabels, legend="upper left"
)
# %% [markdown]
# The above spectra clearly show that as the width of the window increases, the peaks are progressively
# smoothed out: the spectrum is flattened and distorted. When determining the optimum window length, one should thus
# consider the balance between noise removal and signal integrity: the larger the window length, the stronger the
# smoothing, but also the greater the chance to distort the spectrum.
#
# ### Window function
#
# Besides the window `length` (default=11 points), the user can also choose the type of
# window (`window`) from `flat`, `hanning` (i.e. Hann window), `hamming`, `bartlett` or `blackman`. The `flat`
# window - which is the default shown above - should be fine for the vast majority of cases.
#
# The code below compares the effect of the type of window:
# %%
wspectra = [
X[0],
]
wlabels = [
"Initial",
]
for i, window in enumerate(["flat", "bartlett", "hanning", "hamming", "blackman"]):
s = X[0].smooth(window_length=27, window=window) + 0.1 * (1 + i) # smooth and shift
wspectra.append(s)
wlabels.append(f"window: {window}")
ax = scp.plot_multiple(
figsize=(7, 4), method="pen", datasets=wspectra, labels=wlabels, legend="upper left"
)
# %% [markdown]
# Close examination of the spectra shows that the flat window leads to the strongest smoothing. This is
# because the other window functions (also known as *apodization functions*) are used as weighting functions for the
# N+1 points, with the largest weight on the central point and smaller weights for the outer points.
#
# The window functions as used in SpectroChemPy are derived from the numpy library. These builtin functions are such
# that the value of the central point is 1. Hence, as shown below, they are normalized to the sum of weights. The
# code below displays the corresponding normalized functions for 27 points:
# %%
functions = []
labels = []
for i, f in enumerate([np.bartlett, np.hanning, np.hamming, np.blackman]):
coord = scp.NDDataset.linspace(-13, 13, 27)
s = scp.NDDataset(
        f(27) / np.sum(f(27)) + i * 0.01, coordset=[coord]
    )  # normalized window function, y shifted: +0.01 for each function
functions.append(s)
labels.append(f"function: {f.__name__}")
ax = scp.plot_multiple(
figsize=(7, 4), method="pen", datasets=functions, labels=labels, legend="upper left"
)
# %% As shown above, the "bartlett" function is equivalent to a triangular apodization, while other [markdown]
# functions (`hanning`, `hamming`, `blackman`) are bell-shaped. More information on window functions can be found [
# here](https://en.wikipedia.org/wiki/Window_function).
#
# Overall, the impact of the window function on the final spectrum is moderate, as can be shown by comparing the
# differences (noisy spectrum *minus* smoothed spectra) and the standard deviations along the x dimension:
# %%
diffs = []
stds = []
labels = wlabels[1:]
for s in wspectra[1:]:
s = s - X[0]
diffs.append(s)
stds.append(s.std(dim="x").values.m)
ax = scp.plot_multiple(
figsize=(7, 4), method="pen", datasets=diffs, labels=labels, legend="upper left"
)
ax.set_ylim(0, 0.8)
# %% [markdown]
# and the standard deviations (the larger the value, the stronger the smoothing):
# %%
for ll, s in zip(labels, stds):
print(f"{ll[7:]:10s}: {s:.4f}")
# %% [markdown]
# ## Savitzky-Golay algorithm: `savgol_filter()`
#
# The second algorithm implemented in spectrochempy is the Savitzky-Golay filter which uses a polynomial
# interpolation in the moving window. A demonstrative illustration of the method can be found on the [Savitzky-Golay
# filter](https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter) entry of Wikipedia.
#
# The function implemented in spectrochempy is a wrapper of the [savgol_filter() method](
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html) from the [scipy.signal](
# https://docs.scipy.org/doc/scipy/reference/signal.html) module, to which we refer the interested reader. It is not
# only used to smooth spectra but also to compute their successive derivatives. The latter are treated in [the
# peak-finding tutorial](../analysis/peak_finding.ipynb) and we will focus here on the smoothing which is the default
# of the filter (default parameter: `deriv=0`).
#
# As for the `smooth()` method, it is a moving-window based method. Hence, the window length (`window_length`
# parameter) plays an equivalent role, except that it *must* be odd. Moreover, instead of choosing a window function,
# the user can choose the order of the polynomial used to fit the window data points (`polyorder`, default value: 0).
# The latter must be strictly smaller than the window size (so that the polynomial coefficients can be fully
# determined).
#
# The use of this method is illustrated below; we leave it to the reader to assess the impact of the window length and
# polynomial order (see Exercises below).
# %%
_ = X.savgol_filter(window_length=5, polyorder=0).plot()
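# %% [markdown]
# Since `savgol_filter()` wraps `scipy.signal.savgol_filter()`, the result can be cross-checked by
# calling scipy directly on the underlying array (a quick sanity check; it assumes the boundary
# handling of the two calls matches and that `.data` exposes the NumPy array):

# %%
from scipy.signal import savgol_filter

y_scp = X[0].savgol_filter(window_length=5, polyorder=0).data.squeeze()
y_sp = savgol_filter(X[0].data.squeeze(), window_length=5, polyorder=0)
np.allclose(y_scp, y_sp)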
# %% [markdown]
# <div class='alert alert-info'>
# <b>Exercises</b>
#
# <em>intermediate</em>:
#
# - What would be the parameters to use in the `savgol_filter()` method to mimic `smooth()`? Write
#   code to check your answer.
# - Examine the impacts of `window_length` and `polyorder` on the extent of smoothing with
#   a Savitzky-Golay filter.
# </div>
| 2.84375 | 3 |
varsom_avalanche_client/models/avalanche_problem.py | NVE/python-varsom-avalanche-client | 0 | 12770744 | # coding: utf-8
"""
Snøskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v5.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AvalancheProblem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'avalanche_problem_id': 'int',
'avalanche_ext_id': 'int',
'avalanche_ext_name': 'str',
'aval_cause_id': 'int',
'aval_cause_name': 'str',
'aval_probability_id': 'int',
'aval_probability_name': 'str',
'aval_trigger_simple_id': 'int',
'aval_trigger_simple_name': 'str',
'destructive_size_ext_id': 'int',
'destructive_size_ext_name': 'str',
'aval_propagation_id': 'int',
'aval_propagation_name': 'str',
'avalanche_type_id': 'int',
'avalanche_type_name': 'str',
'avalanche_problem_type_id': 'int',
'avalanche_problem_type_name': 'str',
'valid_expositions': 'str',
'exposed_height1': 'int',
'exposed_height2': 'int',
'exposed_height_fill': 'int'
}
attribute_map = {
'avalanche_problem_id': 'AvalancheProblemId',
'avalanche_ext_id': 'AvalancheExtId',
'avalanche_ext_name': 'AvalancheExtName',
'aval_cause_id': 'AvalCauseId',
'aval_cause_name': 'AvalCauseName',
'aval_probability_id': 'AvalProbabilityId',
'aval_probability_name': 'AvalProbabilityName',
'aval_trigger_simple_id': 'AvalTriggerSimpleId',
'aval_trigger_simple_name': 'AvalTriggerSimpleName',
'destructive_size_ext_id': 'DestructiveSizeExtId',
'destructive_size_ext_name': 'DestructiveSizeExtName',
'aval_propagation_id': 'AvalPropagationId',
'aval_propagation_name': 'AvalPropagationName',
'avalanche_type_id': 'AvalancheTypeId',
'avalanche_type_name': 'AvalancheTypeName',
'avalanche_problem_type_id': 'AvalancheProblemTypeId',
'avalanche_problem_type_name': 'AvalancheProblemTypeName',
'valid_expositions': 'ValidExpositions',
'exposed_height1': 'ExposedHeight1',
'exposed_height2': 'ExposedHeight2',
'exposed_height_fill': 'ExposedHeightFill'
}
def __init__(self, avalanche_problem_id=None, avalanche_ext_id=None, avalanche_ext_name=None, aval_cause_id=None, aval_cause_name=None, aval_probability_id=None, aval_probability_name=None, aval_trigger_simple_id=None, aval_trigger_simple_name=None, destructive_size_ext_id=None, destructive_size_ext_name=None, aval_propagation_id=None, aval_propagation_name=None, avalanche_type_id=None, avalanche_type_name=None, avalanche_problem_type_id=None, avalanche_problem_type_name=None, valid_expositions=None, exposed_height1=None, exposed_height2=None, exposed_height_fill=None): # noqa: E501
"""AvalancheProblem - a model defined in Swagger""" # noqa: E501
self._avalanche_problem_id = None
self._avalanche_ext_id = None
self._avalanche_ext_name = None
self._aval_cause_id = None
self._aval_cause_name = None
self._aval_probability_id = None
self._aval_probability_name = None
self._aval_trigger_simple_id = None
self._aval_trigger_simple_name = None
self._destructive_size_ext_id = None
self._destructive_size_ext_name = None
self._aval_propagation_id = None
self._aval_propagation_name = None
self._avalanche_type_id = None
self._avalanche_type_name = None
self._avalanche_problem_type_id = None
self._avalanche_problem_type_name = None
self._valid_expositions = None
self._exposed_height1 = None
self._exposed_height2 = None
self._exposed_height_fill = None
self.discriminator = None
if avalanche_problem_id is not None:
self.avalanche_problem_id = avalanche_problem_id
if avalanche_ext_id is not None:
self.avalanche_ext_id = avalanche_ext_id
if avalanche_ext_name is not None:
self.avalanche_ext_name = avalanche_ext_name
if aval_cause_id is not None:
self.aval_cause_id = aval_cause_id
if aval_cause_name is not None:
self.aval_cause_name = aval_cause_name
if aval_probability_id is not None:
self.aval_probability_id = aval_probability_id
if aval_probability_name is not None:
self.aval_probability_name = aval_probability_name
if aval_trigger_simple_id is not None:
self.aval_trigger_simple_id = aval_trigger_simple_id
if aval_trigger_simple_name is not None:
self.aval_trigger_simple_name = aval_trigger_simple_name
if destructive_size_ext_id is not None:
self.destructive_size_ext_id = destructive_size_ext_id
if destructive_size_ext_name is not None:
self.destructive_size_ext_name = destructive_size_ext_name
if aval_propagation_id is not None:
self.aval_propagation_id = aval_propagation_id
if aval_propagation_name is not None:
self.aval_propagation_name = aval_propagation_name
if avalanche_type_id is not None:
self.avalanche_type_id = avalanche_type_id
if avalanche_type_name is not None:
self.avalanche_type_name = avalanche_type_name
if avalanche_problem_type_id is not None:
self.avalanche_problem_type_id = avalanche_problem_type_id
if avalanche_problem_type_name is not None:
self.avalanche_problem_type_name = avalanche_problem_type_name
if valid_expositions is not None:
self.valid_expositions = valid_expositions
if exposed_height1 is not None:
self.exposed_height1 = exposed_height1
if exposed_height2 is not None:
self.exposed_height2 = exposed_height2
if exposed_height_fill is not None:
self.exposed_height_fill = exposed_height_fill
@property
def avalanche_problem_id(self):
"""Gets the avalanche_problem_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_problem_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_problem_id
@avalanche_problem_id.setter
def avalanche_problem_id(self, avalanche_problem_id):
"""Sets the avalanche_problem_id of this AvalancheProblem.
:param avalanche_problem_id: The avalanche_problem_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_problem_id = avalanche_problem_id
@property
def avalanche_ext_id(self):
"""Gets the avalanche_ext_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_ext_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_ext_id
@avalanche_ext_id.setter
def avalanche_ext_id(self, avalanche_ext_id):
"""Sets the avalanche_ext_id of this AvalancheProblem.
:param avalanche_ext_id: The avalanche_ext_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_ext_id = avalanche_ext_id
@property
def avalanche_ext_name(self):
"""Gets the avalanche_ext_name of this AvalancheProblem. # noqa: E501
:return: The avalanche_ext_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._avalanche_ext_name
@avalanche_ext_name.setter
def avalanche_ext_name(self, avalanche_ext_name):
"""Sets the avalanche_ext_name of this AvalancheProblem.
:param avalanche_ext_name: The avalanche_ext_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._avalanche_ext_name = avalanche_ext_name
@property
def aval_cause_id(self):
"""Gets the aval_cause_id of this AvalancheProblem. # noqa: E501
:return: The aval_cause_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_cause_id
@aval_cause_id.setter
def aval_cause_id(self, aval_cause_id):
"""Sets the aval_cause_id of this AvalancheProblem.
:param aval_cause_id: The aval_cause_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_cause_id = aval_cause_id
@property
def aval_cause_name(self):
"""Gets the aval_cause_name of this AvalancheProblem. # noqa: E501
:return: The aval_cause_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_cause_name
@aval_cause_name.setter
def aval_cause_name(self, aval_cause_name):
"""Sets the aval_cause_name of this AvalancheProblem.
:param aval_cause_name: The aval_cause_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_cause_name = aval_cause_name
@property
def aval_probability_id(self):
"""Gets the aval_probability_id of this AvalancheProblem. # noqa: E501
:return: The aval_probability_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_probability_id
@aval_probability_id.setter
def aval_probability_id(self, aval_probability_id):
"""Sets the aval_probability_id of this AvalancheProblem.
:param aval_probability_id: The aval_probability_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_probability_id = aval_probability_id
@property
def aval_probability_name(self):
"""Gets the aval_probability_name of this AvalancheProblem. # noqa: E501
:return: The aval_probability_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_probability_name
@aval_probability_name.setter
def aval_probability_name(self, aval_probability_name):
"""Sets the aval_probability_name of this AvalancheProblem.
:param aval_probability_name: The aval_probability_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_probability_name = aval_probability_name
@property
def aval_trigger_simple_id(self):
"""Gets the aval_trigger_simple_id of this AvalancheProblem. # noqa: E501
:return: The aval_trigger_simple_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_trigger_simple_id
@aval_trigger_simple_id.setter
def aval_trigger_simple_id(self, aval_trigger_simple_id):
"""Sets the aval_trigger_simple_id of this AvalancheProblem.
:param aval_trigger_simple_id: The aval_trigger_simple_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_trigger_simple_id = aval_trigger_simple_id
@property
def aval_trigger_simple_name(self):
"""Gets the aval_trigger_simple_name of this AvalancheProblem. # noqa: E501
:return: The aval_trigger_simple_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_trigger_simple_name
@aval_trigger_simple_name.setter
def aval_trigger_simple_name(self, aval_trigger_simple_name):
"""Sets the aval_trigger_simple_name of this AvalancheProblem.
:param aval_trigger_simple_name: The aval_trigger_simple_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_trigger_simple_name = aval_trigger_simple_name
@property
def destructive_size_ext_id(self):
"""Gets the destructive_size_ext_id of this AvalancheProblem. # noqa: E501
:return: The destructive_size_ext_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._destructive_size_ext_id
@destructive_size_ext_id.setter
def destructive_size_ext_id(self, destructive_size_ext_id):
"""Sets the destructive_size_ext_id of this AvalancheProblem.
:param destructive_size_ext_id: The destructive_size_ext_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._destructive_size_ext_id = destructive_size_ext_id
@property
def destructive_size_ext_name(self):
"""Gets the destructive_size_ext_name of this AvalancheProblem. # noqa: E501
:return: The destructive_size_ext_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._destructive_size_ext_name
@destructive_size_ext_name.setter
def destructive_size_ext_name(self, destructive_size_ext_name):
"""Sets the destructive_size_ext_name of this AvalancheProblem.
:param destructive_size_ext_name: The destructive_size_ext_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._destructive_size_ext_name = destructive_size_ext_name
@property
def aval_propagation_id(self):
"""Gets the aval_propagation_id of this AvalancheProblem. # noqa: E501
:return: The aval_propagation_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_propagation_id
@aval_propagation_id.setter
def aval_propagation_id(self, aval_propagation_id):
"""Sets the aval_propagation_id of this AvalancheProblem.
:param aval_propagation_id: The aval_propagation_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_propagation_id = aval_propagation_id
@property
def aval_propagation_name(self):
"""Gets the aval_propagation_name of this AvalancheProblem. # noqa: E501
:return: The aval_propagation_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_propagation_name
@aval_propagation_name.setter
def aval_propagation_name(self, aval_propagation_name):
"""Sets the aval_propagation_name of this AvalancheProblem.
:param aval_propagation_name: The aval_propagation_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_propagation_name = aval_propagation_name
@property
def avalanche_type_id(self):
"""Gets the avalanche_type_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_type_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_type_id
@avalanche_type_id.setter
def avalanche_type_id(self, avalanche_type_id):
"""Sets the avalanche_type_id of this AvalancheProblem.
:param avalanche_type_id: The avalanche_type_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_type_id = avalanche_type_id
@property
def avalanche_type_name(self):
"""Gets the avalanche_type_name of this AvalancheProblem. # noqa: E501
:return: The avalanche_type_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._avalanche_type_name
@avalanche_type_name.setter
def avalanche_type_name(self, avalanche_type_name):
"""Sets the avalanche_type_name of this AvalancheProblem.
:param avalanche_type_name: The avalanche_type_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._avalanche_type_name = avalanche_type_name
@property
def avalanche_problem_type_id(self):
"""Gets the avalanche_problem_type_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_problem_type_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_problem_type_id
@avalanche_problem_type_id.setter
def avalanche_problem_type_id(self, avalanche_problem_type_id):
"""Sets the avalanche_problem_type_id of this AvalancheProblem.
:param avalanche_problem_type_id: The avalanche_problem_type_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_problem_type_id = avalanche_problem_type_id
@property
def avalanche_problem_type_name(self):
"""Gets the avalanche_problem_type_name of this AvalancheProblem. # noqa: E501
:return: The avalanche_problem_type_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._avalanche_problem_type_name
@avalanche_problem_type_name.setter
def avalanche_problem_type_name(self, avalanche_problem_type_name):
"""Sets the avalanche_problem_type_name of this AvalancheProblem.
:param avalanche_problem_type_name: The avalanche_problem_type_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._avalanche_problem_type_name = avalanche_problem_type_name
@property
def valid_expositions(self):
"""Gets the valid_expositions of this AvalancheProblem. # noqa: E501
:return: The valid_expositions of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._valid_expositions
@valid_expositions.setter
def valid_expositions(self, valid_expositions):
"""Sets the valid_expositions of this AvalancheProblem.
:param valid_expositions: The valid_expositions of this AvalancheProblem. # noqa: E501
:type: str
"""
self._valid_expositions = valid_expositions
@property
def exposed_height1(self):
"""Gets the exposed_height1 of this AvalancheProblem. # noqa: E501
:return: The exposed_height1 of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._exposed_height1
@exposed_height1.setter
def exposed_height1(self, exposed_height1):
"""Sets the exposed_height1 of this AvalancheProblem.
:param exposed_height1: The exposed_height1 of this AvalancheProblem. # noqa: E501
:type: int
"""
self._exposed_height1 = exposed_height1
@property
def exposed_height2(self):
"""Gets the exposed_height2 of this AvalancheProblem. # noqa: E501
:return: The exposed_height2 of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._exposed_height2
@exposed_height2.setter
def exposed_height2(self, exposed_height2):
"""Sets the exposed_height2 of this AvalancheProblem.
:param exposed_height2: The exposed_height2 of this AvalancheProblem. # noqa: E501
:type: int
"""
self._exposed_height2 = exposed_height2
@property
def exposed_height_fill(self):
"""Gets the exposed_height_fill of this AvalancheProblem. # noqa: E501
:return: The exposed_height_fill of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._exposed_height_fill
@exposed_height_fill.setter
def exposed_height_fill(self, exposed_height_fill):
"""Sets the exposed_height_fill of this AvalancheProblem.
:param exposed_height_fill: The exposed_height_fill of this AvalancheProblem. # noqa: E501
:type: int
"""
self._exposed_height_fill = exposed_height_fill
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AvalancheProblem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AvalancheProblem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.398438 | 1 |
Level-3/bomb_baby/bomb-baby.py | FeLiNa22/google-foobar | 1 | 12770745 | import random
def solution(x, y):
m, f, gen = int(x), int(y), 0
while(True):
if(m == 1 and f == 1):
return str(gen);
elif(m < 1 or f < 1 or m==f):
return "impossible"
elif(m == 1 or f == 1):
return str(gen + f * m - 1)
elif(m > f):
gen += int(m/f);
m -= f * int(m/f);
elif(m < f):
gen += int(f/m);
f -= m * int(f/m);
print(solution(random.getrandbits(1280),random.getrandbits(1281))) | 3.53125 | 4 |
src/api/v1/node_api.py | glimsil/orch | 0 | 12770746 | from flask import jsonify
from api import api
import socket
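# Example responses (assuming the blueprint is mounted at the application root):
#   GET  /v1/node/who/i/am      -> {"node_name": "<hostname>"}
#   POST /v1/node/sync          -> {"node_name": "<hostname>"}
#   POST /v1/node/cluster/join  -> {"node_name": "<hostname>"}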
@api.route('/v1/node/who/i/am', methods=['GET'])
def node_who_i_am():
return jsonify({'node_name':str(socket.gethostname())})
@api.route('/v1/node/sync', methods=['POST'])
def node_sync():
return jsonify({'node_name':str(socket.gethostname())})
@api.route('/v1/node/cluster/join', methods=['POST'])
def node_cluster_join():
return jsonify({'node_name':str(socket.gethostname())}) | 2.40625 | 2 |
tests/test_forms.py | azmeuk/webtest | 239 | 12770747 | <gh_stars>100-1000
import cgi
import os.path
import struct
import sys
import webtest
from webob import Request
from webtest.debugapp import DebugApp
from webtest.compat import to_bytes
from webtest.forms import NoValue, Submit, Upload
from tests.compat import unittest
from tests.compat import u
class TestForms(unittest.TestCase):
def callFUT(self, filename='form_inputs.html', formid='simple_form'):
dirname = os.path.join(os.path.dirname(__file__), 'html')
app = DebugApp(form=os.path.join(dirname, filename), show_form=True)
resp = webtest.TestApp(app).get('/form.html')
return resp.forms[formid]
def test_set_submit_field(self):
form = self.callFUT()
self.assertRaises(
AttributeError,
form['submit'].value__set,
'foo'
)
def test_button(self):
form = self.callFUT()
button = form['button']
self.assertTrue(isinstance(button, Submit),
"<button> without type is a submit button")
def test_button_value_if_submitted(self):
form = self.callFUT()
submit = form['submit']
self.assertEqual(
submit.value_if_submitted(), '',
"submit default value is ''")
button = form['button']
self.assertEqual(
button.value_if_submitted(), '',
"submit default value is ''")
def test_force_select(self):
form = self.callFUT()
form['select'].force_value('notavalue')
form['select'].value__set('value3')
self.assertTrue(
form['select']._forced_value is NoValue,
"Setting a value after having forced a value should keep a forced"
" state")
self.assertEqual(
form['select'].value, 'value3',
"the value should the the one set by value__set")
self.assertEqual(
form['select'].selectedIndex, 2,
"the value index should be the one set by value__set")
def test_form_select(self):
form = self.callFUT()
form.select('select', 'value1')
self.assertEqual(
form['select'].value, 'value1',
"when using form.select, the input selected value should be "
"changed")
def test_get_field_by_index(self):
form = self.callFUT()
self.assertEqual(form['select'],
form.get('select', index=0))
def test_get_unknown_field(self):
form = self.callFUT()
self.assertEqual(form['unknown'].value, '')
form['unknown'].value = '1'
self.assertEqual(form['unknown'].value, '1')
def test_get_non_exist_fields(self):
form = self.callFUT()
self.assertRaises(AssertionError, form.get, 'nonfield')
def test_get_non_exist_fields_with_default(self):
form = self.callFUT()
value = form.get('nonfield', default=1)
self.assertEqual(value, 1)
def test_upload_fields(self):
form = self.callFUT()
fu = webtest.Upload(__file__)
form['file'] = fu
self.assertEqual(form.upload_fields(),
[['file', __file__]])
def test_repr(self):
form = self.callFUT()
self.assertTrue(repr(form).startswith('<Form id='))
def test_the_bs_node_must_not_change(self):
form = self.callFUT()
self.assertEqual(form.text, str(form.html))
def test_set_multiple_checkboxes(self):
form = self.callFUT(formid='multiple_checkbox_form')
form['checkbox'] = [10, 30]
self.assertEqual(form.get('checkbox', index=0).value, '10')
self.assertEqual(form.get('checkbox', index=1).value, None)
self.assertEqual(form.get('checkbox', index=2).value, '30')
def test_button_submit(self):
form = self.callFUT(formid='multiple_buttons_form')
display = form.submit('action')
self.assertIn(u("action=deactivate"), display, display)
def test_button_submit_by_index(self):
form = self.callFUT(formid='multiple_buttons_form')
display = form.submit('action', index=1)
self.assertIn(u("action=activate"), display, display)
def test_button_submit_by_value(self):
form = self.callFUT(formid='multiple_buttons_form')
display = form.submit('action', value='activate')
self.assertIn(u("action=activate"), display, display)
def test_button_submit_by_value_and_index(self):
form = self.callFUT(formid='multiple_buttons_form')
self.assertRaises(ValueError,
form.submit, "action", value="activate",
index=0)
class TestResponseFormAttribute(unittest.TestCase):
def callFUT(self, body):
app = DebugApp(form=to_bytes(body))
return webtest.TestApp(app)
def test_no_form(self):
app = self.callFUT('<html><body></body></html>')
res = app.get('/form.html')
self.assertRaises(TypeError, lambda: res.form)
def test_too_many_forms(self):
app = self.callFUT(
'<html><body><form></form><form></form></body></html>')
res = app.get('/form.html')
self.assertRaises(TypeError, lambda: res.form)
class TestInput(unittest.TestCase):
def callFUT(self, filename='form_inputs.html'):
dirname = os.path.join(os.path.dirname(__file__), 'html')
app = DebugApp(form=os.path.join(dirname, filename), show_form=True)
return webtest.TestApp(app)
def test_input(self):
app = self.callFUT()
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
form = res.forms['text_input_form']
self.assertEqual(form['foo'].value, 'bar')
self.assertEqual(form.submit_fields(), [('foo', 'bar')])
form = res.forms['radio_input_form']
self.assertEqual(form['foo'].selectedIndex, 1)
self.assertEqual(form['foo'].value, 'baz')
self.assertEqual(form.submit_fields(), [('foo', 'baz')])
form = res.forms['checkbox_input_form']
self.assertEqual(form['foo'].value, 'bar')
self.assertEqual(form.submit_fields(), [('foo', 'bar')])
form = res.forms['password_input_form']
self.assertEqual(form['foo'].value, 'bar')
self.assertEqual(form.submit_fields(), [('foo', 'bar')])
def test_force_radio_input(self):
app = self.callFUT()
res = app.get('/form.html')
form = res.forms['radio_input_form']
form['foo'].force_value('fido')
self.assertEqual(form['foo'].value, 'fido')
self.assertEqual(form.submit_fields(), [('foo', 'fido')])
def test_radio_input_order(self):
app = self.callFUT()
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
form = res.forms['complex_radio_input_form']
form['foo'].value = 'true'
self.assertEqual(form['foo'].value, 'true')
self.assertEqual(form['foo'].selectedIndex, 0)
self.assertEqual(form.submit_fields(), [
('__start__', 'item:mapping'),
('foo', 'true'),
('__end__', 'item:mapping'),
('__start__', 'item:mapping'),
('__end__', 'item:mapping')])
res = app.get('/form.html')
form = res.forms['complex_radio_input_form']
self.assertEqual(form['foo'].value, 'true')
self.assertEqual(form['foo'].selectedIndex, 1)
self.assertEqual(form.submit_fields(), [
('__start__', 'item:mapping'),
('__end__', 'item:mapping'),
('__start__', 'item:mapping'),
('foo', 'true'),
('__end__', 'item:mapping')])
def test_input_unicode(self):
app = self.callFUT('form_unicode_inputs.html')
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
self.assertEqual(res.charset.lower(), 'utf-8')
form = res.forms['text_input_form']
self.assertEqual(form['foo'].value, u('Хармс'))
self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
form = res.forms['radio_input_form']
self.assertEqual(form['foo'].selectedIndex, 1)
self.assertEqual(form['foo'].value, u('Блок'))
self.assertEqual(form.submit_fields(), [('foo', u('Блок'))])
form = res.forms['checkbox_input_form']
self.assertEqual(form['foo'].value, u('Хармс'))
self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
form = res.forms['password_input_form']
self.assertEqual(form['foo'].value, u('Хармс'))
self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
def test_input_no_default(self):
app = self.callFUT('form_inputs_with_defaults.html')
res = app.get('/form.html')
self.assertEqual(res.status_int, 200)
self.assertTrue(res.content_type.startswith('text/html'))
form = res.forms['text_input_form']
self.assertEqual(form['foo'].value, '')
self.assertEqual(form.submit_fields(), [('foo', '')])
form = res.forms['radio_input_form']
self.assertTrue(form['foo'].value is None)
self.assertEqual(form.submit_fields(), [])
form = res.forms['checkbox_input_form']
self.assertTrue(form['foo'].value is None)
self.assertEqual(form.submit_fields(), [])
form = res.forms['password_input_form']
self.assertEqual(form['foo'].value, '')
self.assertEqual(form.submit_fields(), [('foo', '')])
def test_textarea_entities(self):
app = self.callFUT()
res = app.get('/form.html')
form = res.forms.get("textarea_input_form")
self.assertEqual(form.get("textarea").value, "'foo&bar'")
self.assertEqual(form.submit_fields(), [('textarea', "'foo&bar'")])
def test_textarea_emptyfirstline(self):
app = self.callFUT()
res = app.get('/form.html')
form = res.forms.get("textarea_emptyline_form")
self.assertEqual(form.get("textarea").value, "aaa")
self.assertEqual(form.submit_fields(), [('textarea', "aaa")])
class TestFormLint(unittest.TestCase):
def test_form_lint(self):
form = webtest.Form(None, '''<form>
<input type="text" name="field"/>
</form>''')
self.assertRaises(AttributeError, form.lint)
form = webtest.Form(None, '''<form>
<input type="text" id="myfield" name="field"/>
</form>''')
self.assertRaises(AttributeError, form.lint)
form = webtest.Form(None, '''<form>
<label for="myfield">my field</label>
<input type="text" id="myfield" name="field"/>
</form>''')
form.lint()
form = webtest.Form(None, '''<form>
<label class="field" for="myfield" role="r">my field</label>
<input type="text" id="myfield" name="field"/>
</form>''')
form.lint()
def select_app(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="4">Four</option>
<option value="5" selected="selected">Five</option>
<option value="6">Six</option>
<option value="7">Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple>
<option value="8" selected="selected">Eight</option>
<option value="9">Nine</option>
<option value="10">Ten</option>
<option value="11" selected="selected">Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
return [body]
def select_app_without_values(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option>Four</option>
<option>Five</option>
<option>Six</option>
<option>Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option>Eight</option>
<option selected value="Nine">Nine</option>
<option>Ten</option>
<option selected>Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
return [body]
def select_app_without_default(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="4">Four</option>
<option value="5">Five</option>
<option value="6">Six</option>
<option value="7">Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option value="8">Eight</option>
<option value="9">Nine</option>
<option value="10">Ten</option>
<option value="11">Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
return [body]
def select_app_unicode(environ, start_response):
req = Request(environ)
status = b"200 OK"
if req.method == "GET":
body = u("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="ЕКБ">Екатеринбург</option>
<option value="МСК" selected="selected">Москва</option>
<option value="СПБ">Санкт-Петербург</option>
<option value="САМ">Самара</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option value="8" selected="selected">Лондон</option>
<option value="9">Париж</option>
<option value="10">Пекин</option>
<option value="11" selected="selected">Бристоль</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""").encode('utf8')
else:
select_type = req.POST.get("button")
if select_type == "single":
selection = req.POST.get("single")
elif select_type == "multiple":
selection = ", ".join(req.POST.getall("multiple"))
body = (u("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""") % dict(selection=selection, select_type=select_type)).encode('utf8')
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
if not isinstance(body, bytes):
raise AssertionError('Body is not %s' % bytes)
return [body]
class TestSelect(unittest.TestCase):
def test_unicode_select(self):
app = webtest.TestApp(select_app_unicode)
res = app.get('/')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, u("МСК"))
display = single_form.submit("button")
self.assertIn(u("<p>You selected МСК</p>"), display, display)
res = app.get('/')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, u("МСК"))
single_form.set("single", u("СПБ"))
self.assertEqual(single_form["single"].value, u("СПБ"))
display = single_form.submit("button")
self.assertIn(u("<p>You selected СПБ</p>"), display, display)
def test_single_select(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "5")
display = single_form.submit("button")
self.assertIn("<p>You selected 5</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "5")
single_form.set("single", "6")
self.assertEqual(single_form["single"].value, "6")
display = single_form.submit("button")
self.assertIn("<p>You selected 6</p>", display, display)
res = app.get('/')
single_form = res.forms["single_select_form"]
self.assertRaises(ValueError, single_form.select, "single", "5",
text="Five")
self.assertRaises(ValueError, single_form.select, "single",
text="Three")
single_form.select("single", text="Seven")
self.assertEqual(single_form["single"].value, "7")
display = single_form.submit("button")
self.assertIn("<p>You selected 7</p>", display, display)
def test_single_select_forced_value(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "5")
self.assertRaises(ValueError, single_form.set, "single", "984")
single_form["single"].force_value("984")
self.assertEqual(single_form["single"].value, "984")
display = single_form.submit("button")
self.assertIn("<p>You selected 984</p>", display, display)
def test_single_select_no_default(self):
app = webtest.TestApp(select_app_without_default)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "4")
display = single_form.submit("button")
self.assertIn("<p>You selected 4</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "4")
single_form.set("single", 6)
self.assertEqual(single_form["single"].value, "6")
display = single_form.submit("button")
self.assertIn("<p>You selected 6</p>", display, display)
def test_multiple_select(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ['8', '11'],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 8, 11</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["8", "11"],
multiple_form["multiple"].value)
multiple_form.set("multiple", ["9"])
self.assertEqual(multiple_form["multiple"].value, ["9"],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 9</p>", display, display)
res = app.get('/')
multiple_form = res.forms["multiple_select_form"]
self.assertRaises(ValueError, multiple_form.select_multiple,
"multiple",
["8", "10"], texts=["Eight", "Ten"])
self.assertRaises(ValueError, multiple_form.select_multiple,
"multiple", texts=["Twelve"])
multiple_form.select_multiple("multiple",
texts=["Eight", "Nine", "Ten"])
display = multiple_form.submit("button")
self.assertIn("<p>You selected 8, 9, 10</p>", display, display)
def test_multiple_select_forced_values(self):
app = webtest.TestApp(select_app)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["8", "11"],
multiple_form["multiple"].value)
self.assertRaises(ValueError, multiple_form.set,
"multiple", ["24", "88"])
multiple_form["multiple"].force_value(["24", "88"])
self.assertEqual(multiple_form["multiple"].value, ["24", "88"],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 24, 88</p>", display, display)
def test_multiple_select_no_default(self):
app = webtest.TestApp(select_app_without_default)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertTrue(multiple_form["multiple"].value is None,
repr(multiple_form["multiple"].value))
display = multiple_form.submit("button")
self.assertIn("<p>You selected </p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
        self.assertIsNone(multiple_form["multiple"].value,
                          multiple_form["multiple"].value)
multiple_form.set("multiple", ["9"])
self.assertEqual(multiple_form["multiple"].value, ["9"],
multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected 9</p>", display, display)
def test_select_no_value(self):
app = webtest.TestApp(select_app_without_values)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "Four")
display = single_form.submit("button")
self.assertIn("<p>You selected Four</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["single_select_form"]
self.assertEqual(single_form["single"].value, "Four")
single_form.set("single", "Six")
self.assertEqual(single_form["single"].value, "Six")
display = single_form.submit("button")
self.assertIn("<p>You selected Six</p>", display, display)
def test_multiple_select_no_value(self):
app = webtest.TestApp(select_app_without_values)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
display = multiple_form.submit("button")
self.assertIn("<p>You selected Nine, Eleven</p>", display, display)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
multiple_form.set("multiple", ["Nine", "Ten"])
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Ten"])
display = multiple_form.submit("button")
self.assertIn("<p>You selected Nine, Ten</p>", display, display)
def test_multiple_select_reset_value(self):
app = webtest.TestApp(select_app_without_values)
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
multiple_form = res.forms["multiple_select_form"]
self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
# reset with value
multiple_form["multiple"].value = []
self.assertIsNone(multiple_form["multiple"].value)
# re-set a value
multiple_form["multiple"].value = ['Nine']
        self.assertEqual(multiple_form["multiple"].value, ['Nine'])
# reset with force_value
multiple_form["multiple"].force_value(None)
self.assertIsNone(multiple_form["multiple"].value)
display = multiple_form.submit("button")
self.assertIn("<p>You selected </p>", display, display)
class SingleUploadFileApp:
body = b"""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="file_upload_form"
enctype="multipart/form-data">
<input name="file-field" type="file" value="some/path/file.txt" />
<input name="int-field" type="text" value="" />
<input name="button" type="submit" value="single">
</form>
</body>
</html>
"""
def __call__(self, environ, start_response):
req = Request(environ)
        status = "200 OK"  # PEP 3333: the status must be a native string
if req.method == "GET":
body = self.body
else:
body = b"""
<html>
<head><title>display page</title></head>
<body>
""" + self.get_files_page(req) + b"""
</body>
</html>
"""
headers = [
('Content-Type', 'text/html; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(status, headers)
        assert isinstance(body, bytes)
return [body]
def get_files_page(self, req):
file_parts = []
uploaded_files = [(k, v) for k, v in req.POST.items() if 'file' in k]
uploaded_files = sorted(uploaded_files)
for name, uploaded_file in uploaded_files:
if isinstance(uploaded_file, cgi.FieldStorage):
filename = to_bytes(uploaded_file.filename)
value = to_bytes(uploaded_file.value, 'ascii')
content_type = to_bytes(uploaded_file.type, 'ascii')
else:
filename = value = content_type = b''
file_parts.append(b"""
<p>You selected '""" + filename + b"""'</p>
<p>with contents: '""" + value + b"""'</p>
<p>with content type: '""" + content_type + b"""'</p>
""")
return b''.join(file_parts)
class UploadBinaryApp(SingleUploadFileApp):
def get_files_page(self, req):
uploaded_files = [(k, v) for k, v in req.POST.items() if 'file' in k]
data = uploaded_files[0][1].value
data = struct.unpack(b'255h', data[:510])
return b','.join([to_bytes(str(i)) for i in data])
class MultipleUploadFileApp(SingleUploadFileApp):
body = b"""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="file_upload_form"
enctype="multipart/form-data">
<input name="file-field-1" type="file" />
<input name="file-field-2" type="file" />
<input name="button" type="submit" value="single">
</form>
</body>
</html>
"""
class TestFileUpload(unittest.TestCase):
def assertFile(self, name, contents, display, content_type=None):
if isinstance(name, bytes):
text_name = name.decode('ascii')
else:
text_name = name
self.assertIn("<p>You selected '" + text_name + "'</p>",
display, display)
if isinstance(contents, bytes):
text_contents = contents.decode('ascii')
else:
text_contents = contents
self.assertIn("<p>with contents: '" + text_contents + "'</p>",
display, display)
if content_type:
self.assertIn("<p>with content type: '" + content_type + "'</p>",
display, display)
    def test_no_uploads_error(self):
        app = webtest.TestApp(SingleUploadFileApp())
        # just check it does not raise
        app.get('/').forms["file_upload_form"].upload_fields()
    def test_upload_without_file(self):
        app = webtest.TestApp(SingleUploadFileApp())
        upload_form = app.get('/').forms["file_upload_form"]
        # just check it does not raise
        upload_form.submit()
def test_file_upload_with_filename_only(self):
uploaded_file_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
        with open(uploaded_file_name) as f:
            uploaded_file_contents = to_bytes(f.read())
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
self.assertEqual(res.charset, 'utf-8')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", (uploaded_file_name,))
display = single_form.submit("button")
self.assertFile(uploaded_file_name, uploaded_file_contents, display)
def test_file_upload_with_filename_and_contents(self):
uploaded_file_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
        with open(uploaded_file_name) as f:
            uploaded_file_contents = to_bytes(f.read())
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["file_upload_form"]
single_form.set("file-field",
(uploaded_file_name, uploaded_file_contents))
display = single_form.submit("button")
self.assertFile(uploaded_file_name, uploaded_file_contents, display)
def test_file_upload_with_content_type(self):
uploaded_file_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
with open(uploaded_file_name, 'rb') as f:
uploaded_file_contents = f.read()
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form["file-field"].value = Upload(uploaded_file_name,
uploaded_file_contents,
'text/x-custom-type')
display = single_form.submit("button")
self.assertFile(uploaded_file_name, uploaded_file_contents, display,
content_type='text/x-custom-type')
def test_file_upload_binary(self):
binary_data = struct.pack('255h', *range(0, 255))
app = webtest.TestApp(UploadBinaryApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', binary_data))
display = single_form.submit("button")
self.assertIn(','.join([str(n) for n in range(0, 255)]), display)
def test_multiple_file_uploads_with_filename_and_contents(self):
uploaded_file1_name = os.path.join(os.path.dirname(__file__),
"__init__.py")
        with open(uploaded_file1_name) as f:
            uploaded_file1_contents = to_bytes(f.read())
        uploaded_file2_name = os.path.join(os.path.dirname(__file__), 'html',
                                           "404.html")
        with open(uploaded_file2_name) as f:
            uploaded_file2_contents = to_bytes(f.read())
app = webtest.TestApp(MultipleUploadFileApp())
res = app.get('/')
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers['content-type'],
'text/html; charset=utf-8')
self.assertEqual(res.content_type, 'text/html')
single_form = res.forms["file_upload_form"]
single_form.set("file-field-1",
(uploaded_file1_name, uploaded_file1_contents))
single_form.set("file-field-2",
(uploaded_file2_name, uploaded_file2_contents))
display = single_form.submit("button")
self.assertFile(uploaded_file1_name, uploaded_file1_contents, display)
        self.assertFile(uploaded_file2_name, uploaded_file2_contents, display)
def test_post_int(self):
binary_data = struct.pack('255h', *range(0, 255))
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', binary_data))
single_form.set("int-field", 100)
# just check it does not raise
single_form.submit("button")
def test_invalid_types(self):
binary_data = struct.pack('255h', *range(0, 255))
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', binary_data))
single_form.set("int-field", SingleUploadFileApp())
self.assertRaises(ValueError, single_form.submit, "button")
def test_upload_invalid_content(self):
app = webtest.TestApp(SingleUploadFileApp())
res = app.get('/')
single_form = res.forms["file_upload_form"]
single_form.set("file-field", ('my_file.dat', 1))
        try:
            single_form.submit("button")
        except ValueError:
            e = sys.exc_info()[1]
            self.assertEqual(
                str(e),
                u('File content must be %s not %s' % (bytes, int))
            )
        else:
            self.fail("submit() was expected to raise ValueError")
def test_invalid_uploadfiles(self):
app = webtest.TestApp(SingleUploadFileApp())
self.assertRaises(ValueError, app.post, '/', upload_files=[()])
self.assertRaises(
ValueError,
app.post, '/',
upload_files=[('name', 'filename', 'content', 'extra')]
)
def test_goto_upload_files(self):
app = webtest.TestApp(SingleUploadFileApp())
resp = app.get('/')
resp = resp.goto(
'/',
method='post',
upload_files=[('file', 'filename', b'content')]
)
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: 'content'</p>")
def test_post_upload_files(self):
app = webtest.TestApp(SingleUploadFileApp())
resp = app.post(
'/',
upload_files=[('file', 'filename', b'content')]
)
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: 'content'</p>")
def test_post_upload_empty_files(self):
app = webtest.TestApp(SingleUploadFileApp())
resp = app.post(
'/',
upload_files=[('file', 'filename', b'')]
)
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: ''</p>")
resp = app.get('/')
form = resp.form
form['file-field'] = Upload('filename', b'', 'text/plain')
resp = form.submit()
resp.mustcontain("<p>You selected 'filename'</p>",
"<p>with contents: ''</p>")
| 2.078125 | 2 |
Backspace String Compare.py | ngdeva99/Fulcrum | 0 | 12770748 | class Solution:
    def backspaceCompare(self, S: str, T: str) -> bool:
        def build(s: str) -> str:
            # Simulate typing: '#' deletes the previous character, if any.
            stack = []
            for ch in s:
                if ch == "#":
                    if stack:
                        stack.pop()
                else:
                    stack.append(ch)
            return "".join(stack)
        # The strings match iff both reduce to the same typed text.
        return build(S) == build(T)
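
# A small usage sketch (a hypothetical driver block, not part of the
# original submission), assuming the standard LeetCode examples:
if __name__ == "__main__":
    sol = Solution()
    assert sol.backspaceCompare("ab#c", "ad#c")   # both reduce to "ac"
    assert not sol.backspaceCompare("a#c", "b")   # "c" vs "b"
    print("all checks passed")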
| 3.640625 | 4 |
gpMgmt/bin/gppylib/system/info.py | henglabs/gpdb | 0 | 12770749 | <reponame>henglabs/gpdb<gh_stars>0
#! /usr/bin/env python2
import psutil
import os
import resource
PERCENTAGE_OF_AVAIL_MEM_USED_FOR_THREADS = 0.8 # allow 20% of memory to remain for parent process
MB = 1024 * 1024
class SystemInfo():
def __init__(self, logger=None):
self.logger = logger
self.pid = os.getpid()
self.process = psutil.Process(self.pid)
def print_mem_usage(self):
mem = psutil.virtual_memory()
print "available: %sMB percent: %s" % (mem.available >> 20, mem.percent)
print "process memory usage %s" % (self.process.memory_info().vms >> 20)
def debug_log_mem_usage(self):
mem = psutil.virtual_memory()
if self.logger:
self.logger.debug('available: %sMB percent: %s' % (mem.available >> 20, mem.percent))
self.logger.debug('process memory usage: %sMB' % (self.process.memory_info().vms >> 20))
def get_max_available_thread_count():
    """Estimate how many worker threads fit in the available memory."""
    stack_size, _ = resource.getrlimit(resource.RLIMIT_STACK)
    # Assuming a generous 10K bytes per line of error output, 20 MB allows
    # 2000 error lines in a single run; if the user needs more, the manual
    # explains that the batch size (number of threads) can always be set
    # manually.
thread_size = 20 * MB + stack_size
mem = psutil.virtual_memory()
available_mem = mem.available
num_threads = int(available_mem / thread_size * PERCENTAGE_OF_AVAIL_MEM_USED_FOR_THREADS)
return max(1, num_threads)
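
# A minimal smoke test (hypothetical, not part of the original module)
# exercising the helpers above; like get_max_available_thread_count itself,
# it assumes a finite RLIMIT_STACK.
if __name__ == '__main__':
    info = SystemInfo()
    info.print_mem_usage()
    print "max usable threads: %d" % get_max_available_thread_count()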
| 2.875 | 3 |
lib/modeling/roi_heads/relation_network.py | SimeonZhang/detectron2_tensorflow | 3 | 12770750 | <filename>lib/modeling/roi_heads/relation_network.py
import tensorflow as tf
from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from .box_head import ROI_BOX_HEAD_REGISTRY, FastRCNNConvFCHead, build_box_head
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from ...layers import Linear, ShapeSpec, flatten
from .relation_module import (
compute_geometry_embeddings,
compute_rank_embeddings,
ObjectRelationModule
)
@ROI_BOX_HEAD_REGISTRY.register()
class RelationBoxHead(FastRCNNConvFCHead):
def __init__(self, cfg, input_shape: ShapeSpec, **kwargs):
super().__init__(cfg, input_shape, **kwargs)
self._init_attention(cfg)
def _init_attention(self, cfg):
num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
num_groups = cfg.MODEL.ROI_BOX_RELATION_HEAD.NUM_GROUPS
key_dim = cfg.MODEL.ROI_BOX_RELATION_HEAD.KEY_DIM
self.geometry_embedding_dim = cfg.MODEL.ROI_BOX_RELATION_HEAD.GEOMETRY_EMBEDDING_DIM
self.relations = []
with tf.variable_scope(self.scope, auxiliary_name_scope=False):
for k in range(num_fc):
self.relations.append(
ObjectRelationModule(
input_size=fc_dim,
                        embedding_dim=self.geometry_embedding_dim,
key_dim=key_dim,
num_groups=num_groups,
scope="r{}".format(k + 1)
)
)
    def call(self, x, proposals):
        """Run the conv layers, then alternate each fc layer with its relation module."""
for layer in self.convs:
x = layer(x)
if len(self.fcs):
if x.shape.ndims > 2:
x = flatten(x)
for fc, relation in zip(self.fcs, self.relations):
x = fc(x)
x = relation(x, proposals)
return x
@ROI_HEADS_REGISTRY.register()
class RelationRoiHeads(StandardROIHeads):
def _forward_box(self, features, proposals):
"""
Forward logic of the box prediction branch.
        Args:
            features (list[Tensor]): input features for box prediction, one
                tensor per feature level.
            proposals (SparseBoxList): the per-image object proposals with
                their matching ground truth; has fields "proposal_boxes",
                "objectness_logits", "gt_classes", and "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
box_features = self.box_pooler(features, proposals)
box_features = self.box_head(box_features, proposals)
pred_class_logits, pred_proposal_deltas = self.box_predictor(box_features)
del box_features
outputs = FastRCNNOutputs(
self.box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
self.smooth_l1_beta,
)
if self.training:
return outputs.losses()
else:
pred_instances, _ = outputs.inference(
self.test_score_thresh, self.test_nms_thresh,
self.test_detections_per_img, self.test_nms_cls_agnostic
)
return pred_instances
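
# A hypothetical configuration sketch (not taken from the repo) listing the
# cfg fields this module reads, inferred solely from the code above; the
# registry-name keys follow the usual detectron2 convention and are an
# assumption here.
#
#   cfg.MODEL.ROI_BOX_HEAD.NUM_FC                    # fc layers; one relation module per fc
#   cfg.MODEL.ROI_BOX_HEAD.FC_DIM                    # fc width == relation input_size
#   cfg.MODEL.ROI_BOX_RELATION_HEAD.NUM_GROUPS       # attention groups
#   cfg.MODEL.ROI_BOX_RELATION_HEAD.KEY_DIM          # query/key dimension
#   cfg.MODEL.ROI_BOX_RELATION_HEAD.GEOMETRY_EMBEDDING_DIM  # geometry embedding size
#
#   cfg.MODEL.ROI_HEADS.NAME = "RelationRoiHeads"    # select the relation ROI heads
#   cfg.MODEL.ROI_BOX_HEAD.NAME = "RelationBoxHead"  # select the relation box head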
| 2.21875 | 2 |