from datetime import datetime, date, timedelta
import csv
import srqi
import srdata
import os
def get_output_directory():
return os.path.join(os.path.abspath(srqi.__path__[0]), 'output')
def get_data_directory():
return os.path.join(os.path.abspath(srqi.__path__[0]),'Data')
_dir = os.path.abspath(srqi.__path__[0])
BJH_SYNGO_FILES = [os.path.join(_dir,'Data/BJH/April_Output_Org.xls'), os.path.join(_dir,'Data/BJH/May_Output_Org.xls')]
BJH_XML_FILE = os.path.join(_dir,'Data/BJH/all bjh.xml')
SLCH_SYNGO_FILES = [os.path.join(_dir,'Data/SLCH/April_IR_Output_Org.xls'), os.path.join(_dir,'Data/SLCH/May_IR_Output_Org.xls')]
SLCH_XML_FILE = os.path.join(_dir,'Data/SLCH/all slch.xml')
TEST_XML_FILE = os.path.join(_dir,'Data','sample.xml')
TEST_SYNGO_FILES = [os.path.join(_dir,'Data','sample_syngo.xls')]
def wx_date_to_python_date(wx_date):
import wx
assert isinstance(wx_date, wx.DateTime)
if wx_date.IsValid():
ymd = map(int, wx_date.FormatISODate().split('-'))
return date(*ymd)
else:
return None
def python_date_to_wx_date(py_date):
import wx
assert isinstance(py_date, (datetime, date))
tt = py_date.timetuple()
dmy = (tt[2], tt[1]-1, tt[0])
return wx.DateTimeFromDMY(*dmy)
def care_datetime_to_python_datetime(care_date):
care_date = str(care_date)
return datetime(int(care_date[:4]),int(care_date[4:6]),int(care_date[6:8]),int(care_date[8:10]),int(care_date[10:12]), int(care_date[12:14]))
def python_datetime_to_care_datetime(py_datetime):
return py_datetime.strftime("%Y%m%d%H%M%S")
def care_date_to_python_date(care_date):
care_date = str(care_date)
return date(int(care_date[:4]),int(care_date[4:6]),int(care_date[6:8]))
def python_date_to_care_date(python_date):
return python_date.strftime("%Y%m%d")
def write_csv(table, file_name = 'output.csv'):
writer = csv.writer(open(file_name,'wb'))
writer.writerows(table)
def total_seconds(time_delta):
td = time_delta
return (td.microseconds + (td.seconds + td.days * 24 * 3600.0) * 10**6) / 10**6
def multiply_timedelta(td, x):
return timedelta(days = td.days *x, seconds = td.seconds*x, microseconds = td.microseconds *int(x))
def transposed(lists):
if not lists: return []
return map(lambda *row: list(row), *lists)
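# A minimal usage sketch for transposed() (illustrative values; under Python 2,
# map() returns a list directly):
# transposed([[1, 2, 3], [4, 5, 6]])  ->  [[1, 4], [2, 5], [3, 6]]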
def get_procs(group = 'all'):
if group == 'bjh':
procs, extra_procs = srdata.process_file(BJH_XML_FILE, BJH_SYNGO_FILES)
elif group == 'slch':
procs, extra_procs = srdata.process_file(SLCH_XML_FILE, SLCH_SYNGO_FILES)
elif group == 'all':
procs, extra_procs = srdata.process_file(BJH_XML_FILE, BJH_SYNGO_FILES)
procs2, extra_procs2 = srdata.process_file(SLCH_XML_FILE, SLCH_SYNGO_FILES)
procs = procs + procs2
extra_procs = extra_procs + extra_procs2
elif group == 'test':
procs, extra_procs = srdata.process_file(TEST_XML_FILE, TEST_SYNGO_FILES)
else:
raise ValueError("Invalid group")
return procs, extra_procs
def get_procs_from_files(paths):
"""Return a list of procedures gleaned from a list of data files
Arguments:
- paths - iterable of absolute paths to data files. Files can be
Syngo data (.xls) or DICOM-SR (.xml). Eventually
extend to other.
"""
# this will eventually be more sophisticated
syngo_paths = [p for p in paths if os.path.splitext(p)[1] == '.xls']
sr_paths = [p for p in paths if os.path.splitext(p)[1] == '.xml']
return srdata.process_files(sr_paths, syngo_paths)
def average_fps(events):
"""Gets the average FPS weighted by event duration"""
if len(events) == 0:
raise ValueError("Cannot take average of empyt list")
total_fluoro_time = sum([e.get_duration() for e in events],timedelta(0) )
total_fluoro_seconds = total_seconds(total_fluoro_time)
mean_numerator = sum([multiply_timedelta(e.get_duration(),e.Pulse_Rate) for e in events], timedelta(0))
return total_seconds(mean_numerator)/total_fluoro_seconds
def is_subset(list1, list2):
"""Returns true if list 1 is a subset of list 2
(assumes neither list has any repeats)
"""
set1 = frozenset(list1)
set2 = frozenset(list2)
return set1 <= set2
def same_contents(list1,list2):
"""Returns true if list 1 has the exact same
contents as list 2. (assumes neither list has
any repeats)
"""
set1 = frozenset(list1)
set2 =frozenset(list2)
return set1 == set2
def matches(list1,list2):
"""Returns is_subset or same_contents
depending on whether or not the last
item in list1 is -99
"""
if int(list1[-1]) == -99:
return same_contents(list1[:-1],list2)
else:
return is_subset(list1,list2)
import numbers
def standard_cpt(cpt):
"""Given a cpt code as an integer, float
or string, convert it to a string in a
standard format.
* no leading or trailing whitespace
* no '.0' at the end of integers
* letters are all capitalized
"""
if isinstance(cpt, numbers.Number):
return str(int(cpt))
elif isinstance(cpt,basestring):
out = cpt.split('.')[0]
out = out.strip()
out = out.upper()
return out
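# A minimal usage sketch for standard_cpt() (illustrative values):
# standard_cpt(36620.0)    -> '36620'
# standard_cpt(' g0269 ')  -> 'G0269'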
def organize(iterable, key):
"""Put all of the elements in `iterable` into
a dictionary which maps possible return values
of key onto lists of items of iterable
iterable - any iterable object (e.g. a list, or tuple)
key - a function that takes items in iterable as inputs
Example:
organize([1,2,3], lambda x: x==2)
{False: [1, 3], True: [2]}
"""
out = {}
for item in iterable:
k = key(item)
if not k in out:
out[k] = []
out[k].append(item)
return out
def periodize_by_date(iterable, period_len, date_key):
"""Break up all the obects in iterable into time periods
Runs in linear time with respect to the # of days between the first
and the last object in `iterable`. (Would also be easy to implement
as n*log(n) with respect to the number of objects in iterable, but
for the data we're working on, that would generally be slower. TODO:
implement this so that it checks which would be faster and runs that
way)
Parameters:
iterable :
period_len : the length of the period in days
date_key : a function that takes an object in `iterable` and
returns a datetime.date object
"""
orgd_iter = organize(iterable, date_key)
first_date = min(orgd_iter.keys())
last_date = max(orgd_iter.keys())
current = first_date
periods = []
period_starts = []
while current <= last_date:
period_starts.append(current)
period_end = current + timedelta(days=period_len)
period = []
while current < period_end:
if current in orgd_iter:
period = period + orgd_iter[current]
current = current + timedelta(days =1)
periods.append(period)
assert(len(periods) == len(period_starts))
return periods, period_starts
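# A minimal usage sketch (hypothetical key function; `procs` stands in for any
# iterable of objects carrying a date):
# periods, period_starts = periodize_by_date(procs, 7, lambda p: p.get_start_date())
# groups the objects into consecutive 7-day buckets starting at the earliest date.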
import xlrd
def coerce_human_date(d, datemode = None):
"""Attempt to coerce d into a Python datetime.date
Generally d will have been retrieved from an excel
spreadsheet, so I expect it to be a string or an
excel date.
Args:
d - object to be coerced
datemode - optional xlrd datemode object
"""
if d is None:
raise ValueError("Cannot coerce None into date.")
if isinstance(d, date):
return d
if isinstance(d, basestring):
raise NotImplementedError("Can't coerce string into a date... yet")
date_tuple = xlrd.xldate_as_tuple(d,datemode)
return date(year = date_tuple[0],
month=date_tuple[1],
day=date_tuple[2])
_ARB_DATE = date(2000,1,1) # an arbitrary date
def subtract_times(t1, t2):
"""returns a datetime.timedelta object representing t1-t2
"""
t1 = datetime.combine(_ARB_DATE, t1)
t2 = datetime.combine(_ARB_DATE, t2)
return t1-t2
def add_to_time(t1, td):
"""Add a timedelta to a time.
Supports negative timedeltas and crossing midnight.
For example add_to_time(time(hour=1), timedelta(hours=-3))
returns time(hour=22)
"""
t1 = datetime.combine(_ARB_DATE, t1)
return (t1 + td).time()
def module_to_class_case(m_name):
m_name = m_name.split('.')[-1]
return m_name.title()
import pkgutil
import srqi.active_inquiries
from srqi.core import my_exceptions
def get_inquiry_classes():
"""Get a list of inquiry classes
"""
pkgpath = os.path.dirname(srqi.active_inquiries.__file__)
inq_module_names = [os.path.splitext(name)[0] for name in os.listdir(pkgpath) if os.path.splitext(name)[1] =='.py' and not name[0] =='_']
#inq_module_names = [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
temp = __import__('srqi.active_inquiries', globals(), locals(), inq_module_names,-1)
inq_modules = [getattr(temp, name) for name in inq_module_names]
inq_classes = []
for module in inq_modules:
try:
class_name = module_to_class_case(module.__name__)
inq_classes.append(getattr(module, class_name))
except AttributeError as ae:
raise my_exceptions.BadInquiryError("No class named " + str(class_name) + " found in " + module.__name__ + ". Please ensure you have named your inquiry class correctly.")
#inq_classes = [getattr(module,dir(module)[0]) for module in inq_modules]
for module_name, class_name in zip(inq_module_names, [inq_class.__name__.lower() for inq_class in inq_classes]):
try:
assert module_name == class_name
except AssertionError as ae:
print module_name + " != " + class_name
raise ae
#assert inq_module_names == [inq_class.__name__.lower() for inq_class in inq_classes]
return inq_classes
|
import re
from typing import List, Set, Union
def filter(items: List[str], query: Union[str, List[str]]) -> Set[str]:
"""Filter items in list.
Filters items in list using full match, substring match and regex match.
Args:
items (List[str]): Input list.
query (Union[str, List[str]]): Filter expression.
Returns:
Set[str]: Filtered items.
"""
matches: Set[str] = set()
if isinstance(query, str):
query = [query]
for query_item in query:
# Full match
matches = matches.union({item for item in items if query_item == item})
# Substring match
matches = matches.union({item for item in items if query_item in item})
# Regular expression match
regex = re.compile(query_item, re.IGNORECASE | re.ASCII)
matches = matches.union({item for item in items if regex.search(item)})
return matches
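# A minimal usage sketch, with illustrative inputs:
# filter(['alpha', 'beta', 'alphabet'], 'alpha')    -> {'alpha', 'alphabet'}
# filter(['alpha', 'beta', 'alphabet'], ['^beta$']) -> {'beta'}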
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 30 11:04:35 2020
@author: Mei
"""
import unittest
from gyms import Gyms
class GymTests(unittest.TestCase):
def setUp(self):
self.gyms = Gyms('gyms.csv')
def test_gym_not_found(self):
self.assertEqual(len(self.gyms.find('asdf')), 0)
def test_find_by_alias(self):
self.assertEqual(self.gyms.find('VMP'),
['veteransmemorialparkfremontvmp'])
self.assertEqual(self.gyms.find('VMPUC'),
['veteransmemorialparkunioncityvmpuc'])
self.assertEqual(self.gyms.find('Veterans Memorial Park Fremont'),
['veteransmemorialparkfremontvmp'])
if __name__ == '__main__':
unittest.main()
|
import mqtt_kube.binding.association
class Patcher(mqtt_kube.binding.association.Association):
def __init__(self, locus, mqtt, topic, valuemap):
super().__init__(mqtt, topic, valuemap)
self._locus = locus
def open(self):
self._mqtt.subscribe(self._topic, self._on_payload)
def _on_payload(self, payload, _timestamp):
match = self._valuemap.lookup(payload)
if match:
self._locus.write(match.value)
|
#!/usr/bin/python
# ccn-lite/test/py/nfnproxy-test.py
# demo for invoking named functions ("labeled Results")
# the user-defined functions are in ccn-lite/src/py/pubfunc/
import cStringIO
import os
import sys
import time
sys.path.append('../../src/py')
import ccnlite.client
import ccnlite.ndn2013 as ndn
# ----------------------------------------------------------------------
''' setup the UNIX environment with two nfn relays (written in C)
and the Python nfn proxy server
'''
print '*** initializing servers:'
os.system('killall -9 ccn-nfn-relay >/dev/null 2>&1')
os.system('killall -9 ccn-nfn-proxy.py >/dev/null 2>&1')
os.system('rm -f /tmp/mgmt1.sock /tmp/mgmt2.sock')
CCNL='../..'
CCNLPY='../../src/py'
CCNLUT='../../src/util'
CONTENT_FILE = './computation_content.ndntlv'
os.system(CCNL + '/bin/ccn-nfn-relay -s ndn2013 -u 9000 -x /tmp/mgmt1.sock -v debug >/tmp/nfn0.log 2>&1 &')
os.system(CCNL + '/bin/ccn-nfn-relay -s ndn2013 -u 9001 -x /tmp/mgmt2.sock -v debug >/tmp/nfn1.log 2>&1 &')
os.system(CCNLPY + '/ccn-nfn-proxy.py -u 127.0.0.1/9001 9002 >/tmp/nfn2.log 2>&1 &')
#Connect mgmt1 with mgmt2
os.system(CCNLUT + '/ccn-lite-ctrl -x /tmp/mgmt1.sock newUDPface any 127.0.0.1 9001 | ' + CCNLUT + '/ccn-lite-ccnb2xml | grep ACTION')
os.system(CCNLUT + '/ccn-lite-ctrl -x /tmp/mgmt1.sock prefixreg /pynfn 2 | ' + CCNLUT + '/ccn-lite-ccnb2xml | grep ACTION')
#Connect mgmt2 with proxy (=computeserver)
os.system(CCNLUT + '/ccn-lite-ctrl -x /tmp/mgmt2.sock newUDPface any 127.0.0.1 9002 | ' + CCNLUT + '/ccn-lite-ccnb2xml | grep ACTION')
os.system(CCNLUT + '/ccn-lite-ctrl -x /tmp/mgmt2.sock prefixreg /pynfn 2 | ' + CCNLUT + '/ccn-lite-ccnb2xml | grep ACTION')
#Add content to mgmt2, register it
os.system(CCNLUT + '/ccn-lite-ctrl -x /tmp/mgmt2.sock addContentToCache ' + CONTENT_FILE + ' | ' + CCNLUT + '/ccn-lite-ccnb2xml | grep ACTION')
os.system(CCNLUT + '/ccn-lite-ctrl -x /tmp/mgmt1.sock prefixreg /test 2 | ' + CCNLUT + '/ccn-lite-ccnb2xml | grep ACTION')
# ----------------------------------------------------------------------
nw = ccnlite.client.Access()
nw.connect("127.0.0.1", 9001)
# ----------------------------------------------------------------------
def printIt(pkts):
if pkts != None and pkts[0] != None:
name, content = ndn.parseData(cStringIO.StringIO(pkts[0]))
print content
print
print '*** testing NFN support in Python. Expect the following 8 lines:'
print '2'
print '4'
print '6'
print '222'
print '225'
print 'test/data'
print 'Hello World'
print '1'
print '\n*** starting now:'
# ----------------------------------------------------------------------
# test cases: async function evaluation
nw.getLabeledResult("", "add 1 1", callback=printIt) # 2
nw.getLabeledResult("", "add 2 2", callback=printIt) # 4
nw.getLabeledResult("", "add 3 3", callback=printIt) # 6
# wait for the threads to have finished
time.sleep(1)
# ----------------------------------------------------------------------
# test cases: synchronous function calls
printIt(nw.getLabeledResult("", "add 1 221")) # 222
printIt(nw.getLabeledResult("", "add 2 223")) # 225
# ----------------------------------------------------------------------
# test cases: NFN locator and multiple parameters (using synchronous call)
printIt(nw.getLabeledResult("/pynfn/hello",
"call 2 /myNamedFunctions/getName /test/data"))
printIt(nw.getLabeledResult("/pynfn/hello",
"call 1 /myNamedFunctions/returnHelloWorld"))
printIt(nw.getLabeledResult("/pynfn/hello",
"call 2 /myNamedFunctions/wordcount /test/data"))
# ----------------------------------------------------------------------
# cleanup
os.system('killall -9 ccn-nfn-relay >/dev/null 2>&1')
os.system('killall -9 ccn-nfn-proxy.py >/dev/null 2>&1')
os.system('rm -f /tmp/mgmt1.sock /tmp/mgmt2.sock')
# eof
|
'''
Multiple time series slices (1)
You can easily slice subsets corresponding to different time intervals from a time series. In particular, you can use strings like '2001:2005', '2011-03:2011-12', or '2010-04-19:2010-04-30' to extract data from time intervals of length 5 years, 10 months, or 12 days respectively.
Unlike slicing from standard Python lists, tuples, and strings, when slicing time series by labels (and other pandas Series & DataFrames by labels), the slice includes the right-most portion of the slice. That is, extracting my_time_series['1990':'1995'] extracts data from my_time_series corresponding to 1990, 1991, 1992, 1993, 1994, and 1995 inclusive.
You can use partial strings or datetime objects for indexing and slicing from time series.
For this exercise, you will use time series slicing to plot the time series aapl over its full 11-year range and also over a shorter 2-year range. You'll arrange these plots in a 2×1 grid of subplots.
INSTRUCTIONS
100XP
Plot the series aapl in 'blue' in the top subplot of a vertically-stacked pair of subplots, with the xticks rotated to 45 degrees.
Extract a slice named view from the series aapl containing data from the years 2007 to 2008 (inclusive). This has been done for you.
Plot the slice view in black in the bottom subplot.
'''
import matplotlib.pyplot as plt  # assumed import; aapl is a pandas Series pre-loaded by the exercise
# Plot the series in the top subplot in blue
plt.subplot(2,1,1)
plt.xticks(rotation=45)
plt.title('AAPL: 2001 to 2011')
plt.plot(aapl, color='blue')
# Slice aapl from '2007' to '2008' inclusive: view
view = aapl['2007':'2008']
# Plot the sliced data in the bottom subplot in black
plt.subplot(2,1,2)
plt.xticks(rotation=45)
plt.title('AAPL: 2007 to 2008')
plt.plot(view, color='black')
plt.tight_layout()
plt.show()
|
"""A small package for sending and recieving commands per tcp
commands: connect() connects to the server
send() sends a message/command to the server
recieve() recieves a message/command from the server
"""
import socket
import sys
class telnet():
def __init__(self, ip, port=23):
"""
Define the server address and the port
example: server = telnet.telnet('127.0.0.1', 2000)
server = telnet.telnet('127.0.0.1') standard port is 23
"""
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server = (ip, port)
def connect(self):
"""connect to the server"""
self.client.connect(self.server)
def send(self, cmd, form="UTF-8"):
"""send a message/command to the server
example: server.send('say hello', 'UTF-16')
server.send('say hello') standard is UTF-8
"""
self.client.send(bytes(cmd, form))
def recieve(self, bytess):
"""recieve a message/command from the server
example: server.recieve(1024) number of bytes
"""
return str(self.client.recv(bytess), 'UTF-8') |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.stats_ledger_version import StatsLedgerVersion
from openapi_server import util
from openapi_server.models.stats_ledger_version import StatsLedgerVersion # noqa: E501
class StatsLedger(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, visible_name=None, id=None, version=None, report_uuid=None): # noqa: E501
"""StatsLedger - a model defined in OpenAPI
:param visible_name: The visible_name of this StatsLedger. # noqa: E501
:type visible_name: str
:param id: The id of this StatsLedger. # noqa: E501
:type id: str
:param version: The version of this StatsLedger. # noqa: E501
:type version: StatsLedgerVersion
:param report_uuid: The report_uuid of this StatsLedger. # noqa: E501
:type report_uuid: str
"""
self.openapi_types = {
'visible_name': str,
'id': str,
'version': StatsLedgerVersion,
'report_uuid': str
}
self.attribute_map = {
'visible_name': 'visible_name',
'id': 'id',
'version': 'version',
'report_uuid': 'report_uuid'
}
self._visible_name = visible_name
self._id = id
self._version = version
self._report_uuid = report_uuid
@classmethod
def from_dict(cls, dikt) -> 'StatsLedger':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The stats_ledger of this StatsLedger. # noqa: E501
:rtype: StatsLedger
"""
return util.deserialize_model(dikt, cls)
def to_dict(self, prefix=""):
"""Returns the model as a dict:
:return: The StatsLedger as a dict
:rtype: dict
"""
return { 'visible_name': self._visible_name,
'id': self._id,
'version': self._version,
'report_uuid': self._report_uuid }
@property
def visible_name(self):
"""Gets the visible_name of this StatsLedger.
:return: The visible_name of this StatsLedger.
:rtype: str
"""
return self._visible_name
@visible_name.setter
def visible_name(self, visible_name):
"""Sets the visible_name of this StatsLedger.
:param visible_name: The visible_name of this StatsLedger.
:type visible_name: str
"""
self._visible_name = visible_name
@property
def id(self):
"""Gets the id of this StatsLedger.
:return: The id of this StatsLedger.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this StatsLedger.
:param id: The id of this StatsLedger.
:type id: str
"""
self._id = id
@property
def version(self):
"""Gets the version of this StatsLedger.
:return: The version of this StatsLedger.
:rtype: StatsLedgerVersion
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this StatsLedger.
:param version: The version of this StatsLedger.
:type version: StatsLedgerVersion
"""
self._version = version
@property
def report_uuid(self):
"""Gets the report_uuid of this StatsLedger.
:return: The report_uuid of this StatsLedger.
:rtype: str
"""
return self._report_uuid
@report_uuid.setter
def report_uuid(self, report_uuid):
"""Sets the report_uuid of this StatsLedger.
:param report_uuid: The report_uuid of this StatsLedger.
:type report_uuid: str
"""
self._report_uuid = report_uuid
|
import os
import mysql.connector
class Orders:
def __init__(self, products_dac=None):
self.connection_string = eval(os.getenv('CONNECTION_STRING'))
self.products_dac = products_dac
if products_dac is not None:
self.products_dac.orders_dac = self
def insert(self, user, order:dict):
connection = mysql.connector.connect(**self.connection_string)
# TBD
# connection.cursor().execute()
def get_products_reserved(self, product_id:int):
# TBD
pass
|
"""
`gef` command test module
"""
from tests.utils import gdb_run_cmd, GefUnitTestGeneric
class GefCommand(GefUnitTestGeneric):
"""`gef` command test module"""
def test_cmd_gef(self):
res = gdb_run_cmd("gef")
self.assertNoException(res)
self.assertIn("GEF - GDB Enhanced Features", res)
def test_cmd_gef_config(self):
pass
def test_cmd_gef_help(self):
pass
def test_cmd_gef_missing(self):
pass
def test_cmd_gef_restore(self):
pass
def test_cmd_gef_run(self):
pass
def test_cmd_gef_save(self):
pass
def test_cmd_gef_set(self):
res = gdb_run_cmd("gef set args $_gef0",
before=("pattern create -n 4", ),
after=("show args"))
self.assertNoException(res)
self.assertIn("aaaabaaacaaadaaaeaaafaaagaaahaaaiaaajaaakaaalaaamaaan", res)
res = gdb_run_cmd("gef set args $_gef42",
before=("pattern create -n 4", ),
after=("show args"))
self.assertException(res)
|
import sys
import os
import zipfile
from histdatacom.concurrency import get_pool_cpu_count
from histdatacom.concurrency import ProcessPool
from rich import print
class _CSVs:
def __init__(self, args_, records_current_, records_next_):
# setting relationship to global outer parent
self.args = args_
global records_current
records_current = records_current_
global records_next
records_next = records_next_
def extract_csv(self, record, args, records_current, records_next):
try:
if "CSV_ZIP" in record.status:
zip_path = record.data_dir + record.zip_filename
with zipfile.ZipFile(zip_path, "r") as zip_ref:
[record.csv_filename] = [x for x in zip_ref.namelist() if ".csv" in x or ".xlsx" in x]
zip_ref.extract(record.csv_filename, path=record.data_dir)
os.remove(zip_path)
record.status = "CSV_FILE"
record.write_info_file(base_dir=args['default_download_dir'])
records_next.put(record)
except Exception:
print("Unexpected error:", sys.exc_info())
record.delete_info_file()
raise
finally:
records_current.task_done()
def extract_csvs(self, records_current, records_next):
pool = ProcessPool(self.extract_csv,
self.args,
"Extracting", "CSVs...",
get_pool_cpu_count(self.args['cpu_utilization']))
pool(records_current, records_next)
|
import zipfile
import io
import requests
inventory_to_FRS_pgm_acronymn = {"NEI":"EIS","TRI":"TRIS","eGRID":"EGRID","GHGRP":"E-GGRT","RCRAInfo":"RCRAINFO","DMR":"NPDES"}
def download_extract_FRS_combined_national(FRSpath):
url = 'https://www3.epa.gov/enviro/html/fii/downloads/state_files/national_combined.zip'
request = requests.get(url).content
zip_file = zipfile.ZipFile(io.BytesIO(request))
zip_file.extractall(FRSpath)
def filter_bridges_by_program_list(bridges,program_list):
bridges = bridges[bridges['PGM_SYS_ACRNM'].isin(program_list)]
return bridges
def filter_bridges_by_inventory_list(bridges,inventory_list):
bridges = bridges[bridges['PGM_SYS_ACRNM'].isin(inventory_list)]
return bridges
def filter_bridges_by_facility_list(bridges,facility_list):
bridges = bridges[bridges['REGISTRY_ID'].isin(facility_list)]
return bridges
def list_facilities_not_in_bridge(bridges, facility_list):
facilities = bridges[bridges['REGISTRY_ID'].isin(facility_list)]
return facilities
#Returns list of FRS program acronyms corresponding to the given inventories
def get_programs_for_inventory_list(list_of_inventories):
program_list = []
for l in list_of_inventories:
pgm_acronym = inventory_to_FRS_pgm_acronymn[l]
program_list.append(pgm_acronym)
return program_list
def invert_inventory_to_FRS():
FRS_to_inventory_pgm_acronymn = {v: k for k, v in inventory_to_FRS_pgm_acronymn.items()}
return FRS_to_inventory_pgm_acronymn
|
import numpy as np
from core.model import Model
from layers.input import Input
from layers.dense import Dense
from util.cost_functions import L2
if __name__ == '__main__':
# demo MLP
data_x = np.array([1, 2])
data_y = np.array([0.2, 0.4])
train_x = np.reshape(data_x, (len(data_x), 1, 1))
train_y = np.reshape(data_y, (len(data_y), 1, 1))
model = Model()
model.add(Input(1))
model.add(Dense(3))
model.add(Dense(1))
model.compile(cost=L2(), optimizer='sgd', num_epochs=30000, batch_size=1, lr=0.1)
model.train(train_x, train_y)
test_data_x = np.array([1])
test_x = np.reshape(test_data_x, (len(test_data_x), 1))
print(model.predict(test_x)) |
'''
@author: Dirk Rother
@contact: dirrot@web.de
@license: GPL
@version: 0.1
'''
class TradingPair(object):
'''
This is a model for a TradingPair object.
'''
def __init__(self, id = "", price = "", price_before_24h = "", volume_first = "", volume_second = "", volume_btc = "", best_market = "", latest_trade = ""):
'''
Simple constructor for a TradingPair.
'''
self.id = id
self.price = price
self.price_before_24h = price_before_24h
self.volume_first = volume_first
self.volume_second = volume_second
self.volume_btc = volume_btc
self.best_market = best_market
self.latest_trade = latest_trade
def __repr__(self):
'''
The type representation of a TradingPair.
'''
return "TradingPair"
def __str__(self):
'''
The string representation of a TradingPair.
'''
return "TradingPair: [" + self.id + "] - " + self.price
|
#!/usr/bin/env python
#encoding:utf-8
import os
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from app import create_app, db
from app.delete import deletenone
from app.models import User, Follow, Role, Permission, Post, Comment, Message, Category, Star, Webpush
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
Permission=Permission, Post=Post, Comment=Comment,Message=Message,Category=Category,Star=Star,Webpush=Webpush,deletenone=deletenone)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('FLASK_COVERAGE'):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
"""Start the application under the code profiler."""
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
profile_dir=profile_dir)
app.run()
# test database initialization
@manager.command
def datainit():
from app.models import Role,User,Post,Category
print ("Category init")
Category.insert_categorys()
print ("Role init")
User.add_self_follows()
Role.insert_roles()
print ("User and Post generate")
User.generate_fake(100)
Post.generate_fake(100)
wen=User.query.filter_by(username='wen').first()
if not wen:
print ("make wen in admin")
wen=User(username='wen',email='2535199139@qq.com',password='meian',confirmed=True)
wen.role=Role.query.filter_by(permissions=0xff).first()
db.session.add(wen)
db.session.commit()
else :
print ("User(wen) already in data")
print ("all_data readly now")
@manager.command
def deploy():
"""Run deployment tasks."""
from flask.ext.migrate import upgrade
from app.models import Role, User
# migrate database to latest revision
upgrade()
# create user roles
Role.insert_roles()
# create self-follows for all users
User.add_self_follows()
if __name__ == '__main__':
manager.run()
|
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input, UpSampling2D
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import regularizers
def get_VGG_model(input_shape, labels=10):
"""
A VGG model for CIFAR.
"""
weight_decay = 0.0005
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=input_shape, kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay), name='embedding'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(labels, activation='softmax', name='softmax'))
return model
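# A minimal usage sketch (not part of the original module): build the model for
# CIFAR-10-shaped inputs and print its layer summary. The input shape and label
# count below are illustrative assumptions.
if __name__ == '__main__':
    vgg = get_VGG_model((32, 32, 3), labels=10)
    vgg.summary()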
|
from google.appengine.ext import db
def DerivedProperty(func=None, *args, **kwargs): # noqa: D401
"""Derived datastore property.
Derived properties are not set directly, but are instead generated by a
function when required. They are useful to provide fields in the datastore
that can be used for filtering or sorting in ways that are not otherwise
possible with unmodified data - for example, filtering by the length of a
BlobProperty, or case insensitive matching by querying the lower cased version
of a string.
DerivedProperty can be declared as a regular property, passing a function as
the first argument, or it can be used as a decorator for the function that
does the calculation, either with or without arguments.
"""
if func:
# Regular invocation, or used as a decorator without arguments
return _DerivedProperty(func, *args, **kwargs)
else:
# We're being called as a decorator with arguments
def decorate(decorated_func):
return _DerivedProperty(decorated_func, *args, **kwargs)
return decorate
class _DerivedProperty(db.Property):
def __init__(self, derive_func, *args, **kwargs):
"""Constructor.
Args:
func: A function that takes one argument, the model instance, and
returns a calculated value.
"""
super(_DerivedProperty, self).__init__(*args, **kwargs)
self.derive_func = derive_func
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
return self.derive_func(model_instance)
def __set__(self, model_instance, value):
raise db.DerivedPropertyError("Cannot assign to a DerivedProperty")
class LengthProperty(_DerivedProperty):
"""Convenience class for recording the length of another field."""
def __init__(self, property, *args, **kwargs):
"""Constructor.
Args:
property: The property whose length should be recorded.
"""
super(LengthProperty, self).__init__(
lambda self: len(property.__get__(self, type(self))),
*args, **kwargs)
class LowerCaseProperty(_DerivedProperty):
"""A convenience class for generating lower-cased fields for filtering."""
def __init__(self, property, *args, **kwargs):
"""Constructor.
Args:
property: The property to lower-case.
"""
super(LowerCaseProperty, self).__init__(
lambda self: property.__get__(self, type(self)).lower(),
*args, **kwargs
)
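# A minimal usage sketch (hypothetical model, not part of the original module),
# showing the two declaration styles described in the DerivedProperty docstring:
# wrapping an existing property, and decorating a computing function.
class _ExampleEntity(db.Model):
    name = db.StringProperty()
    name_lower = LowerCaseProperty(name)

    @DerivedProperty
    def name_length(self):
        return len(self.name or '')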
|
import itertools
import logging
import os
import sys
import typing
from ._types import URL, Origin
_LOGGER_INITIALIZED = False
TRACE_LOG_LEVEL = 5
DEFAULT_PORTS = {b"http": 80, b"https": 443}
class Logger(logging.Logger):
# Stub for type checkers.
def trace(self, message: str, *args: typing.Any, **kwargs: typing.Any) -> None:
... # pragma: nocover
def get_logger(name: str) -> Logger:
"""
Get a `logging.Logger` instance, and optionally
set up debug logging based on the HTTPCORE_LOG_LEVEL or HTTPX_LOG_LEVEL
environment variables.
"""
global _LOGGER_INITIALIZED
if not _LOGGER_INITIALIZED:
_LOGGER_INITIALIZED = True
logging.addLevelName(TRACE_LOG_LEVEL, "TRACE")
log_level = os.environ.get(
"HTTPCORE_LOG_LEVEL", os.environ.get("HTTPX_LOG_LEVEL", "")
).upper()
if log_level in ("DEBUG", "TRACE"):
logger = logging.getLogger("httpcore")
logger.setLevel(logging.DEBUG if log_level == "DEBUG" else TRACE_LOG_LEVEL)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
fmt="%(levelname)s [%(asctime)s] %(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
logger.addHandler(handler)
logger = logging.getLogger(name)
def trace(message: str, *args: typing.Any, **kwargs: typing.Any) -> None:
logger.log(TRACE_LOG_LEVEL, message, *args, **kwargs)
logger.trace = trace # type: ignore
return typing.cast(Logger, logger)
def url_to_origin(url: URL) -> Origin:
scheme, host, explicit_port = url[:3]
default_port = DEFAULT_PORTS[scheme]
port = default_port if explicit_port is None else explicit_port
return scheme, host, port
def origin_to_url_string(origin: Origin) -> str:
scheme, host, explicit_port = origin
port = f":{explicit_port}" if explicit_port != DEFAULT_PORTS[scheme] else ""
return f"{scheme.decode('ascii')}://{host.decode('ascii')}{port}"
def exponential_backoff(factor: float) -> typing.Iterator[float]:
yield 0
for n in itertools.count(2):
yield factor * (2 ** (n - 2))
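# A minimal usage sketch (illustrative): the first few delays produced by
# exponential_backoff(0.5) are 0, 0.5, 1.0, 2.0, 4.0 -- an immediate first
# retry, followed by waits that double each time.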
|
import time
class ContextTimer(object):
def __enter__(self):
self.t = time.time()
def __exit__(self, type, value, traceback):
print('time elapsed: {:.8f}'.format(time.time() - self.t)) |
from typing import List
import os
import sys
from modules.models.sao import Sao
def record_sao_log(patent_number: str, description_text: str, sentences: List[str], saos: List[Sao], filePath: str):
try:
with open(filePath, 'a', encoding="utf-8") as file:
file.write(patent_number + '\n')
file.write(description_text + '\n\n')
file.write('Sentences selected for SAO extraction:\n')
for sentence in sentences:
file.write(sentence + '\n')
file.write('\nSAO:\n')
for sao in saos:
file.write('Subject: ' + sao.subject + '\n')
file.write('Action: ' + sao.action + '\n')
file.write('Object: ' + sao.object + '\n\n')
file.write('\n')
except EnvironmentError:
print("Ошибка при записи в файл:" + filePath)
|
import os
from lib.loaddata import DataSet
class PoliceData(DataSet):
'''
List of street crimes which are all geolocated
'''
NAME = 'police'
BOTTOM = 0.05
MIDDLE = 0.15
def load(self):
directory = './data/street_crime/'
for file in os.listdir(directory):
handle = open(os.path.join(directory, file), 'r')
self.load_data(handle)
def load_data(self, handle):
line_n = 0
for line in handle:
if line_n != 0:
split = line.split(',')
if split[4] and split[5] and split[9]:
self.data.append({'longitude': float(split[4]), 'latitude': float(split[5]), 'type': split[9]})
else:
line_n += 1
def get_value(self, row):
if row['type'] == 'Burglary':
return 2
elif row['type'] == 'Robbery':
return 3
elif row['type'] == 'Violent crime':
return 4
else:
return 1 |
import logging
import os
from openpyxl import load_workbook
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from dmd.models import DMDProduct
from gcutils.bigquery import Client
class Command(BaseCommand):
help = ('Parse BNF->dm+d mapping supplied by NHSBSA and update tables '
'accordingly.')
def add_arguments(self, parser):
parser.add_argument('--filename')
def handle(self, *args, **options):
filename = options['filename']
if filename is None:
base_dirname = os.path.join(settings.PIPELINE_DATA_BASEDIR, 'dmd_snomed')
dirname = sorted(os.listdir(base_dirname))[-1]
filenames = os.listdir(os.path.join(base_dirname, dirname))
assert len(filenames) == 1
filename = os.path.join(base_dirname, dirname, filenames[0])
wb = load_workbook(filename=filename)
rows = wb.active.rows
headers = rows[0]
assert headers[0].value.lower() == 'bnf code'
assert headers[2].value.lower() == 'snomed code'
with transaction.atomic():
with connection.cursor() as cursor:
for row in rows[1:]: # skip header
bnf_code = row[0].value
snomed_code = row[2].value
sql = "UPDATE dmd_product SET BNF_CODE = %s WHERE DMDID = %s "
cursor.execute(sql.lower(), [bnf_code, snomed_code])
rowcount = cursor.rowcount
if not rowcount:
logging.warning(
"When adding BNF codes, could not find %s", snomed_code)
Client('dmd').upload_model(DMDProduct)
|
# try and friends
<stmt>try:
p<body>ass
e<ex1>xcept ArithmeticError, e:
pass
e<ex2>xcept:
pass
e<else>lse:
pass
f<finally>inally:
pass
|
# Copyright (c) 2017, John Skinner
import unittest
import unittest.mock as mock
import os
import time
import yaml
import arvet.config.global_configuration as global_conf
class TestGlobalConfiguration(unittest.TestCase):
def test_save_global_config_writes_to_file(self):
mock_open = mock.mock_open()
filename = 'test_config_file_1'
with mock.patch('arvet.config.global_configuration.open', mock_open, create=True):
global_conf.save_global_config(filename, {'a': 1})
self.assertTrue(mock_open.called)
self.assertEqual(filename, mock_open.call_args[0][0])
@mock.patch('arvet.config.global_configuration.yaml.load', autospec=yaml.load)
@mock.patch('arvet.config.global_configuration.os.path.isfile', autospec=os.path.isfile)
def test_load_global_config_reads_config_file_if_available(self, mock_isfile, mock_yaml_load):
mock_isfile.return_value = True
mock_yaml_load.return_value = {}
mock_open = mock.mock_open()
filename = 'test_config_file_2'
with mock.patch('arvet.config.global_configuration.open', mock_open, create=True):
global_conf.load_global_config(filename)
self.assertTrue(mock_open.called)
self.assertEqual(filename, mock_open.call_args[0][0])
@mock.patch('arvet.config.global_configuration.save_global_config', autospec=global_conf.save_global_config)
@mock.patch('arvet.config.global_configuration.yaml.load', autospec=yaml.load)
@mock.patch('arvet.config.global_configuration.os.path.isfile', autospec=os.path.isfile)
def test_load_global_config_saves_config_file_if_not_available(self, mock_isfile, mock_yaml_load, mock_save):
mock_isfile.return_value = False
mock_yaml_load.return_value = {}
filename = 'test_config_file_3'
global_conf.load_global_config(filename)
self.assertTrue(mock_save.called)
self.assertEqual(filename, mock_save.call_args[0][0])
@mock.patch('arvet.config.global_configuration.save_global_config', autospec=global_conf.save_global_config)
@mock.patch('arvet.config.global_configuration.yaml.load', autospec=yaml.load)
@mock.patch('arvet.config.global_configuration.os.path.isfile', autospec=os.path.isfile)
def test_load_global_config_does_not_save_config_file_if_available(self, mock_isfile, mock_yaml_load, mock_save):
mock_isfile.return_value = True
mock_yaml_load.return_value = {}
filename = 'test_config_file_4'
with mock.patch('arvet.config.global_configuration.open', mock.mock_open(), create=True):
global_conf.load_global_config(filename)
self.assertFalse(mock_save.called)
@mock.patch('arvet.config.global_configuration.time.sleep', autospec=time.sleep)
@mock.patch('arvet.config.global_configuration.yaml.load', autospec=yaml.load)
@mock.patch('arvet.config.global_configuration.os.path.isfile', autospec=os.path.isfile)
def test_load_global_config_waits_and_retries_three_times_if_load_failed(self, mock_isfile, mock_yaml_load,
mock_sleep):
mock_isfile.return_value = True
mock_yaml_load.return_value = None
with mock.patch('arvet.config.global_configuration.open', mock.mock_open(), create=True):
global_conf.load_global_config('test_config_file_5')
self.assertEqual(3, mock_yaml_load.call_count)
self.assertEqual(3, mock_sleep.call_count)
@mock.patch('arvet.config.global_configuration.yaml.load', autospec=yaml.load)
@mock.patch('arvet.config.global_configuration.os.path.isfile', autospec=os.path.isfile)
def test_load_global_config_returns_read_config_merged_with_defaults(self, mock_isfile, mock_yaml_load):
mock_isfile.return_value = True
config = {
'test': 12.35,
'database': {
'database': 'a_different_database',
'host': '127.0.0.2',
'port': 'liverpool'
},
'image_manager': {
'path': '/dev/null',
'group': 'the null set'
},
'job_system_config': {
'a': 1
},
'logging': {
'demo': 'ATestProperty'
}
}
mock_yaml_load.return_value = config
with mock.patch('arvet.config.global_configuration.open', mock.mock_open(), create=True):
result = global_conf.load_global_config('test_config_file_6')
self.assertDictSubset(config, result)
def assertDictSubset(self, subset, actual):
"""
Recursively assert that an actual dict contains all the values in a subset dict.
The actual dict may have extra keys (which is why we can't use assertEqual)
:param subset: The expected values that must exist in the actual dict
:param actual: The dict that is expected to contain them
:return:
"""
for key, value in subset.items():
self.assertIn(key, actual)
if isinstance(value, dict):
self.assertDictSubset(value, actual[key])
else:
self.assertEqual(value, actual[key])
|
from ipdb import set_trace as st
from sklearn.datasets import load_iris
def load_dataset():
iris = load_iris()
return iris.data, iris.target
|
"""MPD output parsing utilities."""
import re
from typing import Callable, Iterable, List, Tuple, Type, TypeVar, Union, overload
from .errors import CommandError, ErrorCode, get_error_constructor
from .types import Song
from .util import from_json_like, split_on
__all__ = [
'normalize',
'split_item',
'from_lines',
'parse_single',
'parse_playlist',
'parse_error',
]
T = TypeVar('T')
class IncompatibleErrorMessage(Exception):
"""Exception in case MPD sends an error in a different format somehow."""
def normalize(name: str) -> str:
"""Normalize a value name to a valid Python (PEP8 compliant) identifier.
Args:
name: The name of a value returned by MPD.
Returns:
The normalized name, in all lowercase with - replaced by _.
"""
return name.lower().replace('-', '_')
def split_item(item: str) -> Tuple[str, str]:
"""Split a key/value pair in a string into a tuple (key, value).
This also strips space from both sides of either.
Args:
item: A key/value string in 'key: value' format.
Returns:
The (key, value) tuple, with both sides stripped.
"""
lhs, rhs = item.split(':', maxsplit=1)
return lhs.strip(), rhs.strip()
def from_lines(cls: Type[T], lines: Iterable[str]) -> T:
"""Make a `cls` object from a list of lines in MPD output format."""
values = (split_item(l) for l in lines)
normalized = {normalize(k): v for k, v in values}
return from_json_like(cls, normalized)
def parse_error(error_line: str, partial: List[str]) -> CommandError:
"""Parse an error from MPD.
Errors are of format
`ACK [CODE@LINE] {COMMAND} MESSAGE`
Args:
error_line: an ACK line from MPD.
Returns:
A CommandError (or subclass) object with the error data.
"""
match = ERROR_RE.match(error_line)
if match is None:
raise IncompatibleErrorMessage(error_line)
code, line, command, message = match.groups()
error_code = ErrorCode(int(code))
return get_error_constructor(error_code)(int(line), command, message, partial)
@overload
def parse_single(lines: Iterable[str]) -> str: # pylint: disable=unused-argument
"""Overload."""
@overload # noqa: F811
def parse_single( # pylint: disable=function-redefined
lines: Iterable[str], # pylint: disable=unused-argument
cast: Callable[[str], T], # pylint: disable=unused-argument
) -> T:
"""Overload."""
def parse_single( # noqa: F811, pylint: disable=function-redefined
lines: Iterable[str], cast: Callable[[str], T] = None
) -> Union[str, T]:
"""Parse a single return value and discard its name.
Args:
lines: The return from MPD as a list of a single line.
cast: An optional function to read the string into another type.
Returns:
The value as a string or converted into the chosen type.
"""
(result,) = lines
_, value = split_item(result)
if cast is None:
return value
return cast(value)
def is_file(line: str) -> bool:
"""Check if a return line is a song file."""
return line.startswith('file:')
def parse_playlist(lines: Iterable[str]) -> List[Song]:
"""Parse playlist information into a list of songs."""
split = split_on(is_file, lines)
return [from_lines(Song, song_info) for song_info in split]
ERROR_RE = re.compile(r'ACK\s+\[(\d+)@(\d+)\]\s+\{(.*)\}\s+(.*)')
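# A minimal usage sketch for parse_error(), with an illustrative MPD-style line:
# parse_error('ACK [50@0] {play} No such song', partial=[])
# would return a CommandError subclass carrying the error code, line 0,
# the command 'play' and the message text.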
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-29 16:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('discount', '0005_auto_20170919_0839'),
]
operations = [
migrations.AlterModelOptions(
name='sale',
options={'permissions': (('view_sale', 'Can view sales'), ('edit_sale', 'Can edit sales'))},
),
migrations.AlterModelOptions(
name='voucher',
options={'permissions': (('view_voucher', 'Can view vouchers'), ('edit_voucher', 'Can edit vouchers'))},
),
]
|
from sklearn import tree
#[height, weight, shoe size]
x = [[165, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166,65,40], [190,90,47], [175,64,38], [177,70,40],[159,55,37],[171,75,42],[181,85,43]]
y= ['male', 'female', 'female', 'female', 'male','male','male','female','male', 'female','male',]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x,y)
pred = clf.predict([[190,70,43]])
print(pred) |
class Gym:
def __init__(self):
self.customers = []
self.trainers = []
self.equipment = []
self.plans = []
self.subscriptions = []
def add_customer(self, customer):
if customer in self.customers:
return
self.customers.append(customer)
def add_trainer(self, trainer):
if trainer in self.trainers:
return
self.trainers.append(trainer)
def add_equipment(self, equipment):
if equipment in self.equipment:
return
self.equipment.append(equipment)
def add_plan(self, plan):
if plan in self.plans:
return
self.plans.append(plan)
def add_subscription(self, subscription):
if subscription in self.subscriptions:
return
self.subscriptions.append(subscription)
def subscription_info(self, subscription_id):
subscription = self.__get_object_by_id(self.subscriptions, subscription_id)
customer = self.__get_object_by_id(self.customers, subscription.customer_id)
trainer = self.__get_object_by_id(self.trainers, subscription.trainer_id)
plan = self.__get_object_by_id(self.plans, subscription.exercise_id)
equipment = self.__get_object_by_id(self.equipment, plan.equipment_id)
result = str(subscription) + "\n"
result += str(customer) + "\n"
result += str(trainer) + "\n"
result += str(equipment) + "\n"
result += str(plan)
return result
@staticmethod
def __get_object_by_id(object, object_id):
all_object = [x for x in object if x.id == object_id]
return all_object[0]
|
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from future.utils import itervalues, iteritems
from collections import defaultdict, namedtuple
from ycm import vimsupport
import vim
class DiagnosticInterface( object ):
def __init__( self, user_options ):
self._user_options = user_options
# Line and column numbers are 1-based
self._buffer_number_to_line_to_diags = defaultdict(
lambda: defaultdict( list ) )
self._next_sign_id = 1
self._previous_line_number = -1
self._diag_message_needs_clearing = False
self._placed_signs = []
def OnCursorMoved( self ):
line, _ = vimsupport.CurrentLineAndColumn()
line += 1 # Convert to 1-based
if line != self._previous_line_number:
self._previous_line_number = line
if self._user_options[ 'echo_current_diagnostic' ]:
self._EchoDiagnosticForLine( line )
def GetErrorCount( self ):
return len( self._FilterDiagnostics( _DiagnosticIsError ) )
def GetWarningCount( self ):
return len( self._FilterDiagnostics( _DiagnosticIsWarning ) )
def PopulateLocationList( self, diags ):
vimsupport.SetLocationList(
vimsupport.ConvertDiagnosticsToQfList( diags ) )
def UpdateWithNewDiagnostics( self, diags ):
normalized_diags = [ _NormalizeDiagnostic( x ) for x in diags ]
self._buffer_number_to_line_to_diags = _ConvertDiagListToDict(
normalized_diags )
if self._user_options[ 'enable_diagnostic_signs' ]:
self._placed_signs, self._next_sign_id = _UpdateSigns(
self._placed_signs,
self._buffer_number_to_line_to_diags,
self._next_sign_id )
if self._user_options[ 'enable_diagnostic_highlighting' ]:
_UpdateSquiggles( self._buffer_number_to_line_to_diags )
if self._user_options[ 'always_populate_location_list' ]:
self.PopulateLocationList( normalized_diags )
def _EchoDiagnosticForLine( self, line_num ):
buffer_num = vim.current.buffer.number
diags = self._buffer_number_to_line_to_diags[ buffer_num ][ line_num ]
if not diags:
if self._diag_message_needs_clearing:
# Clear any previous diag echo
vimsupport.PostVimMessage( '', warning = False )
self._diag_message_needs_clearing = False
return
text = diags[ 0 ][ 'text' ]
if diags[ 0 ].get( 'fixit_available', False ):
text += ' (FixIt)'
vimsupport.PostVimMessage( text, warning = False, truncate = True )
self._diag_message_needs_clearing = True
def _FilterDiagnostics( self, predicate ):
matched_diags = []
line_to_diags = self._buffer_number_to_line_to_diags[
vim.current.buffer.number ]
for diags in itervalues( line_to_diags ):
matched_diags.extend( list( filter( predicate, diags ) ) )
return matched_diags
def _UpdateSquiggles( buffer_number_to_line_to_diags ):
vimsupport.ClearYcmSyntaxMatches()
line_to_diags = buffer_number_to_line_to_diags[ vim.current.buffer.number ]
for diags in itervalues( line_to_diags ):
for diag in diags:
location_extent = diag[ 'location_extent' ]
is_error = _DiagnosticIsError( diag )
if location_extent[ 'start' ][ 'line_num' ] < 0:
location = diag[ 'location' ]
vimsupport.AddDiagnosticSyntaxMatch(
location[ 'line_num' ],
location[ 'column_num' ] )
else:
vimsupport.AddDiagnosticSyntaxMatch(
location_extent[ 'start' ][ 'line_num' ],
location_extent[ 'start' ][ 'column_num' ],
location_extent[ 'end' ][ 'line_num' ],
location_extent[ 'end' ][ 'column_num' ],
is_error = is_error )
for diag_range in diag[ 'ranges' ]:
vimsupport.AddDiagnosticSyntaxMatch(
diag_range[ 'start' ][ 'line_num' ],
diag_range[ 'start' ][ 'column_num' ],
diag_range[ 'end' ][ 'line_num' ],
diag_range[ 'end' ][ 'column_num' ],
is_error = is_error )
def _UpdateSigns( placed_signs, buffer_number_to_line_to_diags, next_sign_id ):
new_signs, kept_signs, next_sign_id = _GetKeptAndNewSigns(
placed_signs, buffer_number_to_line_to_diags, next_sign_id
)
# Dummy sign used to prevent "flickering" in Vim when last mark gets
# deleted from buffer. Dummy sign prevents Vim from collapsing the sign column
# in that case.
# There's also a vim bug which causes the whole window to redraw in some
# conditions (vim redraw logic is very complex). But, somehow, if we place a
# dummy sign before placing other "real" signs, it will not redraw the
# buffer (patch to vim pending).
dummy_sign_needed = not kept_signs and new_signs
if dummy_sign_needed:
vimsupport.PlaceDummySign( next_sign_id + 1,
vim.current.buffer.number,
new_signs[ 0 ].line )
# We place only those signs that haven't been placed yet.
new_placed_signs = _PlaceNewSigns( kept_signs, new_signs )
# We use incremental placement, so signs that are already placed on the correct
# lines will not be deleted and placed again, which should improve performance
# when there are many diags. Signs which no longer exist in the current diags
# are deleted.
_UnplaceObsoleteSigns( kept_signs, placed_signs )
if dummy_sign_needed:
vimsupport.UnPlaceDummySign( next_sign_id + 1, vim.current.buffer.number )
return new_placed_signs, next_sign_id
def _GetKeptAndNewSigns( placed_signs, buffer_number_to_line_to_diags,
next_sign_id ):
new_signs = []
kept_signs = []
for buffer_number, line_to_diags in iteritems(
buffer_number_to_line_to_diags ):
if not vimsupport.BufferIsVisible( buffer_number ):
continue
for line, diags in iteritems( line_to_diags ):
for diag in diags:
sign = _DiagSignPlacement( next_sign_id,
line,
buffer_number,
_DiagnosticIsError( diag ) )
if sign not in placed_signs:
new_signs += [ sign ]
next_sign_id += 1
else:
# We use .index here because `sign` contains a new id, but
# we need the sign with the old id to unplace it later on.
# We won't be placing the new sign.
kept_signs += [ placed_signs[ placed_signs.index( sign ) ] ]
return new_signs, kept_signs, next_sign_id
def _PlaceNewSigns( kept_signs, new_signs ):
placed_signs = kept_signs[:]
for sign in new_signs:
# Do not set two signs on the same line, it will screw up storing sign
# locations.
if sign in placed_signs:
continue
vimsupport.PlaceSign( sign.id, sign.line, sign.buffer, sign.is_error )
placed_signs.append(sign)
return placed_signs
def _UnplaceObsoleteSigns( kept_signs, placed_signs ):
for sign in placed_signs:
if sign not in kept_signs:
vimsupport.UnplaceSignInBuffer( sign.buffer, sign.id )
def _ConvertDiagListToDict( diag_list ):
buffer_to_line_to_diags = defaultdict( lambda: defaultdict( list ) )
for diag in diag_list:
location = diag[ 'location' ]
buffer_number = vimsupport.GetBufferNumberForFilename(
location[ 'filepath' ] )
line_number = location[ 'line_num' ]
buffer_to_line_to_diags[ buffer_number ][ line_number ].append( diag )
for line_to_diags in itervalues( buffer_to_line_to_diags ):
for diags in itervalues( line_to_diags ):
# We also want errors to be listed before warnings so that errors aren't
# hidden by the warnings; Vim won't place a sign over an existing one.
diags.sort( key = lambda diag: ( diag[ 'location' ][ 'column_num' ],
diag[ 'kind' ] ) )
return buffer_to_line_to_diags
def _DiagnosticIsError( diag ):
return diag[ 'kind' ] == 'ERROR'
def _DiagnosticIsWarning( diag ):
return diag[ 'kind' ] == 'WARNING'
def _NormalizeDiagnostic( diag ):
def ClampToOne( value ):
return value if value > 0 else 1
location = diag[ 'location' ]
location[ 'column_num' ] = ClampToOne( location[ 'column_num' ] )
location[ 'line_num' ] = ClampToOne( location[ 'line_num' ] )
return diag
class _DiagSignPlacement(
namedtuple( "_DiagSignPlacement",
[ 'id', 'line', 'buffer', 'is_error' ] ) ):
# We want two signs that have different ids but the same location to compare
# equal. ID doesn't matter.
def __eq__( self, other ):
return ( self.line == other.line and
self.buffer == other.buffer and
self.is_error == other.is_error )
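# Illustrative sketch (added for clarity; not part of the plugin). The
# incremental update above amounts to partitioning signs into "kept", "new"
# and "obsolete" groups; the helper name below is hypothetical.
def _PartitionSignsSketch( previously_placed, wanted ):
  kept = [ s for s in previously_placed if s in wanted ]
  new = [ s for s in wanted if s not in previously_placed ]
  obsolete = [ s for s in previously_placed if s not in wanted ]
  return kept, new, obsolete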
|
from tkinter import Tk,Spinbox
master = Tk()
w = Spinbox(master, from_=0, to=10)
w.pack()
master.mainloop() |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 17 21:49:47 2021
@author: Yerke
"""
#import necessary libraries
import json
import pandas as pd
import re
from tqdm import tqdm
from textblob import TextBlob
import plotly.express as px
from langdetect import detect
# Opening JSON file
f = open('result.json', encoding="utf8")
# returns JSON object as
# a dictionary
data = json.load(f)
messages = data['messages']
# Iterating through the json
# list
listo = []
for i in tqdm(messages):
s = i['text']
if 'SHIB' in s or 'DOGE.' in s:
listo.append(i)
# Closing file
f.close()
# Remove non-English messages,
# using langdetect to keep only messages detected as English
clean_data = []
for i in tqdm(listo):
s = i['text']
if detect(s) == 'en':
clean_data.append(i)
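# Optional hardening sketch (an assumption, not part of the original script):
# langdetect.detect() raises LangDetectException for strings it cannot analyse
# (for example empty or emoji-only messages), so the English filter above could
# use a guarded helper like this one.
from langdetect.lang_detect_exception import LangDetectException
def is_english(text):
    try:
        return detect(text) == 'en'
    except LangDetectException:
        return False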
df = pd.DataFrame.from_dict(clean_data)
# leave only necessary columns
df = df[['date', 'text']]
# create a function to clean the text from emoji
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
# remove emoji from data
df['text'] = df['text'].apply(remove_emoji)
# create a function to get the polarity
def getPolarity(text):
return TextBlob(text).sentiment.polarity
# create a new column
df['Polarity'] = df['text'].apply(getPolarity)
# Changing object type column to datetime
df['date'] = pd.to_datetime(df.date)
# Creating new column with just the date
df['date'] = df['date'].dt.date
# Get the average sentiment (polarity) per day; select the numeric column
# explicitly so the string 'text' column is not passed to mean()
df2 = df.groupby('date')['Polarity'].mean()
# reset index
df2 = df2.reset_index()
fig = px.bar(df2, x=df2['date'], y=df2['Polarity'])
fig.show()
|
import random
import string
from time import time
def gen_device_data():
client_uuid = "".join(random.sample(string.digits * 2, 15))
serial_number = "".join(random.sample(string.digits + "abcdef", 16))
openudid = "".join(random.sample(string.digits * 2, 16))
data = {"time_sync": {"local_time": str(int(time())), "server_time": str(int(time()))},
"magic_tag": "ss_app_log",
"header": {"sdk_version": 1132, "language": "zh",
"user_agent": "okhttp/2.9.0",
"app_name": "aweme", "app_version": "2.9.0", "is_upgrade_user": 0, "region": "CN",
"vendor_id": serial_number, "app_region": "CN",
"channel": "App Store", "mcc_mnc": "46001",
"custom": {"app_region": "CN", "build_number": "29001", "app_language": "zh"},
"resolution": "1125*2436", "aid": "1128", "os": "Android", "tz_offset": 28800,
"access": "WIFI", "openudid": openudid,
"carrier": "%D6%D0%B9%FA%D2%C6%B6%AF", "is_jailbroken": 0, "os_version": "11.4",
"app_language": "zh", "device_model": "OnePlus",
"display_name": "%B6%B6%D2%F4%B6%CC%CA%D3%C6%B5", "mc": "02:00:00:00:00:00",
"package": "com.ss.android.ugc.Aweme", "timezone": 8, "tz_name": "Asia\/Shanghai",
"idfa": client_uuid}, "fingerprint": ""}
return data,(openudid,serial_number,client_uuid) |
# -*- coding: utf-8 -*-
"""
This module contains a class to load the step and terrain files
"""
import os
import sys
import fnmatch
from . import utils
def load_modules(location):
"""
Loads all modules in the `location` folder
"""
location = os.path.expanduser(os.path.expandvars(location))
if not os.path.exists(location):
raise OSError("Location '{0}' to load modules does not exist".format(location))
for p, _, f in os.walk(location):
for filename in fnmatch.filter(f, "*.py"):
load_module(os.path.join(p, filename))
def load_module(path):
"""
Loads a module by the given `path`
:param string path: the path to the module to load
"""
module_name = os.path.splitext(os.path.split(path)[1])[0]
try:
if sys.version_info >= (3, 5):
# importlib.util.module_from_spec is available since Python 3.5;
# the imp module has been deprecated since Python 3.4
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
else:
import imp
parent = os.path.dirname(utils.expandpath(path))
f, pathname, desc = imp.find_module(module_name, [parent])
imp.load_module(module_name, f, pathname, desc)
f.close()
except Exception as e:
raise ImportError("Unable to import module '{0}' from '{1}': {2}".format(
module_name, path, e))
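# Illustrative usage sketch (hypothetical paths; not part of the original module):
#
#     load_modules("~/my_project/steps")      # imports every *.py file found under the folder
#     load_module("/abs/path/to/terrain.py")  # imports a single module by path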
|
"""
#########################################################################
# Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : mask_rcnn_r50.py
# Abstract : Model settings for mask-rcnn-based detector on Total-Text
# Current Version: 1.0.0
# Date : 2020-05-31
#########################################################################
"""
_base_ = "./mask_rcnn_r50_fpn_tt.py"
model = dict(
backbone=dict(depth=101)
)
checkpoint_config = dict(interval=5, filename_tmpl='checkpoint/res101_maskrcnn_tt_epoch_{}.pth')
load_from = "/path/to/mask_rcnn_r101_fpn_1x_20181129-34ad1961.pth"
|
import datastorage
def prompt_for_action():
while True:
print()
print("Action?")
print()
print(" A = add item to inventory")
print(" R = remove item from inventory")
print(" C = report current inventory")
print(" O = report re-order inventory")
print(" Q = quit")
print()
action = input("> ").strip().upper()
if action == "A": return "ADD"
elif action == "R": return "REMOVE"
elif action == "C": return "INVENTORY_REPORT"
elif action == "O": return "REORDER_REPORT"
elif action == "Q": return "QUIT"
else:
print("Unknown Action")
def prompt_for_product():
while True:
print()
print("Select a product:")
print()
n = 1
for code,description,desired_number in datastorage.products():
print(" {}. {} - {}".format(n, code, description))
n += 1
s = input("> ").strip()
if s == "":
return None
try:
n = int(s)
except ValueError:
n = -1
if n < 1 or n > len(datastorage.products()):
print("Invalid option: {}".format(s))
continue
product_code = datastorage.products()[n-1][0]
return product_code
def prompt_for_location():
while True:
print()
print("Select a location:")
print()
n = 1
for code,description in datastorage.locations():
print(" {}. {} - {}".format(n, code, description))
n += 1
s = input("> ").strip()
if s == "":
return None
try:
n = int(s)
except ValueError:
n = -1
if n < 1 or n > len(datastorage.locations()):
print("Invalid option: {}".format(s))
continue
location_code = datastorage.locations()[n-1][0]
return location_code
def show_report(report):
print()
for line in report:
print(line)
print()
def show_error(err_msg):
print()
print(err_msg)
print()
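# Illustrative sketch (hypothetical driver loop; not part of this module). The
# prompts above are meant to be combined with datastorage calls roughly like this:
#
#     while True:
#         action = prompt_for_action()
#         if action == "QUIT":
#             break
#         elif action == "ADD":
#             product = prompt_for_product()
#             location = prompt_for_location()
#             # ... update datastorage and confirm to the user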
|
import numbers
class BackpropNode:
def diveritive(self):
raise Exception("BackpropNode: need to implement diveritive")
def compute(self):
raise Exception("BackpropNode: need to implement compute")
def forward(self):
raise Exception("BackpropNode: need to implement forward")
def backward(self):
raise Exception("BackpropNode: need to implement backward")
class PolynomialNode(BackpropNode):
def __init__(self, coefs):
'''
@coefs :dictionary-like.
mapping degree of polynomial to its coefficients. Degrees
not in the mapping are assigned coefficient 0.
'''
if (not isinstance(coefs, dict)):
raise TypeError("coefs must be a dict")
for degree, coef in coefs.items():
if (not isinstance(degree, int) or not isinstance(coef, numbers.Number)):
raise TypeError("coefs must map int degrees to numeric coefficients")
self._coefs = dict(coefs)
def compute(self):
result = 0
for degree, coef in coefs.items():
|
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from PySide2.QtGui import *
import sys
from ..functions import *
class settingsButton(QPushButton):
def __init__(self,text,icon):
super().__init__()
self.setWindowFlags(Qt.FramelessWindowHint)
self.setObjectName(u"widgetset")
self.texts = text
self.Icon = icon
self.content = QHBoxLayout()
self.content.setContentsMargins(0,0,0,0)
self.content.setSpacing(0)
self.settingsText = QLabel()
self.settingsText.setObjectName(u"textset")
self.settingsText.setStyleSheet("""
QLabel{
max-width: 175px;
min-width: 175px;
padding: 0 0 0 10;
color: #E6F0D1;
font-size: 20px;
text-align: left;
}""")
self.settingsText.setText(self.texts)
self.settingsIcon = QLabel()
self.settingsIcon.setObjectName(u"iconset")
self.settingsIcon.setPixmap(QPixmap(self.Icon).scaled(25,25))
self.settingsIcon.setStyleSheet("""
QLabel{
max-width: 30px;
min-width: 30px;
}""")
self.content.addWidget(self.settingsText)
self.content.addWidget(self.settingsIcon,Qt.AlignCenter,Qt.AlignRight)
self.setStyleSheet("""
QWidget{
max-height: 30px;
min-height: 30px;
}
#widgetset:hover{
background-color: #FFBF1F;
}
#widgetset{
max-width: 200px;
min-width: 200px;
background-color: #779A32;
}""")
self.setLayout(self.content)
class SessionSettingsContainer(QWidget):
def __init__(self,session,database,linklayout,sessionlayout,buttons):
super().__init__()
self.setObjectName(u"layout")
self.setWindowFlags(Qt.FramelessWindowHint)
self.setStyleSheet("""
#layout{
max-width: 200px;
min-width: 200px;
background-color: #181F0A;
}""")
self.session = session
self.database = database
self.linklayout = linklayout
self.sessionlayout = sessionlayout
self.buttons = buttons
self.container = QVBoxLayout()
self.container.addSpacing(0)
self.container.setContentsMargins(0,0,0,0)
self.container.setAlignment(Qt.AlignTop)
self.delete = settingsButton("Delete Session",DELETE_URL)
self.delete.clicked.connect(self.DeleteSession)
self.container.addWidget(self.delete)
self.setLayout(self.container)
def DeleteSession(self):
delete_session(self.session,self.database) #UPDATE DATABASE
for i in range(self.linklayout.count()): #UPDATE LINKLAYOUT
self.linklayout.itemAt(i).widget().deleteLater()
for i in range(self.sessionlayout.count()): #DELETE SESSION BUTTONS
if self.sessionlayout.itemAt(i).widget().text() == self.session:
self.sessionlayout.itemAt(i).widget().deleteLater()
self.buttons.remove(self.sessionlayout.itemAt(i).widget())
class LinkSettingsContainer(QWidget): #POPUP WIDGET FOR LINK MODIFICATIONS
def __init__(self,titleEdit,url,session,database,rootlayout):
super().__init__()
self.setObjectName(u"layout")
self.setWindowFlags(Qt.FramelessWindowHint)
self.setStyleSheet("""
#layout{
max-width: 200px;
min-width: 200px;
background-color: #181F0A;
}""")
self.url = url
self.session = session
self.title = titleEdit
self.database = database
self.rootlayout = rootlayout
self.container = QVBoxLayout()
self.container.addSpacing(0)
self.container.setContentsMargins(0,0,0,0)
self.container.setAlignment(Qt.AlignTop)
self.restore = settingsButton("Restore title",RESTORE_TITLE)
self.restore.clicked.connect(self.RestoreTitle)
self.delete = settingsButton("Delete link",DELETE_URL)
self.delete.clicked.connect(self.DeleteLink)
self.container.addWidget(self.restore)
self.container.addWidget(self.delete)
self.setLayout(self.container)
def RestoreTitle(self):
original_title = get_url_title(self.url)
modify_title(self.title.text(),original_title,self.session,self.database)
self.title.setText(original_title)
def DeleteLink(self):
delete_link(self.url,self.session,self.database)
for i in range(self.rootlayout.count()):
if self.rootlayout.itemAt(i).widget().linktext == self.url:
self.rootlayout.itemAt(i).widget().deleteLater()
self.deleteLater()
if __name__ == "__main__":
class valami(QMainWindow):
def __init__(self):
super().__init__()
#layout = LinkSettingsContainer("","")
#self.setCentralWidget(layout)
app = QApplication(sys.argv)
#window = valami()
#window = LinkSettingsContainer("","")
#window.show()
#app.exec_() |
from .suite import Suite
|
from .validator import Validator
from ..logger import getLogger
logger = getLogger(__name__)
class StringValidator(Validator):
def __init__(self, name):
super().__init__(name)
def validate(self, value):
if value is None or (type(value) == str and len(value) == 0):
return ""
return str(value)
|
import numpy as np
import pandas as pd
import os
from classes.handlers.ParamsHandler import ParamsHandler
class PIDExtractor:
def __init__(self, mode: str, extraction_method: str, pid_file_paths: dict, dataset_name: str):
self.__dataset_name = dataset_name
supp_datasets = ["canary", "dementia_bank"]
if self.__dataset_name not in supp_datasets:
raise ValueError("Dataset '{}' is not supported! Supported datasets are: {}"
.format(self.__dataset_name, supp_datasets))
self.__mode = mode
self.__extraction_method = extraction_method
self.__pid_file_paths = pid_file_paths
self.__superset_ids = []
@staticmethod
def __fetch_eye_pids(task: str, data_path: str, filename: str, plog_thr: float) -> tuple:
table_eye = pd.read_csv(os.path.join(data_path, filename[0]))
pids_eye = table_eye.loc[table_eye['task'] == task]['interview']
table_plog = pd.read_csv(os.path.join(data_path, filename[1]))
pids_plog = table_plog[table_plog['Eye-Tracking Calibration?'] >= plog_thr]['interview']
return np.intersect1d(pids_eye, pids_plog)
def __fetch_speech_pids(self, task: str, data_path: str, filename: str) -> tuple:
if self.__dataset_name == 'canary':
task = {'CookieTheft': 1, 'Reading': 2, 'Memory': 3}[task]
table_audio = pd.read_csv(os.path.join(data_path, filename[0]))
pids_audio = table_audio.loc[table_audio['task'] == task]['interview']
table_text = pd.read_csv(os.path.join(data_path, filename[1]))
pids_text = table_text[table_text['task'] == task]['interview']
return np.intersect1d(pids_audio, pids_text)
if self.__dataset_name == 'dementia_bank':
dbank_pids_all = [pd.read_csv(os.path.join(data_path, i))['interview'] for i in filename]
while len(dbank_pids_all) > 1:
dbank_pids_all = [np.intersect1d(dbank_pids_all[i], dbank_pids_all[i + 1])
for i in range(len(dbank_pids_all) - 1)]
return dbank_pids_all[0]
@staticmethod
def __fetch_moca_pids(data_path: str, filename: str) -> tuple:
return pd.read_csv(os.path.join(data_path, filename[0]))['interview']
@staticmethod
def __fetch_multimodal_pids(task: str, data_path: str, filename: str) -> tuple:
table_multimodal = pd.read_csv(os.path.join(data_path, filename[0]))
return table_multimodal.loc[table_multimodal['task'] == task]['interview']
def __fetch_modality_pids(self, task: str, modality: str, filename: str, data_path: str, plog_thr: float) -> list:
pids_mod = []
# For eye modality, PIDs from eye_fixation and participant_log are intersected
if modality == 'eye':
pids_mod.append(self.__fetch_eye_pids(task, data_path, filename, plog_thr))
# For the speech modality, the files being accessed (text and audio) label the tasks as 1, 2, 3
# in the task column; the PIDs from text and audio are then intersected
if modality == 'speech':
pids_mod.append(self.__fetch_speech_pids(task, data_path, filename))
# PIDs from MOCA are used
if modality == 'moca':
pids_mod.append(self.__fetch_moca_pids(data_path, filename))
# PIDs from mm_overall are used
if modality == 'multimodal':
pids_mod.append(self.__fetch_multimodal_pids(task, data_path, filename))
return pids_mod
def __combine_pids(self, pids_mod: list) -> list:
while len(pids_mod) > 1:
# for single-task mode, we require an intersection of all PIDs across modalities
if self.__mode == 'single_tasks':
pids_mod = [np.intersect1d(pids_mod[i], pids_mod[i + 1]) for i in range(len(pids_mod) - 1)]
# for fusion and ensemble modes, we require a union of the PIDs taken from each modality
elif self.__mode == 'fusion' or self.__mode == 'ensemble':
pids_mod = [np.union1d(pids_mod[i], pids_mod[i + 1]) for i in range(len(pids_mod) - 1)]
return pids_mod
def __fetch_task_pids(self, task: str) -> list:
"""
:param task: the task for which PIDs are required
:return: list of PIDs that satisfy the task and modality constraints
"""
modalities = ParamsHandler.load_parameters(os.path.join(self.__dataset_name, task))['modalities']
database = ParamsHandler.load_parameters(os.path.join(self.__dataset_name, 'database'))
modality_wise_datasets = database['modality_wise_datasets']
data_path = os.path.join('datasets', self.__dataset_name)
plog_threshold = ParamsHandler.load_parameters('settings')['eye_tracking_calibration_flag']
pids_mod = []
for modality in modalities:
filename = modality_wise_datasets[modality]
pids_mod += (self.__fetch_modality_pids(task, modality, filename, data_path, plog_threshold))
pids_mod = self.__combine_pids(pids_mod)
# Intersecting the final list of PIDs with diagnosis, to get the PIDs with valid diagnosis
pids_diag = pd.read_csv(os.path.join(data_path, 'diagnosis.csv'))['interview']
pids = list(np.intersect1d(pids_mod[0], pids_diag))
return pids
def extract_pids(self, tasks: list):
superset_ids = []
for task in tasks:
# Getting pids and saving them at pid_file_path for each task
pids = self.__fetch_task_pids(task=task)
pd.DataFrame(pids, columns=['interview']).to_csv(self.__pid_file_paths[task])
superset_ids.append(pids)
if self.__mode == 'fusion' or self.__mode == 'ensemble':
# Getting superset_ids for fusion and ensemble modes: the union of the PID lists from all tasks
while (len(superset_ids)) > 1:
superset_ids = [np.union1d(superset_ids[i], superset_ids[i + 1]) for i in range(len(superset_ids) - 1)]
self.__superset_ids = superset_ids[0]
file_name = "{}_{}_super_pids.csv".format(self.__mode, self.__extraction_method)
super_pids_file_path = os.path.join('assets', self.__dataset_name, 'PIDs', file_name)
print("\n\t\t --> * Created superset of PIDs for mode '{}' * ".format(self.__mode))
pd.DataFrame(self.__superset_ids, columns=['interview']).to_csv(super_pids_file_path)
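# Illustrative sketch (not part of the class): __combine_pids reduces a list of
# ID arrays pairwise, using np.intersect1d for 'single_tasks' and np.union1d for
# 'fusion'/'ensemble'. Running this block assumes the project's packages
# (e.g. classes.handlers) are importable so the imports at the top succeed.
if __name__ == '__main__':
    a = np.array(['p1', 'p2', 'p3'])
    b = np.array(['p2', 'p3', 'p4'])
    print(np.intersect1d(a, b))  # ['p2' 'p3']  -> PIDs present in every modality
    print(np.union1d(a, b))      # ['p1' 'p2' 'p3' 'p4']  -> PIDs present in any modality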
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# STIX TAGS
TAG_STIX_PACKAGE = "{http://stix.mitre.org/stix-1}STIX_Package"
|
# Copyright 2021 portfolio-robustfpm-framework Authors
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
r""" This submodule implements :class:`MultivalMap` as an abstract interface for multivalued mappings and some specific mappings.
For any sets :math:`X, Y`, a multivalued mapping is a mapping :math:`\Gamma: X \mapsto Y` such that
.. math :: \forall x \in X \;\; \Gamma(x) \subseteq Y
This module also provides two simple instances of :class:`IMultivalMap`: `NoConstraints` and `LongOnlyConstraints` for
ease of use. They are defined as:
>>> NoConstraints = IdenticalMap(RealSpaceHandler())
>>> LongOnlyConstraints = IdenticalMap(NonNegativeSpaceHandler())
See Also
--------
:class:`robustfpm.pricing.set_handler.RealSpaceHandler`
:class:`robustfpm.pricing.set_handler.NonNegativeSpaceHandler`
"""
import numpy as np
from abc import ABC, abstractmethod
from .set_handler import ISetHandler, NonNegativeSimplex, RealSpaceHandler, NonNegativeSpaceHandler
from .lattice import Lattice
__all__ = ['IMultivalMap',
'IdenticalMap',
'PriceDynamics',
'PIDynamics',
'ConstantDynamics',
'MDAFDynamics',
'SimplexConstraints',
'NoConstraints',
'LongOnlyConstraints'
]
class IMultivalMap(ABC):
""" An abstract interface class for multivalued mappings"""
@abstractmethod
def __call__(self, x, t):
r""" :code:`A.__call__(x,t)` is equivalent to :code:`A(x,t)`
Parameters
----------
x: np.ndarray
A point or a set of points from :math:`\mathbb{R}^{n}`
t: int
Value of time
Returns
-------
ISetHandler
Value of a mapping at point(s) `x`, `t`
"""
raise NotImplementedError('The method must be defined in a subclass')
@property
@abstractmethod
def dim(self):
r"""
int: The dimension of the image, or :code:`np.inf` if the return value can be of any dimension (e.g., a :class:`robustfpm.pricing.set_handler.RealSpaceHandler`)
For a multivalued map :math:`\Gamma: \mathbb{R}^{n} \times \{0, 1, \dots\} \mapsto \mathbb{R}^{n}`, returns :math:`n`.
"""
raise NotImplementedError('The method must be defined in a subclass')
class IdenticalMap(IMultivalMap):
""" Identical mapping: always returns the same set (its support)
Parameters
----------
support: ISetHandler
The underlying set to return
"""
def __init__(self, support: ISetHandler):
self.support = support
def __call__(self, x, t):
return self.support
@property
def dim(self):
return self.support.dim
class SimplexConstraints(IMultivalMap):
r""" A simplex mapping that corresponds to the following trading constraints:
- No short positions;
- Total value of risky assets at any given time can not exceed given limit `r`
In mathematical terms, given a vector of discounted prices of risky assets :math:`x = (x_1, \dots, x_n)`
.. math:: D_t(x) = \{h = (h_1, \dots, h_n):\; \sum\limits_{i=1}^{n}h_i x_i \leqslant r, \; h_i \geqslant 0, i = 1,\dots, n\}
Parameters
----------
r : np.float64
Limit on total value of risky assets (in discounted prices, i.e., in terms of units of riskless asset)
Notes
-----
We denote these constraints as *simplex* since :math:`D_t(x)` is, in fact, a special type of N-simplex.
See Also
--------
:class:`robustfpm.pricing.set_handler.NonNegativeSimplex`
"""
def __init__(self, r: np.float64):
if r < 0:
raise ValueError('Bound must be non-negative!')
self.r = r
def __call__(self, x, t):
return NonNegativeSimplex(self.r / x, dtype=x.dtype)
@property
def dim(self):
return np.inf
class PriceDynamics(IMultivalMap):
"""An abstract base class for different price dynamics.
Differs from :class:`IMultivalMap` in that it also has to have:
- type
- time horizon
- method for calculating Lipschitz constant
"""
_allowed_types = {'mult', 'add'}
@abstractmethod
def __call__(self, x, t=1):
r""" :code:`A.__call__(x,t)` is equivalent to :code:`A(x,t)`
Parameters
----------
x: np.ndarray, size (`t`, n)
A set of previous prices, each price — a point from :math:`\mathbb{R}^{n}`
t: int
Value of time
Returns
-------
ISetHandler
For additive dynamics, returns increment :math:`K_t(x)`.
For multiplicative dynamics, returns multipliers :math:`C_t(x)`.
"""
raise NotImplementedError('The method must be defined in a subclass')
@property
@abstractmethod
def type(self):
"""
{'mult', 'add'}: Type of price dynamics, either multiplicative or additive"""
raise NotImplementedError('The method must be defined in a subclass')
@property
@abstractmethod
def t_max(self):
"""int: Time horizon"""
raise NotImplementedError('The method must be defined in a subclass')
@abstractmethod
def get_lipschitz(self, t: int):
""" Get value of a Lipshitz constant for underlying multivalued mapping at time `t`.
Parameters
----------
t: int
Time horizon
Returns
-------
np.float64
Lipschitz constant of :math:`K_t(x)`
"""
raise NotImplementedError('The method must be defined in a subclass')
class PIDynamics(PriceDynamics):
""" Abstract class for price-independent price dynamics.
That is, the increments (for additive) or multipliers (for multiplicative) are independent of `x`.
"""
def __call__(self, x=0, t=1):
return self._call(t)
@abstractmethod
def _call(self, t):
raise NotImplementedError('The method must be defined in a subclass')
class ConstantDynamics(PIDynamics):
""" Time-independent, price-independent price dynamics.
It is a price dynamics where increments (for additive) or multipliers (for multiplicative)
depend neither on previous prices nor on time.
Parameters
----------
support: ISetHandler
Set of multipliers (or increments, if model is additive)
type: {'mult', 'add'}
Dynamics type: multiplicative or additive
Notes
-----
This class, essentially, functions as IdenticalMap with additional properties `type` and `t_max`.
"""
def __init__(self, support: ISetHandler, type='mult'):
self._support = support
if type not in self._allowed_types:
raise TypeError('Wrong dynamics type!')
self._type = type
def _call(self, t):
return self._support
@property
def dim(self):
return self._support.dim
@property
def type(self):
return self._type
@property
def t_max(self):
""":code:`np.inf` (since the model is time-independent)"""
return np.inf
def get_lipschitz(self, t: int):
# TODO: implement this method
pass
class MDAFDynamics(PriceDynamics):
r"""Multiplicative dynamics in additive form (time-independent).
Given as its support a set of `price-independent` multipliers :math:`K`, it returns (when called), a set of `increments` :math:`\{y = k\cdot x - x:\; k \in K\}`.
Here, :math:`k \cdot x` denotes `element-wise` multiplication (Hadamard product) of :math:`k` and :math:`x`.
"""
def __init__(self, support: ISetHandler):
self._support = support
def __call__(self, x, t=1):
return self._support.multiply(x).add(-x)
@property
def dim(self):
return self._support.dim
@property
def type(self):
return 'add'
@property
def t_max(self):
return np.inf
def get_lipschitz(self, t: int):
pass
# Two simple constraints for ease of use
NoConstraints = IdenticalMap(RealSpaceHandler())
LongOnlyConstraints = IdenticalMap(NonNegativeSpaceHandler())
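# Illustrative usage sketch (added for clarity; not part of the original module).
# Assumes the robustfpm package is importable so the relative imports above
# resolve (e.g. run via ``python -m``).
if __name__ == '__main__':
    x = np.array([1.0, 4.0])
    # NoConstraints ignores (x, t) and always returns the whole-space handler.
    print(NoConstraints(x, 0))
    # SimplexConstraints scales its bound by the current prices:
    # the returned handler describes {h >= 0, sum_i h_i x_i <= r}.
    print(SimplexConstraints(2.0)(x, 0))
    # ConstantDynamics returns the same support set at every time step.
    print(ConstantDynamics(NonNegativeSpaceHandler(), type='mult')(x, 5))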
|
# encoding: utf-8
from __future__ import unicode_literals
import unittest
import spotify
from spotify import compat
from spotify.playlist import _PlaylistCallbacks
import tests
from tests import mock
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
class PlaylistTest(unittest.TestCase):
def setUp(self):
self.session = tests.create_session_mock()
def test_create_without_uri_or_sp_playlist_fails(self, lib_mock):
with self.assertRaises(AssertionError):
spotify.Playlist(self.session)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri(self, link_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
link_instance_mock = link_mock.return_value
link_instance_mock._as_sp_playlist.return_value = sp_playlist
uri = 'spotify:playlist:foo'
result = spotify.Playlist(self.session, uri=uri)
link_mock.assert_called_once_with(self.session, uri)
link_instance_mock._as_sp_playlist.assert_called_once_with()
self.assertEqual(link_instance_mock.as_playlist.call_count, 0)
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 0)
self.assertEqual(result._sp_playlist, sp_playlist)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri_is_cached(self, link_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
link_instance_mock = link_mock.return_value
link_instance_mock._as_sp_playlist.return_value = sp_playlist
uri = 'spotify:playlist:foo'
result = spotify.Playlist(self.session, uri=uri)
self.assertEqual(self.session._cache[sp_playlist], result)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri_fail_raises_error(self, link_mock, lib_mock):
link_instance_mock = link_mock.return_value
link_instance_mock._as_sp_playlist.return_value = None
uri = 'spotify:playlist:foo'
with self.assertRaises(spotify.Error):
spotify.Playlist(self.session, uri=uri)
def test_life_cycle(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
sp_playlist = playlist._sp_playlist
lib_mock.sp_playlist_add_ref.assert_called_with(sp_playlist)
# Callbacks are only added when someone registers a Python event
# handler on the playlist:
lib_mock.sp_playlist_add_callbacks.assert_not_called()
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
lib_mock.sp_playlist_add_callbacks.assert_called_with(
sp_playlist, mock.ANY, mock.ANY)
playlist = None # noqa
tests.gc_collect()
# Callbacks are removed when the playlist is GC-ed:
lib_mock.sp_playlist_remove_callbacks.assert_called_with(
sp_playlist, mock.ANY, mock.ANY)
# FIXME Won't be called because lib_mock has references to the
# sp_playlist object, and it thus won't be GC-ed.
# lib_mock.sp_playlist_release.assert_called_with(sp_playlist)
def test_cached_playlist(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
result1 = spotify.Playlist._cached(self.session, sp_playlist)
result2 = spotify.Playlist._cached(self.session, sp_playlist)
self.assertIsInstance(result1, spotify.Playlist)
self.assertIs(result1, result2)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr(self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
link_instance_mock = link_mock.return_value
link_instance_mock.uri = 'foo'
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = repr(playlist)
self.assertEqual(result, 'Playlist(%r)' % 'foo')
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr_if_unloaded(self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = repr(playlist)
self.assertEqual(result, 'Playlist(<not loaded>)')
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr_if_link_creation_fails(self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
link_mock.side_effect = spotify.Error('error message')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = repr(playlist)
self.assertEqual(result, 'Playlist(<error: error message>)')
def test_eq(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist1 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist2 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertTrue(playlist1 == playlist2)
self.assertFalse(playlist1 == 'foo')
def test_ne(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist1 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist2 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertFalse(playlist1 != playlist2)
def test_hash(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist1 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist2 = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertEqual(hash(playlist1), hash(playlist2))
def test_is_loaded(self, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.is_loaded
lib_mock.sp_playlist_is_loaded.assert_called_once_with(sp_playlist)
self.assertTrue(result)
@mock.patch('spotify.utils.load')
def test_load(self, load_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.load(10)
load_mock.assert_called_with(self.session, playlist, timeout=10)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_tracks(self, track_lib_mock, lib_mock):
sp_track = spotify.ffi.cast('sp_track *', 43)
lib_mock.sp_playlist_num_tracks.return_value = 1
lib_mock.sp_playlist_track.return_value = sp_track
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 1)
result = playlist.tracks
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 2)
self.assertEqual(len(result), 1)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
item = result[0]
self.assertIsInstance(item, spotify.Track)
self.assertEqual(item._sp_track, sp_track)
self.assertEqual(lib_mock.sp_playlist_track.call_count, 1)
lib_mock.sp_playlist_track.assert_called_with(sp_playlist, 0)
track_lib_mock.sp_track_add_ref.assert_called_with(sp_track)
def test_tracks_if_no_tracks(self, lib_mock):
lib_mock.sp_playlist_num_tracks.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks
self.assertEqual(len(result), 0)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
self.assertEqual(lib_mock.sp_playlist_track.call_count, 0)
def test_tracks_if_unloaded(self, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks
lib_mock.sp_playlist_is_loaded.assert_called_with(sp_playlist)
self.assertEqual(len(result), 0)
def test_tracks_is_a_mutable_sequence(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
self.assertIsInstance(playlist.tracks, compat.MutableSequence)
def test_tracks_setitem(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
playlist.remove_tracks = mock.Mock()
playlist.add_tracks = mock.Mock()
tracks[0] = mock.sentinel.track
playlist.add_tracks.assert_called_with(mock.sentinel.track, index=0)
playlist.remove_tracks.assert_called_with(1)
def test_tracks_setitem_with_slice(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
playlist.remove_tracks = mock.Mock()
playlist.add_tracks = mock.Mock()
tracks[0:2] = [mock.sentinel.track1, mock.sentinel.track2]
playlist.add_tracks.assert_has_calls([
mock.call(mock.sentinel.track1, index=0),
mock.call(mock.sentinel.track2, index=1),
], any_order=False)
playlist.remove_tracks.assert_has_calls([
mock.call(3),
mock.call(2),
], any_order=False)
def test_tracks_setitem_with_slice_and_noniterable_value_fails(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
with self.assertRaises(TypeError):
tracks[0:2] = mock.sentinel.track
def test_tracks_setitem_raises_index_error_on_negative_index(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
with self.assertRaises(IndexError):
tracks[-1] = None
def test_tracks_setitem_raises_index_error_on_too_high_index(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(IndexError):
tracks[1] = None
def test_tracks_setitem_raises_type_error_on_non_integral_index(
self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(TypeError):
tracks['abc'] = None
def test_tracks_delitem(self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=4)
del tracks[3]
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, [3], 1)
def test_tracks_delitem_with_slice(self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=3)
del tracks[0:2]
# Delete items in reverse order, so the indexes don't change
lib_mock.sp_playlist_remove_tracks.assert_has_calls([
mock.call(sp_playlist, [1], 1),
mock.call(sp_playlist, [0], 1),
], any_order=False)
def test_tracks_delitem_raises_index_error_on_negative_index(
self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(IndexError):
del tracks[-1]
def test_tracks_delitem_raises_index_error_on_too_high_index(
self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(IndexError):
del tracks[1]
def test_tracks_delitem_raises_type_error_on_non_integral_index(
self, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=1)
with self.assertRaises(TypeError):
del tracks['abc']
def test_tracks_insert(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
tracks = playlist.tracks
tracks.__len__ = mock.Mock(return_value=5)
playlist.add_tracks = mock.Mock()
tracks.insert(3, mock.sentinel.track)
playlist.add_tracks.assert_called_with(
mock.sentinel.track, index=3)
@mock.patch('spotify.playlist_track.lib', spec=spotify.lib)
def test_tracks_with_metadata(self, playlist_track_lib_mock, lib_mock):
lib_mock.sp_playlist_num_tracks.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
# Created a Playlist with a ref to sp_playlist
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 1)
self.assertEqual(
playlist_track_lib_mock.sp_playlist_add_ref.call_count, 0)
result = playlist.tracks_with_metadata
# Created a Sequence with a ref to sp_playlist
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 2)
self.assertEqual(
playlist_track_lib_mock.sp_playlist_add_ref.call_count, 0)
self.assertEqual(len(result), 1)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
item = result[0]
self.assertIsInstance(item, spotify.PlaylistTrack)
# Created a PlaylistTrack with a ref to sp_playlist
self.assertEqual(lib_mock.sp_playlist_add_ref.call_count, 2)
self.assertEqual(
playlist_track_lib_mock.sp_playlist_add_ref.call_count, 1)
def test_tracks_with_metadata_if_no_tracks(self, lib_mock):
lib_mock.sp_playlist_num_tracks.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks_with_metadata
self.assertEqual(len(result), 0)
lib_mock.sp_playlist_num_tracks.assert_called_with(sp_playlist)
self.assertEqual(lib_mock.sp_playlist_track.call_count, 0)
def test_tracks_with_metadata_if_unloaded(self, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.tracks_with_metadata
lib_mock.sp_playlist_is_loaded.assert_called_with(sp_playlist)
self.assertEqual(len(result), 0)
def test_name(self, lib_mock):
lib_mock.sp_playlist_name.return_value = spotify.ffi.new(
'char[]', b'Foo Bar Baz')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.name
lib_mock.sp_playlist_name.assert_called_once_with(sp_playlist)
self.assertEqual(result, 'Foo Bar Baz')
def test_name_is_none_if_unloaded(self, lib_mock):
lib_mock.sp_playlist_name.return_value = spotify.ffi.new('char[]', b'')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.name
lib_mock.sp_playlist_name.assert_called_once_with(sp_playlist)
self.assertIsNone(result)
def test_rename(self, lib_mock):
lib_mock.sp_playlist_rename.return_value = int(spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.rename('Quux')
lib_mock.sp_playlist_rename.assert_called_with(sp_playlist, mock.ANY)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_playlist_rename.call_args[0][1]),
b'Quux')
def test_rename_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_rename.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.rename('Quux')
def test_name_setter(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.rename = mock.Mock()
playlist.name = 'Quux'
playlist.rename.assert_called_with('Quux')
@mock.patch('spotify.user.lib', spec=spotify.lib)
def test_owner(self, user_lib_mock, lib_mock):
sp_user = spotify.ffi.cast('sp_user *', 43)
lib_mock.sp_playlist_owner.return_value = sp_user
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.owner
lib_mock.sp_playlist_owner.assert_called_with(sp_playlist)
self.assertIsInstance(result, spotify.User)
self.assertEqual(result._sp_user, sp_user)
user_lib_mock.sp_user_add_ref.assert_called_with(sp_user)
def test_is_collaborative(self, lib_mock):
lib_mock.sp_playlist_is_collaborative.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.collaborative
lib_mock.sp_playlist_is_collaborative.assert_called_with(sp_playlist)
self.assertTrue(result)
def test_set_collaborative(self, lib_mock):
lib_mock.sp_playlist_set_collaborative.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.collaborative = False
lib_mock.sp_playlist_set_collaborative.assert_called_with(
sp_playlist, 0)
def test_set_collaborative_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_collaborative.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.collaborative = False
def test_set_autolink_tracks(self, lib_mock):
lib_mock.sp_playlist_set_autolink_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.set_autolink_tracks(True)
lib_mock.sp_playlist_set_autolink_tracks.assert_called_with(
sp_playlist, 1)
def test_set_autolink_tracks_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_autolink_tracks.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.set_autolink_tracks(True)
def test_description(self, lib_mock):
lib_mock.sp_playlist_get_description.return_value = spotify.ffi.new(
'char[]', b'Lorem ipsum')
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.description
lib_mock.sp_playlist_get_description.assert_called_with(sp_playlist)
self.assertEqual(result, 'Lorem ipsum')
def test_description_is_none_if_unset(self, lib_mock):
lib_mock.sp_playlist_get_description.return_value = spotify.ffi.NULL
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.description
lib_mock.sp_playlist_get_description.assert_called_with(sp_playlist)
self.assertIsNone(result)
@mock.patch('spotify.Image', spec=spotify.Image)
def test_image(self, image_mock, lib_mock):
image_id = b'image-id'
def func(sp_playlist, sp_image_id):
buf = spotify.ffi.buffer(sp_image_id)
buf[:len(image_id)] = image_id
return 1
lib_mock.sp_playlist_get_image.side_effect = func
sp_image = spotify.ffi.cast('sp_image *', 43)
lib_mock.sp_image_create.return_value = sp_image
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
image_mock.return_value = mock.sentinel.image
callback = mock.Mock()
result = playlist.image(callback=callback)
self.assertIs(result, mock.sentinel.image)
lib_mock.sp_playlist_get_image.assert_called_with(
sp_playlist, mock.ANY)
lib_mock.sp_image_create.assert_called_with(
self.session._sp_session, mock.ANY)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_image_create.call_args[0][1]),
b'image-id')
# Since we *created* the sp_image, we already have a refcount of 1 and
# shouldn't increase the refcount when wrapping this sp_image in an
# Image object
image_mock.assert_called_with(
self.session, sp_image=sp_image, add_ref=False, callback=callback)
def test_image_is_none_if_no_image(self, lib_mock):
lib_mock.sp_playlist_get_image.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.image()
lib_mock.sp_playlist_get_image.assert_called_with(
sp_playlist, mock.ANY)
self.assertIsNone(result)
def test_has_pending_changes(self, lib_mock):
lib_mock.sp_playlist_has_pending_changes.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.has_pending_changes
lib_mock.sp_playlist_has_pending_changes.assert_called_with(
sp_playlist)
self.assertTrue(result)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_add_tracks(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.OK)
sp_track1 = spotify.ffi.new('int * ')
track1 = spotify.Track(self.session, sp_track=sp_track1)
sp_track2 = spotify.ffi.new('int * ')
track2 = spotify.Track(self.session, sp_track=sp_track2)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.add_tracks([track1, track2], index=4)
lib_mock.sp_playlist_add_tracks.assert_called_with(
sp_playlist, [sp_track1, sp_track2], 2, 4,
self.session._sp_session)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_add_tracks_without_index(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.OK)
lib_mock.sp_playlist_num_tracks.return_value = 10
sp_track1 = spotify.ffi.new('int * ')
track1 = spotify.Track(self.session, sp_track=sp_track1)
sp_track2 = spotify.ffi.new('int * ')
track2 = spotify.Track(self.session, sp_track=sp_track2)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.add_tracks([track1, track2])
lib_mock.sp_playlist_add_tracks.assert_called_with(
sp_playlist, [sp_track1, sp_track2], 2, 10,
self.session._sp_session)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_add_tracks_with_a_single_track(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.OK)
sp_track = spotify.ffi.new('int * ')
track = spotify.Track(self.session, sp_track=sp_track)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.add_tracks(track, index=7)
lib_mock.sp_playlist_add_tracks.assert_called_with(
sp_playlist, [sp_track], 1, 7, self.session._sp_session)
def test_add_tracks_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_add_tracks.return_value = int(
spotify.ErrorType.PERMISSION_DENIED)
lib_mock.sp_playlist_num_tracks.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.add_tracks([])
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index1 = 13
index2 = 17
playlist.remove_tracks([index1, index2])
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, mock.ANY, 2)
self.assertIn(
index1, lib_mock.sp_playlist_remove_tracks.call_args[0][1])
self.assertIn(
index2, lib_mock.sp_playlist_remove_tracks.call_args[0][1])
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks_with_a_single_track(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index = 17
playlist.remove_tracks(index)
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, [index], 1)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks_with_duplicates(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index = 17
playlist.remove_tracks([index, index])
lib_mock.sp_playlist_remove_tracks.assert_called_with(
sp_playlist, [index], 1)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_remove_tracks_fails_if_error(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_remove_tracks.return_value = int(
spotify.ErrorType.PERMISSION_DENIED)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
index = 17
with self.assertRaises(spotify.Error):
playlist.remove_tracks(index)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position1 = 13
position2 = 17
playlist.reorder_tracks([position1, position2], 17)
lib_mock.sp_playlist_reorder_tracks.assert_called_with(
sp_playlist, mock.ANY, 2, 17)
self.assertIn(
position1, lib_mock.sp_playlist_reorder_tracks.call_args[0][1])
self.assertIn(
position2, lib_mock.sp_playlist_reorder_tracks.call_args[0][1])
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks_with_a_single_track(
self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position = 13
playlist.reorder_tracks(position, 17)
lib_mock.sp_playlist_reorder_tracks.assert_called_with(
sp_playlist, [position], 1, 17)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks_with_duplicates(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position = 13
playlist.reorder_tracks([position, position], 17)
lib_mock.sp_playlist_reorder_tracks.assert_called_with(
sp_playlist, [position], 1, 17)
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_reorder_tracks_fails_if_error(self, track_lib_mock, lib_mock):
lib_mock.sp_playlist_reorder_tracks.return_value = int(
spotify.ErrorType.PERMISSION_DENIED)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
position = 13
with self.assertRaises(spotify.Error):
playlist.reorder_tracks(position, 17)
def test_num_subscribers(self, lib_mock):
lib_mock.sp_playlist_num_subscribers.return_value = 7
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.num_subscribers
lib_mock.sp_playlist_num_subscribers.assert_called_with(sp_playlist)
self.assertEqual(result, 7)
def test_subscribers(self, lib_mock):
sp_subscribers = spotify.ffi.new('sp_subscribers *')
sp_subscribers.count = 1
user_alice = spotify.ffi.new('char[]', b'alice')
sp_subscribers.subscribers = [user_alice]
lib_mock.sp_playlist_subscribers.return_value = sp_subscribers
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.subscribers
lib_mock.sp_playlist_subscribers.assert_called_with(sp_playlist)
tests.gc_collect()
lib_mock.sp_playlist_subscribers_free.assert_called_with(
sp_subscribers)
self.assertEqual(result, ['alice'])
def test_update_subscribers(self, lib_mock):
lib_mock.sp_playlist_update_subscribers.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.update_subscribers()
lib_mock.sp_playlist_update_subscribers.assert_called_with(
self.session._sp_session, sp_playlist)
def test_update_subscribers_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_update_subscribers.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.update_subscribers()
def test_is_in_ram(self, lib_mock):
lib_mock.sp_playlist_is_in_ram.return_value = 1
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.is_in_ram
lib_mock.sp_playlist_is_in_ram.assert_called_with(
self.session._sp_session, sp_playlist)
self.assertTrue(result)
def test_set_in_ram(self, lib_mock):
lib_mock.sp_playlist_set_in_ram.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.set_in_ram(False)
lib_mock.sp_playlist_set_in_ram.assert_called_with(
self.session._sp_session, sp_playlist, 0)
def test_set_in_ram_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_in_ram.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.set_in_ram(False)
def test_set_offline_mode(self, lib_mock):
lib_mock.sp_playlist_set_offline_mode.return_value = int(
spotify.ErrorType.OK)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.set_offline_mode(False)
lib_mock.sp_playlist_set_offline_mode.assert_called_with(
self.session._sp_session, sp_playlist, 0)
def test_set_offline_mode_fails_if_error(self, lib_mock):
lib_mock.sp_playlist_set_offline_mode.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.set_offline_mode(False)
def test_offline_status(self, lib_mock):
lib_mock.sp_playlist_get_offline_status.return_value = 2
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.offline_status
lib_mock.sp_playlist_get_offline_status.assert_called_with(
self.session._sp_session, sp_playlist)
self.assertIs(result, spotify.PlaylistOfflineStatus.DOWNLOADING)
def test_offline_download_completed(self, lib_mock):
lib_mock.sp_playlist_get_offline_status.return_value = 2
lib_mock.sp_playlist_get_offline_download_completed.return_value = 73
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.offline_download_completed
lib_mock.sp_playlist_get_offline_download_completed.assert_called_with(
self.session._sp_session, sp_playlist)
self.assertEqual(result, 73)
def test_offline_download_completed_when_not_downloading(self, lib_mock):
lib_mock.sp_playlist_get_offline_status.return_value = 0
lib_mock.sp_playlist_get_offline_download_completed.return_value = 0
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
result = playlist.offline_download_completed
self.assertEqual(
lib_mock.sp_playlist_get_offline_download_completed.call_count, 0)
self.assertIsNone(result)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_creates_link_to_playlist(self, link_mock, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
sp_link = spotify.ffi.cast('sp_link *', 43)
lib_mock.sp_link_create_from_playlist.return_value = sp_link
link_mock.return_value = mock.sentinel.link
result = playlist.link
link_mock.assert_called_once_with(
self.session, sp_link=sp_link, add_ref=False)
self.assertEqual(result, mock.sentinel.link)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_fails_if_playlist_not_loaded(
self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 0
lib_mock.sp_link_create_from_playlist.return_value = spotify.ffi.NULL
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.link
# Condition is checked before link creation is tried
self.assertEqual(lib_mock.sp_link_create_from_playlist.call_count, 0)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_may_fail_if_playlist_has_not_been_in_ram(
self, link_mock, lib_mock):
lib_mock.sp_playlist_is_loaded.return_value = 1
lib_mock.sp_link_create_from_playlist.return_value = spotify.ffi.NULL
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
with self.assertRaises(spotify.Error):
playlist.link
# Condition is checked only if link creation returns NULL
lib_mock.sp_link_create_from_playlist.assert_called_with(sp_playlist)
lib_mock.sp_playlist_is_in_ram.assert_called_with(
self.session._sp_session, sp_playlist)
def test_first_on_call_adds_ref_to_obj_on_session(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
self.assertIn(playlist, self.session._emitters)
def test_last_off_call_removes_ref_to_obj_from_session(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
playlist.off(spotify.PlaylistEvent.TRACKS_ADDED)
self.assertNotIn(playlist, self.session._emitters)
def test_other_off_calls_keeps_ref_to_obj_on_session(self, lib_mock):
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist(self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, lambda *args: None)
playlist.on(spotify.PlaylistEvent.TRACKS_MOVED, lambda *args: None)
playlist.off(spotify.PlaylistEvent.TRACKS_ADDED)
self.assertIn(playlist, self.session._emitters)
playlist.off(spotify.PlaylistEvent.TRACKS_MOVED)
self.assertNotIn(playlist, self.session._emitters)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
class PlaylistCallbacksTest(unittest.TestCase):
def setUp(self):
self.session = tests.create_session_mock()
spotify._session_instance = self.session
def tearDown(self):
spotify._session_instance = None
@mock.patch('spotify.track.lib', spec=spotify.lib)
def test_tracks_added_callback(self, track_lib_mock, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_ADDED, callback)
sp_tracks = [
spotify.ffi.cast('sp_track *', 43),
spotify.ffi.cast('sp_track *', 44),
spotify.ffi.cast('sp_track *', 45),
]
index = 7
_PlaylistCallbacks.tracks_added(
sp_playlist, sp_tracks, len(sp_tracks), index, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY, index)
tracks = callback.call_args[0][1]
self.assertEqual(len(tracks), len(sp_tracks))
self.assertIsInstance(tracks[0], spotify.Track)
self.assertEqual(tracks[0]._sp_track, sp_tracks[0])
track_lib_mock.sp_track_add_ref.assert_has_calls([
mock.call(sp_tracks[0]),
mock.call(sp_tracks[1]),
mock.call(sp_tracks[2]),
])
def test_tracks_removed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_REMOVED, callback)
track_numbers = [43, 44, 45]
_PlaylistCallbacks.tracks_removed(
sp_playlist, track_numbers, len(track_numbers), spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY)
tracks = callback.call_args[0][1]
self.assertEqual(len(tracks), len(track_numbers))
self.assertEqual(tracks[0], 43)
def test_tracks_moved_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACKS_MOVED, callback)
track_numbers = [43, 44, 45]
index = 7
_PlaylistCallbacks.tracks_moved(
sp_playlist, track_numbers, len(track_numbers), index,
spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY, index)
tracks = callback.call_args[0][1]
self.assertEqual(len(tracks), len(track_numbers))
self.assertEqual(tracks[0], 43)
def test_playlist_renamed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.PLAYLIST_RENAMED, callback)
_PlaylistCallbacks.playlist_renamed(sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
def test_playlist_state_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.PLAYLIST_STATE_CHANGED, callback)
_PlaylistCallbacks.playlist_state_changed(
sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
def test_playlist_update_in_progress_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(
spotify.PlaylistEvent.PLAYLIST_UPDATE_IN_PROGRESS, callback)
done = True
_PlaylistCallbacks.playlist_update_in_progress(
sp_playlist, int(done), spotify.ffi.NULL)
callback.assert_called_once_with(playlist, done)
def test_playlist_metadata_updated_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.PLAYLIST_METADATA_UPDATED, callback)
_PlaylistCallbacks.playlist_metadata_updated(
sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
@mock.patch('spotify.user.lib', spec=spotify.lib)
def test_track_created_changed_callback(self, user_lib_mock, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACK_CREATED_CHANGED, callback)
index = 7
sp_user = spotify.ffi.cast('sp_user *', 43)
time = 123456789
_PlaylistCallbacks.track_created_changed(
sp_playlist, index, sp_user, time, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, index, mock.ANY, time)
user = callback.call_args[0][2]
self.assertIsInstance(user, spotify.User)
self.assertEqual(user._sp_user, sp_user)
user_lib_mock.sp_user_add_ref.assert_called_with(sp_user)
def test_track_seen_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACK_SEEN_CHANGED, callback)
index = 7
seen = True
_PlaylistCallbacks.track_seen_changed(
sp_playlist, index, int(seen), spotify.ffi.NULL)
callback.assert_called_once_with(playlist, index, seen)
def test_description_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.DESCRIPTION_CHANGED, callback)
description = 'foo bar æøå'
desc = spotify.ffi.new('char[]', description.encode('utf-8'))
_PlaylistCallbacks.description_changed(
sp_playlist, desc, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, description)
@mock.patch('spotify.image.lib', spec=spotify.lib)
def test_image_changed_callback(self, image_lib_mock, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.IMAGE_CHANGED, callback)
image_id = spotify.ffi.new('char[]', b'image-id')
sp_image = spotify.ffi.cast('sp_image *', 43)
lib_mock.sp_image_create.return_value = sp_image
image_lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
_PlaylistCallbacks.image_changed(
sp_playlist, image_id, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, mock.ANY)
image = callback.call_args[0][1]
self.assertIsInstance(image, spotify.Image)
self.assertEqual(image._sp_image, sp_image)
lib_mock.sp_image_create.assert_called_once_with(
self.session._sp_session, image_id)
self.assertEqual(image_lib_mock.sp_image_add_ref.call_count, 0)
def test_track_message_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.TRACK_MESSAGE_CHANGED, callback)
index = 7
message = 'foo bar æøå'
msg = spotify.ffi.new('char[]', message.encode('utf-8'))
_PlaylistCallbacks.track_message_changed(
sp_playlist, index, msg, spotify.ffi.NULL)
callback.assert_called_once_with(playlist, index, message)
def test_subscribers_changed_callback(self, lib_mock):
callback = mock.Mock()
sp_playlist = spotify.ffi.cast('sp_playlist *', 42)
playlist = spotify.Playlist._cached(
self.session, sp_playlist=sp_playlist)
playlist.on(spotify.PlaylistEvent.SUBSCRIBERS_CHANGED, callback)
_PlaylistCallbacks.subscribers_changed(sp_playlist, spotify.ffi.NULL)
callback.assert_called_once_with(playlist)
class PlaylistOfflineStatusTest(unittest.TestCase):
def test_has_constants(self):
self.assertEqual(spotify.PlaylistOfflineStatus.NO, 0)
self.assertEqual(spotify.PlaylistOfflineStatus.DOWNLOADING, 2)
|
# Copyright (c) 2011 Martin Ueding <dev@martin-ueding.de>
from camelot.view.art import Icon
from camelot.admin.application_admin import ApplicationAdmin
from camelot.admin.section import Section
from gettext import gettext
class MyApplicationAdmin(ApplicationAdmin):
def get_sections(self):
from camelot.model.memento import Memento
from camelot.model.authentication import Person, Organization
from camelot.model.i18n import Translation
from model import Customer, Package, HosterBill, HosterCustomerNumber, Domain, OwnBill
return [Section(gettext('data'),
Icon('tango/22x22/apps/system-users.png'),
items = [Customer, Package, HosterBill, HosterCustomerNumber, Domain, OwnBill]),
Section('configuration',
Icon('tango/22x22/categories/preferences-system.png'),
items = [Memento, Translation])
]
|
#
# @lc app=leetcode id=740 lang=python3
#
# [740] Delete and Earn
#
# @lc code=start
from collections import Counter
class Solution:
def deleteAndEarn(self, nums: list[int]) -> int:
counter, prev, cur = Counter(nums), 0, 0
for i in range(min(nums), max(nums) + 1):
prev, cur = cur, max(i * counter[i] + prev, cur)
return cur
# @lc code=end
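# Quick illustrative check (not part of the LeetCode submission): the recurrence
# above treats the problem like House Robber over the value range -- taking
# value i earns i * counter[i] but forbids taking i - 1, hence the rolling
# (prev, cur) pair.
if __name__ == "__main__":
    assert Solution().deleteAndEarn([3, 4, 2]) == 6
    assert Solution().deleteAndEarn([2, 2, 3, 3, 3, 4]) == 9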
|
# api/urls.py
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
from rest_framework_jwt.views import obtain_jwt_token
app_name="user"
urlpatterns = [
path('user/', views.UserCreate.as_view()),
path('user/<int:pk>/', views.UserProfile.as_view()),
path('user/login', obtain_jwt_token, name="token"),
]
|
#!/usr/bin/env python
# This program generates a line/table with all Atom-Atom contributions to the Lennard Jones Energy between two fragments
#
# The atom order for each fragment is read from the .rtf file
# The Atom types are obtained either from the .rtf file or from the .lpun file
# The number of fragments and the coordinates are read from a CHARMM .pdb file
#
import os,sys
rtf = []
lpun = ''
lpun_atype = False
pdb = ''
LJexp = [12,6]
atyp = []
def usage():
print "Usage: python LJ_Tab.py -rtf file.rtf [file2 [...]] -pdb file.pdb"
print " [-lpun file.lpun] [-LJexp # # ...] [-aty file.aty [file2 [...]]"
print "where file.aty is a list of atom-type pairs."
exit(0)
for i in range(len(sys.argv)):
if sys.argv[i] == '-rtf':
next_id = 1
next_arg = sys.argv[i+next_id]
while next_arg[0] != '-' and next_arg != '':
rtf.append(next_arg)
next_id += 1
if len(sys.argv) > i+next_id:
next_arg = sys.argv[i+next_id]
else:break
elif sys.argv[i] == '-lpun':
lpun = sys.argv[i+1]
lpun_atype = True
elif sys.argv[i] == '-pdb':
pdb = sys.argv[i+1]
elif sys.argv[i] == '-LJexp':
LJexp = []
j = i+1
while True:
if j+1 > len(sys.argv): break
if sys.argv[j][0] == '-': break
LJexp.append(int(sys.argv[j]))
j += 1
elif sys.argv[i] == '-aty':
next_id = 1
next_arg = sys.argv[i+next_id]
while next_arg[0] != '-' and next_arg != '':
atyp.append(next_arg)
next_id += 1
if len(sys.argv) > i+next_id:
next_arg = sys.argv[i+next_id]
else: break
elif sys.argv[i] == '-h':
usage()
if len(rtf) == 0 or pdb == '':
usage()
frag = {}
for i in range(len(rtf)):
try:
r = open(rtf[i],'r')
except:
print "Could not open file ->",rtf
print "Please check path and filename"
exit(0)
############################
# Read .rtf file and store necessary information
#
# Each fragment setup obtained from the .rtf file (and the .lpun file) is stored in a flat dictionary keyed by (fragment name, field) tuples.
# This could later be replaced by objects.
#
rl = r.readlines()
r.close()
i = 0
while i < len(rl):
if rl[i][:4] == 'RESI':
line = rl[i].split()
name = line[1]
i += 2
atoms = []
atypes = []
while rl[i][:4] == 'ATOM':
line = rl[i].split()
atoms.append(line[1])
atypes.append(line[2])
i += 1
frag[(name,'atoms')]=atoms
frag[(name,'atypes')]=atypes
i += 1
try:
p = open(pdb,'r')
except:
print "Could not open file ->",pdb
print "Please check path and filename and eventually specify pdb filename individually"
exit(0)
if lpun_atype == True:
try:
l = open(lpun,'r')
except:
print "Could not open file ->",lpun
print "Please check path and filename"
exit(0)
############################
# Read .pdb file and extract setup and coordinates
#
import numpy
pl = p.readlines()
p.close()
i = 0
s_atoms = []
s_ftype = []
s_fid = []
s_coords = []
for line in pl:
if line[0:6] == 'HETATM' or line[0:4] == 'ATOM':
s_atoms.append(line[13:16].rstrip())
s_ftype.append(line[17:21].rstrip())
s_fid.append(int(line[25]))
s_coords.append(numpy.array([float(line[30:38]),float(line[38:46]),float(line[46:54])]))
frag_idx = list(set(s_fid))
nfrags = len(frag_idx)
############################
# If given, read .lpun file and update atom types
#
if lpun_atype == True:
ll = l.readlines()
l.close()
frags = set(s_ftype)
for f in frags:
start = s_ftype.index(f)
for i in range(len(frag[(f,'atypes')])):
frag[(f,'atypes')][i] = ll[3+6*start+6*i].split()[1]
############################
# The program can calculate the distance-dependent part for a variety of exponents.
#
# If only two exponents are used, the potential used here is the m-n potential:
#
# A(ij) B(ij)
# LJ(ij) = ----------- - ----------
# R(ij)^m R(ij)^n
#
# The well known e(ij) and Rmin(ij) can be obtained from A and B according to
#
#
# B^(m/(m-n))
# e(ij) = -----
# 4*A^(n/(m-n))
#
# A
# Rmin(ij) = (-----)^(1/(m-n))
# B
#
#
# From these, atomic Rmin(i) and Rmin(j) could in principle be back-calculated,
# but this is not really necessary, and the Rmin values obtained will probably
# differ between different pairs of i and j.
#
# Atomic e(i) cannot be calculated back from e(ij), at least not directly.
# Again, this is not necessary for the theory.
#
############################
############################
# Create Table with all contributors to the LJ potential
#
# 1st: create a list with all atom type pairs contributing to the interfragment
# LJ energy. If file.aty is given, read the file to extract the list.
atype_pairs = []
if atyp == []:
for i in range(nfrags-1):
for j in range(i+1,nfrags):
for k in frag[(s_ftype[s_fid.index(frag_idx[i])],'atypes')]:
for l in frag[(s_ftype[s_fid.index(frag_idx[j])],'atypes')]:
a = [k,l]
a.sort()
# Exclude HT with OT (TIP3P water)
if a not in [['OT','HT'],['HT','OT'],['HT','HT'],['OT','OT']]:
atype_pairs.append(a[0]+':'+a[1])
else:
for k in range(len(atyp)):
try:
aty_f = open(atyp[k],'r')
except:
print "Could not open file",aty
exit(1)
aty_l = aty_f.readlines()
aty_f.close()
for i in range(len(aty_l)):
pair = aty_l[i].rstrip('\n').split('_')[0]
el1, el2 = pair.split(":")
conj_pair = el2+":"+el1
# Check that the pair hasn't already been added
if pair not in atype_pairs and conj_pair not in atype_pairs:
atype_pairs.append(pair)
atp = list(set(atype_pairs))
atp.sort()
# 2nd: add all contributions of specific Atom Type pairs up
import math
LJ_coeffs = [[0.0 for i in range(len(atp))] for j in range(len(LJexp))]
for i in range(nfrags-1):
for j in range(i+1,nfrags):
for k in range(len(frag[(s_ftype[s_fid.index(frag_idx[i])],'atypes')])):
for l in range(len(frag[(s_ftype[s_fid.index(frag_idx[j])],'atypes')])):
idx_af1 = s_fid.index(frag_idx[i])+k
idx_af2 = s_fid.index(frag_idx[j])+l
atype1 = frag[(s_ftype[s_fid.index(frag_idx[i])],'atypes')][frag[(s_ftype[s_fid.index(frag_idx[i])],'atoms')].index(s_atoms[idx_af1])]
atype2 = frag[(s_ftype[s_fid.index(frag_idx[j])],'atypes')][frag[(s_ftype[s_fid.index(frag_idx[j])],'atoms')].index(s_atoms[idx_af2])]
a = [atype1,atype2]
a.sort()
atype_pair = a[0]+':'+a[1]
if atype_pair not in atp: atype_pair = a[1]+':'+a[0]
dist = math.sqrt(numpy.dot(s_coords[idx_af1]-s_coords[idx_af2],s_coords[idx_af1]-s_coords[idx_af2]))
for ii,m in enumerate(LJexp):
if atype_pair not in ['OT:HT','HT:OT','HT:HT','OT:OT']:
LJ_coeffs[ii][atp.index(atype_pair)] += 1/dist**m
# 3rd: Write Atom type pairs and values to output
header = "Complex_ID"
for i in LJexp:
for j in atp: header = header + ' ' + j +'_' + str(i)
values = pdb[:-4]
for i in range(len(LJexp)):
for j in range(len(atp)): values = values + ' ' + str(LJ_coeffs[i][j])
#for i in range(len(LJ_A)): values = values + ' ' + str(LJ_A[i]) + ' ' + str(LJ_B[i])
f = open(pdb[:-4]+'.ljf','w')
f.write(header+'\n')
f.write(values+'\n')
f.close()
|
# -*- coding: utf-8 -*-
# file: rule_base_keywords_extraction.py
# date: 2022-02-25
import os
import textwrap
import keytext4py
from typing import List, Tuple
from functools import reduce
from keytext4py import corenlp_helper
from keytext4py import utils
#from keytext4py_ext_cpp import keywords_by_pos
from keytext4py import fast_keywords
from keytext4py import CoreNLPProcessor
from stanza.server import CoreNLPClient
if __name__ == "__main__":
zh_text: str = utils.file2text("./text/zh1.txt")
text_processor: CoreNLPProcessor = CoreNLPProcessor().init()
doc_ana: List[List[Tuple[str, str] ] ] = text_processor.run(zh_text)
doc_tokens: List[Tuple[str, str] ] = reduce(
lambda a, b: a + b, doc_ana)
keywords = fast_keywords(doc_tokens,
pos_bucketing=True, top_k=8,
min_word_len=2 * 3 # Since each ZH char takes 3 bytes
)
print("text:")
print(textwrap.fill(zh_text[:512] + "...\n", width=32))
print("\nkey-words:")
for keyword in keywords:
print("{}: {}".format(
f"{keyword[0]:>10}", keyword[1]))
|
from django.db import models
from utils.basemodel import BaseModel
# Create your models here.
class People(models.Model):
name = models.CharField(max_length=20)
age = models.IntegerField()
class Projects(BaseModel):
# unique=True marks this field as a unique constraint
# null=True allows NULL for this column in the database; the ORM default is NOT NULL
# id = models.IntegerField(primary_key=True, verbose_name='id主键', help_text='id主键')
name = models.CharField(max_length=50, unique=True, verbose_name='项目名称', help_text='项目名称')
leader = models.CharField(max_length=20, verbose_name='项目负责人', help_text='项目负责人')
# default=True sets a default; if the client omits this field when creating a record, the default value is used automatically
is_execute = models.BooleanField(verbose_name='是否启动项目', help_text='是否启动项目', default=True)
# blank=True makes the field optional when creating data; by default a value must be supplied
description = models.TextField(verbose_name='项目描述', help_text='项目描述', null=True, blank=True, default="描述语句")
# DateTimeField and DateField accept the auto_now_add and auto_now parameters
# >> auto_now_add=True stores the creation time when the record is created and never changes it afterwards
# >> auto_now=True stores the current time on every save, so it changes whenever the record is updated
# >> auto_now_add and auto_now cannot be used together
# create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间', help_text='创建时间')
# update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间', help_text='更新时间')
# Inner Meta class: customizes the table name and other metadata for this model
class Meta:
db_table = 'tb_projects'
verbose_name = '项目表'
verbose_name_plural = '项目表'
ordering = ['id', 'name']
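# Minimal usage sketch (illustrative only; assumes this app is installed and
# its migrations have been applied):
#
#     p = Projects.objects.create(name='demo', leader='alice')
#     p.is_execute     # True, filled in from default=True
#     p.description    # "描述语句", the default above, since no value was given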
|
import pyautogui
while(True):
pyautogui.moveTo(255,255)
pyautogui.moveTo(0,0)
|
# Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from ..config import as_bool, as_list
from .call_decorator import CallDecorator
from .non_issue import NonIssue
class ExitCodeFilter(CallDecorator):
"""
Decorator filter for SUT calls that return issues with ``'exit_code'``
property.
**Mandatory parameter of the decorator:**
- ``exit_codes``: if ``issue['exit_code']`` is not in the array of
``exit_codes``, the issue is filtered out; this behavior can be
inverted by setting the ``'invert'`` property to True, thus
keeping issues with exit code not listed in the array.
**Optional parameter of the decorator:**
- ``invert``: if it's true then exit code filtering mechanism is
inverted (boolean value, False by default).
The issues that are not filtered out are not changed in any way.
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
call=fuzzinator.call.StdinSubprocessCall
call.decorate(0)=fuzzinator.call.ExitCodeFilter
[sut.foo.call]
command=/home/alice/foo/bin/foo -
[sut.foo.call.decorate(0)]
exit_codes=[139]
invert=false
"""
def __init__(self, *, exit_codes, invert=False, **kwargs):
self.exit_codes = as_list(exit_codes)
self.invert = as_bool(invert)
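# Decision logic, as implemented in call() below: with invert=False an issue is
# kept only if its exit code is listed in exit_codes; with invert=True it is
# kept only if its exit code is NOT listed. Anything else is wrapped in
# NonIssue, i.e., filtered out.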
def call(self, cls, obj, *, test, **kwargs):
issue = super(cls, obj).__call__(test=test, **kwargs)
if not issue:
return issue
if not self.invert:
if issue['exit_code'] in self.exit_codes:
return issue
elif issue['exit_code'] not in self.exit_codes:
return issue
return NonIssue(issue)
|
import pandas as pd
from .config import *
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
def run():
'''
Definition:
Run simulation
'''
# Single
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
simulation = Executor(exec_context=local_mode_ctx, configs=exp.configs)
raw_system_events, tensor_field, sessions = simulation.execute()
# Result System Events DataFrame
df = pd.DataFrame(raw_system_events)
# subset to last substep
df = df[df['substep'] == df.substep.max()]
return df
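# Typical usage sketch (assumes exp.configs was populated via the .config import above):
#
#     df = run()
#     print(df.head())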
|
class TagHelper(object):
"""
A helper for tag operations
"""
@staticmethod
def list(tutorial, priority=False):
"""
Return a list of tutorial tags
"""
if not tutorial:
return []
tags = {}
tags_list = []
priority_tag = None
for tag in tutorial.tutorial_tags.all():
tags[tag.tag.slug] = tag.tag.name
tags_list.append(tag.tag.slug)
# Get priority tag?
if priority:
tutorial_url = tutorial.url_set.filter(is_alias=False).first()
if tutorial_url:
priority_tag = tutorial_url.tag
# Move priority tag first?
if priority_tag and priority_tag in tags_list:
tags_list.remove(priority_tag)
tags_list = [priority_tag] + tags_list
# Build final tag object
return [{'name': tags[tag], 'slug': tag} for tag in tags_list]
@staticmethod
def list_instances(tutorial):
"""
Return a list of tutorial tags (instances and not processed dict)
"""
return [t.tag for t in tutorial.tutorial_tags.all()]
|
import os
import imp
import sys
import time
import threading
import subprocess
# usb or sd card
user_dir = os.getenv("USER_DIR", "/usbdrive")
fw_dir = os.getenv("FW_DIR")
# imports
current_dir = os.path.dirname(os.path.abspath(__file__))
og = imp.load_source('og', current_dir + '/og.py')
wifi = imp.load_source('wifi_control', current_dir + '/wifi_control.py')
wifi.log_file = user_dir + "/wifi_log.txt"
# UI elements
menu = og.Menu()
banner = og.Alert()
# lock for updating menu
menu_lock = threading.Lock()
# quits but doesn't return to main menu
# for vnc start / stop, keeps the starting / stopping message up until mother is restarted
def quit_no_return():
og.osc_server.free()
exit()
def quit():
og.end_app()
# stores possible networks
# used to build wifi menu
# contains connect callback
class WifiNet :
ssid = ''
pw = ''
def connect (self):
wifi.connect(self.ssid, self.pw)
update_menu()
og.redraw_flag = True
def disconnect():
print("wifi disconnect all")
wifi.disconnect_all()
update_menu()
og.redraw_flag = True
def start_vnc():
cmd = fw_dir + "/scripts/vnc-start.sh"
try:
ret = subprocess.check_output(['bash', '-c', cmd], close_fds=True)
except: pass
quit_no_return()
def stop_vnc():
cmd = fw_dir + "/scripts/vnc-stop.sh"
try:
ret = subprocess.check_output(['bash', '-c', cmd], close_fds=True)
except: pass
quit_no_return()
def check_vnc():
cmd = "pgrep Xvnc"
try:
subprocess.check_output(['bash', '-c', cmd], close_fds=True)
ret = True
except:
ret = False
return ret
def start_web():
print("start web")
wifi.start_web_server()
update_menu()
og.redraw_flag = True
def stop_web():
print("stop web")
wifi.stop_web_server()
update_menu()
og.redraw_flag = True
def start_ap():
print("start ap")
wifi.start_ap_server()
update_menu()
og.redraw_flag = True
def stop_ap():
print("stop ap")
wifi.stop_ap_server()
update_menu()
og.redraw_flag = True
# update menu based on connection status
def update_menu():
dots = ['.','..','...','....']
menu_lock.acquire()
try :
# update wifi network labels
if (wifi.state == wifi.CONNECTING) :
menu.header = 'Connecting'+dots[wifi.connecting_timer % 4]
update_net_status_label('.')
elif (wifi.state == wifi.CONNECTED) :
menu.header = 'Connected ' + wifi.current_net
update_net_status_label('*')
elif (wifi.state == wifi.DISCONNECTING) :
menu.header = 'Disconnecting..'
update_net_status_label('-')
elif (wifi.state == wifi.CONNECTION_ERROR) :
menu.header = 'Problem Connecting'
update_net_status_label('-')
else :
menu.header = 'Not Connected'
update_net_status_label('-')
# update webserver menu entry
if (wifi.web_server_state == wifi.WEB_SERVER_RUNNING) :
update_web_server_menu_entry(True)
else :
update_web_server_menu_entry(False)
# update webserver menu entry
if (wifi.ap_state == wifi.AP_RUNNING) :
update_ap_menu_entry(True)
else :
update_ap_menu_entry(False)
finally :
menu_lock.release()
# show connected status for each network
def update_net_status_label(stat):
# check entries that have stashed net info (I know)
for i in range(len(menu.items)) :
try :
if (menu.items[i][2]['type'] == 'net') :
if (menu.items[i][2]['ssid'] == wifi.current_net) :
menu.items[i][0] = ' '+stat+' ' + menu.items[i][2]['ssid']
else :
menu.items[i][0] = ' - ' + menu.items[i][2]['ssid']
except :
pass
def update_web_server_menu_entry(stat):
if (stat) :
label = 'Stop Web Server'
action = stop_web
else :
label = 'Start Web Server'
action = start_web
for i in range(len(menu.items)) :
try :
if (menu.items[i][2]['type'] == 'web_server_control') :
menu.items[i][0] = label
menu.items[i][1] = action
except :
pass
def update_ap_menu_entry(stat):
if (stat) :
label = 'Stop AP'
action = stop_ap
else :
label = 'Start AP'
action = start_ap
for i in range(len(menu.items)) :
try :
if (menu.items[i][2]['type'] == 'ap_control') :
menu.items[i][0] = label
menu.items[i][1] = action
except :
pass
# bg connection checker
def check_status():
while True:
time.sleep(1)
wifi.update_state()
update_menu()
og.redraw_flag = True
def non():
pass
def error_wifi_file() :
og.clear_screen()
og.println(0, "Error with wifi.txt")
og.println(2, "Please check file")
og.println(3, "is in the correct")
og.println(4, "format.")
og.flip()
og.enc_input()
quit()
# build main menu
menu.items = []
menu.header='Not Connected'
# start it up
og.start_app()
# check for wifi file, create one if not found
wifi_file = user_dir + "/wifi.txt"
if os.path.exists(wifi_file):
f = open(user_dir + "/wifi.txt", "r")
else :
print("wifi file not found, creating")
f = open(user_dir + "/wifi.txt", "w")
f.write("Network Name\n")
f.write("password\n")
f.close()
f = open(user_dir + "/wifi.txt", "r")
try :
networks = f.readlines()
networks = [x.strip() for x in networks]
ssids = networks[0::2]
pws = networks[1::2]
for i in range(len(ssids)) :
if (ssids[i] != '') :
ssid = ssids[i]
pw = pws[i]
net = WifiNet()
net.ssid = ssid
net.pw = pw
menu.items.append([' - ' + ssid, net.connect, {'type':'net', 'ssid':ssid}]) # stash some extra info with these net entries
except :
error_wifi_file()
print("bad wifi file")
menu.items.append(['Start Web Server', non, {'type':'web_server_control'}])
menu.items.append(['Start AP', non, {'type':'ap_control'}])
if check_vnc() : menu.items.append(['Stop VNC', stop_vnc])
else : menu.items.append(['Start VNC', start_vnc])
menu.items.append(['Turn Wifi Off', disconnect])
menu.items.append(['< Home', quit])
menu.selection = 0
# bg thread
menu_updater = threading.Thread(target=check_status)
menu_updater.daemon = True # stop the thread when we exit
wifi.initialize_state()
update_menu()
og.redraw_flag = True
# start thread to update connection status
menu_updater.start()
# enter menu
menu.perform()
|
from flask import current_app, request
from flask_restful import Resource, reqparse
from app.helpers import helpers, validator
from app.middlewares import auth
from app.models import model
from app.vendors.rest import response
class GetUserData(Resource):
@auth.auth_required
def get(self):
try:
users = model.get_all("user")
if not users:
return response(404)
return response(200, data=users)
except Exception as e:
current_app.logger.error(f"{e}")
return response(500)
class GetUserDataId(Resource):
@auth.auth_required
def get(self):
user_id = request.args.get("id")
email = request.args.get("email")
try:
if not any((user_id, email)):
return response(422, "Problems parsing parameters")
if user_id:
user = model.get_one(table="user", field="id", value=user_id)
if email:
user = model.get_one(table="user", field="email", value=email)
if not user:
return response(404)
return response(200, data=user)
except Exception as e:
current_app.logger.error(f"{e}")
return response(500)
class UserSignUp(Resource):
@auth.auth_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("email", type=str, required=True)
args = parser.parse_args()
email = args["email"]
if not model.is_unique(table="user", field="email", value=f"{email}"):
return response(409, message="Duplicate Email")
try:
validator.validate("EMAIL", email)
except Exception as e:
return response(422, message=f"{e}")
try:
data = {"email": email, "created_at": helpers.get_datetime()}
inserted_id = model.insert(table="user", data=data)
data_ = {"id": inserted_id, **data}
return response(201, data=data_)
except Exception as e:
current_app.logger.error(f"{e}")
return response(500)
class UserUpdate(Resource):
@auth.auth_required
def put(self, user_id):
parser = reqparse.RequestParser()
parser.add_argument("email", type=str, required=True)
args = parser.parse_args()
email = args["email"]
if not model.is_unique(table="user", field="email", value=f"{email}"):
return response(409, message="Duplicate Email")
try:
validator.validate("EMAIL", email)
except Exception as e:
return response(422, message=f"{e}")
try:
data = {"where": {"id": user_id}, "data": {"email": email}}
row_count = model.update("user", data=data)
if not row_count:
return response(404)
return response(200, data=data.get("data"))
except Exception as e:
current_app.logger.error(f"{e}")
return response(500)
class UserDelete(Resource):
@auth.auth_required
def delete(self, user_id):
try:
row_count = model.delete(table="user", field="id", value=user_id)
if not row_count:
return response(404)
return response(204)
except Exception as e:
current_app.logger.error(f"{e}")
return response(500)
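# Hypothetical wiring sketch (URL paths below are illustrative, not taken from
# the project's actual routing module):
#
#     api.add_resource(GetUserData, "/users")
#     api.add_resource(GetUserDataId, "/user")
#     api.add_resource(UserSignUp, "/user/signup")
#     api.add_resource(UserUpdate, "/user/<int:user_id>")
#     api.add_resource(UserDelete, "/user/<int:user_id>")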
|
"""Backwards compatibility for old IDs
"""
def legacy_ids(study_ids):
"""Handle legacy study IDs, returning the new format
Parameters:
study_ids: List of study IDs (new or old)
Returns:
result: List of new study IDs
"""
legacy_id_subs = {
"UKB-a:": "ukb-a-",
"UKB-b:": "ukb-b-",
"UKB-c:": "ukb-c-",
"IEU-a:": "ieu-a-",
"\\D": "ieu-a-",
}
result = []
for id in study_ids:
for legacy_id in legacy_id_subs.keys():
if id.find(legacy_id) >= 0:
id = id.replace(legacy_id, legacy_id_subs[legacy_id])
result.append(id)
return result
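# Illustrative example (IDs below are hypothetical, shown only to demonstrate
# the prefix mapping):
#
#     legacy_ids(["UKB-a:2335", "IEU-a:7", "ieu-a-2"])
#     # -> ["ukb-a-2335", "ieu-a-7", "ieu-a-2"]   (new-style IDs pass through unchanged)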
|
#! /usr/bin/env python
from momo import sys, np, osio, endl, flush
from __qmshell__ import e_xyz_from_xyz
from __molecules__ import Atom, Molecule
osio.Connect()
osio.AddArg('file', typ=str, default=None, help='Input xyz-file')
osio.AddArg('molname', typ=str, default='UNSCRAMBLED', help='Molecule name')
osio.AddArg('gro', typ=str, default='', help='Output gro-file')
osio.AddArg('xyz', typ=str, default='', help='Output xyz-file')
osio.AddArg('ring_exclude', typ=(list, str), default=['Al','Zn'], help='Exclude these atoms from ring structure')
opts, xopts = osio.Parse()
xyzfile = opts.file
molname = opts.molname
outfile_xyz = opts.xyz
outfile_gro = opts.gro
exclude_bonds_to = opts.ring_exclude
wordy = False
leaky = False
# COVALENCE RADII (from Cambridge Structural Database, table see http://en.wikipedia.org/wiki/Covalent_radius)
COVRAD_TABLE = {}
COVRAD_TABLE['H'] = 0.31
COVRAD_TABLE['C'] = 0.76
COVRAD_TABLE['N'] = 0.71
COVRAD_TABLE['O'] = 0.66
COVRAD_TABLE['Se'] = 1.20
COVRAD_TABLE['S'] = 1.05
COVRAD_TABLE['Zn'] = 1.22
# FORCEFIELD TYPE TABLE
TYPE_TABLE = {\
'C:CCH' : 'CA', # Aromatic
'C:CCN' : 'CA', # Aromatic + Nitrogen (TODO)
'C:CHS' : 'CA', #
'C:CCSe' : 'CB', # Aromatic + Selenium (TODO)
'C:CCS' : 'CB', # Aromatic + Sulphur
'C:CCO' : 'CO', # Aromatic + Carboxylic
'C:CNSe' : 'CS', # Aromatic + Selenium + Nitrogen (TODO)
'C:CCHH' : 'CR', # Aliphatic
'C:CHHN' : 'CR', # Aliphatic
'C:CHHH' : 'CR', # Methyl
'C:CCC' : 'CC', #
'C:CN' : 'CN', # Cyano-group (TODO)
'H:C' : 'HC', #
'N:CCC' : 'NA', # Aromatic
'N:C' : 'NC', #
'O:C' : 'OC', # Carboxylic group
'S:CC' : 'S', # Thiophene sulphur
'Se:CC' : 'Se'} #
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
NUMBERS = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
class AtomXyz(object):
def __init__(self, e, xyz, id):
# PROPERTIES
self.e = e
self.id = id
self.xyz = xyz
self.x = xyz[0]
self.y = xyz[1]
self.z = xyz[2]
if self.e in exclude_bonds_to: self.covrad = 0.0
else: self.covrad = COVRAD_TABLE[self.e]
# FRAGMENT INFO
self.name = ''
self.type = ''
self.fragname = '___'
self.fragid = 0
# BONDING LEVEL (-1 = CORE)
self.level = -1
# ALL BONDS
self.bonded = []
self.bonded_short = []
# CORE BONDS
self.bonded_core = []
self.bonds_core = []
# NON-RING CORE BONDS
self.bonded_non_ring = []
self.bonds_non_ring = []
# DOCKED
self.docked_to = []
self.dock_for = []
# PATHS OF SOME LENGTH
self.path_length = -1
self.paths = []
# NON-RING PATHS OF SOME LENGTH
self.path_length_non_ring = -1
self.paths_non_ring = []
def generate_type(self):
type_key = ''
bonded_elems = []
for b in self.bonded:
bonded_elems.append(b.e)
bonded_elems.sort()
for e in bonded_elems:
type_key += e
type_key = self.e + ':' + type_key
try:
self.type = TYPE_TABLE[type_key]
except KeyError:
default_type = self.e+'X'
print "Type definition missing for '%s', defaulting to '%s'" % (type_key, default_type)
self.type = default_type
return
def get_all_docked_atoms(self):
docked_atoms = []
docked_atoms = docked_atoms + self.dock_for
for d in self.dock_for:
docked_atoms = docked_atoms + d.get_all_docked_atoms()
return docked_atoms
def add_bond_core(self, bonded_atom, bond):
self.bonded_core.append(bonded_atom)
self.bonds_core.append(bond)
return
def add_bond_non_ring(self, bonded_atom, bond):
self.bonded_non_ring.append(bonded_atom)
self.bonds_non_ring.append(bond)
return
def find_paths(self, length, exclusion_list=[], start_here=True):
if length == 0: return []
paths = []
#exclusion_list.append(self)
for bond in self.bonds_core:
new_path = Path(start_with_atom=self if start_here else None)
if not bond.b in exclusion_list:
new_path.extend(bond)
paths.append(new_path)
other = bond.b
#other_paths = other.find_paths(length=length-1, exclusion_list=exclusion_list, start_here=False)
other_paths = other.find_paths(length=length-1, exclusion_list=[self], start_here=False)
for o in other_paths:
joined = JoinPaths(new_path, o)
paths.append(joined)
if start_here:
self.path_length = length
self.paths = paths
return paths
def find_paths_non_ring(self, length=4, exclusion_list=[], start_here=True):
if length == 0: return []
paths = []
#exclusion_list.append(self)
for bond in self.bonds_non_ring:
new_path = Path(start_with_atom=self if start_here else None)
if not bond.b in exclusion_list:
new_path.extend(bond)
paths.append(new_path)
other = bond.b
#other_paths = other.find_paths(length=length-1, exclusion_list=exclusion_list, start_here=False)
other_paths = other.find_paths_non_ring(length=length-1, exclusion_list=[self], start_here=False)
for o in other_paths:
joined = JoinPaths(new_path, o)
paths.append(joined)
if start_here:
self.path_length_non_ring = length
self.paths_non_ring = paths
return paths
class Path(object):
def __init__(self, start_with_atom=None):
self.visited = []
if start_with_atom != None: self.visited.append(start_with_atom)
self.bonds = []
def add_visited(self, v):
self.visited.append(v)
def add_bond(self, b):
self.bonds.append(b)
def extend(self, bond):
self.add_visited(bond.b)
self.add_bond(bond)
def print_info(self):
for v in self.visited:
print "[%d:%s]" % (v.id,v.e),
print ""
def get_first(self):
return self.visited[0]
def get_last(self):
return self.visited[-1]
class BondXyz(object):
def __init__(self, atom1, atom2):
self.a = atom1
self.b = atom2
def JoinPaths(path1, path2):
joined_path = Path()
joined_path.visited = path1.visited + path2.visited
joined_path.bonds = path1.bonds + path2.bonds
return joined_path
class Ring(object):
def __init__(self, first_pair=None):
self.atoms = []
self.bonded_structures = []
if first_pair != None:
self.atoms.append(first_pair[0])
self.atoms.append(first_pair[1])
def type(self):
return "ring"
def has_atom(self, atom):
return self.check_atom(atom)
def check_atom(self, atom):
if atom in self.atoms: return True
else: return False
def check_add_pair(self, pair):
has_a = self.check_atom(pair[0])
has_b = self.check_atom(pair[1])
if has_a and not has_b:
self.atoms.append(pair[1])
return True
elif has_b and not has_a:
self.atoms.append(pair[0])
return True
elif has_b and has_a:
return True
else:
return False
def print_info(self):
for a in self.atoms:
print "%2d" % a.id,
print ""
def intersects(self, other):
intersects = False
for a in self.atoms:
if a in other.atoms:
intersects = True
break
return intersects
def add(self, other):
for atom in other.atoms:
if not atom in self.atoms:
self.atoms.append(atom)
return
def find_round_trip_path(self, visited=[], start_atm=None):
started_here = False
if start_atm == None:
self.atoms = sorted(self.atoms, key=lambda atm: len(atm.bonded))
start_atm = self.atoms[0]
visited.append(start_atm)
started_here = True
for bond in start_atm.bonds_core:
# Backup visited list to be able to revert to this point
visited_0 = []
for v in visited: visited_0.append(v)
if bond.b in visited or not bond.b in self.atoms: continue
# Partner atom not yet visited, proceed
visited.append(bond.b)
visited = self.find_round_trip_path(visited, bond.b)
# All atoms visited = round path?
if len(visited) == len(self.atoms):
break
# A dead end. Revert & try next bond
else:
visited = []
for v in visited_0: visited.append(v)
#assert len(visited) <= len(self.atoms)
if started_here:
if len(visited) != len(self.atoms):
osio << osio.mr << "WARNING Failed to generate round-trip path (atom order will be compromised)" << endl
visited = self.atoms
else:
if wordy:
osio << "Found round-trip path" << endl
return visited
def order_atoms(self):
self.atoms = self.find_round_trip_path(visited=[], start_atm=None)
return
def get_all_bonded_structures(self, exclude_list=[]):
bonded = []
exclude_list.append(self)
for b in self.bonded_structures:
if b in exclude_list:
continue
bonded.append(b)
bonded = bonded + b.get_all_bonded_structures(exclude_list)
return bonded
def JoinRings(ring1, ring2):
joined_ring = Ring()
for a in ring1.atoms:
joined_ring.atoms.append(a)
for b in ring2.atoms:
if not joined_ring.has_atom(b):
joined_ring.atoms.append(b)
return joined_ring
class Chain(object):
def __init__(self, first_pair=None):
self.atoms = []
self.bonded_structures = []
if first_pair != None:
self.atoms.append(first_pair[0])
self.atoms.append(first_pair[1])
def type(self):
return "chain"
def has_atom(self, atom):
return self.check_atom(atom)
def intersects(self, other):
intersects = False
for a in self.atoms:
if a in other.atoms:
intersects = True
break
return intersects
def add(self, other):
for atom in other.atoms:
if not atom in self.atoms:
self.atoms.append(atom)
return
def print_info(self):
for a in self.atoms:
print "%2d" % a.id,
print ""
def find_round_trip_path(self, visited=[], start_atm=None):
started_here = False
if start_atm == None:
self.atoms = sorted(self.atoms, key=lambda atm: len(atm.bonded))
start_atm = self.atoms[0]
visited.append(start_atm)
started_here = True
for bond in start_atm.bonds_core:
# Backup visited list to be able to revert to this point
visited_0 = []
for v in visited: visited_0.append(v)
if bond.b in visited or not bond.b in self.atoms: continue
# Partner atom not yet visited, proceed
visited.append(bond.b)
visited = self.find_round_trip_path(visited, bond.b)
# All atoms visited = round path?
if len(visited) == len(self.atoms):
break
# A dead end. Revert & try next bond
else:
visited = []
for v in visited_0: visited.append(v)
#assert len(visited) <= len(self.atoms)
if started_here:
if len(visited) != len(self.atoms):
osio << osio.mr << "WARNING Failed to generate round-trip path (atom order will be compromised)" << endl
visited = self.atoms
else:
if wordy:
osio << "Found round-trip path" << endl
return visited
def order_atoms(self):
self.atoms = self.find_round_trip_path(visited=[], start_atm=None)
return
def get_all_bonded_structures(self, exclude_list=[]):
bonded = []
exclude_list.append(self)
for b in self.bonded_structures:
if b in exclude_list:
continue
bonded.append(b)
bonded = bonded + b.get_all_bonded_structures(exclude_list)
return bonded
def CreateMolecule(name, atoms, xyz_conv_fact=0.1):
molecule = Molecule(0, name)
for atom in atoms:
gro_atom = Atom(ln='')
gro_atom.fragId = atom.fragid
gro_atom.fragName = atom.fragname
gro_atom.name = atom.name
gro_atom.Id = atom.id
gro_atom.pos = np.array(atom.xyz)*xyz_conv_fact
gro_atom.vel = None
molecule.append_atom(gro_atom)
return molecule
# LOAD ATOMS
atoms = []
e,xyz = e_xyz_from_xyz(xyzfile)
count = 0
for e,r in zip(e,xyz):
count += 1
atoms.append(AtomXyz(e,r,count))
# ESTABLISH BONDING VIA COVALENCE CRITERION
osio << osio.mg << "Find bonds using covalence criterion" << endl
bond_count = 0
for i in range(len(atoms)):
for j in range(i+1, len(atoms)):
a = atoms[i]
b = atoms[j]
dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
dv = 1.1*(a.covrad+b.covrad)
if dr < dv:
bond_count += 1
a.bonded.append(b)
b.bonded.append(a)
print "%d bonds in molecule" % bond_count
if leaky:
for a in atoms:
print "%2s bonded to %d" % (a.e, len(a.bonded))
for a in atoms:
if len(a.bonded) == 0:
osio << osio.my << "NOTE: Unbonded atom" << a.e << a.id << endl
# SEQUENTIALLY SPLIT OFF LIGAND UNITS
osio << osio.mg << "Find core using sequential reduction" << endl
short_list = []
for a in atoms:
short_list.append(a)
selection_levels = []
this_level = 0
while True:
if leaky:
print "Level", this_level
print "Short-listed", len(short_list)
for a in short_list:
a.bonded_short = []
for i in range(len(short_list)):
for j in range(i+1, len(short_list)):
a = short_list[i]
b = short_list[j]
dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
dv = 1.1*(a.covrad+b.covrad)
if dr < dv:
a.bonded_short.append(b)
b.bonded_short.append(a)
rm_atoms = []
for s in short_list:
if len(s.bonded_short) == 1:
rm_atoms.append(s)
if len(rm_atoms) == 0:
break
if leaky:
print "Removing", len(rm_atoms)
for r in rm_atoms:
r.level = this_level
for b in r.bonded_short:
b.dock_for.append(r)
r.docked_to.append(b)
short_list.remove(r)
if leaky:
ofs = open('level_%d.xyz' % this_level, 'w')
ofs.write('%d\n\n' % len(short_list))
for s in short_list:
ofs.write('%s %+1.7f %+1.7f %+1.7f\n' % (s.e, s.x, s.y, s.z))
ofs.close()
this_level += 1
# READ OFF CORE ATOMS
core = []
for a in atoms:
assert len(a.docked_to) <= 1
docked_atoms = a.get_all_docked_atoms()
if wordy:
osio << "%-2s bonded to %d, docked to %d, dock for %d/%-2d at level %+d" \
% (a.e, len(a.bonded), len(a.docked_to), len(a.dock_for), len(docked_atoms), a.level) << endl
if len(a.docked_to) < 1:
core.append(a)
print "%d atoms in core" % len(core)
# ESTABLISH BONDING AMONG CORE ATOMS
osio << osio.mg << "Find core-atom bonds using covalence criterion" << endl
bonds = []
for i in range(len(core)):
for j in range(i+1, len(core)):
a = core[i]
b = core[j]
dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
dv = 1.1*(a.covrad+b.covrad)
if dr < dv:
a.bonded_short.append(b)
b.bonded_short.append(a)
bond_ab = BondXyz(a,b)
bond_ba = BondXyz(b,a)
a.add_bond_core(b, bond_ab)
b.add_bond_core(a, bond_ba)
bonds.append(bond_ab)
print "%d bonds in core" % len(bonds)
# GENERATE PATHS ALONG CORE BONDS
path_cutoff_length = 7
osio << osio.mg << "Find connecting paths (max. length %d)" % path_cutoff_length << endl
path_count = 0
for c in core:
paths = c.find_paths(length=path_cutoff_length, exclusion_list=[], start_here=True)
if wordy:
osio << "%2d paths of length <= %d from atom %2d" % (len(paths), path_cutoff_length, c.id) << endl
if leaky:
for p in paths:
p.print_info()
path_count += len(paths)
osio << "Generated a total of %d bond paths" % path_count << endl
# FROM PATHS FIND RING-CONNECTED ATOMS
ring_pairs = []
for i in range(len(core)):
for j in range(i+1, len(core)):
a = core[i]
b = core[j]
paths_ab = []
for p in a.paths:
if p.get_last() == b:
paths_ab.append(p)
paths_ba = []
for p in b.paths:
if p.get_last() == a:
paths_ba.append(p)
if leaky:
print "ID1 %d ID2 %d" % (a.id, b.id)
print "a => b: %d" % len(paths_ab)
#for p in paths_ab:
# p.print_info()
print "b => a: %d" % len(paths_ba)
#for p in paths_ba:
# p.print_info()
assert len(paths_ab) == len(paths_ba)
if len(paths_ab) == 1: continue
has_disjoint_paths = False
for k in range(len(paths_ab)):
for l in range(k+1, len(paths_ab)):
intersects = False
p1 = paths_ab[k]
p2 = paths_ab[l]
b1 = p1.bonds
b2 = p2.bonds
for bond in b1:
if bond in b2:
intersects = True
if not intersects:
has_disjoint_paths = True
if has_disjoint_paths:
pair = [a,b]
ring_pairs.append(pair)
if leaky:
osio << osio.mg << "Ring pair:" << a.id-1 << b.id-1 << endl
# FROM RING PAIRS, FIND RINGS VIA SUCCESSIVE ADDITION
osio << osio.mg << "Find rings using set of ring pairs" << endl
rings = []
for pair in ring_pairs:
new_ring = Ring(first_pair=pair)
rings.append(new_ring)
i = 0
while i <= len(rings)-1:
ring = rings[i]
rm_rings = []
for j in range(i+1, len(rings)):
other = rings[j]
if ring.intersects(other):
rm_rings.append(other)
ring.add(other)
for r in rm_rings:
rings.remove(r)
i += 1
osio << "Core rings (# = %d)" % len(rings) << endl
if wordy:
for r in rings:
r.print_info()
# READ OFF NON-RING ATOMS
non_ring_core_atoms = []
for c in core:
in_ring = False
for r in rings:
if r.has_atom(c):
in_ring = True
if not in_ring:
non_ring_core_atoms.append(c)
osio << "Non-ring core atoms: %d" % len(non_ring_core_atoms) << endl
# ESTABLISH BONDING AMONG NON-RING CORE ATOMS
osio << osio.mg << "Find non-ring core-atom bonds using covalence criterion" << endl
bonds = []
for i in range(len(non_ring_core_atoms)):
for j in range(i+1, len(non_ring_core_atoms)):
a = non_ring_core_atoms[i]
b = non_ring_core_atoms[j]
dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
dv = 1.1*(a.covrad+b.covrad)
if dr < dv:
a.bonded_short.append(b)
b.bonded_short.append(a)
bond_ab = BondXyz(a,b)
bond_ba = BondXyz(b,a)
a.add_bond_non_ring(b, bond_ab)
b.add_bond_non_ring(a, bond_ba)
bonds.append(bond_ab)
print "%d bonds in non-ring core" % len(bonds)
# GENERATE PATHS ALONG NON-RING CORE BONDS
path_cutoff_length = 7
osio << osio.mg << "Find connecting non-ring paths (max. length %d)" % path_cutoff_length << endl
path_count = 0
for c in non_ring_core_atoms:
paths = c.find_paths_non_ring(length=path_cutoff_length, exclusion_list=[], start_here=True)
if wordy:
print "%2d paths of length <= %d from atom %2d" % (len(paths), path_cutoff_length, c.id)
if leaky:
for p in paths:
p.print_info()
path_count += len(paths)
osio << "Generated a total of %d non-ring bond paths" % path_count << endl
# FROM PATHS FIND NON-RING-CONNECTED ATOMS
non_ring_pairs = []
for i in range(len(non_ring_core_atoms)):
for j in range(i+1, len(non_ring_core_atoms)):
a = non_ring_core_atoms[i]
b = non_ring_core_atoms[j]
paths_ab = []
for p in a.paths_non_ring:
if p.get_last() == b:
paths_ab.append(p)
paths_ba = []
for p in b.paths_non_ring:
if p.get_last() == a:
paths_ba.append(p)
if leaky:
print "ID1 %d ID2 %d" % (a.id, b.id)
print "a => b: %d" % len(paths_ab)
#for p in paths_ab:
# p.print_info()
print "b => a: %d" % len(paths_ba)
#for p in paths_ba:
# p.print_info()
assert len(paths_ab) == len(paths_ba)
assert len(paths_ab) <= 1
if len(paths_ab) > 0:
pair = [a,b]
non_ring_pairs.append(pair)
if leaky:
osio << osio.mg << "Non-ring pair:" << a.id-1 << b.id-1 << endl
# FROM NON-RING PAIRS, FIND NON-RINGS (= CHAINS) VIA SUCCESSIVE ADDITION
osio << osio.mg << "Find non-ring structures using set of non-ring pairs" << endl
chains = []
for pair in non_ring_pairs:
new_chain = Chain(first_pair=pair)
chains.append(new_chain)
i = 0
while i <= len(chains)-1:
chain = chains[i]
rm_chains = []
for j in range(i+1, len(chains)):
other = chains[j]
if chain.intersects(other):
rm_chains.append(other)
chain.add(other)
for r in rm_chains:
chains.remove(r)
i += 1
if len(chains) == 0:
for atom in non_ring_core_atoms:
new_chain = Chain()
new_chain.atoms.append(atom)
chains.append(new_chain)
osio << "Core chains (# = %d)" % len(chains) << endl
if wordy:
for c in chains:
c.print_info()
# REORDER STRUCTURAL ELEMENTS (CORE RINGS & CORE CHAINS)
molecule = []
structures = rings + chains
for i in range(len(structures)):
for j in range(i+1, len(structures)):
s1 = structures[i]
s2 = structures[j]
bond_count = 0
for a in s1.atoms:
for b in s2.atoms:
dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
dv = 1.1*(a.covrad+b.covrad)
if dr < dv:
bond_count += 1
assert bond_count <= 1
if bond_count:
s1.bonded_structures.append(s2)
s2.bonded_structures.append(s1)
start_struct_idx = 0
if len(structures) == 1:
pass
else:
structures = sorted(structures, key=lambda s: len(s.bonded_structures))
while structures[start_struct_idx].bonded_structures == []:
molecule.append(structures[start_struct_idx])
if start_struct_idx+1 == len(structures): break
start_struct_idx += 1
start_struct = structures[start_struct_idx]
docked_structures = start_struct.get_all_bonded_structures(exclude_list=[])
molecule = molecule + [start_struct] + docked_structures
# REORDER ATOMS IN EACH STRUCTURE
for struct in molecule:
print "Structure type %-10s" % ("'%s'" % struct.type()), "(bonded to %d)" % len(struct.bonded_structures)
struct.order_atoms()
# GENERATE ATOM TYPES
osio << osio.mg << "Assign atom types" << endl
frag_atom_type_count = {}
for atm in atoms:
atm.generate_type()
frag_atom_type_count[atm.type] = 0
# SORT ATOMS AND ASSIGN FRAGMENT NAMES & IDs
osio << osio.mg << "Sort atoms, assign fragment names & IDs" << endl
atoms_ordered = []
frag_count = 0
core_count = 0
ligand_count = 0
core_alphabet_index = 0
ligand_alphabet_index = 1
for struct in molecule:
# Core atoms
frag_count += 1
core_count += 1
ligand_sets = []
# Reset fragment atom-type counter
for key in frag_atom_type_count.keys():
frag_atom_type_count[key] = 0
osio << "Core '%s' (size: %d)" % (ALPHABET[core_count-1], len(struct.atoms)) << endl
for atm in struct.atoms:
atm.fragid = frag_count
atm.fragname = 'CO' + ALPHABET[core_count-1]
atm.name = atm.type + NUMBERS[frag_atom_type_count[atm.type]]
frag_atom_type_count[atm.type] += 1
atoms_ordered.append(atm)
docked = atm.get_all_docked_atoms()
if len(docked) <= 4:
for datm in docked:
datm.fragid = frag_count
datm.fragname = 'CO' + ALPHABET[core_count-1]
datm.name = datm.type + NUMBERS[frag_atom_type_count[datm.type]]
atoms_ordered.append(datm)
else:
ligand_sets.append(docked)
# Reset fragment atom-type counter
for key in frag_atom_type_count.keys():
frag_atom_type_count[key] = 0
# Ligand atoms
for lset in ligand_sets:
frag_count += 1
ligand_count += 1
osio << "Ligand '%s' (size: %d)" % (ALPHABET[core_count-1], len(struct.atoms)) << endl
for atm in lset:
atm.fragid = frag_count
atm.fragname = 'LI' + ALPHABET[core_count-1]
atm.name = atm.type + NUMBERS[frag_atom_type_count[atm.type]]
frag_atom_type_count[atm.type] += 1
atoms_ordered.append(atm)
# FIX ATOM IDs
atom_count = 0
for atom in atoms_ordered:
atom_count += 1
atom.id = atom_count
# OUTPUT XYZ
if outfile_xyz != '':
ofs = open(outfile_xyz, 'w')
ofs.write('%d\n\n' % len(atoms_ordered))
for atm in atoms_ordered:
if wordy:
print atm.e, atm.fragname, atm.fragid, atm.type, atm.name
ofs.write('%-2s %+1.7f %+1.7f %+1.7f\n' % (atm.e, atm.x, atm.y, atm.z))
ofs.close()
# OUTPUT GRO
if outfile_gro != '':
molecule = CreateMolecule(molname, atoms_ordered)
molecule.write_gro(outfile_gro)
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from numpy.core.numeric import Inf
from tensorboardX import SummaryWriter
from rlcontrol.agents.base import Agent
from rlcontrol.systems.base_systems.base_env import ENV
from rlcontrol.logger.logger import Logger
np.random.seed(59)
#TODO : Rename
class Organizer(object):
def __init__(self,
env:ENV,
agent_class:Agent,
agent_config:dict,
env_config:dict):
self.env = env(env_config)
self.agent = agent_class(
state_dim=self.env.dimensions['state'],
action_dim=self.env.dimensions['action'],
agent_config = agent_config)
self.config = self.get_default_training_config()
self.logger = Logger()
@staticmethod
def get_default_training_config():
default_config_training = {
'enable_log' : True, # Create log folders and log weights and plots
'max_episode' : 10,
'max_step' : 500,
'freq_weight_log' : 50, # Frequency of logging trained weights
'freq_tensorboard_log' : 50, # Frequency of logging training stats to tensorboard
'algorithm_name' : 'Unknown',
'plotting' : {
'freq' : 10, # Plot per episode
'enable' : False}
}
return default_config_training
@staticmethod
def get_default_inference_config():
default_config_inference = {
'max_episode' : 10,
'max_step' : 500,
}
return default_config_inference
def train(self, training_config=None):
self.config = self.get_default_training_config() if training_config is None else training_config
# TODO: Move this to logging class when created
if self.config.get("enable_log") is True:
self.logger.set(self.config.get("algorithm_name"))
best_reward = -99999
batch_size = self.agent.get_batch_size()
for eps in range(self.config['max_episode']):
# Log plot list
output_list = []
reward_list=[]
reference_list=[]
control_sig_list=[]
# Reset agent to random state
self.agent.reset()
# Zero log params
# TODO: Make this config
episode_reward = 0
episode_policy_loss = 0
episode_value_loss = 0
total_control_signal = 0
total_output_signal = 0
# Reset envrionment to random state
state = self.env.reset()
# One training loop
for step in range(self.config['max_step']):
action = self.agent.apply(state, step)
if action is None:
print("NaN value detected, network is destroyed. Exiting training ..")
print("action : ",action)
if self.config.get("enable_log") is True: self.logger.close()
sys.exit()
total_control_signal = total_control_signal + action
next_state, reward, done = self.env.step(action)
state = next_state
output = self.env.get_info()['state'][0]
if np.isnan(action) or np.isnan(state) or np.isnan(next_state) or np.isnan(done):
print("NaN value detected, network is destroyed. Exiting training ..")
print("state : ",state)
print("next state : ",next_state)
print("action : ",action)
print("reward : ",reward)
print("done :",done)
if self.config.get("enable_log") is True: self.logger.close()
sys.exit()
y1 = np.asscalar(output)
u1 = np.asscalar(action[0])
output_list.append(y1)
reference_list.append(self.env.get_info()['state_ref'][0])
reward_list.append(reward)
control_sig_list.append(action)
total_control_signal += u1
total_output_signal += y1
self.agent.update_memory(state, action, reward, next_state, done)
episode_reward = episode_reward + reward
if len(self.agent.replay_buffer) > batch_size:
value_loss,policy_loss = self.agent.update_agent(batch_size)
episode_policy_loss += policy_loss
episode_value_loss += value_loss
if done:
if self.config.get("enable_log") is True:
train_dict = {
"Train/reward" : episode_reward,
"Train/policy_loss" : episode_policy_loss,
"Train/value_loss" : episode_value_loss,
"Train/mean_control_signal" : np.mean(total_control_signal),
"Train/mean_output_signal" : np.mean(total_output_signal)}
self.logger.log_tensorboard_train(train_dict, eps)
break
# Print progress
train_print_dict = {
'eps' : eps+1,
'state_reference' : self.env.get_info()['state_ref'][0],
'state' : np.asscalar(self.env.get_info()['state'][0]),
'reward' : episode_reward,
'step' : step,
}
self.logger.print_progress(train_print_dict)
# TODO: Move this to logging class when created
# Saving Model
if self.config.get("enable_log") is True:
self.agent.save(self.logger.log_weight_dir+'/agent_'+str(eps)+'.pth')
# Save best model separately
if episode_reward > best_reward:
best_reward = episode_reward
if self.config.get("enable_log") is True: self.agent.save(
self.logger.log_weight_dir+'/agent_best.pth')
# TODO: Create another class for plotting
# Plot within some episodes
if self.config['plotting']['enable'] is True and \
eps % self.config['plotting']['freq'] == 0 and \
eps != 0:
fig, axs = plt.subplots(3)
axs[0].set_title("Output vs Reference")
axs[0].plot(output_list)
axs[0].plot(reference_list)
axs[1].set_title("Rewards")
axs[1].plot(reward_list)
axs[2].set_title("Control Signals")
axs[2].plot(control_sig_list)
plt.show()
def inference(self, agent_path, inference_config=None):
self.config = self.get_default_inference_config() if inference_config is None else inference_config
for eps in range(self.config['max_episode']):
# Load trained agent weights
self.agent.load(agent_path)
state = self.env.reset()
self.agent.reset()
episode_reward = 0
total_control_signal = 0
total_output_signal = 0
# DEBUG
fig, axs = plt.subplots(3)
output_list = []
reward_list=[]
reference_list=[]
control_sig_list=[]
for step in range(self.config['max_step']):
action = self.agent.apply(state, step)
total_control_signal = total_control_signal + action
next_state, reward, done = self.env.step(action)
y1 = np.asscalar(state[0])
u1 = np.asscalar(action[0])
ref = self.env.get_info().get("state_ref")
output_list.append(y1)
reference_list.append(ref)
reward_list.append(reward)
control_sig_list.append(action)
total_control_signal += u1
total_output_signal += y1
episode_reward = episode_reward + reward
state = next_state
axs[0].set_title("Output vs Reference")
axs[0].plot(output_list)
axs[0].plot(reference_list)
axs[1].set_title("Rewards")
axs[1].plot(reward_list)
axs[2].set_title("Control Signals")
axs[2].plot(control_sig_list)
plt.show()
str1 = "Trial : [ {0} ] is completed with reference : [ {1} ]\nOUT-1 : [ {2} ]\nEpisode Reward : [ {3} ]".format(
eps+1,
self.env.get_info().get("state_ref"),
np.asscalar(self.env.get_info().get("state")[0]),
episode_reward)
print(str1)
print("\n*******************************\n")
|
from locustio.common_utils import read_input_file
from util.project_paths import BAMBOO_USERS, BAMBOO_BUILD_PLANS
class Login:
action_name = 'jmeter_login_and_view_all_builds'
atl_token_pattern = r'name="atlassian-token" content="(.+?)">'
login_body = {
'os_username': '',
'os_password': '',
'os_destination': '',
'save': 'Log in',
'atl_token': '',
}
def bamboo_datasets():
data_sets = dict()
data_sets["users"] = read_input_file(BAMBOO_USERS)
data_sets["build_plans"] = read_input_file(BAMBOO_BUILD_PLANS)
return data_sets
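# --- Usage sketch (illustrative only) -----------------------------------------
# Rough outline of how the Login constants above could be used inside a Locust
# task: pick a user from the dataset, post the login form, then pull the
# atlassian-token out of the response with atl_token_pattern. The endpoint,
# response handling, and `locust` client object are assumptions about the
# surrounding toolkit, not its actual implementation.
#
# import re
# import random
#
# def login_and_view_all_builds(locust):
#     user = random.choice(bamboo_datasets()["users"])          # e.g. (username, password)
#     body = dict(Login.login_body, os_username=user[0], os_password=user[1])
#     resp = locust.post('/userlogin.action', body)             # endpoint is an assumption
#     token = re.search(Login.atl_token_pattern, resp.content.decode('utf-8'))
#     if token:
#         body['atl_token'] = token.group(1)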
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "Hello Flask Test on Docker"
@app.route('/foo')
def foo():
return "Endpoint is Foo"
|
#!/usr/bin/env python
"""
Created on Thu Jul 17 11:44:35 2014
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
from cpab.cpa3d.CpaSpace import CpaSpace
from cpab.cpa3d.Multiscale import Multiscale
from cpab.distributions.CpaCovs import CpaCovs
#from cpab.prob_and_stats.cpa_simple_mean import cpa_simple_mean
from cpab.distributions.draw_from_normal_in_cpa_space import draw_from_normal_in_cpa_space
from cpab.cpa3d.calcs import *
|
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Qunatizer Info """
import io
from enum import Enum
import tensorflow as tf
import numpy as np
from aimet_common.defs import QuantScheme
from aimet_common.quantsim import calculate_delta_offset
from aimet_tensorflow.utils.constants import QuantizeOpIndices
import libpymo
quant_scheme_to_libpymo = {QuantScheme.post_training_tf: libpymo.QuantizationMode.QUANTIZATION_TF,
QuantScheme.post_training_tf_enhanced:
libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
QuantScheme.training_range_learning_with_tf_init:
libpymo.QuantizationMode.QUANTIZATION_TF,
QuantScheme.training_range_learning_with_tf_enhanced_init:
libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED}
class QuantizerType(Enum):
""" Enum for quantize op types """
param = 0
activation = 1
class PickleableTensorQuantizerState:
"""
State variables to be saved while pickling tensor quantizer
"""
def __init__(self, quant_op_name, tensor_quantizer_ref, quantizer_type):
"""
class type to save pickle-able info pertaining to tensor quantizer
:param quant_op_name: name of the quantize op
:param tensor_quantizer_ref: TensorQuantizer reference
:param quantizer_type : param or activation quantizer
"""
self.quant_op_name = quant_op_name
self.quantizer_type = quantizer_type
self.num_channels = 0
if isinstance(tensor_quantizer_ref, list):
self.quant_scheme = tensor_quantizer_ref[0].getQuantScheme()
self.use_strict_symmetric = tensor_quantizer_ref[0].getStrictSymmetric()
self.use_unsigned_symmetric = tensor_quantizer_ref[0].getUnsignedSymmetric()
self.rounding_mode = tensor_quantizer_ref[0].roundingMode
self.is_encoding_valid = tensor_quantizer_ref[0].isEncodingValid
self.num_channels = len(tensor_quantizer_ref)
else:
self.quant_scheme = tensor_quantizer_ref.getQuantScheme()
self.use_strict_symmetric = tensor_quantizer_ref.getStrictSymmetric()
self.use_unsigned_symmetric = tensor_quantizer_ref.getUnsignedSymmetric()
self.rounding_mode = tensor_quantizer_ref.roundingMode
self.is_encoding_valid = tensor_quantizer_ref.isEncodingValid
class QuantizerInfo:
"""
Holds information about a given MO Quantizer object and active session
"""
__slots__ = ['session', 'tensor_quantizer', 'quant_op_name', 'quantizer_type', '_is_encoding_frozen']
def __init__(self, session: tf.compat.v1.Session, tensor_quantizer: libpymo.TensorQuantizer,
quant_op_name: str, quantizer_type: QuantizerType):
self.session = session
self.tensor_quantizer = tensor_quantizer
self.quant_op_name = quant_op_name
self.quantizer_type = quantizer_type
self._is_encoding_frozen = False
def set_variable(self, var_name, value):
"""
sets Quantize op variable with value passed
:param var_name: Name of the variable to be updated
:param value: value to be assigned to the variable
"""
with self.session.graph.as_default():
vars_with_given_name = [var for var in tf.compat.v1.global_variables()
if var.op.name == var_name]
var_to_be_updated = vars_with_given_name[0]
var_to_be_updated.load(value, self.session)
def get_variable_from_op(self, var_index):
"""
Reads variable from Quantize op
:param var_index: Quantize op input param index corresponding to the variable to be read
:return: variable value read from the Quantize op
"""
quantize_op = self.session.graph.get_operation_by_name(self.quant_op_name)
op_var_tensor = quantize_op.inputs[var_index]
return self.session.run(op_var_tensor)
@property
def bitwidth(self) -> int:
"""
Reads bitwidth from the Quantize op
:return: returns the bitwidth associated with the Quantize op
"""
# return the variable value from op
return self.get_variable_from_op(QuantizeOpIndices.bit_width)
@bitwidth.setter
def bitwidth(self, bitwidth: int):
"""
Sets the bitwidth in the Quantize op
:param bitwidth: value to be assigned to bitwidth variable
"""
var_name = self.quant_op_name + '_bit_width'
self.set_variable(var_name, bitwidth)
self._invalidate_tensor_quantizer_encodings()
def _invalidate_tensor_quantizer_encodings(self):
"""
Sets isEncodingValid flag to False for every tensor quantizer
"""
if isinstance(self.tensor_quantizer, list):
for tensor_quantizer in self.tensor_quantizer:
tensor_quantizer.isEncodingValid = False
else:
self.tensor_quantizer.isEncodingValid = False
@property
def use_symmetric_encoding(self) -> bool:
"""
Reads use_symmetric_encoding flag in the Quantize op
:return: use_symmetric_encoding config as bool
"""
return self.get_variable_from_op(QuantizeOpIndices.use_symmetric_encoding)
@use_symmetric_encoding.setter
def use_symmetric_encoding(self, use_symmetric_encoding: bool):
"""
Sets the use_symmetric_encoding flag in the Quantize op
:param use_symmetric_encoding: value to be assigned to use_symmetric_encoding flag
"""
var_name = self.quant_op_name + '_use_symmetric_encoding'
self.set_variable(var_name, use_symmetric_encoding)
self._invalidate_tensor_quantizer_encodings()
@property
def quant_scheme(self) -> libpymo.QuantizationMode:
"""
Reads the quant_scheme associated with the Quantize op
:return: quant_scheme as libpymo.QuantizationMode type
"""
if isinstance(self.tensor_quantizer, list):
return self.tensor_quantizer[0].getQuantScheme()
return self.tensor_quantizer.getQuantScheme()
@quant_scheme.setter
def quant_scheme(self, quant_scheme: libpymo.QuantizationMode):
"""
Sets the quant_scheme associated with the Quantize op
:param quant_scheme: value to be assigned to quant_scheme param in Quantizer
"""
if isinstance(self.tensor_quantizer, list):
for tensor_quantizer in self.tensor_quantizer:
tensor_quantizer.setQuantScheme(quant_scheme_to_libpymo[quant_scheme])
else:
self.tensor_quantizer.setQuantScheme(quant_scheme_to_libpymo[quant_scheme])
@property
def rounding_mode(self) -> libpymo.RoundingMode:
"""
Reads rounding_mode associated with the Quantize op
:return: rounding_mode value as libpymo.RoundingMode type
"""
if isinstance(self.tensor_quantizer, list):
return self.tensor_quantizer[0].roundingMode
return self.tensor_quantizer.roundingMode
@rounding_mode.setter
def rounding_mode(self, rounding_mode: libpymo.RoundingMode):
"""
Sets the rounding_mode associated with the Quantize op
:param rounding_mode: value to be assigned to rounding_mode param in Quantizer
"""
if isinstance(self.tensor_quantizer, list):
for tensor_quantizer in self.tensor_quantizer:
tensor_quantizer.isEncodingValid = False
tensor_quantizer.roundingMode = rounding_mode
else:
self.tensor_quantizer.isEncodingValid = False
self.tensor_quantizer.roundingMode = rounding_mode
@property
def use_strict_symmetric(self) -> bool:
"""
Reads useStrictSymmetric config from Tensor Quantizer
:return: True if strict symmetric mode is to be used, False otherwise
"""
if isinstance(self.tensor_quantizer, list):
return self.tensor_quantizer[0].getStrictSymmetric()
return self.tensor_quantizer.getStrictSymmetric()
@use_strict_symmetric.setter
def use_strict_symmetric(self, use_strict_symmetric: bool):
"""
Sets the useStrictSymmetric associated with the Tensor Quantizer
:param use_strict_symmetric: True if strict symmetric mode is to be used, False otherwise
"""
if isinstance(self.tensor_quantizer, list):
for tensor_quantizer in self.tensor_quantizer:
tensor_quantizer.setStrictSymmetric(use_strict_symmetric)
else:
self.tensor_quantizer.setStrictSymmetric(use_strict_symmetric)
@property
def use_unsigned_symmetric(self) -> bool:
"""
Reads useUnsignedSymmetric config from Tensor Quantizer
:return: True if unsigned symmetric mode is to be used, False otherwise
"""
if isinstance(self.tensor_quantizer, list):
return self.tensor_quantizer[0].getUnsignedSymmetric()
return self.tensor_quantizer.getUnsignedSymmetric()
@use_unsigned_symmetric.setter
def use_unsigned_symmetric(self, use_unsigned_symmetric: bool):
"""
Sets the useUnsignedSymmetric associated with the Tensor Quantizer
:param use_unsigned_symmetric: True if unsigned symmetric mode is to be used, False otherwise
"""
if isinstance(self.tensor_quantizer, list):
for tensor_quantizer in self.tensor_quantizer:
tensor_quantizer.setUnsignedSymmetric(use_unsigned_symmetric)
else:
self.tensor_quantizer.setUnsignedSymmetric(use_unsigned_symmetric)
def get_op_mode(self) -> libpymo.TensorQuantizerOpMode:
"""
Reads op mode variable from Quantize op
:return: Op mode as pymo.TensorQuantizerOpMode type
"""
op = self.session.graph.get_operation_by_name(self.quant_op_name)
op_mode_tensor = op.inputs[QuantizeOpIndices.op_mode]
return self.session.run(op_mode_tensor)
def set_op_mode(self, op_mode: libpymo.TensorQuantizerOpMode):
"""
Set op mode for Quantize op
:param op_mode: Op mode as pymo.TensorQuantizerOpMode type
"""
if not self._is_encoding_frozen:
var_name = self.quant_op_name + '_op_mode'
self.set_variable(var_name, int(op_mode))
@property
def enabled(self) -> bool:
"""
Reads Quantize op flag that indicates if op is enabled or disabled
:return: bool
"""
is_enabled = True
# return the variable value from op
if self.get_op_mode() == int(libpymo.TensorQuantizerOpMode.passThrough):
is_enabled = False
return is_enabled
@enabled.setter
def enabled(self, enabled: bool):
"""
Enables or disables given Quantize op if enabled is False
:param enabled: boolean flag to indicate enable or disable
"""
# if disable is requested on the op and this op was not already in "passThrough" mode,
# we will disable the op by marking it as "passThrough"
if not enabled and self.get_op_mode() != int(libpymo.TensorQuantizerOpMode.passThrough):
op_mode = int(libpymo.TensorQuantizerOpMode.passThrough)
# update the isEncodingValid state to False
self._invalidate_tensor_quantizer_encodings()
# if enable is requested and this op was previously disabled
# we enable the op by setting the initial op_mode that depends on the Quantizer type
elif enabled and self.get_op_mode() == int(libpymo.TensorQuantizerOpMode.passThrough):
if self.quantizer_type is QuantizerType.param:
op_mode = int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize)
elif self.quantizer_type is QuantizerType.activation:
op_mode = int(libpymo.TensorQuantizerOpMode.updateStats)
# update the isEncodingValid state to False
self._invalidate_tensor_quantizer_encodings()
var_name = self.quant_op_name + '_op_mode'
self.set_variable(var_name, op_mode)
def compute_encoding(self, bitwidth: int, use_symmetric_encodings: bool) -> libpymo.TfEncoding:
"""
Compute the quantization encoding for this tensor
:param bitwidth: Quantization bitwidth
:param use_symmetric_encodings: True if symmetric encoding is used. False otherwise.
:return: Encoding
"""
if not self._is_encoding_frozen:
encoding = []
if isinstance(self.tensor_quantizer, list):
for tensor_quantizer in self.tensor_quantizer:
encoding_val = tensor_quantizer.computeEncoding(bitwidth, use_symmetric_encodings, False, False)
if encoding_val:
tensor_quantizer.isEncodingValid = True
encoding.append(encoding_val)
else:
encoding.append(self.tensor_quantizer.computeEncoding(bitwidth, use_symmetric_encodings, False, False))
encoding = encoding[0]
else:
encoding = self.get_encoding()
return encoding
def set_encoding(self, encoding: libpymo.TfEncoding):
"""
Set encoding min and max variable and update isEncodingValid state to True
:param encoding: Encoding
"""
if not self._is_encoding_frozen:
encoding_min_var = self.quant_op_name + '_encoding_min'
encoding_max_var = self.quant_op_name + '_encoding_max'
# update the isEncodingValid state to True as well as encoding variable in the TF op
if isinstance(self.tensor_quantizer, list):
encoding_min = []
encoding_max = []
for index, tensor_quantizer in enumerate(self.tensor_quantizer):
tensor_quantizer.isEncodingValid = True
encoding_min.append(encoding[index].min)
encoding_max.append(encoding[index].max)
self.set_variable(encoding_min_var, encoding_min)
self.set_variable(encoding_max_var, encoding_max)
else:
self.tensor_quantizer.isEncodingValid = True
self.set_variable(encoding_min_var, encoding.min)
self.set_variable(encoding_max_var, encoding.max)
def get_encoding(self) -> libpymo.TfEncoding:
"""
Get encoding if valid else raise error
:return: encoding
"""
def _create_encoding_object(min_val, max_val, bitwidth):
""" Creates a libpymo encoding object """
encoding = libpymo.TfEncoding()
encoding.min = min_val
encoding.max = max_val
encoding.bw = bitwidth
encoding.delta, encoding.offset = calculate_delta_offset(min_val, max_val, bitwidth)
return encoding
if self.is_encoding_valid():
encoding_min = self.get_variable_from_op(QuantizeOpIndices.encoding_min)
encoding_max = self.get_variable_from_op(QuantizeOpIndices.encoding_max)
bitwidth = self.bitwidth
# If per channel quantization is enabled then we need to create a list of TF encoding objects
if isinstance(encoding_min, np.ndarray):
encoding = []
for i, encoding_min_val in enumerate(encoding_min):
_encoding = _create_encoding_object(encoding_min_val, encoding_max[i], bitwidth)
encoding.append(_encoding)
else:
encoding = _create_encoding_object(encoding_min, encoding_max, bitwidth)
else:
raise AssertionError('Compute encoding or Set encoding must be invoked before')
return encoding
def freeze_encoding(self):
"""
Set is_encoding_frozen flag to True
"""
self._is_encoding_frozen = True
def set_and_freeze_encoding_and_op_mode(self, encoding: libpymo.TfEncoding, op_mode: libpymo.TensorQuantizerOpMode):
"""
Set encoding min and max variable, op_mode and freezes it
:param encoding: Encoding
:param op_mode: Op mode as pymo.TensorQuantizerOpMode type
"""
self.set_encoding(encoding)
self.set_op_mode(op_mode)
self.freeze_encoding()
def is_encoding_valid(self) -> bool:
"""
Return bool if encoding is valid or not
:return: Boolean
"""
if isinstance(self.tensor_quantizer, list):
return self.tensor_quantizer[0].isEncodingValid
return self.tensor_quantizer.isEncodingValid
def __getstate__(self):
# convert tensor quantizer state to pickle-able form
state = PickleableTensorQuantizerState(self.quant_op_name,
self.tensor_quantizer,
self.quantizer_type)
return state
def __setstate__(self, state):
self.session = None
# Create the cpp tensor quantizer reference
self.quant_op_name = state.quant_op_name
self.quantizer_type = state.quantizer_type
# If per channel quantization is enabled for a parameter
if state.num_channels > 0:
self.tensor_quantizer = []
for _ in range(state.num_channels):
tensor_quantizer = libpymo.TensorQuantizer(state.quant_scheme,
state.rounding_mode)
tensor_quantizer.setStrictSymmetric(state.use_strict_symmetric)
tensor_quantizer.setUnsignedSymmetric(state.use_unsigned_symmetric)
tensor_quantizer.isEncodingValid = state.is_encoding_valid
self.tensor_quantizer.append(tensor_quantizer)
else:
self.tensor_quantizer = libpymo.TensorQuantizer(state.quant_scheme,
state.rounding_mode)
self.tensor_quantizer.setStrictSymmetric(state.use_strict_symmetric)
self.tensor_quantizer.setUnsignedSymmetric(state.use_unsigned_symmetric)
self.tensor_quantizer.isEncodingValid = state.is_encoding_valid
def __str__(self):
stream = io.StringIO(newline='\n')
stream.write('Quantizer Info:\n')
stream.write(' quantize_op_name:{}\n quantizer_type:{}\n bitwidth={}\n use_symmetric_encoding={}\n'
' round_mode={}\n quant_scheme={}\n use_strict_symmetric={}\n use_unsigned_symmetric={}\n'
' enabled:{}\n'.format(self.quant_op_name,
self.quantizer_type,
self.bitwidth,
self.use_symmetric_encoding,
self.rounding_mode,
self.quant_scheme,
self.use_strict_symmetric,
self.use_unsigned_symmetric,
self.enabled))
return stream.getvalue()
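# --- Usage sketch (illustrative only) -----------------------------------------
# Typical flow against an existing QuantizerInfo instance. `quantizer_info` is
# assumed to come from a quantization simulation object elsewhere in AIMET and
# is not constructed here; only methods defined in this class are used.
#
# quantizer_info.bitwidth = 8
# quantizer_info.use_symmetric_encoding = False
# encoding = quantizer_info.compute_encoding(bitwidth=8, use_symmetric_encodings=False)
# quantizer_info.set_encoding(encoding)
# quantizer_info.freeze_encoding()   # later set_encoding/set_op_mode calls become no-ops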
|
import sys
sys.stdout = open("out.txt", "w")
def xyz():
print("test")
xyz()
sys.stdout.close() |
import collections, os
import re
def parse():
files = os.listdir("/Users/kaladhar/Desktop/casey/")
contents = ""
for file in files:
# read every file in the folder; the with-block closes each handle
with open("/Users/kaladhar/Desktop/casey/" + file, "r") as f:
contents = contents + f.read()
newContents = contents.replace("\r\n", " ")
pattern = re.compile(r'airport', re.IGNORECASE)
freqs = collections.Counter(contents.split())
#print(freqs)
print(len(re.findall(pattern, contents)))
print(re.findall(pattern, contents))
if __name__ == '__main__':
parse() |
import os
import unittest
from pathlib import Path
import paramak
import pytest
class TestShape(unittest.TestCase):
def test_shape_default_properties(self):
"""Creates a Shape object and checks that the points attribute has
a default of None."""
test_shape = paramak.Shape()
assert test_shape.points is None
def test_azimuth_placement_angle_getting_setting(self):
"""Checks that the azimuth_placement_angle of a Shape can be
changed to a single value or iterable."""
test_shape = paramak.Shape()
assert test_shape.azimuth_placement_angle == 0
test_shape.azimuth_placement_angle = 180
assert test_shape.azimuth_placement_angle == 180
test_shape.azimuth_placement_angle = [0, 90, 180, 270]
assert test_shape.azimuth_placement_angle == [0, 90, 180, 270]
def test_incorrect_color_values(self):
"""Checks that an error is raised when the color of a shape is
defined as an invalid string."""
def incorrect_color_string():
paramak.Shape(color=('1', '0', '1'))
self.assertRaises(
ValueError,
incorrect_color_string
)
def test_incorrect_workplane(self):
"""Creates Shape object with incorrect workplane and checks ValueError
is raised."""
test_shape = paramak.Shape()
def incorrect_workplane():
"""Creates Shape object with unacceptable workplane."""
test_shape.workplane = "AB"
self.assertRaises(ValueError, incorrect_workplane)
def test_incorrect_points(self):
"""Creates Shape objects and checks errors are raised correctly when
specifying points."""
test_shape = paramak.Shape()
def incorrect_points_end_point_is_start_point():
"""Checks ValueError is raised when the start and end points are
the same."""
test_shape.points = [(0, 200), (200, 100), (0, 0), (0, 200)]
self.assertRaises(
ValueError,
incorrect_points_end_point_is_start_point)
def incorrect_points_missing_z_value():
"""Checks ValueError is raised when a point is missing a z
value."""
test_shape.points = [(0, 200), (200), (0, 0), (0, 50)]
self.assertRaises(ValueError, incorrect_points_missing_z_value)
def incorrect_points_not_a_list():
"""Checks ValueError is raised when the points are not a list."""
test_shape.points = (0, 0), (0, 20), (20, 20), (20, 0)
self.assertRaises(ValueError, incorrect_points_not_a_list)
def incorrect_points_wrong_number_of_entries():
"""Checks ValueError is raised when individual points dont have 2
or 3 entries."""
test_shape.points = [(0, 0), (0, 20), (20, 20, 20, 20)]
self.assertRaises(ValueError, incorrect_points_wrong_number_of_entries)
def incorrect_x_point_value_type():
"""Checks ValueError is raised when X point is not a number."""
test_shape.points = [("string", 0), (0, 20), (20, 20)]
self.assertRaises(ValueError, incorrect_x_point_value_type)
def incorrect_y_point_value_type():
"""Checks ValueError is raised when Y point is not a number."""
test_shape.points = [(0, "string"), (0, 20), (20, 20)]
self.assertRaises(ValueError, incorrect_y_point_value_type)
def test_create_limits(self):
"""Creates a Shape object and checks that the create_limits function
returns the expected values for x_min, x_max, z_min and z_max."""
test_shape = paramak.Shape()
test_shape.points = [
(0, 0),
(0, 10),
(0, 20),
(10, 20),
(20, 20),
(20, 10),
(20, 0),
(10, 0),
]
assert test_shape.create_limits() == (0.0, 20.0, 0.0, 20.0)
# test with a component which has a find_points method
test_shape2 = paramak.Plasma()
test_shape2.create_limits()
assert test_shape2.x_min is not None
def test_create_limits_error(self):
"""Checks error is raised when no points are given."""
test_shape = paramak.Shape()
def limits():
test_shape.create_limits()
self.assertRaises(ValueError, limits)
def test_initial_solid_construction(self):
"""Creates a shape and checks that a cadquery solid with a unique hash
value is created when .solid is called."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
assert test_shape.hash_value is None
assert test_shape.solid is not None
assert type(test_shape.solid).__name__ == "Workplane"
assert test_shape.hash_value is not None
def test_solid_return(self):
"""Checks that the same cadquery solid with the same unique hash value
is returned when shape.solid is called again after no changes have been
made to the Shape."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
assert test_shape.solid is not None
initial_hash_value = test_shape.hash_value
assert test_shape.solid is not None
assert initial_hash_value == test_shape.hash_value
def test_conditional_solid_reconstruction(self):
"""Checks that a new cadquery solid with a new unique hash value is
constructed when shape.solid is called after changes to the Shape have
been made."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360
)
assert test_shape.solid is not None
assert test_shape.hash_value is not None
initial_hash_value = test_shape.hash_value
test_shape.rotation_angle = 180
assert test_shape.solid is not None
assert test_shape.hash_value is not None
assert initial_hash_value != test_shape.hash_value
def test_hash_value_update(self):
"""Checks that the hash value of a Shape is not updated until a new
cadquery solid has been created."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360
)
test_shape.solid
assert test_shape.hash_value is not None
initial_hash_value = test_shape.hash_value
test_shape.rotation_angle = 180
assert test_shape.hash_value == initial_hash_value
test_shape.solid
assert test_shape.hash_value != initial_hash_value
def test_material_tag_warning(self):
"""Checks that a warning is raised when a Shape has a material tag >
28 characters."""
test_shape = paramak.Shape()
def warning_material_tag():
test_shape.material_tag = "abcdefghijklmnopqrstuvwxyz12345"
self.assertWarns(UserWarning, warning_material_tag)
def test_invalid_material_tag(self):
"""Checks a ValueError is raised when a Shape has an invalid material
tag."""
test_shape = paramak.Shape()
def invalid_material_tag():
test_shape.material_tag = 123
self.assertRaises(ValueError, invalid_material_tag)
def test_export_html(self):
"""Checks a plotly figure of the Shape is exported by the export_html
method with the correct filename with RGB and RGBA colors."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
os.system("rm filename.html")
test_shape.export_html('filename')
assert Path("filename.html").exists() is True
os.system("rm filename.html")
test_shape.color = (1, 0, 0, 0.5)
test_shape.export_html('filename')
assert Path("filename.html").exists() is True
os.system("rm filename.html")
def test_export_html_view_planes(self):
"""Checks a plotly figure of the Shape is exported by the export_html
method with a range of different view_plane options."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=180
)
for view_plane in ['XZ', 'XY', 'YZ', 'YX', 'ZY', 'ZX', 'RZ', 'XYZ']:
os.system("rm *.html")
test_shape.export_html(
filename='filename',
view_plane=view_plane
)
assert Path("filename.html").exists() is True
def test_export_html_with_points_None(self):
"""Checks that an error is raised when points is None and export_html
"""
test_shape = paramak.Shape()
def export():
test_shape.export_html("out.html")
self.assertRaises(ValueError, export)
def test_export_html_with_wire_None(self):
"""Checks that an error is raised when wire is None and export_html
"""
test_shape = paramak.Shape(points=[(0, 0), (0, 20), (20, 20), (20, 0)])
test_shape.wire = None
def export():
test_shape.export_html("out.html")
self.assertRaises(ValueError, export)
def test_invalid_stp_filename(self):
"""Checks ValueError is raised when invalid stp filenames are used."""
def invalid_filename_suffix():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stp_filename="filename.invalid_suffix"
)
self.assertRaises(ValueError, invalid_filename_suffix)
def invalid_filename_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stp_filename=123456
)
self.assertRaises(ValueError, invalid_filename_type)
def test_invalid_stl_filename(self):
"""Checks ValueError is raised when invalid stl filenames are used."""
def invalid_filename_suffix():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stl_filename="filename.invalid_suffix"
)
self.assertRaises(ValueError, invalid_filename_suffix)
def invalid_filename_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stl_filename=123456
)
self.assertRaises(ValueError, invalid_filename_type)
def test_invalid_color(self):
"""Checks ValueError is raised when invalid colors are used."""
def invalid_color_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
color=255
)
self.assertRaises(ValueError, invalid_color_type)
def invalid_color_length():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
color=(255, 255, 255, 1, 1)
)
self.assertRaises(ValueError, invalid_color_length)
def test_volumes_add_up_to_total_volume_Compound(self):
"""Checks the volume and volumes attributes are correct types
and that the volumes sum to equal the volume for a Compound."""
test_shape = paramak.PoloidalFieldCoilSet(
heights=[10, 10],
widths=[20, 20],
center_points=[(15, 15), (50, 50)]
)
assert isinstance(test_shape.volume, float)
assert isinstance(test_shape.volumes, list)
assert isinstance(test_shape.volumes[0], float)
assert isinstance(test_shape.volumes[1], float)
assert len(test_shape.volumes) == 2
assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)
def test_volumes_add_up_to_total_volume(self):
"""Checks the volume and volumes attributes are correct types
and that the volumes sum to equal the volume."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50
)
assert isinstance(test_shape.volume, float)
assert isinstance(test_shape.volumes, list)
assert isinstance(test_shape.volumes[0], float)
assert len(test_shape.volumes) == 1
assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)
def test_areas_add_up_to_total_area_Compound(self):
"""Checks the area and areas attributes are correct types
and that the areas sum to equal the area for a Compound."""
test_shape = paramak.PoloidalFieldCoilSet(
heights=[10, 10],
widths=[20, 20],
center_points=[(15, 15), (50, 50)]
)
assert isinstance(test_shape.area, float)
assert isinstance(test_shape.areas, list)
assert isinstance(test_shape.areas[0], float)
assert isinstance(test_shape.areas[1], float)
assert isinstance(test_shape.areas[2], float)
assert isinstance(test_shape.areas[3], float)
assert isinstance(test_shape.areas[4], float)
assert isinstance(test_shape.areas[5], float)
assert isinstance(test_shape.areas[6], float)
assert isinstance(test_shape.areas[7], float)
assert len(test_shape.areas) == 8
assert sum(test_shape.areas) == pytest.approx(test_shape.area)
def test_areas_add_up_to_total_area(self):
"""Checks the area and areas attributes are correct types
and that the areas sum to equal the area."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50
)
assert isinstance(test_shape.area, float)
assert isinstance(test_shape.areas, list)
assert isinstance(test_shape.areas[0], float)
assert isinstance(test_shape.areas[1], float)
assert isinstance(test_shape.areas[2], float)
assert isinstance(test_shape.areas[3], float)
assert len(test_shape.areas) == 4
assert sum(test_shape.areas) == pytest.approx(test_shape.area)
def test_create_patch_error(self):
"""Checks _create_patch raises a ValueError when points is None."""
test_shape = paramak.Shape()
def patch():
test_shape._create_patch()
self.assertRaises(ValueError, patch)
def test_create_patch_alpha(self):
"""Checks _create_patch returns a patch when alpha is given."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50,
color=(0.5, 0.5, 0.5, 0.1)
)
assert test_shape._create_patch() is not None
def test_azimuth_placement_angle_error(self):
"""Checks an error is raised when invalid value for
azimuth_placement_angle is set.
"""
test_shape = paramak.Shape()
def angle_str():
test_shape.azimuth_placement_angle = "coucou"
def angle_str_in_Iterable():
test_shape.azimuth_placement_angle = [0, "coucou"]
self.assertRaises(ValueError, angle_str)
self.assertRaises(ValueError, angle_str_in_Iterable)
def test_name_error(self):
"""Checks an error is raised when invalid value for name is set."""
test_shape = paramak.Shape()
def name_float():
test_shape.name = 2.0
def name_int():
test_shape.name = 1
def name_list():
test_shape.name = ['coucou']
self.assertRaises(ValueError, name_float)
self.assertRaises(ValueError, name_int)
self.assertRaises(ValueError, name_list)
def test_tet_mesh_error(self):
"""Checks an error is raised when invalid value for tet_mesh is set.
"""
test_shape = paramak.Shape()
def tet_mesh_float():
test_shape.tet_mesh = 2.0
def tet_mesh_int():
test_shape.tet_mesh = 1
def tet_mesh_list():
test_shape.tet_mesh = ['coucou']
self.assertRaises(ValueError, tet_mesh_float)
self.assertRaises(ValueError, tet_mesh_int)
self.assertRaises(ValueError, tet_mesh_list)
def test_get_rotation_axis(self):
"""Creates a shape and test the expected rotation_axis is the correct
values for several cases
"""
shape = paramak.Shape()
expected_dict = {
"X": [(-1, 0, 0), (1, 0, 0)],
"-X": [(1, 0, 0), (-1, 0, 0)],
"Y": [(0, -1, 0), (0, 1, 0)],
"-Y": [(0, 1, 0), (0, -1, 0)],
"Z": [(0, 0, -1), (0, 0, 1)],
"-Z": [(0, 0, 1), (0, 0, -1)],
}
# test with axis from string
for axis in expected_dict:
shape.rotation_axis = axis
assert shape.get_rotation_axis()[0] == expected_dict[axis]
assert shape.get_rotation_axis()[1] == axis
# test with axis from list of two points
expected_axis = [(-1, -2, -3), (1, 4, 5)]
shape.rotation_axis = expected_axis
assert shape.get_rotation_axis()[0] == expected_axis
assert shape.get_rotation_axis()[1] == "custom_axis"
# test with axis from workplane
shape.rotation_axis = None
workplanes = ["XY", "XZ", "YZ"]
expected_axis = ["Y", "Z", "Z"]
for wp, axis in zip(workplanes, expected_axis):
shape.workplane = wp
assert shape.get_rotation_axis()[0] == expected_dict[axis]
assert shape.get_rotation_axis()[1] == axis
# test with axis from path_workplane
for wp, axis in zip(workplanes, expected_axis):
shape.path_workplane = wp
assert shape.get_rotation_axis()[0] == expected_dict[axis]
assert shape.get_rotation_axis()[1] == axis
def test_rotation_axis_error(self):
"""Checks errors are raised when incorrect values of rotation_axis are
set
"""
incorrect_values = [
"coucou",
2,
2.2,
[(1, 1, 1), 'coucou'],
[(1, 1, 1), 1],
[(1, 1, 1), 1.0],
[(1, 1, 1), (1, 1, 1)],
[(1, 1, 1), (1, 0, 1, 2)],
[(1, 1, 1, 2), (1, 0, 2)],
[(1, 1, 2), [1, 0, 2]],
[(1, 1, 1)],
[(1, 1, 1), (1, 'coucou', 1)],
[(1, 1, 1), (1, 0, 1), (1, 2, 3)],
]
shape = paramak.Shape()
def set_value():
shape.rotation_axis = incorrect_values[i]
for i in range(len(incorrect_values)):
self.assertRaises(ValueError, set_value)
if __name__ == "__main__":
unittest.main()
|
# from dcor import distance_correlation as dc
import numpy as np
import dcor
def distance_corr(X, Y):
"""
Computes the distance correlation between X and Y.
Taken from pypi package dcor based on the paper:
*Measuring and testing dependence by correlation of distances*
by Székely et al. (2007)
Parameters
----------
X : numpy array like object where the rows correspond to the samples
and the columns to features.
Y : numpy array like, of same size as X and one single output.
Returns
-------
numpy array of size the number of input features of X
which holds the distance correlation between each feature
and Y.
"""
n, d = X.shape
ny, nd = Y.shape
assert n == ny
assert nd == 1
dc_stats = np.zeros((d, 1))
for i in range(d):
dc_stats[i] = dcor.distance_correlation(X[:, i], Y[:, 0])
return dc_stats
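# Example (sketch): synthetic data with one informative feature. The shapes,
# seed, and noise level below are illustrative only and not tied to any dataset
# from this project.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 3))
    # Y depends (almost) linearly on the first feature only
    Y = 2.0 * X[:, [0]] + rng.normal(scale=0.1, size=(200, 1))
    print(distance_corr(X, Y))  # first entry close to 1, the others near 0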
|
import re
from typing import List, Union
camel_to_snake_regex = re.compile(r'(?<!^)(?=[A-Z])')
def camel_to_snake(word: str) -> str:
return camel_to_snake_regex.sub('_', word).lower()
def find_z_value_entries(lines: List[str]) -> List[int]:
"""Find the strings which contains a z-value entry.
"""
z_value_idxs = []
for idx, line in enumerate(lines):
if line.startswith('[ZValue ='):
z_value_idxs.append(idx)
return z_value_idxs
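# Example (sketch): both helpers above on a few hand-written mdoc-style lines;
# the strings here are illustrative, not taken from a real .mdoc file.
#
# >>> camel_to_snake('PixelSpacing')
# 'pixel_spacing'
# >>> find_z_value_entries(['PixelSpacing = 1.0', '[ZValue = 0]', '[ZValue = 1]'])
# [1, 2]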
def find_title_entries(lines: List[str]) -> List[int]:
"""Find mdoc title entries in a list of strings"""
title_idxs = []
for idx, line in enumerate(lines):
if line.startswith('[T ='):
title_idxs.append(idx)
return title_idxs |
# Copyright (C) 2019 Greenweaves Software Limited
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.
def create_rule(N):
n = N
rule = []
while len(rule)<8:
n,r = divmod(n,2)
rule.append(r)
return rule
def execute_rule(state,rule):
extended_state = [0,0] + state + [0,0]
new_state = []
for i in range(len(state)+2):
x = extended_state[i:i+3]
input = 2*(2*x[0] + x[1]) + x[2]
new_state.append(rule[input])
return new_state
def convert(state):
result = 0
for i in state:
result = 2 * result + i
return result
def project(state,table=[0,1,1,0]):
return [table[2*state[i]+state[i+1]] for i in range(0,len(state),2)]
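# Worked example (sketch): rule 30 applied to a single live cell. create_rule
# stores the 8-bit Wolfram rule table least-significant bit first, so rule[n] is
# the output for the neighbourhood whose binary value is n.
#
# >>> rule30 = create_rule(30)      # [0, 1, 1, 1, 1, 0, 0, 0]
# >>> execute_rule([1], rule30)     # neighbourhoods 001, 010, 100 all map to 1
# [1, 1, 1]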
if __name__=='__main__':
print (create_rule(105))
#print (create_rule(110))
#print (create_rule(137))
#r = create_rule(28)
#state = [1]
#for i in range(25):
#state = execute_rule(state,r)
#print (convert(state)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 theloop, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""loopchain docker process log parser"""
import json
import sys
print(f"loopchain log parser")
if len(sys.argv) < 3:
exit('need params input file output file')
input_file = sys.argv[1]
output_file = sys.argv[2]
print(f"input_file({input_file})")
f = open("./" + input_file, 'r')
out = open("./" + output_file, "a")
while True:
line = f.readline()
if not line:
break
line_elements = line.split('\t')
json_data = json.loads(line_elements[2])
try:
log_type = json_data['log'].split(" ")[3]
# TODO: take options via getopt and add a mode that replays the log on screen
if log_type in ["DEBUG", "INFO", "SPAM", "WARNING"]:
# print(f"{log_type}")
pass
else: # print ERROR and Exception
print(f"{json_data['log']}")
except IndexError:
# print(f"there is no loopchain log: ({line})")
pass
out.write(json_data['log'] + '\n')
f.close()
out.close()
|
# Copyright (c) 2015 Pontianak
"""
9.4 Write a program to read through the mbox-short.txt and figure out who has sent the greatest number of mail messages. The program looks for 'From ' lines and takes the second word of those lines as the person who sent the mail. The program creates a Python dictionary that maps the sender's mail address to a count of the number of times they appear in the file. After the dictionary is produced, the program reads through the dictionary using a maximum loop to find the most prolific committer.
"""
name = raw_input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
eMailDict = dict()
largestName = None
largestCount = None
for line in handle :
if line.startswith("From:") : continue
if line.startswith("From") :
line = line.split()
tempHandle = line[1]
eMailDict[tempHandle] = eMailDict.get(tempHandle,0) + 1
for counter in eMailDict :
if largestCount == None or largestCount < eMailDict[counter] :
largestName = counter
largestCount = eMailDict[counter]
print largestName , largestCount
|
import logging
import os
import sys
import tempfile
from mrjob.util import log_to_stream
from samplecdxjob import SampleCDXJob
from seqfileutils import make_text_null_seq
SEQ_FILE = 'splits.seq'
SPL_FILE = 'splits.txt'
LOG = logging.getLogger('SampleCDXJob')
LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s: %(message)s"
def run_sample_job():
job = SampleCDXJob(args=sys.argv[1:])
verbose = '--verbose' in sys.argv[1:]
log_to_stream(format=LOG_FORMAT, name='SampleCDXJob', debug=verbose)
log_to_stream(format=LOG_FORMAT, name='mrjob', debug=verbose)
log_to_stream(format=LOG_FORMAT, name='__main__', debug=verbose)
with job.make_runner() as runner:
runner.run()
if os.path.isfile(SEQ_FILE):
os.remove(SEQ_FILE)
if os.path.isfile(SPL_FILE):
os.remove(SPL_FILE)
# dump streaming output to file
with open(SPL_FILE, 'wb') as fh:
for x in runner.cat_output():
fh.write(x)
# convert streaming output to sequence file
count = make_text_null_seq(SEQ_FILE, open(SPL_FILE))
if job.options.splitfile and hasattr(runner.fs, 'make_s3_key'):
key = runner.fs.make_s3_key(job.options.splitfile)
key.set_contents_from_filename(SEQ_FILE)
def main():
run_sample_job()
if __name__ == "__main__":
main()
|
from core import db_conn, db_log
from psycopg2 import DatabaseError
from core.exceptions import DBException, NotExistingEntityException
import json
def insert_va_quota_status(kubeconfig, vertical_application_slice_id: str):
# Create a new entry <uuid, kubeconfig> in the DB for a vertical application quota
command = """
INSERT INTO vertical_application_quota_status(vertical_application_quota_kubeconfig,
vertical_application_slice_id) VALUES (%s, %s) RETURNING vertical_application_quota_id
"""
try:
cur = db_conn.cursor()
cur.execute(command, (json.dumps(kubeconfig), vertical_application_slice_id))
va_quota_id = cur.fetchone()[0]
cur.close()
db_conn.commit()
db_log.info('Created new va_quota_status with ID %s', va_quota_id)
return va_quota_id
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while creating vertical_application_quota_status: ' + str(error))
def get_va_quota_status():
# Retrieve all va_quota_status entries from the DB
command = """SELECT * FROM vertical_application_quota_status"""
try:
cur = db_conn.cursor()
cur.execute(command)
va_quota_status = cur.fetchall()
cur.close()
return va_quota_status
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while fetching vertical_application_quota_status: ' + str(error))
def get_va_quota_status_by_id(vertical_application_quota_id: str):
# Retrieve va_quota_status entry by vertical_application_quota_id (PRIMARY KEY)
command = """SELECT * FROM vertical_application_quota_status WHERE vertical_application_quota_id = (%s)"""
try:
cur = db_conn.cursor()
cur.execute(command, (vertical_application_quota_id,))
va_quota_status = cur.fetchone()
cur.close()
if va_quota_status is None:
raise NotExistingEntityException('va_quota_status with ID ' +
vertical_application_quota_id + ' not found.')
return va_quota_status
except DatabaseError as error:
db_log.error(str(error))
raise DBException('Error while fetching vertical_application_quota_status: ' + str(error))
def get_va_quota_status_by_vas_id(vertical_application_slice_id: str):
# Retrieve all va_quota_status linked to the given vertical_application_slice_id
command = """SELECT * FROM vertical_application_quota_status WHERE vertical_application_slice_id = (%s)"""
try:
cur = db_conn.cursor()
cur.execute(command, (vertical_application_slice_id, ))
va_quota_status = cur.fetchall()
cur.close()
return va_quota_status
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while fetching vertical_application_quota_status: ' + str(error))
def insert_network_slice_status(network_slice_id: str, network_slice_status: str):
# Create a new entry <network_slice_id, network_slice_status> in the DB for a network slice
command = """INSERT INTO network_slice_status(network_slice_id, network_slice_status) VALUES (%s, %s)"""
try:
cur = db_conn.cursor()
cur.execute(command, (network_slice_id, network_slice_status))
cur.close()
db_conn.commit()
db_log.info('Created new network_slice_status with ID %s', network_slice_id)
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while creating network_slice_status: ' + str(error))
def update_network_slice_status(network_slice_id: str, network_slice_status: str):
# Update a network_slice_status status
command = """UPDATE network_slice_status SET network_slice_status = %s WHERE network_slice_id = %s"""
try:
cur = db_conn.cursor()
cur.execute(command, (network_slice_status, network_slice_id))
cur.close()
db_conn.commit()
db_log.info('Updated network_slice_status %s with status %s', network_slice_id, network_slice_status)
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while updating network_slice_status: ' + str(error))
def get_network_slice_status():
# Retrieve all network_slice_status entries from the DB
command = """SELECT * FROM network_slice_status"""
try:
cur = db_conn.cursor()
cur.execute(command)
network_slice_status = cur.fetchall()
cur.close()
return network_slice_status
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while fetching network_slice_status: ' + str(error))
def get_network_slice_status_by_id(network_slice_id: str):
# Retrieve network_slice_status entry by network_slice_id (PRIMARY KEY)
command = """SELECT * FROM network_slice_status WHERE network_slice_id = (%s)"""
try:
cur = db_conn.cursor()
cur.execute(command, (network_slice_id,))
network_slice_status = cur.fetchone()
cur.close()
if network_slice_status is None:
raise NotExistingEntityException('network_slice_status with ID ' + network_slice_id + ' not found.')
return network_slice_status
except DatabaseError as error:
db_log.error(str(error))
raise DBException('Error while fetching network_slice_status: ' + str(error))
def insert_va_status(vertical_application_slice_status: str, intent):
# Create a new entry <uuid, vertical_application_slice_status, intent> in the DB for a vertical application status
command = """
INSERT INTO vertical_application_slice_status(vertical_application_slice_status, intent)
VALUES (%s, %s) RETURNING vertical_application_slice_id
"""
try:
cur = db_conn.cursor()
cur.execute(command, (vertical_application_slice_status, json.dumps(intent)))
va_status_id = cur.fetchone()[0]
cur.close()
db_conn.commit()
db_log.info('Created new va_status with ID %s', va_status_id)
return va_status_id
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while creating vertical_application_slice_status: ' + str(error))
def execute_va_status_update(command: str, vertical_application_slice_id: str, update: str):
try:
cur = db_conn.cursor()
cur.execute(command, (update, vertical_application_slice_id))
cur.close()
db_conn.commit()
db_log.info('Updated va_status %s', vertical_application_slice_id)
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while updating vertical_application_slice_status: ' + str(error))
def update_va_with_status(vertical_application_slice_id: str, vertical_application_slice_status: str):
# Update the status of a va_status entry by ID
command = """
UPDATE vertical_application_slice_status SET vertical_application_slice_status = %s
WHERE vertical_application_slice_id = %s
"""
execute_va_status_update(command, vertical_application_slice_id, vertical_application_slice_status)
def update_va_status_with_ns(vertical_application_slice_id: str, network_slice_status: str):
# Update the network_slice_status of a va_status entry by ID
command = """
UPDATE vertical_application_slice_status SET network_slice_status = %s
WHERE vertical_application_slice_id = %s
"""
execute_va_status_update(command, vertical_application_slice_id, network_slice_status)
def update_va_status_with_nest_id(vertical_application_slice_id: str, nest_id: str):
# Update the nest_id of a va_status entry by ID
command = """UPDATE vertical_application_slice_status SET nest_id = %s WHERE vertical_application_slice_id = %s"""
execute_va_status_update(command, vertical_application_slice_id, nest_id)
def update_va_with_status_by_network_slice(network_slice_id: str, vertical_application_slice_status: str):
# Update vertical application entry status by network_slice_id (FOREIGN KEY)
command = """
UPDATE vertical_application_slice_status SET vertical_application_slice_status = %s
WHERE network_slice_status = %s
"""
try:
cur = db_conn.cursor()
cur.execute(command, (vertical_application_slice_status, network_slice_id))
cur.close()
db_conn.commit()
db_log.info('Updated va_status with network_slice_status %s', network_slice_id)
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while updating vertical_application_slice_status: ' + str(error))
def get_va_status():
# Retrieve all va_status entries from the DB
command = """SELECT * FROM vertical_application_slice_status"""
try:
cur = db_conn.cursor()
cur.execute(command)
va_status = cur.fetchall()
cur.close()
return va_status
except (Exception, DatabaseError) as error:
db_log.error(str(error))
raise DBException('Error while fetching vertical_application_slice_status: ' + str(error))
def get_va_status_by_id(vertical_application_slice_id: str):
# Retrieve va_status entry by vertical_application_slice_id (PRIMARY KEY)
command = """SELECT * FROM vertical_application_slice_status WHERE vertical_application_slice_id = (%s)"""
try:
cur = db_conn.cursor()
cur.execute(command, (vertical_application_slice_id,))
va_status = cur.fetchone()
cur.close()
if va_status is None:
raise NotExistingEntityException('va_status with ID ' + vertical_application_slice_id + ' not found.')
return va_status
except DatabaseError as error:
db_log.error(str(error))
raise DBException('Error while fetching vertical_application_slice_status: ' + str(error))
def get_va_status_by_network_slice(network_slice_id: str):
# Retrieve va_status entry by network slice id
command = """SELECT * FROM vertical_application_slice_status WHERE network_slice_status = %s"""
try:
cur = db_conn.cursor()
cur.execute(command, (network_slice_id,))
va_status = cur.fetchone()
cur.close()
if va_status is None:
raise NotExistingEntityException('va_status with network slice ID ' + network_slice_id + ' not found.')
return va_status
except DatabaseError as error:
db_log.error(str(error))
raise DBException('Error while fetching vertical_application_slice_status: ' + str(error))
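# --- Usage sketch (illustrative only) -----------------------------------------
# Rough lifecycle of a vertical application slice record using the helpers in
# this module. The intent payload, slice identifiers, and status strings are
# placeholders, and a live database connection via core.db_conn is required.
#
# vas_id = insert_va_status('INSTANTIATING', intent={'slice': 'demo'})
# insert_network_slice_status('ns-001', 'CREATING')
# update_va_status_with_ns(vas_id, 'ns-001')
# update_network_slice_status('ns-001', 'RUNNING')
# update_va_with_status(vas_id, 'RUNNING')
# print(get_va_status_by_id(vas_id))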
|
t=input()
print(t) |
from .version import __version__
from .openapi import OpenAPI
from .loader import FileSystemLoader
from .errors import SpecError, ReferenceResolutionError, HTTPError, HTTPStatusError, ContentTypeError
__all__ = [
"__version__",
"OpenAPI",
"FileSystemLoader",
"SpecError",
"ReferenceResolutionError",
"HTTPStatusError",
"ContentTypeError",
"HTTPError",
]
|
from typing import final
import pandas as pd
from utils.PUMA_helpers import clean_PUMAs
from internal_review.set_internal_review_file import set_internal_review_files
year_map = {"2000": "00", "0812": "10", "1519": "20"}
def load_decennial_census_001020() -> pd.DataFrame:
"""Load in the xlsx file, fill the missing values with the values from geogtype, rename the columns
following conventions, drop the duplicate column"""
df = pd.read_excel(
"./resources/decennial_census_data/EDDT_Census00-10-20_MUTU.xlsx",
skiprows=2,
dtype={"GeogType": str, "GeoID": str},
)
df.rename(
columns={
"GeogType": "geo_type",
"GeoID": "geo_id",
"Pop20": "pop_20_count",
"Pop20P": "pop_20_pct",
"Hsp20": "pop_20_hsp_count",
"Hsp20P": "pop_20_hsp_pct",
"WNH20": "pop_20_wnh_count",
"WNH20P": "pop_20_wnh_pct",
"BNH20": "pop_20_bnh_count",
"BNH20P": "pop_20_bnh_pct",
"ANH20": "pop_20_anh_count",
"ANH20P": "pop_20_anh_pct",
"OTwoNH20": "pop_20_onh_count",
"OTwoNH20P": "pop_20_onh_pct",
"Pop10": "pop_10_count",
"Pop10P": "pop_10_pct",
"Hsp10": "pop_10_hsp_count",
"Hsp10P": "pop_10_hsp_pct",
"WNH10": "pop_10_wnh_count",
"WNH10P": "pop_10_wnh_pct",
"BNH10": "pop_10_bnh_count",
"BNH10P": "pop_10_bnh_pct",
"ANH10": "pop_10_anh_count",
"ANH10P": "pop_10_anh_pct",
"OTwoNH10": "pop_10_onh_count",
"OTwoNH10P": "pop_10_onh_pct",
"Pop00": "pop_00_count",
"Pop00P": "pop_00_pct",
"Hsp00": "pop_00_hsp_count",
"Hsp00P": "pop_00_hsp_pct",
"WNH00": "pop_00_wnh_count",
"WNH00P": "pop_00_wnh_pct",
"BNH00": "pop_00_bnh_count",
"BNH00P": "pop_00_bnh_pct",
"ANH00": "pop_00_anh_count",
"ANH00P": "pop_00_anh_pct",
"OTwoNH00": "pop_00_onh_count",
"OTwoNH00P": "pop_00_onh_pct",
},
inplace=True,
)
df.geo_id.fillna(df.geo_type, inplace=True)
df = df.replace(
{
"geo_id": {
"Bronx": "BX",
"Brooklyn": "BK",
"Manhattan": "MN",
"Queens": "QN",
"Staten Island": "SI",
"NYC": "citywide",
}
}
)
df.drop("geo_type", axis=1, inplace=True)
df.set_index("geo_id", inplace=True)
return df
def create_citywide_level_df_by_year(df, year):
"""create the dataframes by geography type and year, strip year from columns"""
df_citywide = (
df.loc[["citywide"]].reset_index().rename(columns={"geo_id": "citywide"})
)
df_citywide.set_index("citywide", inplace=True)
final = df_citywide.filter(regex=f"citywide|{year}")
final.columns = final.columns.str.replace(f"_{year}", "")
return final
def create_borough_level_df_by_year(df, year):
"""create the dataframes by geography type and year, strip year from columns"""
df_borough = (
df.loc[["BX", "BK", "MN", "QN", "SI"]]
.reset_index()
.rename(columns={"geo_id": "borough"})
)
df_borough.set_index("borough", inplace=True)
final = df_borough.filter(regex=f"borough|{year}")
final.columns = final.columns.str.replace(f"_{year}", "")
return final
def create_puma_level_df_by_year(df, year):
"""create the dataframes by geography type and year, strip year from columns"""
df_puma = df.loc["3701":"4114"].reset_index().rename(columns={"geo_id": "puma"})
df_puma["puma"] = df_puma["puma"].apply(func=clean_PUMAs)
df_puma.set_index("puma", inplace=True)
final = df_puma.filter(regex=f"puma|{year}")
final.columns = final.columns.str.replace(f"_{year}", "")
return final
def decennial_census_001020(
geography: str, year: str = "2000", write_to_internal_review=False
) -> pd.DataFrame:
assert geography in ["citywide", "borough", "puma"]
assert year in ["2000", "0812", "1519"]
df = load_decennial_census_001020()
if geography == "citywide":
final = create_citywide_level_df_by_year(df, year_map[year])
if geography == "borough":
final = create_borough_level_df_by_year(df, year_map[year])
if geography == "puma":
final = create_puma_level_df_by_year(df, year_map[year])
if write_to_internal_review:
set_internal_review_files(
data=[
(
final,
f"demographics_{year}_decennial_census.csv",
geography,
)
],
category="demographics",
)
return final
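# A minimal usage sketch (hypothetical invocation, assuming the EDDT xlsx resource referenced
# above is present on disk and the helper imports resolve):
if __name__ == "__main__":
    # 2020 population counts/shares aggregated to PUMAs ("1519" maps to the "20" column suffix).
    puma_2020 = decennial_census_001020(geography="puma", year="1519")
    # 2000 citywide totals, without writing an internal review file.
    citywide_2000 = decennial_census_001020(
        geography="citywide", year="2000", write_to_internal_review=False
    )
    print(puma_2020.head())
    print(citywide_2000.head())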
|
from models import User
from templates import TextTemplate
from utilities import send_message
def send_campaign():
message = TextTemplate(text="NEW FEATURE: SUBSCRIPTIONS \n\n"+
"Hi there, this week a new feature is coming out and that is SUBSCRIPTIONS.\n\n"+
"How it works: When someone gets into the Waiting List due to non availability of "+
"partners, we will send out a message to our subscribed users. For example, if you "+
"subscribe for women, we will notify you when a woman is looking for a partner even "+
"when you are not active and hence you'll gain the chance to chat if you are free. \n\n"+
"The feature will be made available to every user after one month but some users will "+
"be given access to it within 1-2 days. To be eligible for getting access, LIKE our "+
"page and leave a REVIEW on our page within 36 hours. Just to emphasize, please "+
"complete both to be eligible. \n\nIf you have any question, post it on our page. "+
"We'll guide you, but make it within the 36 hours because after that, the feature will be out.")
print("IN CAMPAIGN")
message = TextTemplate(text="FUCKING TEST")
#users = User.query.all()
#for user in users:
# id = user.id
#send_message(message, id=id)
users = ["1708022462556195", "1145959985508112"]
for user in users:
send_message(message, id=user)
|
import numpy as np
from .base_model import BaseModel
from .data_utils import minibatches, pad_sequences
from .general_utils import Progbar
np.set_printoptions(threshold=np.inf)  # print full arrays; np.nan is rejected as a threshold by newer NumPy
from modelFood import NlabelCell
import os
import math
import joblib
import argparse
import numpy as np
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str,default="food")
parser.add_argument('--dataset', type=str,default="food")
parser.add_argument('--log_dir', type=str, default='modelFood/log/')
parser.add_argument('--save_dir', type=str, default='modelFood/save/')
parser.add_argument('--data_dir', type=str, default='foodData/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--submit', default=True)
parser.add_argument('--analysis', default=True)
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter', type=int, default=100)
parser.add_argument('--n_batch', type=int, default=8)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=32)
parser.add_argument('--n_head', type=int, default=8)
parser.add_argument('--n_layer', type=int, default=12)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--n_gpu', type=int, default=1)
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='modelFood/model/encoder_food_20.json')
parser.add_argument('--bpe_path', type=str, default='modelFood/model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
class ModelFood(BaseModel):
def add_placeholders(self):
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sequence_lengths")
self.char_ids = tf.placeholder(tf.int32, shape=[None, None],
name="char_ids")
self.angle1s = tf.placeholder(tf.float32, shape=[None, None],
name="angle1s")
self.angle2s = tf.placeholder(tf.float32, shape=[None, None],
name="angle2s")
self.label_ids=tf.placeholder(tf.int32, shape=[None, None],
name="label_ids")
self.sp_token_weight = tf.placeholder(tf.float32, shape=[None, None],
name="sp_token")
self.dropout = tf.placeholder(dtype=tf.float32, shape=[],
name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[],
name="lr")
    # line_c is accepted to keep the minibatch interface but is not used when building the feed dict.
def get_feed_dict(self, chars, angle1s ,angle2s,labels,line_c,sp_token,lr=None, dropout=None):
char_ids, sequence_lengths = pad_sequences(chars, pad_tok=0)
label_ids, sequence_lengths = pad_sequences(labels, pad_tok=0)
sp_token, sequence_lengths = pad_sequences(sp_token, pad_tok=0)
feed = {
self.char_ids: char_ids,
self.sequence_lengths: sequence_lengths,
self.label_ids: label_ids,#1,2,3,4
self.sp_token_weight:sp_token
}
# print(feed)
if angle1s is not None:
angle1s, _ = pad_sequences(angle1s, pad_tok=0)
feed[self.angle1s] = angle1s
if angle2s is not None:
angle2s, _ = pad_sequences(angle2s, pad_tok=0)
feed[self.angle2s] = angle2s
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, sequence_lengths
def add_word_embeddings_op(self):
if(self.config.use_transformer==False):
_char_embeddings = tf.get_variable(
name="_char_embeddings",
dtype=tf.float32,
shape=[self.config.nchars, self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(_char_embeddings,
self.char_ids, name="char_embeddings")
cell_fw = tf.contrib.rnn.BasicLSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw = tf.contrib.rnn.BasicLSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
_output = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, char_embeddings,
sequence_length= self.sequence_lengths, dtype=tf.float32,scope='rnn1')
(output_fw, output_bw),_ = _output
output_concate = tf.concat([output_fw, output_bw], axis=-1)
char_embeddings = output_concate
if(self.config.ngram_embed==True):
kernels = tf.get_variable("kernels", shape=[self.config.ngram,2*self.config.hidden_size_char,self.config.n_kernel],
initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
char_embeddings = tf.nn.conv1d(char_embeddings, kernels, stride=1, padding="SAME")
char_embeddings = tf.nn.relu(char_embeddings)
#print("charemb",char_embeddings)
self.M3_fw = tf.get_variable("M3_fw", dtype=tf.float32,
shape=[self.config.hidden_size_char , self.config.label_emb_size],initializer=tf.contrib.layers.xavier_initializer())
self.M3_bw = tf.get_variable("M3_bw", dtype=tf.float32,
shape=[self.config.hidden_size_char, self.config.label_emb_size],initializer=tf.contrib.layers.xavier_initializer())
self.M4k_fw = tf.get_variable("M4k_fw", dtype=tf.float32,
shape=[(self.config.use_K_histroy+1)*self.config.label_emb_size,
self.config.hidden_size_char],initializer=tf.contrib.layers.xavier_initializer())
self.M4k_bw = tf.get_variable("M4k_bw", dtype=tf.float32,
shape=[(self.config.use_K_histroy+1)*self.config.label_emb_size ,
self.config.hidden_size_char],initializer=tf.contrib.layers.xavier_initializer())
if(self.config.use_label_linkM4==True):
cell_fw2 = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw2 = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
if(self.config.use_attention==True):
#print("M4 attention")
cell_fw2 = NlabelCell.NLabelAttentionCellWrapper(cell_fw2, self.config.ngram, emb_M3=self.M3_fw,
emb_M4k=self.M4k_fw, state_is_tuple=True) #attn len is not used
cell_bw2 = NlabelCell.NLabelAttentionCellWrapper(cell_bw2, self.config.ngram, emb_M3=self.M3_bw,
emb_M4k=self.M4k_bw, state_is_tuple=True)
else:
#print("M4 no attention")
cell_fw2 = NlabelCell.NLabelNoAttentionCellWrapper(cell_fw2, emb_M3=self.M3_fw,
emb_M4k=self.M4k_fw, state_is_tuple=True)
cell_bw2 = NlabelCell.NLabelNoAttentionCellWrapper(cell_bw2, emb_M3=self.M3_bw,
emb_M4k=self.M4k_bw, state_is_tuple=True)
else:
cell_fw2 = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw2 = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
#print("use simple LSTM")
_output = tf.nn.bidirectional_dynamic_rnn(
cell_fw2, cell_bw2, char_embeddings,
sequence_length=self.sequence_lengths, dtype=tf.float32,scope='rnn_output')
(output_fw, output_bw), _ = _output
output_concate = tf.concat([output_fw, output_bw], axis=-1)
char_embeddings = output_concate
#print("char emb out",char_embeddings)
self.char_embedding=tf.nn.dropout(char_embeddings, self.dropout)
self.fwbw=(output_fw, output_bw)
def add_logits_op(self):
fw,bw=self.fwbw
fw_shape=tf.shape(fw)
bw_shape=tf.shape(bw)
fw=tf.reshape(fw, shape=[-1,self.config.hidden_size_char])
bw = tf.reshape(bw, shape=[-1, self.config.hidden_size_char])
pre_logits_labels_fw = tf.nn.relu(tf.matmul(fw, self.M3_fw))
pre_logits_labels_bw = tf.nn.relu(tf.matmul(bw, self.M3_bw))
pre_logits_labels_fw=tf.reshape(pre_logits_labels_fw ,shape= [fw_shape[0],fw_shape[1],self.config.label_emb_size ])
pre_logits_labels_bw = tf.reshape(pre_logits_labels_bw, shape=[bw_shape[0],bw_shape[1],self.config.label_emb_size ])
pre_logits_labels_concat=tf.concat([pre_logits_labels_fw, pre_logits_labels_bw],axis=-1)
self.logits_labels=tf.layers.dense(inputs=pre_logits_labels_concat,units=self.config.nlabels,activation=tf.nn.relu)
self.logits = tf.layers.dense(inputs=self.char_embedding, units=self.config.output_dim)
def add_loss_op(self):
if self.config.loss_op=="msn":
if(self.config.use_triangle==True):
self.angles_stacked = tf.stack(
[tf.sin(self.angle1s / 180 * math.pi), tf.cos(self.angle1s / 180 * math.pi),
tf.sin(self.angle2s / 180 * math.pi), tf.cos(self.angle2s / 180 * math.pi)], axis=2)
else:
self.angles_stacked=tf.stack([self.angle1s, self.angle2s],axis=2)
label_ids=tf.one_hot(indices=self.label_ids,depth=self.config.nlabels)
self.crossEntropy=tf.nn.softmax_cross_entropy_with_logits( logits=self.logits_labels,labels= label_ids) #output : element-wised
if (self.config.weighted_loss != 1):
self.crossEntropy = tf.multiply(self.crossEntropy, self.sp_token_weight)
self.angles_stacked = tf.multiply(self.angles_stacked, tf.expand_dims(self.sp_token_weight,-1))
self.zero_mask = tf.where(tf.not_equal(self.char_ids, 0))
self.masked_angles_stacked = tf.gather_nd(self.angles_stacked,self.zero_mask)
if(self.config.use_triangle==True):
self.logger.info("using l2 norm")
s_logits=tf.shape(self.logits)
tmp_logits=tf.reshape(self.logits,shape=[s_logits[0],s_logits[1],2,2])
tmp_logits=tf.nn.l2_normalize(tmp_logits,dim=-1)
self.logits=tf.reshape(tmp_logits,shape=[s_logits[0],s_logits[1],4])
if (self.config.weighted_loss !=1):
self.logits = tf.multiply(self.logits, tf.expand_dims(self.sp_token_weight,-1))
self.masked_logits = tf.gather_nd(self.logits, self.zero_mask)
self.masked_crossEntropy=tf.gather_nd(self.crossEntropy,self.zero_mask)
self.loss_angle = tf.losses.mean_squared_error(self.masked_angles_stacked, self.masked_logits) # +self.masked_crossEntropy
self.loss_label=tf.reduce_mean(self.masked_crossEntropy)
self.loss_label=self.loss_label/8
if(self.config.use_label_info==True):
self.loss=self.loss_angle +self.loss_label
else:
self.loss=self.loss_angle
tf.summary.scalar("loss", self.loss)
def build(self):
with tf.variable_scope("mergedModel"):
self.add_placeholders()
self.add_word_embeddings_op()
self.add_logits_op()
self.add_loss_op()
self.add_train_op(self.config.lr_method, self.lr, self.loss,
self.config.clip)
self.initialize_session()
print("finish build")
    def predict_batch(self, words):
        # NOTE: appears unused; this call does not match the get_feed_dict() signature above and
        # self.labels_pred is never defined in this class.
        fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)
        labels_pred = self.sess.run(self.labels_pred, feed_dict=fd)
        return labels_pred, sequence_lengths
def run_epoch(self, train, dev, epoch):
# progbar stuff for logging
batch_size = self.config.batch_size
nbatches = (len(train) + batch_size - 1) // batch_size
prog = Progbar(target=nbatches)
# iterate over dataset
for i, (chars, angle1s,angle2s,labels,line_c, sp_token) in enumerate(minibatches(train, batch_size)):
fd, _ = self.get_feed_dict(chars, angle1s,angle2s,labels,line_c,sp_token, self.config.lr,
self.config.dropout)
_, train_loss, summary,ce,ag,m_ce,m_l = self.sess.run([self.train_op, self.loss, self.merged,self.loss_label,self.loss_angle,self.masked_crossEntropy,self.masked_logits], feed_dict=fd)
print("current batch loss :")
print("ce",ce)
print("ag",ag)
prog.update(i + 1, [("train loss", train_loss)])
if i % 10 == 0:
self.file_writer.add_summary(summary, epoch * nbatches + i)
metrics = self.run_evaluate(dev)
msg = " - ".join(["{} {:04.2f}".format(k, v)
for k, v in metrics.items()])
self.logger.info(msg)
return metrics["loss"]
def run_evaluate(self, test):
batch_size = self.config.batch_size
sum_loss=0
for i, (chars, angle1s, angle2s,labels,line_c,sp_token) in enumerate(minibatches(test, batch_size)):
fd, _ = self.get_feed_dict(chars, angle1s, angle2s,labels,line_c,sp_token, lr=0,dropout=1)
test_loss= self.sess.run(
[self.loss], feed_dict=fd)
sum_loss=sum_loss+test_loss[0]
return {"loss": sum_loss/(i+1),"average of validation batches num:": (i+1) }
def predict(self, data):
batch_size = self.config.batch_size
preds=[]
ang1=[]
ang2=[]
for i, (chars, angle1s, angle2s,labels,line_c,sp_token) in enumerate(minibatches(data, batch_size)):
if(i%1000==0):
print("test at batch:",i)
fd, _ = self.get_feed_dict(chars, angle1s, angle2s,labels, line_c,sp_token, lr=None,dropout=1)
p= self.sess.run(
[self.logits,self.angle1s,self.angle2s,self.label_ids], feed_dict=fd)
preds.append(p[0])
ang1.append(p[1])
ang2.append(p[2])
preds=np.asarray(preds)
ang1 = np.asarray(ang1)
ang2 = np.asarray(ang2)
return preds,ang1,ang2
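# A standalone numpy sketch (illustration only, not used by ModelFood) of the sin/cos angle
# encoding that add_loss_op applies when config.use_triangle is True: each angle in degrees is
# mapped to a unit (sin, cos) pair so the regression target has no 0/360-degree discontinuity,
# and the predicted pairs are L2-normalized back onto the unit circle before the MSE loss.
def encode_angles_deg(angle1_deg, angle2_deg):
    """Return [sin(a1), cos(a1), sin(a2), cos(a2)] for two angles given in degrees."""
    a1, a2 = np.deg2rad(angle1_deg), np.deg2rad(angle2_deg)
    return np.array([np.sin(a1), np.cos(a1), np.sin(a2), np.cos(a2)])

def decode_angles_deg(encoded):
    """Recover the two angles in degrees from a (possibly unnormalized) 4-vector."""
    a1 = np.rad2deg(np.arctan2(encoded[0], encoded[1]))
    a2 = np.rad2deg(np.arctan2(encoded[2], encoded[3]))
    return a1, a2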
|
from textcritical.default_settings import *
DATABASE_ROUTERS = [] |
from random import randint
from django import forms
from care.groupaccount.models import GroupAccount, GroupSetting
from care.userprofile.models import NotificationInterval
class NewGroupAccountForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name'] = forms.CharField(max_length=200, label='Group name')
self.fields['settings'] = forms.ModelChoiceField(
widget=forms.HiddenInput,
queryset=GroupSetting.objects.all(),
empty_label=None,
required=False
)
class Meta:
model = GroupAccount
fields = '__all__'
class EditGroupSettingForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['notification_lower_limit'] = forms.IntegerField(
min_value=-1000,
max_value=0,
initial=-100,
label='Balance reminder threshold (€)',
help_text="A reminder is sent when someone\'s balance is lower than this value."
)
self.fields['notification_lower_limit_interval'] = forms.ModelChoiceField(
queryset=NotificationInterval.objects.all(),
label='Email notification interval',
empty_label=None,
help_text="The interval of the balance reminder email."
)
class Meta:
model = GroupSetting
fields = '__all__'
|
# http://poynton.ca/ColorFAQ.html
# https://www.cambridgeincolour.com/tutorials/color-spaces.htm
# Differences between the HSV/HSB and HLS color spaces: https://blog.csdn.net/u010712012/article/details/85240100
"""
YIQ is the color encoding used by the NTSC (National Television Standards Committee) TV standard.
Y is the luminance (brightness) signal shared by black-and-white and color television,
I stands for In-phase, covering colors from orange to cyan,
Q stands for Quadrature-phase, covering colors from purple to yellow-green.
colorsys.rgb_to_yiq(r, g, b)
    Convert a color from RGB to YIQ.
colorsys.yiq_to_rgb(y, i, q)
    Convert a color from YIQ to RGB.
HLS stands for Hue, Luminance and Saturation.
colorsys.rgb_to_hls(r, g, b)
    Convert a color from RGB to HLS.
colorsys.hls_to_rgb(h, l, s)
    Convert a color from HLS to RGB.
"""
"""
HSV (Hue, Saturation, Value) is a color space created by A. R. Smith in 1978 from intuitive
color attributes; it is also known as the hexcone model.
Hue H
    Measured as an angle from 0 to 360 degrees, counted counter-clockwise from red: red is 0,
    green is 120 and blue is 240 degrees. Their complements are yellow at 60, cyan at 180 and
    purple at 300 degrees.
Saturation S
    Saturation describes how close a color is to a pure spectral color. Any color can be seen
    as a mixture of a spectral color and white; the larger the share of the spectral color,
    the higher the saturation and the deeper and more vivid the color. A pure spectral color
    contains no white and has maximum saturation. Values usually range from 0% to 100%; larger
    values mean a more saturated color.
Value V
    Value describes how bright a color appears. For a light source it is related to the
    luminance of the emitter; for an object color it is related to the object's transmittance
    or reflectance. Values usually range from 0% (black) to 100% (white).
The RGB and CMY color models are hardware-oriented, whereas the HSV (Hue Saturation Value)
model is user-oriented. The three-dimensional HSV representation is derived from the RGB cube:
looking from the white vertex toward the black vertex along the cube diagonal reveals the
cube's hexagonal outline. The hexagon's boundary represents hue, the horizontal axis
represents saturation, and value is measured along the vertical axis.
colorsys.rgb_to_hsv(r, g, b)
    Convert a color from RGB to HSV.
colorsys.hsv_to_rgb(h, s, v)
    Convert a color from HSV to RGB.
"""
import colorsys
print(colorsys.rgb_to_hsv(0.2, 0.4, 0.4))
print(colorsys.hsv_to_rgb(0.5, 0.5, 0.4))
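# A short round-trip sketch for the other conversions documented above (YIQ and HLS);
# the RGB values are arbitrary examples, not reference data.
r, g, b = 0.2, 0.4, 0.4
y, i, q = colorsys.rgb_to_yiq(r, g, b)
print((y, i, q), colorsys.yiq_to_rgb(y, i, q))  # YIQ round trip returns (approximately) the input RGB
h, l, s = colorsys.rgb_to_hls(r, g, b)
print((h, l, s), colorsys.hls_to_rgb(h, l, s))  # HLS round trip behaves the same way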
|
# Copyright (c) 2022 Exograd SAS.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from typing import Any, Dict
from eventline.api_object import ReadableAPIObject
class Resource(ReadableAPIObject):
"""A resource."""
def __init__(self) -> None:
super().__init__("resource")
def _read(self, data: Dict[str, Any]) -> None:
self.id_ = self._read_string(data, "id")
self.org_id = self._read_string(data, "org_id")
self.project_id = self._read_string(data, "project_id")
self.creation_time = self._read_datetime(data, "creation_time")
self.update_time = self._read_datetime(data, "update_time")
self.disabled = self._read_optional_boolean(data, "disabled")
self.spec = data["spec"]
|
# Generated by Django 3.0.10 on 2020-09-19 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='interest_arts',
field=models.BooleanField(blank=True, default=0, verbose_name='Arts'),
),
migrations.AddField(
model_name='user',
name='interest_drinks',
field=models.BooleanField(blank=True, default=0, verbose_name='Drinks'),
),
migrations.AddField(
model_name='user',
name='interest_env',
field=models.BooleanField(blank=True, default=0, verbose_name='Env'),
),
migrations.AddField(
model_name='user',
name='interest_games',
field=models.BooleanField(blank=True, default=0, verbose_name='Games'),
),
migrations.AddField(
model_name='user',
name='interest_photography',
field=models.BooleanField(blank=True, default=0, verbose_name='Photography'),
),
migrations.AddField(
model_name='user',
name='interest_social',
field=models.BooleanField(blank=True, default=0, verbose_name='Social'),
),
migrations.AddField(
model_name='user',
name='interest_sports',
field=models.BooleanField(blank=True, default=0, verbose_name='Sports'),
),
migrations.AddField(
model_name='user',
name='interest_startups',
field=models.BooleanField(blank=True, default=0, verbose_name='Startups'),
),
migrations.AddField(
model_name='user',
name='place',
field=models.CharField(choices=[('V', 'VIRTUAL'), ('P', 'PHYSICAL')], default='V', max_length=1),
),
migrations.AddField(
model_name='user',
name='time',
field=models.CharField(choices=[('L', 'LUNCH'), ('WD', 'WEEKDAY'), ('WE', 'WEEKEND')], default='WD', max_length=2),
),
]
|
"""
------------------------------------------------------
This file is part of RobustGaussianFittingLibrary,
a free library WITHOUT ANY WARRANTY
Copyright: 2017-2020 LaTrobe University Melbourne,
2019-2020 Deutsches Elektronen-Synchrotron
------------------------------------------------------
"""
import RobustGaussianFittingLibrary
import RobustGaussianFittingLibrary.useMultiproc
import RobustGaussianFittingLibrary.misc
import RobustGaussianFittingLibrary.basic
import numpy as np
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import scipy.stats
import math  # used by test_fit2Poisson for the Poisson density factorial
np.set_printoptions(suppress = True)
np.set_printoptions(precision = 2)
LWidth = 3
font = {'weight' : 'bold',
'size' : 8}
params = {'legend.fontsize': 'x-large',
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
def test_PDF2Uniform():
inVec = np.random.randn(20000)
inds = RobustGaussianFittingLibrary.misc.PDF2Uniform(inVec,
numBins=40, nUniPoints = 2000,
lowPercentile = 50, highPercentile=100)
b, e = np.histogram(inVec, 100)
e = e[:-1]
b2, e2 = np.histogram(inVec[inds], 100)
e2 = e2[:-1]
plt.plot(e, b/b.sum())
plt.plot(e2, b2/b2.sum())
plt.show()
def test_textProgBar():
print('test_textProgBar')
pBar = RobustGaussianFittingLibrary.misc.textProgBar(180)
for _ in range(60):
for _ in range(5000000):
pass
pBar.go(3)
pBar.end()
def visOrderStat():
print('visOrderStat')
    # the std of the few closest samples of a Gaussian to its average
    # is less than the actual std:
allN = list([10])
intervals = np.arange(0.01,1.01,0.01)
for N in allN:
Data = np.random.randn(N)
res = np.fabs(Data - Data.mean())
inds = np.argsort(res)
result_STD = np.zeros(intervals.shape[0])
result_MSSE = np.zeros(intervals.shape[0])
pBar = RobustGaussianFittingLibrary.misc.textProgBar(intervals.shape[0])
for idx, k in enumerate(intervals):
result_STD[idx] = Data[inds[:int(k*N)]].std()
result_MSSE[idx] = RobustGaussianFittingLibrary.MSSE(Data[inds[:int(k*N)]], k=2)
pBar.go()
del pBar
plt.plot(intervals, result_STD)
plt.plot(intervals, result_MSSE)
plt.plot(intervals, intervals)
plt.legend(allN)
plt.title('The estimated STD by the portion of \ninliers of a Gaussian structure')
plt.show()
def test_MSSE():
n_iters = 100
min_N = 3
max_N = 100
estScaleMSSE = np.zeros((n_iters, max_N - min_N))
estScaleMSSEWeighted = np.zeros((n_iters, max_N - min_N))
for iterCnt, iter in enumerate(range(n_iters)):
for NCnt, N in enumerate(np.arange(min_N,max_N)):
vec = np.random.randn(N)
res = np.abs(vec - vec.mean())
estScaleMSSE[iterCnt, NCnt] = RobustGaussianFittingLibrary.MSSE(res, k=int(N*0.5))
estScaleMSSEWeighted[iterCnt, NCnt] = RobustGaussianFittingLibrary.MSSEWeighted(res, k=int(N*0.5))
plt.plot(estScaleMSSE.mean(0), label='estScaleMSSE')
plt.plot(estScaleMSSEWeighted.mean(0), label='estScaleMSSEWeighted')
plt.legend()
plt.show()
def gkern(kernlen):
lim = kernlen//2 + (kernlen % 2)/2
x = np.linspace(-lim, lim, kernlen+1)
kern1d = np.diff(scipy.stats.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d/(kern2d.flatten().max())
def diffractionPatternMaker(XSZ, YSZ, WINSIZE, inputPeaksNumber, numOutliers):
inData = np.zeros((XSZ, YSZ), dtype='float32')
inMask = np.ones(inData.shape, dtype = 'uint8')
inMask[::64, ::64] = 0
for ccnt in range(inData.shape[1]):
for rcnt in range(inData.shape[0]):
inData[rcnt, ccnt] += 100 + np.fabs(400*np.exp(-(((rcnt-512)**2+(ccnt-512)**2)**0.5 - 250)**2/(2*75**2)))
inData[rcnt, ccnt] += 6*np.sqrt(inData[rcnt, ccnt])*np.random.randn(1)
randomLocations = np.random.rand(2,inputPeaksNumber)
randomLocations[0,:] = XSZ/2 + np.floor(XSZ*0.8*(randomLocations[0,:] - 0.5))
randomLocations[1,:] = YSZ/2 + np.floor(YSZ*0.8*(randomLocations[1,:] - 0.5))
for cnt in np.arange(inputPeaksNumber):
bellShapedCurve = 600*gkern(WINSIZE)
        # np.int was removed from recent NumPy releases; the builtin int keeps the original behavior
        winXStart = (randomLocations[0, cnt] - (WINSIZE-1)/2).astype(int)
        winXEnd = (randomLocations[0, cnt] + (WINSIZE+1)/2).astype(int)
        winYStart = (randomLocations[1, cnt] - (WINSIZE-1)/2).astype(int)
        winYEnd = (randomLocations[1, cnt] + (WINSIZE+1)/2).astype(int)
        inData[winXStart:winXEnd, winYStart:winYEnd] += bellShapedCurve
        if (cnt >= inputPeaksNumber - numOutliers):
            inMask[winXStart:winXEnd, winYStart:winYEnd] = 0
return(inData, inMask, randomLocations)
def test_removeIslands():
print('test_removeIslands')
#an island cannot be bigger than the stack size of your OS
inMask = np.ones((20, 21), dtype='uint8')
inMask[0,1] = 0
inMask[1,1] = 0
inMask[1,0] = 0
inMask[3,3] = 0
inMask[4,2] = 0
inMask[4,4] = 0
inMask[5,3] = 0
inMask[0,4] = 0
inMask[1,4] = 0
inMask[1,5] = 0
inMask[1,6] = 0
inMask[0,6] = 0
inMask[14,0] =0
inMask[14,1] =0
inMask[15,1] =0
inMask[16,1] =0
inMask[16,0] =0
inMask[6,6] = 0
inMask[6,7] = 0
inMask[6,8] = 0
inMask[6,9] = 0
inMask[7,5] = 0
inMask[7,8] = 0
inMask[8,6] = 0
inMask[8,7] = 0
inMask[8,8] = 0
inMask[8,9] = 0
inMask[16,16] = 0
inMask[16,17] = 0
inMask[16,18] = 0
inMask[16,19] = 0
inMask[17,15] = 0
inMask[17,18] = 0
inMask[18,16] = 0
inMask[18,17] = 0
inMask[18,18] = 0
inMask[18,19] = 0
plt.imshow(inMask), plt.show()
outMask = 1 - RobustGaussianFittingLibrary.misc.removeIslands(1 - inMask, minSize=2)
plt.imshow(outMask), plt.show()
def test_bigTensor2SmallsInds():
print('test_bigTensor2SmallsInds')
a = (100*np.random.randn(20,16,11)).astype('int')
rowClmInds, segInds = \
RobustGaussianFittingLibrary.useMultiproc.bigTensor2SmallsInds(a.shape,
2, 3)
print(rowClmInds)
def test_fitValue_sweep():
print('test_fitValue_sweep_over_N')
numIter = 1000
maxN = 400
minN = 200
mean_inliers = np.zeros((maxN-minN, numIter))
std_inliers = np.zeros((maxN-minN, numIter))
robust_mean = np.zeros((maxN-minN, numIter))
robust_std = np.zeros((maxN-minN, numIter))
pBar = RobustGaussianFittingLibrary.misc.textProgBar(maxN-minN)
x = np.zeros(maxN-minN)
timeR = 0
for N in range(minN,maxN):
for iter in range(numIter):
RNN0 = np.random.randn(N)
RNN1 = 1000+5*(np.random.rand(int(N*0.25))-0.5)
testData = np.concatenate((RNN0, RNN1)).flatten()
np.random.shuffle(testData)
time_time = time.time()
rmode, rstd = RobustGaussianFittingLibrary.fitValue(testData,
fit2Skewed = False,
topKthPerc=0.5,
bottomKthPerc=0.3,
MSSE_LAMBDA=3.0,
optIters= 10,
downSampledSize = 100)
timeR = time.time() - time_time
mean_inliers[N-minN, iter] = RNN0.mean()
std_inliers[N-minN, iter] = RNN0.std()
robust_mean[N-minN, iter] = rmode
robust_std[N-minN, iter] = rstd
x[N-minN] = N
pBar.go()
del pBar
plt.plot(x, ((robust_mean-mean_inliers)/std_inliers).mean(1) - \
((robust_mean-mean_inliers)/std_inliers).std(1),
'.', label = 'robust mean of data - std')
plt.plot(x, ((robust_mean-mean_inliers)/std_inliers).mean(1), '.', label = 'robust mean of data')
plt.plot(x, ((robust_mean-mean_inliers)/std_inliers).mean(1) + \
((robust_mean-mean_inliers)/std_inliers).std(1),
'.', label = 'robust mean of data + std')
plt.legend()
plt.show()
plt.plot(x, (robust_std/std_inliers).mean(1)-(robust_std/std_inliers).std(1),
'.', label='robust std of data - std')
plt.plot(x, (robust_std/std_inliers).mean(1), '.', label='robust std of data')
plt.plot(x, (robust_std/std_inliers).mean(1)+(robust_std/std_inliers).std(1),
'.', label='robust std of data + std')
plt.grid()
plt.legend()
plt.show()
def test_RobustAlgebraicPlaneFittingPy():
print('test_RobustAlgebraicPlaneFittingPy')
N = 100
numOut = 2
inX = 100*np.random.rand(N)-50
inY = 100*np.random.rand(N)-50
inZ = 1*inX - 2 * inY + 50*np.random.randn(N) + 50
inZ[((N-1)*np.random.rand(numOut)).astype('int')] = 500*np.random.rand(numOut) +500
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(inX, inY, inZ, color = 'black', label='data')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
plt.show()
mP = RobustGaussianFittingLibrary.fitPlane(inX, inY, inZ)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = mP[0]*X + mP[1]*Y + mP[2] + 3*mP[3]
Zax_U = mP[0]*X + mP[1]*Y + mP[2]
Zax_L = mP[0]*X + mP[1]*Y + mP[2] - 3*mP[3]
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(inX, inY, inZ, color = 'black', label='data')
c1 = ax.plot_surface(X, Y, Zax_H, color = 'blue', label='upper threshold')
c1._facecolors2d = c1._facecolors3d
c1._edgecolors2d = c1._edgecolors3d
c2 = ax.plot_surface(X, Y, Zax_U, color = 'green', label='model plane')
c2._facecolors2d = c2._facecolors3d
c2._edgecolors2d = c2._edgecolors3d
c3 = ax.plot_surface(X, Y, Zax_L, color = 'red', label='lower threshold')
c3._facecolors2d = c3._facecolors3d
c3._edgecolors2d = c3._edgecolors3d
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
plt.show()
def test_fitValueVSMeanShiftPy():
    print('test_fitValueVSMeanShiftPy')
N = 100
numOut = 2
inX = 100*np.random.rand(N)-50
inY = 100*np.random.rand(N)-50
inZ = 1*inX - 2 * inY + 50*np.random.randn(N) + 50
inZ[((N-1)*np.random.rand(numOut)).astype('int')] = 500*np.random.rand(numOut) +500
mP = RobustGaussianFittingLibrary.basic.fitValue_by_meanShift(inVec = inZ, minSNR = 3.0, MS_numIter = 8)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = 0*X + 0*Y + mP[0] + 3*mP[1]
Zax_U = 0*X + 0*Y + mP[0]
Zax_L = 0*X + 0*Y + mP[0] - 3*mP[1]
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(inX, inY, inZ, color = 'black', label='data')
c1 = ax.plot_surface(X, Y, Zax_H, color = 'blue', label='upper threshold')
c1._facecolors2d = c1._facecolors3d
c1._edgecolors2d = c1._edgecolors3d
c2 = ax.plot_surface(X, Y, Zax_U, color = 'green', label='model plane')
c2._facecolors2d = c2._facecolors3d
c2._edgecolors2d = c2._edgecolors3d
c3 = ax.plot_surface(X, Y, Zax_L, color = 'red', label='lower threshold')
c3._facecolors2d = c3._facecolors3d
c3._edgecolors2d = c3._edgecolors3d
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
plt.show()
mP = RobustGaussianFittingLibrary.fitValue(inZ, fit2Skewed = False)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = 0*X + 0*Y + mP[0] + 3*mP[1]
Zax_U = 0*X + 0*Y + mP[0]
Zax_L = 0*X + 0*Y + mP[0] - 3*mP[1]
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(inX, inY, inZ, color = 'black', label='data')
c1 = ax.plot_surface(X, Y, Zax_H, color = 'blue', label='upper threshold')
c1._facecolors2d = c1._facecolors3d
c1._edgecolors2d = c1._edgecolors3d
c2 = ax.plot_surface(X, Y, Zax_U, color = 'green', label='model plane')
c2._facecolors2d = c2._facecolors3d
c2._edgecolors2d = c2._edgecolors3d
c3 = ax.plot_surface(X, Y, Zax_L, color = 'red', label='lower threshold')
c3._facecolors2d = c3._facecolors3d
c3._edgecolors2d = c3._edgecolors3d
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
plt.show()
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(inX, inY, inZ, color = 'black', label='data')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
plt.show()
def test_fitPlaneVSMeanShiftPy():
np.random.seed(94713)
    print('test_fitPlaneVSMeanShiftPy')
ax_azim = 70
ax_dist = 10
ax_elev = 15
wireFrameStride = 10
N = 400
numOut = 4
inX = 100*np.random.rand(N)-50
inY = 100*np.random.rand(N)-50
addedNoise = np.random.randn(N)
upinlierInds = addedNoise>=0
lowinlierInds = addedNoise<0
phi = 73
mu_B = 4*phi
sig_B = phi*(mu_B/phi)**0.5
min_outlier = 6.1*sig_B
max_outlier = 8.1*sig_B
inZ_noNise = 1*inX - 2 * inY + mu_B
inZ = inZ_noNise + sig_B*addedNoise
outliersInds = ((N-1)*np.random.rand(numOut)).astype('int')
inZ[outliersInds] = inZ_noNise[outliersInds] + min_outlier + (max_outlier - min_outlier)*np.random.rand(numOut)
excludeInds = np.ones(inZ.shape[0], dtype = 'int')
excludeInds[outliersInds] = 0
mP = RobustGaussianFittingLibrary.basic.fitPlane_by_meanShift(inX, inY, inZ,
minSNR = 6.0, MS_numIter = 5)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = mP[0]*X + mP[1]*Y + mP[2] + 6*mP[3]
Zax_U = mP[0]*X + mP[1]*Y + mP[2]
Zax_L = mP[0]*X + mP[1]*Y + mP[2] - 6*mP[3]
res = inZ - (mP[0]*inX + mP[1]*inY + mP[2])
resOutliers = inZ[outliersInds] - (mP[0]*inX[outliersInds] + mP[1]*inY[outliersInds] + mP[2])
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
c1 = ax.plot_wireframe(X, Y, Zax_H, rstride=wireFrameStride, cstride=wireFrameStride,
color = 'blue', alpha = 0.25, label='MeanShift peak threshold')
#c1._facecolors2d = c1._facecolors3d
#c1._edgecolors2d = c1._edgecolors3d
c2 = ax.plot_wireframe(X, Y, Zax_U, rstride=wireFrameStride, cstride=wireFrameStride,
color = 'green', label='model plane')
#c2._facecolors2d = c2._facecolors3d
#c2._edgecolors2d = c2._edgecolors3d
#c3 = ax.plot_surface(X, Y, Zax_L, color = 'red', alpha = 0.25, label='lower threshold')
#c3._facecolors2d = c3._facecolors3d
#c3._edgecolors2d = c3._edgecolors3d
ax.scatter(inX[(res>=0) & (excludeInds>0)],
inY[(res>=0) & (excludeInds>0)],
inZ[(res>=0) & (excludeInds>0)], color = 'blue', label='data above model')
ax.scatter(inX[(res<0) & (excludeInds>0)],
inY[(res<0) & (excludeInds>0)],
inZ[(res<0) & (excludeInds>0)], color = 'green', label='data below model')
result_outInds = outliersInds[resOutliers/mP[3]>=6]
ax.scatter(inX[result_outInds],
inY[result_outInds],
inZ[result_outInds], color = 'black', label='detected outliers')
for cnt in range(result_outInds.shape[0]):
ax.plot(np.array([ inX[result_outInds[cnt]], inX[result_outInds[cnt]] ]),
np.array([ inY[result_outInds[cnt]], inY[result_outInds[cnt]] ]),
np.array([ inZ_noNise[result_outInds[cnt]], inZ[result_outInds[cnt]] ]), color = 'black')
result_outInds = outliersInds[resOutliers/mP[3]<6]
ax.scatter(inX[result_outInds],
inY[result_outInds],
inZ[result_outInds], color = 'red', label='missed outliers')
for cnt in range(result_outInds.shape[0]):
ax.plot(np.array([ inX[result_outInds[cnt]], inX[result_outInds[cnt]] ]),
np.array([ inY[result_outInds[cnt]], inY[result_outInds[cnt]] ]),
np.array([ inZ_noNise[result_outInds[cnt]], inZ[result_outInds[cnt]] ]), color = 'red')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
ax.azim = ax_azim
ax.dist = ax_dist
ax.elev = ax_elev
plt.show()
mP = RobustGaussianFittingLibrary.fitPlane(inX, inY, inZ)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = mP[0]*X + mP[1]*Y + mP[2] + 6*mP[3]
Zax_U = mP[0]*X + mP[1]*Y + mP[2]
Zax_L = mP[0]*X + mP[1]*Y + mP[2] - 6*mP[3]
res = inZ - (mP[0]*inX + mP[1]*inY + mP[2])
resOutliers = inZ[outliersInds] - (mP[0]*inX[outliersInds] + mP[1]*inY[outliersInds] + mP[2])
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
c1 = ax.plot_wireframe(X, Y, Zax_H, rstride=wireFrameStride, cstride=wireFrameStride,
color = 'blue', alpha = 0.25, label='FLKOS peak threshold')
#c1._facecolors2d = c1._facecolors3d
#c1._edgecolors2d = c1._edgecolors3d
c2 = ax.plot_wireframe(X, Y, Zax_U, rstride=wireFrameStride, cstride=wireFrameStride,
color = 'green', label='model plane')
#c2._facecolors2d = c2._facecolors3d
#c2._edgecolors2d = c2._edgecolors3d
#c3 = ax.plot_surface(X, Y, Zax_L, color = 'red', alpha = 0.25, label='lower threshold')
#c3._facecolors2d = c3._facecolors3d
#c3._edgecolors2d = c3._edgecolors3d
ax.scatter(inX[(res>=0) & (excludeInds>0)],
inY[(res>=0) & (excludeInds>0)],
inZ[(res>=0) & (excludeInds>0)], color = 'blue', label='data above model')
ax.scatter(inX[(res<0) & (excludeInds>0)],
inY[(res<0) & (excludeInds>0)],
inZ[(res<0) & (excludeInds>0)], color = 'green', label='data below model')
result_outInds = outliersInds[resOutliers/mP[3]>=6]
ax.scatter(inX[result_outInds],
inY[result_outInds],
inZ[result_outInds], color = 'black', label='detected outliers')
for cnt in range(result_outInds.shape[0]):
ax.plot(np.array([ inX[result_outInds[cnt]], inX[result_outInds[cnt]] ]),
np.array([ inY[result_outInds[cnt]], inY[result_outInds[cnt]] ]),
np.array([ inZ_noNise[result_outInds[cnt]], inZ[result_outInds[cnt]] ]), color = 'black')
result_outInds = outliersInds[resOutliers/mP[3]<6]
ax.scatter(inX[result_outInds],
inY[result_outInds],
inZ[result_outInds], color = 'red', label='missed outliers')
for cnt in range(result_outInds.shape[0]):
ax.plot(np.array([ inX[result_outInds[cnt]], inX[result_outInds[cnt]] ]),
np.array([ inY[result_outInds[cnt]], inY[result_outInds[cnt]] ]),
np.array([ inZ_noNise[result_outInds[cnt]], inZ[result_outInds[cnt]] ]), color = 'red')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
ax.azim = ax_azim
ax.dist = ax_dist
ax.elev = ax_elev
plt.show()
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
mP = RobustGaussianFittingLibrary.basic.fitPlane_by_meanShift(inX, inY, inZ,
minSNR = 6.0, MS_numIter = 5)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = mP[0]*X + mP[1]*Y + mP[2] + 6*mP[3]
Zax_U = mP[0]*X + mP[1]*Y + mP[2]
Zax_L = mP[0]*X + mP[1]*Y + mP[2] - 6*mP[3]
c2 = ax.plot_wireframe(X, Y, Zax_H, rstride=wireFrameStride, cstride=wireFrameStride,
color = 'green', label='MeanShift peak threshold')
mP = RobustGaussianFittingLibrary.fitPlane(inX, inY, inZ)
print(mP)
Xax = np.arange(inX.min(), inX.max())
Yax = np.arange(inY.min(), inY.max())
X, Y = np.meshgrid(Xax, Yax)
Zax_H = mP[0]*X + mP[1]*Y + mP[2] + 6*mP[3]
Zax_U = mP[0]*X + mP[1]*Y + mP[2]
Zax_L = mP[0]*X + mP[1]*Y + mP[2] - 6*mP[3]
res = inZ - (mP[0]*inX + mP[1]*inY + mP[2])
resOutliers = inZ[outliersInds] - (mP[0]*inX[outliersInds] + mP[1]*inY[outliersInds] + mP[2])
c1 = ax.plot_wireframe(X, Y, Zax_H, rstride=wireFrameStride, cstride=wireFrameStride,
color = 'blue', alpha = 0.25, label='FLKOS peak threshold')
ax.scatter(inX[(res>=0) & (excludeInds>0)],
inY[(res>=0) & (excludeInds>0)],
inZ[(res>=0) & (excludeInds>0)], color = 'blue', label='data above model')
ax.scatter(inX[(res<0) & (excludeInds>0)],
inY[(res<0) & (excludeInds>0)],
inZ[(res<0) & (excludeInds>0)], color = 'green', label='data below model')
result_outInds = outliersInds[resOutliers/mP[3]>=6]
ax.scatter(inX[result_outInds],
inY[result_outInds],
inZ[result_outInds], color = 'black', label='detected outliers')
for cnt in range(result_outInds.shape[0]):
ax.plot(np.array([ inX[result_outInds[cnt]], inX[result_outInds[cnt]] ]),
np.array([ inY[result_outInds[cnt]], inY[result_outInds[cnt]] ]),
np.array([ inZ_noNise[result_outInds[cnt]], inZ[result_outInds[cnt]] ]), color = 'black')
result_outInds = outliersInds[resOutliers/mP[3]<6]
ax.scatter(inX[result_outInds],
inY[result_outInds],
inZ[result_outInds], color = 'red', label='missed outliers')
for cnt in range(result_outInds.shape[0]):
ax.plot(np.array([ inX[result_outInds[cnt]], inX[result_outInds[cnt]] ]),
np.array([ inY[result_outInds[cnt]], inY[result_outInds[cnt]] ]),
np.array([ inZ_noNise[result_outInds[cnt]], inZ[result_outInds[cnt]] ]), color = 'red')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
ax.azim = ax_azim
ax.dist = 4
ax.elev = ax_elev
plt.show()
plt.rc('font', **font)
plt.rcParams.update(params)
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(inX[upinlierInds*excludeInds>0],
inY[upinlierInds*excludeInds>0],
inZ[upinlierInds*excludeInds>0],
color = 'blue', label='data above true model')
ax.scatter(inX[lowinlierInds*excludeInds>0],
inY[lowinlierInds*excludeInds>0],
inZ[lowinlierInds*excludeInds>0],
color = 'green', label='data below true model')
ax.scatter(inX[outliersInds],
inY[outliersInds],
inZ[outliersInds],
color = 'black', label='outliers')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('values')
fig.legend()
ax.azim = ax_azim
ax.dist = ax_dist
ax.elev = ax_elev
plt.show()
def test_RobustAlgebraicLineFittingPy():
print('test_RobustAlgebraicLineFittingPy')
inSigma = 3
slope = 0.5
intercept = 10
print('slope: ', slope)
print('intercept: ', intercept)
print('Scale: ', inSigma)
n_in = 100
inX = 200*(np.random.rand(n_in)-0.5)
inY = slope*inX + intercept + inSigma*np.random.randn(n_in)
n_out = 80
outX = 200*(np.random.rand(n_out)-0.5)
outY = 200*(np.random.rand(n_out)-0.25)
X = np.concatenate((inX, outX))
Y = np.concatenate((inY, outY))
label = np.ones(X.shape[0], dtype='uint8')
_errors = Y - (0.5*X + 10)
label[np.fabs(_errors) >= 3*inSigma] = 0
print(X.shape)
mP = RobustGaussianFittingLibrary.fitLine(X, Y, 0.5, 0.3)
Xax = np.arange(X.min(), X.max())
Yax_U = mP[0]*Xax + mP[1] + 3*mP[2]
Yax_M = mP[0]*Xax + mP[1]
Yax_L = mP[0]*Xax + mP[1] - 3*mP[2]
plt.rc('font', **font)
plt.rcParams.update(params)
plt.scatter(X[label==0], Y[label==0], color='royalblue', label='outliers', marker='o')
    plt.scatter(X[label==1], Y[label==1], color='mediumblue', label='inliers', marker='o')
plt.plot(Xax, Yax_U, linewidth = 3, color = 'purple')
plt.plot(Xax, Yax_M, linewidth = 3, color = 'green')
plt.plot(Xax, Yax_L, linewidth = 3, color = 'red')
plt.xlabel('X')
    plt.ylabel('Y').set_rotation(0)
plt.show()
print(mP)
RobustGaussianFittingLibrary.misc.naiveHistTwoColors(_errors, np.array([0, mP[2]]))
def test_fitBackground():
print('test_fitBackground')
XSZ = 512
YSZ = 512
WINSIZE = 7
inputPeaksNumber = 50
numOutliers = 0
print("Generating a pattern with " + str(inputPeaksNumber) + " peaks...")
inImage, inMask, randomLocations = diffractionPatternMaker(XSZ, YSZ, WINSIZE, inputPeaksNumber, numOutliers)
fig, axes = plt.subplots(1, 3)
winXL = 200
winXU = 300
winYL = 200
winYU = 300
im0 = axes[0].imshow(inImage*inMask, vmin=0, vmax=1000)
axes[0].set_xlim([winXL, winXU])
axes[0].set_ylim([winYL, winYU])
fig.colorbar(im0, ax=axes[0], shrink =0.5)
mP = RobustGaussianFittingLibrary.fitBackground(inImage, inMask,
winX = 64,
winY = 64,
numStrides=2) \
+ RobustGaussianFittingLibrary.fitBackground(inImage, inMask,
winX = 32,
winY = 32,
numStrides=2) \
+ RobustGaussianFittingLibrary.fitBackground(inImage, inMask,
winX = 16,
winY = 16,
numStrides=2)
mP = mP/3
im1 = axes[1].imshow(inMask*mP[0], vmin=0, vmax=1000)
axes[1].set_xlim([winXL, winXU])
axes[1].set_ylim([winYL, winYU])
fig.colorbar(im1, ax=axes[1], shrink = 0.5)
im2 = axes[2].imshow(inMask*(inImage - mP[0])/mP[1])
axes[2].set_xlim([winXL, winXU])
axes[2].set_ylim([winYL, winYU])
fig.colorbar(im2, ax=axes[2], shrink = 0.5)
plt.show()
def centeredDistanceMatrix(n):
n = int(2*int(n/2) + 1)
x,y = np.meshgrid(range(n),range(n))
return np.sqrt((x-(n/2)+1)**2+(y-(n/2)+1)**2)
def test_fitBackgroundCylindrically():
print('test_fitBackgroundCylindrically')
n_F = 50
n_R = 1024
n_C = 1024
imDiag = (n_R**2 + n_C**2)**0.5
radii = centeredDistanceMatrix(imDiag)
water_avg_profile = np.exp(-(np.arange(imDiag)-n_R/4)**2/(2*(n_R/16)**2))
water2D_avg = np.exp(-(radii-n_R/4)**2/(2*(n_R/16)**2))
water2D_std = water2D_avg**0.5
r_cent = int(water2D_std.shape[0]/2)
c_cent = int(water2D_std.shape[1]/2)
water2D_avg = water2D_avg[r_cent - int(n_R/2):r_cent + int(n_R/2),
r_cent - int(n_C/2):r_cent + int(n_C/2)]
water2D_std = water2D_std[r_cent - int(n_R/2):r_cent + int(n_R/2),
r_cent - int(n_C/2):r_cent + int(n_C/2)]
normalNoisePattern = np.zeros((n_F, n_R, n_C), dtype='float32')
for fCnt in range(n_F):
normalNoisePattern[fCnt] = \
(np.random.randn(n_R, n_C) + water2D_avg)*water2D_std
print(normalNoisePattern.shape)
mP, est_profile = \
RobustGaussianFittingLibrary.basic.fitBackgroundCylindrically(\
inTensor = normalNoisePattern,
inMask = None,
minRes = 1,
includeCenter = 0,
maxRes = None,
shellWidth = 3,
topKthPerc = 0.5,
bottomKthPerc = 0.25,
MSSE_LAMBDA = 3.0,
optIters = 16,
numStrides = 2,
finiteSampleBias = 10000,
minimumResidual = 0,
return_vecMP = True)
plt.figure(1)
plt.imshow(normalNoisePattern[0])
plt.figure(2)
plt.imshow(mP[0])
plt.figure(3)
plt.imshow(mP[1])
plt.figure(4)
plt.plot(est_profile[0], label = 'ext_profile')
plt.plot(water_avg_profile, label = 'true_profile')
plt.show()
def test_fitBackgroundRadially():
print('test_fitBackgroundRadially')
XSZ = 1024
YSZ = 1024
WINSIZE = 7
inputPeaksNumber = 50
numOutliers = 0
print("Generating a pattern with " + str(inputPeaksNumber) + " peaks...")
inImage, inMask, randomLocations = diffractionPatternMaker(XSZ, YSZ, WINSIZE, inputPeaksNumber, numOutliers)
time_time = time.time()
print('Calculating mp', flush = True)
mP, vecMP = RobustGaussianFittingLibrary.fitBackgroundRadially(
inImage,
inMask = inMask,
shellWidth = 5,
stride = 1,
includeCenter=1,
return_vecMP = True)
print('time: ' + str(time.time() -time_time) + ' s', flush = True)
plt.plot(vecMP[0] + vecMP[1] , label='avg + std')
plt.plot(vecMP[0] , label='avg')
plt.plot(vecMP[0] - vecMP[1] , label='avg - std')
plt.legend()
plt.show()
plt.imshow(mP[0]), plt.show()
im0_img = inImage*inMask
im1_img = inMask*mP[0]
im2_img = inMask*(inImage - mP[0])/(mP[1]+0.001)
im2_img[(np.fabs(mP[1])<1)]=0
fig, axes = plt.subplots(1, 3)
winXL = 0
winXU = 1024
winYL = 0
winYU = 1024
im0 = axes[0].imshow(im0_img, vmin=0, vmax=1000)
axes[0].set_xlim([winXL, winXU])
axes[0].set_ylim([winYL, winYU])
fig.colorbar(im0, ax=axes[0], shrink = 0.5)
im1 = axes[1].imshow(im1_img, vmin=0, vmax=1000)
axes[1].set_xlim([winXL, winXU])
axes[1].set_ylim([winYL, winYU])
fig.colorbar(im1, ax=axes[1], shrink = 0.5)
im2 = axes[2].imshow(im2_img, vmin = -6, vmax = 6)
axes[2].set_xlim([winXL, winXU])
axes[2].set_ylim([winYL, winYU])
fig.colorbar(im2, ax=axes[2], shrink = 0.5)
plt.show()
def test_fitBackgroundTensor():
print('test_fitBackgroundTensor')
imgDimX = 100
imgDimY = 100
Xax = np.arange(imgDimX)
Yax = np.arange(imgDimY)
inX, inY = np.meshgrid(Xax, Yax)
img1 = 0+1*np.random.randn(1, imgDimX,imgDimY)
mP = RobustGaussianFittingLibrary.fitPlane(inX = inX.flatten(),
inY = inY.flatten(),
inZ = img1.flatten())
print(mP)
mP = RobustGaussianFittingLibrary.fitBackground(np.squeeze(img1))
print(mP)
img2 = 3+1*np.random.randn(1, imgDimX,imgDimY)
mP = RobustGaussianFittingLibrary.fitPlane(inX = inX.flatten(),
inY = inY.flatten(),
inZ = img2.flatten())
print(mP)
mP = RobustGaussianFittingLibrary.fitBackground(np.squeeze(img2))
print(mP)
img3 = 100+10*np.random.randn(1, imgDimX,imgDimY)
mP = RobustGaussianFittingLibrary.fitPlane(inX = inX.flatten(),
inY = inY.flatten(),
inZ = img3.flatten())
print(mP)
mP = RobustGaussianFittingLibrary.fitBackground(np.squeeze(img3))
print(mP)
inTensor = np.concatenate((img1, img2, img3))
print('input Tensor shape is: ', str(inTensor.shape))
modelParamsMap = RobustGaussianFittingLibrary.fitBackgroundTensor(inTensor, numStrides=5)
print(modelParamsMap)
def test_fitBackgroundTensor_multiproc():
print('test_fitBackgroundTensor_multiproc')
f_N, r_N, c_N = (100, 128, 512)
inTensor = np.zeros((f_N, r_N, c_N), dtype='float32')
for frmCnt in range(f_N):
inTensor[frmCnt] = frmCnt+frmCnt**0.5*np.random.randn(r_N,c_N)
print('input Tensor shape is: ', str(inTensor.shape))
modelParamsMap = RobustGaussianFittingLibrary.useMultiproc.fitBackgroundTensor_multiproc(inTensor,
winX = 64,
winY = 64)
for frmCnt in list([f_N-1]):
fig, axes = plt.subplots(2, 1)
axes[0].imshow(modelParamsMap[0,frmCnt])
axes[1].imshow(modelParamsMap[1,frmCnt])
plt.show()
def test_fitBackgroundRadiallyTensor_multiproc():
    print('test_fitBackgroundRadiallyTensor_multiproc')
f_N, r_N, c_N = (4, 1024, 1024)
inTensor = np.zeros((f_N, r_N, c_N), dtype='float32')
for frmCnt in range(f_N):
inTensor[frmCnt] = frmCnt+frmCnt**0.5*np.random.randn(r_N,c_N)
print('input Tensor shape is: ', str(inTensor.shape))
modelParamsMap = RobustGaussianFittingLibrary.useMultiproc.fitBackgroundRadiallyTensor_multiproc(inTensor,
shellWidth = 4,
stride = 1,
topKthPerc = 0.5,
bottomKthPerc = 0.25,
finiteSampleBias = 400,
showProgress = True)
for frmCnt in list([f_N-1]):
fig, axes = plt.subplots(2, 1)
axes[0].imshow(modelParamsMap[0,frmCnt], vmin = f_N - 2, vmax = f_N + 1)
axes[1].imshow(modelParamsMap[1,frmCnt])
plt.show()
def test_SginleGaussianVec():
print('test_SginleGaussianVec')
RNN0 = 50 + 5*np.random.randn(1000)
RNN1 = 200*(np.random.rand(500)-0.5)
testData = np.concatenate((RNN0, RNN1)).flatten()
np.random.shuffle(testData)
print('testing RobustSingleGaussianVecPy')
mP = RobustGaussianFittingLibrary.fitValue(testData,
fit2Skewed = False,
topKthPerc = 0.5,
bottomKthPerc=0.35,
MSSE_LAMBDA=3.0)
print(mP)
RobustGaussianFittingLibrary.misc.naiveHist(testData, mP)
plt.plot(testData,'.'), plt.show()
plt.plot(testData,'.'),
plt.plot(np.array([0, testData.shape[0]]), np.array([mP[0]-3*mP[1], mP[0]-3*mP[1]]))
plt.plot(np.array([0, testData.shape[0]]), np.array([mP[0], mP[0]]))
plt.plot(np.array([0, testData.shape[0]]), np.array([mP[0]+3*mP[1], mP[0]+3*mP[1]]))
plt.show()
RobustGaussianFittingLibrary.misc.sGHist(testData, mP)
def test_fitValue2Skewed():
print('test_fitValue2Skewed')
RNN0 = 50 + 5*np.random.randn(50)
RNN1 = 200*(np.random.rand(50)-0.5)
testData = np.concatenate((RNN0, RNN1)).flatten()
np.random.shuffle(testData)
print('testing fitValue2Skewed')
mP = RobustGaussianFittingLibrary.fitValue(testData,
fit2Skewed = True,
topKthPerc = 0.43,
bottomKthPerc=0.37,
MSSE_LAMBDA=3.0)
RobustGaussianFittingLibrary.misc.naiveHist(testData, mP)
plt.plot(testData,'.'), plt.show()
plt.plot(testData,'.'),
plt.plot(np.array([0, testData.shape[0]]), np.array([mP[0]-3*mP[1], mP[0]-3*mP[1]]))
plt.plot(np.array([0, testData.shape[0]]), np.array([mP[0], mP[0]]))
plt.plot(np.array([0, testData.shape[0]]), np.array([mP[0]+3*mP[1], mP[0]+3*mP[1]]))
plt.show()
RobustGaussianFittingLibrary.misc.sGHist(testData, mP)
def test_fitValue2Skewed_sweep_over_N():
print('test_fitValue2Skewed_sweep_over_N')
numIter = 1000
maxN = 30
minN = 3
mean_inliers = np.zeros((maxN-minN, numIter))
std_inliers = np.zeros((maxN-minN, numIter))
robustSkew_mean = np.zeros((maxN-minN, numIter))
robustSkew_std = np.zeros((maxN-minN, numIter))
robust_mean = np.zeros((maxN-minN, numIter))
robust_std = np.zeros((maxN-minN, numIter))
x = np.zeros(maxN-minN)
timeSkew = 0
timeR = 0
pBar = RobustGaussianFittingLibrary.misc.textProgBar(maxN-minN)
for N in range(minN,maxN):
for iter in range(numIter):
RNN0 = np.random.randn(N)
RNN1 = 12+3*(np.random.rand(int(N*0.5))-0.5)
testData = np.concatenate((RNN0, RNN1)).flatten()
np.random.shuffle(testData)
time_time = time.time()
rmodeSkew, rstdSkew = \
RobustGaussianFittingLibrary.fitValue(
testData,
topKthPerc = 0.5,
bottomKthPerc = 0.45,
optIters = 12,
fit2Skewed = True)
timeSkew = time.time() - time_time
time_time = time.time()
rmode, rstd = RobustGaussianFittingLibrary.fitValue(testData,
fit2Skewed = False)
timeR = time.time() - time_time
mean_inliers[N-minN, iter] = RNN0.mean()
std_inliers[N-minN, iter] = RNN0.std()
robustSkew_mean[N-minN, iter] = rmodeSkew
robustSkew_std[N-minN, iter] = rstdSkew
robust_mean[N-minN, iter] = rmode
robust_std[N-minN, iter] = rstd
x[N-minN] = testData.shape[0]
pBar.go()
del pBar
print(timeR/timeSkew)
plt.plot(x, mean_inliers.mean(1), '.', label = 'mean of inliers')
plt.plot(x, robustSkew_mean.mean(1), '.', label = 'robust skewed mean of data')
plt.plot(x, robust_mean.mean(1), '.', label = 'robust mean of data')
plt.legend()
plt.show()
plt.plot(x, std_inliers.mean(1), '.', label='std of inliers')
plt.plot(x, robustSkew_std.mean(1), '.', label='robust skewed std of data')
plt.plot(x, robust_std.mean(1), '.', label='robust std of data')
plt.grid()
plt.legend()
plt.show()
def test_flatField():
print('test_flatField')
RNN0 = 0 + 1*np.random.randn(2048)
RNN1 = 6 + 6**0.5*np.random.randn(1024)
RNN2 = 12 + 12**0.5*np.random.randn(512)
RNN3 = 18 + 18**0.5*np.random.randn(256)
data = np.concatenate((RNN0, RNN1, RNN2, RNN3)).flatten()
np.random.shuffle(data)
mP_All = np.zeros((2, 4))
testData = data.copy()
modelCnt = 0
mP = RobustGaussianFittingLibrary.fitValue(testData,
topKthPerc = 0.49, bottomKthPerc=0.45,
fit2Skewed = False, MSSE_LAMBDA=2.0)
RobustGaussianFittingLibrary.misc.naiveHist(data, mP)
for modelCnt in range(4):
mP = RobustGaussianFittingLibrary.fitValue(testData,
fit2Skewed = False,
topKthPerc = 0.49,
bottomKthPerc=0.45,
MSSE_LAMBDA=1.0)
probs = np.random.rand(testData.shape[0]) - np.exp(-(testData - mP[0])**2/(2*mP[1]**2))
probs[testData<mP[0]] = 0
probs[probs>mP[0]+3.0*mP[1]] = 1
testData = testData[probs>0]
mP_All[:, modelCnt] = mP
RobustGaussianFittingLibrary.misc.naiveHist_multi_mP(data, mP_All)
RobustGaussianFittingLibrary.misc.sGHist_multi_mP(data, mP_All, SNR=2.5)
def test_fitValueTensor_MultiProc():
print('fitValueTensor functions')
SIGMA = 10
RNN1 = SIGMA*np.random.randn(500-50-3, 18, 38)
RNN2 = 5*SIGMA + 5*SIGMA*np.random.randn(50, 18, 38)
RNU = 30*SIGMA+SIGMA*np.random.randn(3, 18, 38)
testData = np.concatenate((RNN1, RNN2))
testData = np.concatenate((testData, RNU))
inMask = np.ones(testData.shape)
print('testing fitValueTensor')
nowtime = time.time()
modelParamsMap = RobustGaussianFittingLibrary.fitValueTensor(testData, inMask)
print(time.time() - nowtime)
print(modelParamsMap)
print('testing fitValueTensor_MultiProc')
nowtime = time.time()
modelParamsMap = RobustGaussianFittingLibrary.useMultiproc.fitValueTensor_MultiProc(
testData,
inMask,
numRowSegs = 6,
numClmSegs = 12)
print(time.time() - nowtime)
print(modelParamsMap)
def test_fitLineTensor_MultiProc():
print('test_fitLineTensor_MultiProc')
n_F, n_R, n_C = (500, 32, 32)
dataX = np.zeros((n_F, n_R, n_C), dtype='float32')
dataY = np.zeros((n_F, n_R, n_C), dtype='float32')
for imgCnt in range(n_F):
dataX[imgCnt] = imgCnt
dataY[imgCnt] = imgCnt + np.random.randn(n_R, n_C)
lP = RobustGaussianFittingLibrary.useMultiproc.fitLineTensor_MultiProc(inTensorX = dataX,
inTensorY = dataY,
numRowSegs = 2,
numClmSegs = 2,
topKthPerc = 0.5,
bottomKthPerc = 0.4,
MSSE_LAMBDA = 3.0,
showProgress = True)
plt.imshow(lP[0]), plt.show()
plt.imshow(lP[1]), plt.show()
plt.imshow(lP[2]), plt.show()
def test_fitValueSmallSample():
print('test_fitValueSmallSample')
inliers = np.random.randn(100)
outliers = np.array([100, 64])
testData = np.hstack((inliers, outliers))
np.random.shuffle(testData)
print('testing fitValue with ' + str(inliers.shape[0]) + ' inliers and ' + str(outliers.shape[0]) + ' outliers.')
mP = RobustGaussianFittingLibrary.fitValue(testData, fit2Skewed = False,
modelValueInit = 100)
print('inliers mean ' + str(inliers.mean()) + ' inliers std ' + str(inliers.std()))
print(mP)
def test_fitValue2SkewedSmallSample():
print('test_fitValue2SkewedSmallSample')
inliers = np.random.randn(100)
outliers = np.array([100, 64])
testData = np.hstack((inliers, outliers))
np.random.shuffle(testData)
print('testing fitValue with ' + str(inliers.shape[0]) + ' inliers and ' + str(outliers.shape[0]) + ' outliers.')
mP = RobustGaussianFittingLibrary.fitValue(testData,
fit2Skewed = True, bottomKthPerc = 0.4,
modelValueInit = 100, optIters = 12)
print('inliers mean ' + str(inliers.mean()) + ' inliers std ' + str(inliers.std()))
print(mP)
def test_fit2Poisson():
    # factorial comes from the stdlib math module imported at the top of this file
numIters = 20
photon = 73.5
offset = -10
bckSTD = 14
pois_lambda_list = np.arange(0.2, 2, 0.05)
#pois_lambda_list = np.concatenate( (pois_lambda_list, np.arange(2, 3, 0.25)), axis=0)
mP = np.zeros((pois_lambda_list.shape[0], 2))
inliers_mP = np.zeros((pois_lambda_list.shape[0], 2))
meanShift = np.zeros((pois_lambda_list.shape[0], 2))
for Lcnt, pois_lambda in enumerate(pois_lambda_list):
minSNR = 6.0
intended_mu = pois_lambda*photon + offset
worstInlier = intended_mu + minSNR*photon*(intended_mu/photon)**0.5
ADU = np.arange(-bckSTD*4, worstInlier)
kList = np.unique(photon*(ADU[ADU>=photon+offset]/photon).astype('int'))
poissonDensity_kList = np.zeros(kList.shape[0])
for cnt in range(kList.shape[0]):
            poissonDensity_kList[cnt] = pois_lambda**int((kList[cnt]-offset)/photon) * \
                np.exp(-pois_lambda) / \
                math.factorial(int((kList[cnt]-offset)/photon))  # scipy has no "scipy.math"; use the stdlib factorial
N = int(1000*2*np.pi)
poissonDensity_kList[poissonDensity_kList<10/N]=0
vec = np.zeros(kList.shape[0]*N)
vecSize = 0
for kListCnt in range(kList.shape[0]):
tmp = kList[kListCnt] + bckSTD*np.random.randn(int(N* \
poissonDensity_kList[kListCnt]))
vec[vecSize: vecSize + tmp.shape[0]] = tmp
vecSize += tmp.shape[0]
vec = vec[:vecSize:int(N/1000)].flatten()
N = vec.shape[0]
inliers_mu = vec.mean()
inliers_std = vec.std()
numOutliers = int(N*0.01)
outliersSpread = 1
_mP = np.zeros((numIters, 2))
_meanShift = np.zeros((numIters, 2))
for iters in range(numIters):
outliers = 0.01*photon + inliers_mu + \
inliers_std * (minSNR + outliersSpread * np.random.rand(numOutliers))
SNRs_true = (((outliers - inliers_mu)/inliers_std) >= minSNR).sum()/numOutliers
vec_contaminated = np.hstack((vec.copy(), outliers))
_mP[iters] = RobustGaussianFittingLibrary.fitValue(vec_contaminated,
fit2Skewed = False,
minimumResidual = 0.2 * photon, MSSE_LAMBDA = 4.0)
_meanShift[iters] = RobustGaussianFittingLibrary.basic.fitValue_by_meanShift(vec_contaminated, minSNR = 6.0)
_mP = _mP.mean(0)
_meanShift = _meanShift.mean(0)
inliers_mP[Lcnt, 0] = inliers_mu/photon
inliers_mP[Lcnt, 1] = (inliers_std/photon)**2
mP[Lcnt, 0] = _mP[0]/photon
mP[Lcnt, 1] = (_mP[1]/photon)**2
meanShift[Lcnt, 0] = (_meanShift[0]/photon)
meanShift[Lcnt, 1] = (_meanShift[1]/photon)**2
str2Print = 'phi -> ' + str(pois_lambda)
str2Print += ', inliers -> ' + '%0.3f'%inliers_mu + ' ' + '%0.3f'%inliers_std
str2Print += ', mP -> ' + '%0.3f'%_mP[0] + ' ' + '%0.3f'%_mP[1]
str2Print += ', meanShift -> ' + '%0.3f'%_meanShift[0] + ' ' + '%0.3f'%_meanShift[1]
print(str2Print, flush=True)
plt.rc('font', **font)
plt.rcParams.update(params)
plt.plot(pois_lambda_list, pois_lambda_list, marker = '.', color = 'gold', label='Reference')
plt.plot(pois_lambda_list, inliers_mP[:, 0], marker = '.', color = 'red', label='Inliers $\mu$')
plt.plot(pois_lambda_list, inliers_mP[:, 1], marker = '.', color = 'red', label='Inliers $\sigma^2$')
    plt.plot(pois_lambda_list, mP[:, 0], marker = '.', color = 'green', label='FLKOS $\mu$')
plt.plot(pois_lambda_list, mP[:, 1], marker = '.', color = 'green', label='FLKOS $\sigma^2$')
plt.plot(pois_lambda_list, meanShift[:, 0], marker = '.', color = 'blue', label='MeanShift $\mu$')
plt.plot(pois_lambda_list, meanShift[:, 1], marker = '.', color = 'tab:blue', label='MeanShift $\sigma^2$')
plt.xlim([0, pois_lambda_list.max()])
plt.ylim([0, pois_lambda_list.max()])
plt.xlabel('Poisson density average')
plt.ylabel('Calculated average')
plt.legend()
plt.grid()
plt.show()
plt.rc('font', **font)
plt.rcParams.update(params)
    plt.plot(pois_lambda_list, mP[:, 0]/inliers_mP[:, 0], marker = '.', color = 'green', label='FLKOS $\mu$')
plt.plot(pois_lambda_list, mP[:, 1]/inliers_mP[:, 1], marker = '.', color = 'green', label='FLKOS $\sigma^2$')
plt.plot(pois_lambda_list, meanShift[:, 0]/inliers_mP[:, 0], marker = '.', color = 'blue', label='MeanShift $\mu$')
plt.plot(pois_lambda_list, meanShift[:, 1]/inliers_mP[:, 1], marker = '.', color = 'tab:blue', label='MeanShift $\sigma^2$')
plt.xlim([0, pois_lambda_list.max()])
plt.ylim([0, pois_lambda_list.max()])
plt.xlabel('Poisson density average')
plt.ylabel('Calculated average')
plt.legend()
plt.grid()
plt.show()
def test_medianOfFits():
print('test_medianOfFits')
inliers = np.random.randn(100)
outliers = np.array([100, 64])
testData = np.hstack((inliers, outliers))
np.random.shuffle(testData)
print('testing fitValue with ' + str(inliers.shape[0]) + ' inliers and ' + str(outliers.shape[0]) + ' outliers.')
mP = RobustGaussianFittingLibrary.basic.medianOfFits(inVec = testData,
inWeights = None,
topkMax = 0.7,
topkMin = 0.3,
numSamples = 50,
MSSE_LAMBDA = 3.0,
modelValueInit = 0,
optIters = 12,
minimumResidual = 0)
print('inliers mean ' + str(inliers.mean()) + ' inliers std ' + str(inliers.std()))
print(mP)
def test_gradientPlot():
x = np.arange(0, 12, 0.01)
mu = 0*x
std = 1 + 0*x
mu2 = 6 + 0*x
std2 = 1 + 0*x
gradPlot = RobustGaussianFittingLibrary.misc.plotGaussianGradient('x', 'y')
gradPlot.addPlot(x = x, mu = mu, std = std, gradient_color = 'green', label='lower')
gradPlot.addPlot(x = x, mu = mu2, std = std2, gradient_color = 'red', label='upper')
gradPlot.show()
def test_getTriangularVertices():
RobustGaussianFittingLibrary.misc.getTriangularVertices(
n = 1000,
phi_start = 0,
phi_end = np.pi,
plotIt = True)
def multiprocessor_targetFunc(idx, inputs):
data, mask, op_type, randNum = inputs
if(op_type=='median'):
to_return1 = np.median(data[idx][mask[idx]==1])
to_return1 = np.array([to_return1])
to_return2 = np.ones((int(10*np.random.rand(1)), 2, 2))
return(to_return1, 'median', to_return2)
def test_multiprocessor():
N = 1000
D = 100000
Data = (10+100*np.random.randn(N,D)).astype('int')
Mask = (2*np.random.rand(N,D)).astype('int')
Param = 'median'
randNums = np.random.rand(10000,10000)
inputs = (Data, Mask, Param, randNums)
print('id(randNums)', id(randNums))
stats = RobustGaussianFittingLibrary.misc.multiprocessor(
multiprocessor_targetFunc, N, inputs,
showProgress = True).start()
medians, otherOutput, _ids = stats
print('type(medians)', type(medians))
print('medians.shape', medians.shape)
print('type(otherOutput)', type(otherOutput))
print('len(otherOutput)', len(otherOutput))
print('otherOutput[1] ', otherOutput[1])
print('otherOutput[1][0] ', otherOutput[1][0])
print('type(_ids) ', type(_ids))
print('len(_ids) ', len(_ids))
print('type(_ids[0]) ', type(_ids[0]))
print('_ids.shape ', _ids.shape)
direct_medians = np.zeros(N)
for cnt in range(N):
direct_medians[cnt] = np.median(Data[cnt, Mask[cnt]==1])
print(np.array([ medians, direct_medians] ).T)
print('difference of results: ', (direct_medians - medians).sum())
if __name__ == '__main__':
print('PID ->' + str(os.getpid()))
test_fitValueSmallSample()
test_multiprocessor()
test_textProgBar()
test_gradientPlot()
test_fitBackgroundRadiallyTensor_multiproc()
test_fitBackgroundRadially()
test_getTriangularVertices()
test_fitBackgroundCylindrically()
test_fitValue2Skewed_sweep_over_N()
test_fitValue2SkewedSmallSample()
test_fitValue_sweep()
test_fit2Poisson()
test_medianOfFits()
test_fitValueVSMeanShiftPy()
test_fitPlaneVSMeanShiftPy()
test_RobustAlgebraicPlaneFittingPy()
test_fitBackgroundTensor()
test_fitBackgroundTensor_multiproc()
test_fitBackground()
test_MSSE()
test_SginleGaussianVec()
test_flatField()
test_fitValueTensor_MultiProc()
test_fitValue2Skewed()
test_removeIslands()
test_fitLineTensor_MultiProc()
test_bigTensor2SmallsInds()
test_PDF2Uniform()
test_RobustAlgebraicLineFittingPy()
visOrderStat()
print('This was robust fitting')
exit() |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser , BaseUserManager , PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):

    def create_user(self, email, password=None, **extra_fields):
        if not email:
            raise ValueError('Email is required')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user


class User(AbstractBaseUser, PermissionsMixin):
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=True)
    objects = UserManager()
    USERNAME_FIELD = 'email'


class Tag(models.Model):
    name = models.CharField(max_length=255)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        return self.name


class Ingredients(models.Model):
    name = models.CharField(max_length=255)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        return self.name


class Recipe(models.Model):
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    title = models.CharField(max_length=255)
    time_minutes = models.IntegerField()
    price = models.DecimalField(max_digits=5, decimal_places=2)
    link = models.CharField(max_length=255, blank=True)
    ingredients = models.ManyToManyField('Ingredients')
    tags = models.ManyToManyField('Tag')

    def __str__(self):
        return self.title
|
# -*-coding:utf-8 -*-
#Reference:**********************************************
# @Time : 2019-09-27 22:55
# @Author : Fabrice LI
# @File : 382_triangle_count.py
# @User : liyihao
# @Software: PyCharm
# @Description: Given an array of integers, how many triples can be chosen from the array
# such that the three chosen numbers can be the side lengths of a triangle?
#Reference:**********************************************
'''
E.g
Input: [3, 4, 6, 7]
Output: 3
Explanation:
They are (3, 4, 6),
(3, 6, 7),
(4, 6, 7)
Input: [4, 4, 4, 4]
Output: 4
Explanation:
Any three numbers can form a triangle.
So the answer is C(4, 3) = 4
'''
class Solution:
"""
@param S: A list of integers
@return: An integer
"""
def triangleCount(self, S):
        # Sort, then fix the largest side S[i] and scan the remaining prefix with two
        # pointers: whenever S[left] + S[right] > S[i], every index in (left, right]
        # also pairs with S[right], adding right - left valid triples. O(n^2) overall.
if not S:
return 0
length = len(S)
S.sort()
res = 0
for i in range(length - 1, -1, -1):
left = 0
right = i - 1
while left < right:
if S[right] + S[left] > S[i]:
res += right - left
right -= 1
else:
left += 1
return res
if __name__ == '__main__':
s = Solution()
S = [4, 4, 4, 4]
print(s.triangleCount(S))
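    # Additional check (not part of the original snippet): the first example from the
    # problem statement above, which should report 3 valid triangles.
    print(s.triangleCount([3, 4, 6, 7]))  # expected output: 3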
|
import pytest
import sys
import os
try:
import _clippy
except ImportError:
sys.stderr.write('''these tests need to be run with the _clippy C extension
module available. Try running "clippy runtests.py ...".
''')
sys.exit(1)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
raise SystemExit(pytest.main(sys.argv[1:]))
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from collect.download_excel import ExcelDownload
from collect.import_excel import ExcelImport
urlpatterns = [
url(r'^download$', ExcelDownload.as_view()),
url(r'^import$', ExcelImport.as_view()),
]
|
"""Pending deprecation file.
To view the actual content, go to: flow/networks/minicity.py
"""
from flow.utils.flow_warnings import deprecated
from flow.networks.minicity import MiniCityNetwork
@deprecated('flow.scenarios.minicity',
'flow.networks.minicity.MiniCityNetwork')
class MiniCityScenario(MiniCityNetwork):
"""See parent class."""
pass
|
from __future__ import print_function
class Empty(Exception):
""" Error attempting to access an element from an empty container. """
pass
class ArrayStack:
""" LIFO Stack implementation using a Python list as underlying storage. """
def __init__(self):
""" Create an empty stack. """
self._data = []
def __len__(self):
""" Return the number of elements in the stack. """
return len(self._data)
def is_empty(self):
""" Return True if the stack is empty. """
return len(self._data) == 0
def push(self, e):
""" Add element e to the top of stack. """
self._data.append(e)
def top(self):
""" Return (but do not remove) the element at the top of stack.
Raise Empty exception if the stack is empty.
"""
if self.is_empty():
raise Empty('Stack is empty')
return self._data[-1]
def pop(self):
""" Remove and return the element from the top of the stack (i.e., LIFO).
Raise Empty exception if the stack is empty.
"""
if self.is_empty():
raise Empty('Stack is empty')
return self._data.pop() |
import os
import shutil
import time
from template import Template, TemplateException
from template.test import TestCase, main
def append_file(path, text):
time.sleep(2) # Ensure file time stamps are different.
fh = open(path, "a")
fh.write(text)
fh.close()
class CompileTest(TestCase):
def testCompile(self):
ttcfg = { "POST_CHOMP": 1,
"INCLUDE_PATH": "test/src",
"COMPILE_EXT": ".ttc" }
# Test process fails when EVAL_PYTHON not set.
try:
Template(ttcfg).process("evalpython", {})
self.fail("did not raise exception")
except TemplateException, e:
self.assertEquals("python", e.type())
self.assertEquals("EVAL_PYTHON not set", e.info())
# Ensure we can run compiled templates without loading parser.
ttcfg["EVAL_PYTHON"] = 1
Template(ttcfg).process("evalpython", {})
# Check that compiled template file exists and grab modification time.
path = "test/src/complex"
self.assert_(os.path.exists(path + ".ttc"))
mod = os.stat(path + ".ttc")[9]
# Save copy of the source file because we're going to try to break it.
shutil.copy(path, path + ".org")
# Sleep for a couple of seconds to ensure clock has ticked.
time.sleep(2)
# Append a harmless newline to the end of the source file to change
# its modification time.
append_file(path, "\n")
# Define "bust_it" to append a lone "[% TRY %]" onto the end of the
# source file to cause re-compilation to fail.
replace = { "bust_it": lambda: append_file(path, "[% TRY %]") }
self.Expect(DATA, ttcfg, replace)
self.assert_(os.stat(path)[9] > mod)
# Restore original source file.
shutil.copy(path + ".org", path)
DATA = r"""
-- test --
[% META author => 'albert' version => 'emc2' %]
[% INCLUDE complex %]
-- expect --
This is the header, title: Yet Another Template Test
This is a more complex file which includes some BLOCK definitions
This is the footer, author: albert, version: emc2
- 3 - 2 - 1
-- test --
[%# we want to break 'compile' to check that errors get reported -%]
[% CALL bust_it -%]
[% TRY; INCLUDE complex; CATCH; "$error"; END %]
-- expect --
file error - parse error - complex line 18: unexpected end of input
"""
main()
|
import audioop
import numpy as np
import pyaudio
import wave
CHUNK_SIZE = 1024
class Music:
def __init__(self, path = "./stereo.wav"):
self.path = path
self.wf = wave.open(path, 'rb')
self.width = self.wf.getsampwidth()
self.pa = pyaudio.PyAudio()
self.stream = self.pa.open(format = self.pa.get_format_from_width(self.width),
channels = 6,
rate = self.wf.getframerate(),
output = True)
def stop(self):
self.stream.stop_stream()
self.stream.close()
self.pa.terminate()
    def replay(self):
        # Read the first chunk of raw bytes
        # [L0, R0, L1, R1, L2, R2, ...]
        src_frames = self.wf.readframes(CHUNK_SIZE)
        while src_frames != '':
            # Split the interleaved stereo data into L and R channels
            l_frames = audioop.tomono(src_frames, self.width, 1, 0)
            r_frames = audioop.tomono(src_frames, self.width, 0, 1)
            # Adjust the volume sent to each channel
            # Order: L, FL, FR, R, B
            volumes = [[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
            six_ch_frames = self.set_6ch_audio(l_frames, r_frames, volumes)
            # Pass the 6-channel audio to the stream
            # [FL0, FR0, CT0, BA0, RL0, RR0, ...]
            self.stream.write(six_ch_frames)
            # Read the next chunk
            src_frames = self.wf.readframes(CHUNK_SIZE)
        self.stop()
def set_6ch_audio(self, l_frames, r_frames, volumes):
all_frame = [ [np.fromstring(audioop.mul(l_frames, self.width, volumes[0][i]), dtype=np.int16)]
if volumes[0][i] != 0
else [np.fromstring(audioop.mul(r_frames, self.width, volumes[1][i]), dtype=np.int16)]
for i in range(5)]
audio = np.concatenate(all_frame, axis=0)
        # The second argument of np.insert is the index of the channel to mute
audio = np.insert(audio, 4, 0, axis=0).T.astype(np.int16).tostring()
return audio
if __name__ == '__main__':
music = Music()
music.replay()
|
import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution1
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution1(nfeat, nhid)
self.gc3 = GraphConvolution1(nhid, nhid)
self.gc4 = GraphConvolution1(nhid, nhid)
self.gc2 = GraphConvolution1(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x1 = F.relu(self.gc1(x, adj))
x1 = F.dropout(x1, self.dropout, training=self.training)
x2 = F.relu(self.gc3(x1, adj))
x2 = F.dropout(x2, self.dropout, training=self.training)
x3 = F.relu(self.gc4(x2+x1, adj))
x3 = F.dropout(x3, self.dropout, training=self.training)
x = self.gc2(x2+x1 +x3, adj)
return F.log_softmax(x, dim=1)
import torch
import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution
class GCNModelVAE(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, nclass, dropout):
super(GCNModelVAE, self).__init__()
self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
self.gc4 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
self.gc5 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
self.dc = InnerProductDecoder(dropout, act=lambda x: x)
self.gc2_1 = GraphConvolution(hidden_dim1+input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc3_1 = GraphConvolution(hidden_dim1+input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc4_1 = GraphConvolution(hidden_dim1+input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc5_1 = GraphConvolution(hidden_dim1+input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc6_1 = GraphConvolution(hidden_dim1+input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc7_1 = GraphConvolution(hidden_dim1+input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.node_regen = GraphConvolution(hidden_dim1, input_feat_dim, dropout, act=F.relu)
self.gc_class = GraphConvolution(hidden_dim1+input_feat_dim, nclass)
def encode(self, x, adj, gc1, gc2, gc3, gc4, gc5):
hidden1 = gc1(x, adj)
return gc2(hidden1, adj), gc3(hidden1, adj), gc4(hidden1, adj), gc5(hidden1, adj), hidden1
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, x, adj):
mu, logvar, mu_n, var_n, hidden1 = self.encode(x, adj, self.gc1, self.gc2, self.gc3, self.gc4, self.gc5)
z = self.reparameterize(mu, logvar)
z_n = self.reparameterize(mu_n, var_n)
adj1 = self.dc(z)
#get masked new adj
zero_vec = -9e15*torch.ones_like(adj1)
masked_adj = torch.where(adj > 0, adj1, zero_vec)
adj1 = F.softmax(masked_adj, dim=1)
a1 = self.node_regen(z_n, adj1.t())
zero_vec = -9e15*torch.ones_like(a1)
masked_nodes = torch.where(x > 0, a1, zero_vec)
a1 = F.softmax(masked_nodes, dim=1)
mu, logvar, mu_n, var_n, hidden2 = self.encode(torch.cat([a1 , hidden1],-1), adj + adj1, self.gc2_1, self.gc2, self.gc3, self.gc4, self.gc5)
z = self.reparameterize(mu, logvar)
z_n = self.reparameterize(mu_n, var_n)
adj2 = self.dc(z)
#get masked new adj
zero_vec = -9e15*torch.ones_like(adj2)
masked_adj = torch.where(adj > 0, adj2, zero_vec)
adj2 = F.softmax(masked_adj, dim=1)
a2 = self.node_regen(z_n, adj2.t())
zero_vec = -9e15*torch.ones_like(a2)
masked_nodes = torch.where(x > 0, a2, zero_vec)
a2 = F.softmax(masked_nodes, dim=1)
mu, logvar, mu_n, var_n, hidden3 = self.encode(torch.cat([a2,hidden1 + hidden2],-1), adj + adj1 + adj2, self.gc3_1, self.gc2, self.gc3, self.gc4, self.gc5)
z = self.reparameterize(mu, logvar)
z_n = self.reparameterize(mu_n, var_n)
adj3 = self.dc(z)
#get masked new adj
zero_vec = -9e15*torch.ones_like(adj3)
masked_adj = torch.where(adj > 0, adj3, zero_vec)
adj3 = F.softmax(masked_adj, dim=1)
a3 = self.node_regen(z_n, adj3.t())
zero_vec = -9e15*torch.ones_like(a3)
masked_nodes = torch.where(x > 0, a3, zero_vec)
a3 = F.softmax(masked_nodes, dim=1)
'''mu, logvar, mu_n, var_n, hidden4 = self.encode(torch.cat([a3,hidden1 + hidden2+hidden3],-1), adj + adj1 + adj2+adj3, self.gc4_1, self.gc2, self.gc3, self.gc4, self.gc5)
z = self.reparameterize(mu, logvar)
z_n = self.reparameterize(mu_n, var_n)
adj4 = self.dc(z)
#get masked new adj
zero_vec = -9e15*torch.ones_like(adj4)
masked_adj = torch.where(adj > 0, adj4, zero_vec)
adj4 = F.softmax(masked_adj, dim=1)
a4 = self.node_regen(z_n, adj4.t())
zero_vec = -9e15*torch.ones_like(a4)
masked_nodes = torch.where(x > 0, a4, zero_vec)
a4 = F.softmax(masked_nodes, dim=1)
mu, logvar, mu_n, var_n, hidden5 = self.encode(torch.cat([a4,hidden1 + hidden2+hidden3+hidden4],-1), adj + adj1 + adj2+adj3+adj4, self.gc5_1, self.gc2, self.gc3, self.gc4, self.gc5)
z = self.reparameterize(mu, logvar)
z_n = self.reparameterize(mu_n, var_n)
adj5 = self.dc(z)
#get masked new adj
zero_vec = -9e15*torch.ones_like(adj5)
masked_adj = torch.where(adj > 0, adj5, zero_vec)
adj5 = F.softmax(masked_adj, dim=1)
a5 = self.node_regen(z_n, adj5.t())
zero_vec = -9e15*torch.ones_like(a5)
masked_nodes = torch.where(x > 0, a5, zero_vec)
a5 = F.softmax(masked_nodes, dim=1)
mu, logvar, mu_n, var_n, hidden6 = self.encode(torch.cat([a5,hidden1 + hidden2+hidden3+hidden4+hidden5],-1), adj + adj1 + adj2+adj3+adj4+adj5, self.gc6_1, self.gc2, self.gc3, self.gc4, self.gc5)
z = self.reparameterize(mu, logvar)
z_n = self.reparameterize(mu_n, var_n)
adj6 = self.dc(z)
#get masked new adj
zero_vec = -9e15*torch.ones_like(adj6)
masked_adj = torch.where(adj > 0, adj6, zero_vec)
adj6 = F.softmax(masked_adj, dim=1)
a6 = self.node_regen(z_n, adj6.t())
zero_vec = -9e15*torch.ones_like(a6)
masked_nodes = torch.where(x > 0, a6, zero_vec)
a6 = F.softmax(masked_nodes, dim=1)
classifier = self.gc_class(torch.cat([a6,hidden1 + hidden2 + hidden3+ hidden4+hidden5+hidden6 ],-1), adj + adj1 + adj2 + adj3+adj4 + adj5+adj6)
return a1+a2+a3 + a4+a5+a6, adj1 + adj2+ adj3+adj4 + adj5 + adj6, mu, logvar, mu_n, var_n, F.log_softmax(classifier, dim=1)'''
classifier = self.gc_class(torch.cat([a3,hidden1+hidden2+hidden3 ],-1), adj + adj1 +adj2 + adj3)
return a1+a2+a3, adj1+adj2+adj3 , mu, logvar, mu_n, var_n, F.log_softmax(classifier, dim=1)
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.mm(z, z.t()))
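        # Each entry adj[i, j] = act(z_i . z_j); with the default sigmoid activation this
        # yields a dense (N, N) matrix of edge probabilities.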
return adj
|
import json
import os
import shutil
from datetime import datetime
from typing import List, Dict, Any, Union
from ruamel import yaml
from data_tools.wrappers.analyses import get_analysis
from data_tools.wrappers.users import is_read_permitted, is_write_permitted, get_all_read_permitted_records
from data_tools.db_models import User, Workflow, db
from data_tools.util import AuthException, NotFoundException
from config.config import DATADIR, MODULEDIR
class WorkflowModule:
def __init__(self,
path: str,
package_name: str,
package_description: str,
subpackage_name: str,
subpackage_description: str):
self.path = path
self.package_name = package_name
self.package_description = package_description
self.subpackage_name = subpackage_name
self.subpackage_description = subpackage_description
tool_definition = self.get_workflow_module_contents()
self.label = tool_definition['label'] if 'label' in tool_definition else os.path.basename(path)
self.description = tool_definition['doc'] if 'doc' in tool_definition else os.path.basename(path)
self.id = tool_definition['id'] if 'id' in tool_definition else path
self.name = self.id
self.all_can_read = True
self.all_can_write = True
self.group_can_read = True
self.group_can_write = True
def get_workflow_module_contents(self, as_text=False) -> Union[str, Dict[str, Any]]:
with open(self.path, 'r') as stream:
if as_text:
stream.seek(0)
return stream.read()
data = yaml.safe_load(stream)
if 'cwlVersion' not in data:
raise ValueError('Not a CWL file')
return data
def to_dict(self):
return {
'id': self.id,
'path': self.path,
'label': self.label,
'description': self.description,
'package': self.package_name,
'package_description': self.package_description,
'subpackage': self.subpackage_name,
'subpackage_description': self.subpackage_description,
'tool_definition': self.get_workflow_module_contents(),
'created_on': datetime.fromtimestamp(os.path.getctime(self.path)).isoformat(),
'updated_on': datetime.fromtimestamp(os.path.getmtime(self.path)).isoformat()
}
def get_workflow_template(name: str, description: str, workflow_id: int) -> Dict[str, Any]:
return {
'class': 'Workflow',
'cwlVersion': 'v1.0',
'$namespaces': {'sbg': 'https://www.sevenbridges.com'},
'label': name,
'doc': description,
'id': f'workflow{workflow_id}',
'inputs': [],
'outputs': [],
'steps': []
}
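# For illustration (not part of the original module): get_workflow_template('My Flow', 'demo', 7)
# returns an empty CWL v1.0 Workflow skeleton with label 'My Flow', doc 'demo' and
# id 'workflow7', ready to have inputs, outputs and steps appended.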
def get_workflows(user: User, filter_by: Dict[str, Any] = None) -> List[Workflow]:
"""
Get a list of available saved workflows.
:param user:
:return:
"""
return get_all_read_permitted_records(user, Workflow, filter_by)
def get_workflow(user: User, workflow_id: int) -> Workflow:
"""
Get workflow metadata.
:param user:
:param workflow_id:
:return:
"""
workflow = Workflow.query.filter_by(id=workflow_id).first()
if workflow is None:
raise NotFoundException(f'No workflow with id {workflow_id}.')
if is_read_permitted(user, workflow):
return workflow
raise AuthException(f'User {user.email} is not permitted to access workflow {workflow_id}')
def update_workflow(user: User, workflow: Workflow, new_data: Dict[str, Any], filename: str = None) -> Workflow:
"""
Update workflow metadata.
:param user:
:param workflow:
:param new_data:
    :param filename:
:return:
"""
if is_write_permitted(user, workflow):
if 'id' in new_data:
            if workflow.id != int(new_data['id']) and Workflow.query.filter_by(id=new_data['id']).first() is not None:
raise ValueError(f'Workflow with id {new_data["id"]} already exists!')
if 'analysis_ids' in new_data:
new_analyses = [get_analysis(user, analysis_id) for analysis_id in new_data['analysis_ids']]
remove_analyses = [analysis for analysis in workflow.analyses if analysis.id not in new_data['analysis_ids']]
for analysis in new_analyses:
if not is_write_permitted(user, analysis):
raise AuthException(f'User {user.email} is not permitted to attach workflow {workflow.id} to analysis {analysis.id}')
for analysis in remove_analyses:
if not is_write_permitted(user, analysis):
raise AuthException(f'User {user.email} is not permitted to detach workflow {workflow.id} from analysis {analysis.id}')
workflow.analyses = new_analyses
workflow.update(new_data)
if 'workflow_definition' in new_data:
if workflow.file_type == 'json':
json.dump(new_data['workflow_definition'], open(workflow.filename, 'w+'))
elif workflow.file_type == 'yaml':
yaml.dump(new_data['workflow_definition'], open(workflow.filename, 'w+'))
else:
open(workflow.filename, 'w+').write(new_data['workflow_definition'])
if filename is not None:
os.remove(workflow.filename)
shutil.copy(filename, workflow.filename)
os.remove(filename)
db.session.commit()
return workflow
raise AuthException(f'User {user.email} is not permitted to modify workflow {workflow.id}')
def create_workflow(user: User, data: Dict[str, Any]) -> Workflow:
"""
Create a new workflow.
:param user:
:param data:
:return:
"""
if 'id' in data: # cannot create with designated id
del data['id']
workflow = Workflow(creator=user, owner=user, last_editor=user, name=data['name'])
db.session.add(workflow)
db.session.commit()
workflow.filename = f'{DATADIR}/workflows/{workflow.id}.{workflow.workflow_language}'
db.session.commit()
if 'workflow_definition' not in data:
data['workflow_definition'] = get_workflow_template(workflow.name,
data['description'] if 'description' in data else '',
workflow.id)
with open(workflow.filename, 'w+') as file:
if workflow.file_type == 'json':
json.dump(data['workflow_definition'], file)
elif workflow.file_type == 'yaml':
yaml.dump(data['workflow_definition'], file)
else:
file.write(data['workflow_definition'])
for key, value in data.items():
        if key in workflow.to_dict() and key != 'filename':
workflow.__setattr__(key, value)
db.session.commit()
return workflow
def delete_workflow(user: User, workflow: Workflow) -> Dict[str, str]:
"""
Delete a workflow from the database and filesystem
:param user:
:param workflow:
:return:
"""
if is_write_permitted(user, workflow):
workflow_id = workflow.id
db.session.delete(workflow)
db.session.commit()
return {'message': f'Workflow {workflow_id} deleted.'}
    raise AuthException(f'User {user.email} is not permitted to delete workflow {workflow.id}')
def get_tool_definition(path: str) -> Dict[str, Any]:
with open(path, 'r') as stream:
data = yaml.safe_load(stream)
if 'cwlVersion' not in data:
raise yaml.YAMLError('Not a CWL file')
data['module_path'] = path
return data
def get_modules(module_path: str=None) -> List[WorkflowModule]:
"""
Get available modules for use in workflows.
:return:
"""
module_path = MODULEDIR if module_path is None else module_path
modules = []
# find packages
for directory, subdirectories, files in os.walk(module_path):
dir_info = {}
if 'info.json' in files:
dir_info = json.load(open(os.path.join(directory, 'info.json')))
if 'name' not in dir_info:
dir_info['name'] = directory
package_name = dir_info['name']
package_description = dir_info['description'] if 'description' in dir_info else ''
for filename in files: # this is for files that are one directory deep
if os.path.splitext(filename)[1] == '.cwl':
path = os.path.join(directory, filename)
tool_def = get_tool_definition(path)
modules.append(WorkflowModule(path, package_name, package_description, '', ''))
# Find subpackages
for subdirectory in subdirectories:
for _, subsubdirectories, subfiles in os.walk(subdirectory):
subdir_info = {}
if 'info.json' in subfiles:
subdir_info = json.load(open(os.path.join(directory, subdirectory, 'info.json')))
                if 'name' not in subdir_info:
                    subdir_info['name'] = subdirectory
                subpackage_name = subdir_info['name']
                subpackage_description = subdir_info['description'] if 'description' in subdir_info else ''
for filename in subfiles:
if os.path.splitext(filename)[1] == '.cwl':
path = os.path.join(module_path, directory, filename)
tool_def = get_tool_definition(path)
modules.append(WorkflowModule(path, package_name, package_description, subpackage_name,
subpackage_description))
return modules
def get_module(path: str) -> WorkflowModule:
"""
Get CWL CommandLineTool definition as a dictionary.
:param path:
:return:
"""
directory = os.path.dirname(path) # a full path
parent_dir = os.path.dirname(os.path.dirname(path)) # a full path
# We try to find info.json in both paths. It's ok to not find it here, but if we can't find it in parent, we assume
# current dir is a "package" and that this module belongs to no subpackages
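    # Illustrative layout (directory names here are hypothetical, not from this repository):
    #   modules/normalization/info.json            -> package metadata
    #   modules/normalization/scaling/info.json    -> subpackage metadata
    #   modules/normalization/scaling/zscore.cwl   -> the module definition passed in as `path`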
subdir_info_file = os.path.join(directory, 'info.json')
subdir_info = json.load(open(subdir_info_file, 'r')) if os.path.isfile(subdir_info_file) \
else {'name': os.path.basename(os.path.dirname(directory))}
parent_dir_info_file = os.path.join(parent_dir, 'info.json')
parent_dir_info = json.load(open(parent_dir_info_file, 'r')) if os.path.isfile(parent_dir_info_file) \
else {'name': os.path.basename(os.path.dirname(parent_dir))}
package_name = parent_dir_info['name'] if os.path.isfile(parent_dir_info_file) else subdir_info['name']
package_description = parent_dir_info['description'] if 'description' in parent_dir_info \
else subdir_info['description'] if 'description' in subdir_info else ''
subpackage_name = subdir_info['name'] if os.path.isfile(parent_dir_info_file) else ''
subpackage_description = \
subdir_info['description'] if os.path.isfile(parent_dir_info_file) and 'description' in subdir_info else ''
tool_def = yaml.safe_load(open(path, 'r'))
if 'cwlVersion' not in tool_def:
raise ValueError('Not a CWL file')
return WorkflowModule(path, package_name, package_description, subpackage_name, subpackage_description)
def get_module_by_id(module_id: str, module_path: str = None) -> WorkflowModule:
modules = get_modules(module_path)
val = next((module for module in modules if module.id == module_id), None) if module_id is not None else None
if val is None:
raise ValueError(f'Module with id {module_id} not found in {module_path} or immediate subdirectories.')
return val
|
import Common as common
from Common import opencv
class Camera(object):
def __init__(self):
# Initialize the camera capture
self.camera_capture = opencv.VideoCapture(0)
def capture_frame(self, ignore_first_frame):
# Get frame, ignore the first one if needed
if(ignore_first_frame):
self.camera_capture.read()
(capture_status, self.current_camera_frame) = self.camera_capture.read()
# Verify capture status
if(capture_status):
return self.current_camera_frame
else:
# Print error to the console
print(common.CAPTURE_FAILED)
def display_image_with_label(self, image, label):
# Put label on the image
image_with_label = opencv.putText(image, label,
common.TEXT_ORIGIN,
common.FONT_FACE,
common.FONT_SCALE,
common.GREEN,
common.FONT_THICKNESS,
common.FONT_LINE)
# Display image
opencv.imshow(common.PREVIEW_WINDOW_NAME, image_with_label)
# Wait until user presses any key
opencv.waitKey()
def display_current_frame_with_label(self, label):
self.display_image_with_label(self.current_camera_frame, label) |
from .base import Filth
class OrganizationFilth(Filth):
type = 'organization'
|
from kivy.uix.widget import Widget
from kivy.graphics import Rectangle, Color
from kivy.app import App
from kivy.config import Config
from random import randint
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
class CornerRectangleWidget(Widget):
def __init__(self, **kwargs):
super(CornerRectangleWidget, self).__init__(**kwargs)
with self.canvas:
Rectangle(source='back.png', size=(450,415))
with self.canvas:
Rectangle(source='title.png', pos=(25,315), size=(400,100))
with self.canvas:
Rectangle(source='button_area.png', pos=(25,5), size=(400,200))
class cointossApp(App):
#homeee=homepage()
def build(self):
App.get_running_app().stop()
Config.set('graphics', 'width', '450')
Config.set('graphics', 'height', '415')
Config.set('graphics','resizable', True)
self.layout = GridLayout()
bt = Button(pos=(120,95),text='', color=(0,1,0,1),font_size=15, width=200, height=75, id=str('hjh'), background_normal='flip.png',background_down='flip_hover.png')
t=CornerRectangleWidget()
self.layout.add_widget(t)
res=Button(pos=(25,210),text='', color=(0,1,0,1),font_size=15, width=400, height=100, id=str('res'), background_normal='null.png',background_down='null.png')
reset_btn=Button(pos=(119,50),text='', color=(0,1,0,1),font_size=15, width=98, height=40, id=str('resett'), background_normal='reset.png',background_down='reset_hover.png')
reset_btn.bind(on_release=self.press_reset)
self.layout.add_widget(bt)
self.layout.add_widget(reset_btn)
quit_btn=Button(pos=(223,50),text='', color=(0,1,0,1),font_size=15, width=98, height=40, id=str('quit'), background_normal='quit.png',background_down='quit _hover.png')
quit_btn.bind(on_release=self.press_quit)
self.layout.add_widget(quit_btn)
self.layout.add_widget(res)
bt.bind(on_release=self.press_flip)
return self.layout
def press_flip(self,res):
if (randint(0,1)==0):
res=Button(pos=(25,210),text='', color=(0,1,0,1),font_size=15, width=400, height=100, id=str('res1'), background_normal='head.png',background_down='head.png')
self.layout.add_widget(res)
else:
res=Button(pos=(25,210),text='', color=(0,1,0,1),font_size=15, width=400, height=100, id=str('res1'), background_normal='tail.png',background_down='tail.png')
self.layout.add_widget(res)
#return self.layout
def press_quit(self,quit_btn):
hom.run()
def press_reset(self,res):
res=Button(pos=(25,210),text='', color=(0,1,0,1),font_size=15, width=400, height=100, id=str('res1'), background_normal='null.png',background_down='null.png')
self.layout.add_widget(res)
cointoss=cointossApp()
class homewidget(Widget):
def __init__(self, **kwargs):
super(homewidget, self).__init__(**kwargs)
with self.canvas:
Rectangle(source='back.png', size=(450,415))
with self.canvas:
Rectangle(source='title.png', pos=(25,315), size=(400,100))
cointoss=cointossApp()
class homepage(App):
coin=cointossApp()
def build(self):
App.get_running_app().stop()
Config.set('graphics', 'width', '450')
Config.set('graphics', 'height', '415')
Config.set('graphics','resizable', True)
self.layout = GridLayout()
ar=homewidget()
self.layout.add_widget(ar)
bt_play=Button(pos=(25,210),text='', color=(0,1,0,1),font_size=15, width=400, height=100, id=str('play'), background_normal='Play.png',background_down='Play_hover.png')
self.layout.add_widget(bt_play)
bt_play.bind(on_release=self.press_play)
return self.layout
def press_play(self,bt_play):
self.coin.run()
#hom=homepage()
cointoss=cointossApp()
if __name__ == '__main__':
hom=homepage()
    hom.run()
|
import os
import numpy as np
import random
from skimage import io
from scipy.ndimage import zoom
import matplotlib.pyplot as plt
from tqdm import tqdm as tqdm
from pandas import read_csv
from math import floor, ceil, sqrt, exp
from IPython import display
import time
from itertools import chain
import warnings
from pprint import pprint
# Import PyTorch modules
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torchvision.transforms as tr
from torch.nn.modules.padding import ReplicationPad2d
# Import loss functions, see losses.py file for options
from change_detection.deep_change_detection.losses import DiceLoss
# The OSCD dataset is stored on Google Drive. If the dataset is stored locally, this is not required.
from google.colab import drive
drive.mount('/content/drive')
# Replace PATH_TO_DATASET with your path to the OSCD dataset.
PATH_TO_DATASET = '/content/drive/MyDrive/onera/'
IS_PROTOTYPE = False
# This cell defines a number of functions that we will need to train the network e.g. data augmentation functions,
# functions that call the different bands of the sentinel data, etc.
def adjust_shape(I, s):
"""Adjust shape of grayscale image I to s."""
    # crop if necessary
I = I[:s[0],:s[1]]
si = I.shape
# pad if necessary
p0 = max(0,s[0] - si[0])
p1 = max(0,s[1] - si[1])
return np.pad(I,((0,p0),(0,p1)),'edge')
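# For example (illustrative): adjust_shape(np.ones((3, 5)), (4, 4)) crops the five columns
# down to four and edge-pads one extra row, returning a (4, 4) array.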
def read_sentinel_img(path):
"""Read cropped Sentinel-2 image: RGB bands."""
im_name = os.listdir(path)[0][:-7]
r = io.imread(path + im_name + "B04.tif")
g = io.imread(path + im_name + "B03.tif")
b = io.imread(path + im_name + "B02.tif")
I = np.stack((r,g,b),axis=2).astype('float')
if NORMALISE_IMGS:
I = (I - I.mean()) / I.std()
return I
def read_sentinel_img_4(path):
"""Read cropped Sentinel-2 image: RGB and NIR bands."""
im_name = os.listdir(path)[0][:-7]
r = io.imread(path + im_name + "B04.tif")
g = io.imread(path + im_name + "B03.tif")
b = io.imread(path + im_name + "B02.tif")
nir = io.imread(path + im_name + "B08.tif")
I = np.stack((r,g,b,nir),axis=2).astype('float')
if NORMALISE_IMGS:
I = (I - I.mean()) / I.std()
return I
def read_sentinel_img_leq20(path):
"""Read cropped Sentinel-2 image: bands with resolution less than or equals to 20m."""
im_name = os.listdir(path)[0][:-7]
r = io.imread(path + im_name + "B04.tif")
s = r.shape
g = io.imread(path + im_name + "B03.tif")
b = io.imread(path + im_name + "B02.tif")
nir = io.imread(path + im_name + "B08.tif")
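    # The 20 m bands (B05, B06, B07, B8A, B11, B12) are upsampled by a factor of 2 and
    # cropped/padded to the 10 m grid of B04.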
ir1 = adjust_shape(zoom(io.imread(path + im_name + "B05.tif"),2),s)
ir2 = adjust_shape(zoom(io.imread(path + im_name + "B06.tif"),2),s)
ir3 = adjust_shape(zoom(io.imread(path + im_name + "B07.tif"),2),s)
nir2 = adjust_shape(zoom(io.imread(path + im_name + "B8A.tif"),2),s)
swir2 = adjust_shape(zoom(io.imread(path + im_name + "B11.tif"),2),s)
swir3 = adjust_shape(zoom(io.imread(path + im_name + "B12.tif"),2),s)
I = np.stack((r,g,b,nir,ir1,ir2,ir3,nir2,swir2,swir3),axis=2).astype('float')
if NORMALISE_IMGS:
I = (I - I.mean()) / I.std()
return I
def read_sentinel_img_leq60(path):
"""Read cropped Sentinel-2 image: all bands."""
im_name = os.listdir(path)[0][:-7]
r = io.imread(path + im_name + "B04.tif")
s = r.shape
g = io.imread(path + im_name + "B03.tif")
b = io.imread(path + im_name + "B02.tif")
nir = io.imread(path + im_name + "B08.tif")
ir1 = adjust_shape(zoom(io.imread(path + im_name + "B05.tif"),2),s)
ir2 = adjust_shape(zoom(io.imread(path + im_name + "B06.tif"),2),s)
ir3 = adjust_shape(zoom(io.imread(path + im_name + "B07.tif"),2),s)
nir2 = adjust_shape(zoom(io.imread(path + im_name + "B8A.tif"),2),s)
swir2 = adjust_shape(zoom(io.imread(path + im_name + "B11.tif"),2),s)
swir3 = adjust_shape(zoom(io.imread(path + im_name + "B12.tif"),2),s)
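    # The 60 m bands (B01, B09, B10) are upsampled by a factor of 6 to the 10 m grid.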
uv = adjust_shape(zoom(io.imread(path + im_name + "B01.tif"),6),s)
wv = adjust_shape(zoom(io.imread(path + im_name + "B09.tif"),6),s)
swirc = adjust_shape(zoom(io.imread(path + im_name + "B10.tif"),6),s)
I = np.stack((r,g,b,nir,ir1,ir2,ir3,nir2,swir2,swir3,uv,wv,swirc),axis=2).astype('float')
if NORMALISE_IMGS:
I = (I - I.mean()) / I.std()
return I
def read_sentinel_img_trio(path):
"""Read cropped Sentinel-2 image pair and change map."""
# read images
if TYPE == 0:
I1 = read_sentinel_img(path + '/imgs_1/')
I2 = read_sentinel_img(path + '/imgs_2/')
elif TYPE == 1:
I1 = read_sentinel_img_4(path + '/imgs_1/')
I2 = read_sentinel_img_4(path + '/imgs_2/')
elif TYPE == 2:
I1 = read_sentinel_img_leq20(path + '/imgs_1/')
I2 = read_sentinel_img_leq20(path + '/imgs_2/')
elif TYPE == 3:
I1 = read_sentinel_img_leq60(path + '/imgs_1/')
I2 = read_sentinel_img_leq60(path + '/imgs_2/')
cm = io.imread(path + '/cm/cm.png', as_gray=True) != 0
    # pad I2 if necessary so that both images in the pair have the same spatial size
s1 = I1.shape
s2 = I2.shape
I2 = np.pad(I2,((0, s1[0] - s2[0]), (0, s1[1] - s2[1]), (0,0)),'edge')
return I1, I2, cm
def reshape_for_torch(I):
"""Transpose image for PyTorch coordinates."""
# out = np.swapaxes(I,1,2)
# out = np.swapaxes(out,0,1)
# out = out[np.newaxis,:]
out = I.transpose((2, 0, 1))
return torch.from_numpy(out)
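# For example, a (96, 96, 13) height x width x band image becomes a (13, 96, 96) torch
# tensor, matching PyTorch's channels-first convention.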
# Create the dataset class
class ChangeDetectionDataset(Dataset):
def __init__(self, path, train = True, patch_side = 96, stride = None, use_all_bands = False, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
# basics
self.transform = transform
self.path = path
self.patch_side = patch_side
if not stride:
self.stride = 1
else:
self.stride = stride
if train:
fname = 'large_train.txt' # .txt file containing the names of cities used for training, usually just train.txt
else:
fname = 'large_test.txt' # .txt file containing names of cities used for testing, usually just test.txt
# print(path + fname)
self.names = read_csv(path + fname).columns
self.n_imgs = self.names.shape[0]
n_pix = 0
true_pix = 0
# load images
self.imgs_1 = {}
self.imgs_2 = {}
self.change_maps = {}
self.n_patches_per_image = {}
self.n_patches = 0
self.patch_coords = []
for im_name in tqdm(self.names):
# load and store each image
I1, I2, cm = read_sentinel_img_trio(self.path + im_name)
self.imgs_1[im_name] = reshape_for_torch(I1)
self.imgs_2[im_name] = reshape_for_torch(I2)
self.change_maps[im_name] = cm
s = cm.shape
n_pix += np.prod(s)
true_pix += cm.sum()
# calculate the number of patches
s = self.imgs_1[im_name].shape
n1 = ceil((s[1] - self.patch_side + 1) / self.stride)
n2 = ceil((s[2] - self.patch_side + 1) / self.stride)
n_patches_i = n1 * n2
self.n_patches_per_image[im_name] = n_patches_i
self.n_patches += n_patches_i
            # generate patch coordinates
for i in range(n1):
for j in range(n2):
# coordinates in (x1, x2, y1, y2)
current_patch_coords = (im_name,
[self.stride*i, self.stride*i + self.patch_side, self.stride*j, self.stride*j + self.patch_side],
[self.stride*(i + 1), self.stride*(j + 1)])
self.patch_coords.append(current_patch_coords)
self.weights = [ FP_MODIFIER * 2 * true_pix / n_pix, 2 * (n_pix - true_pix) / n_pix]
def get_img(self, im_name):
return self.imgs_1[im_name], self.imgs_2[im_name], self.change_maps[im_name]
def __len__(self):
return self.n_patches
def __getitem__(self, idx):
current_patch_coords = self.patch_coords[idx]
im_name = current_patch_coords[0]
limits = current_patch_coords[1]
centre = current_patch_coords[2]
I1 = self.imgs_1[im_name][:, limits[0]:limits[1], limits[2]:limits[3]]
I2 = self.imgs_2[im_name][:, limits[0]:limits[1], limits[2]:limits[3]]
label = self.change_maps[im_name][limits[0]:limits[1], limits[2]:limits[3]]
label = torch.from_numpy(1*np.array(label)).float()
sample = {'I1': I1, 'I2': I2, 'label': label}
if self.transform:
sample = self.transform(sample)
return sample
# Below are classes to perform augmentations on the given training images.
class RandomFlip(object):
"""Flip randomly the images in a sample."""
def __call__(self, sample):
I1, I2, label = sample['I1'], sample['I2'], sample['label']
if random.random() > 0.5:
I1 = I1.numpy()[:,:,::-1].copy()
I1 = torch.from_numpy(I1)
I2 = I2.numpy()[:,:,::-1].copy()
I2 = torch.from_numpy(I2)
label = label.numpy()[:,::-1].copy()
label = torch.from_numpy(label)
return {'I1': I1, 'I2': I2, 'label': label}
class RandomRot(object):
"""Rotate randomly the images in a sample."""
def __call__(self, sample):
I1, I2, label = sample['I1'], sample['I2'], sample['label']
n = random.randint(0, 3)
if n:
I1 = sample['I1'].numpy()
I1 = np.rot90(I1, n, axes=(1, 2)).copy()
I1 = torch.from_numpy(I1)
I2 = sample['I2'].numpy()
I2 = np.rot90(I2, n, axes=(1, 2)).copy()
I2 = torch.from_numpy(I2)
label = sample['label'].numpy()
label = np.rot90(label, n, axes=(0, 1)).copy()
label = torch.from_numpy(label)
return {'I1': I1, 'I2': I2, 'label': label}
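# A minimal wiring sketch (illustrative only; TRAIN_STRIDE and BATCH_SIZE are assumed
# names, and the actual notebook builds its datasets and loaders elsewhere):
# train_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train=True, stride=TRAIN_STRIDE,
#                                        transform=tr.Compose([RandomFlip(), RandomRot()]))
# train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
# test_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train=False)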
class Unet(nn.Module):
def __init__(self, input_nbr, label_nbr):
super(Unet, self).__init__()
self.input_nbr = input_nbr
self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1)
self.bn11 = nn.BatchNorm2d(16)
self.do11 = nn.Dropout2d(p=0.2)
self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(16)
self.do12 = nn.Dropout2d(p=0.2)
self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(32)
self.do21 = nn.Dropout2d(p=0.2)
self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(32)
self.do22 = nn.Dropout2d(p=0.2)
self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(64)
self.do31 = nn.Dropout2d(p=0.2)
self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(64)
self.do32 = nn.Dropout2d(p=0.2)
self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(64)
self.do33 = nn.Dropout2d(p=0.2)
self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(128)
self.do41 = nn.Dropout2d(p=0.2)
self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(128)
self.do42 = nn.Dropout2d(p=0.2)
self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(128)
self.do43 = nn.Dropout2d(p=0.2)
self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv43d = nn.ConvTranspose2d(256, 128, kernel_size=3, padding=1)
self.bn43d = nn.BatchNorm2d(128)
self.do43d = nn.Dropout2d(p=0.2)
self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1)
self.bn42d = nn.BatchNorm2d(128)
self.do42d = nn.Dropout2d(p=0.2)
self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
self.bn41d = nn.BatchNorm2d(64)
self.do41d = nn.Dropout2d(p=0.2)
self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv33d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
self.bn33d = nn.BatchNorm2d(64)
self.do33d = nn.Dropout2d(p=0.2)
self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1)
self.bn32d = nn.BatchNorm2d(64)
self.do32d = nn.Dropout2d(p=0.2)
self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
self.bn31d = nn.BatchNorm2d(32)
self.do31d = nn.Dropout2d(p=0.2)
self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv22d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
self.bn22d = nn.BatchNorm2d(32)
self.do22d = nn.Dropout2d(p=0.2)
self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
self.bn21d = nn.BatchNorm2d(16)
self.do21d = nn.Dropout2d(p=0.2)
self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv12d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
self.bn12d = nn.BatchNorm2d(16)
self.do12d = nn.Dropout2d(p=0.2)
self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1)
self.sm = nn.LogSoftmax(dim=1)
    def forward(self, x1, x2):
        """Forward method."""
        # Early fusion: concatenate the two input dates along the channel dimension
        x = torch.cat((x1, x2), 1)
        # Stage 1
x11 = self.do11(F.relu(self.bn11(self.conv11(x))))
x12 = self.do12(F.relu(self.bn12(self.conv12(x11))))
x1p = F.max_pool2d(x12, kernel_size=2, stride=2)
# Stage 2
x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
x22 = self.do22(F.relu(self.bn22(self.conv22(x21))))
x2p = F.max_pool2d(x22, kernel_size=2, stride=2)
# Stage 3
x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))
x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))
x33 = self.do33(F.relu(self.bn33(self.conv33(x32))))
x3p = F.max_pool2d(x33, kernel_size=2, stride=2)
# Stage 4
x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))
x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))
x43 = self.do43(F.relu(self.bn43(self.conv43(x42))))
x4p = F.max_pool2d(x43, kernel_size=2, stride=2)
# Stage 4d
x4d = self.upconv4(x4p)
pad4 = ReplicationPad2d((0, x43.size(3) - x4d.size(3), 0, x43.size(2) - x4d.size(2)))
x4d = torch.cat((pad4(x4d), x43), 1)
x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d))))
x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d))))
x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d))))
# Stage 3d
x3d = self.upconv3(x41d)
pad3 = ReplicationPad2d((0, x33.size(3) - x3d.size(3), 0, x33.size(2) - x3d.size(2)))
x3d = torch.cat((pad3(x3d), x33), 1)
x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d))))
x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d))))
x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d))))
# Stage 2d
x2d = self.upconv2(x31d)
pad2 = ReplicationPad2d((0, x22.size(3) - x2d.size(3), 0, x22.size(2) - x2d.size(2)))
x2d = torch.cat((pad2(x2d), x22), 1)
x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))
x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))
# Stage 1d
x1d = self.upconv1(x21d)
pad1 = ReplicationPad2d((0, x12.size(3) - x1d.size(3), 0, x12.size(2) - x1d.size(2)))
x1d = torch.cat((pad1(x1d), x12), 1)
x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))
x11d = self.conv11d(x12d)
return self.sm(x11d)
# Implementation of a Siamese network
# Daudt, R. C., Le Saux, B., & Boulch, A. "Fully convolutional siamese networks for change detection". In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.
class SiamUnet(nn.Module):
"""SiamUnet segmentation network."""
def __init__(self, input_nbr, label_nbr):
super(SiamUnet, self).__init__()
self.input_nbr = input_nbr
self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1)
self.bn11 = nn.BatchNorm2d(16)
self.do11 = nn.Dropout2d(p=0.2)
self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(16)
self.do12 = nn.Dropout2d(p=0.2)
self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(32)
self.do21 = nn.Dropout2d(p=0.2)
self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(32)
self.do22 = nn.Dropout2d(p=0.2)
self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(64)
self.do31 = nn.Dropout2d(p=0.2)
self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(64)
self.do32 = nn.Dropout2d(p=0.2)
self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(64)
self.do33 = nn.Dropout2d(p=0.2)
self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(128)
self.do41 = nn.Dropout2d(p=0.2)
self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(128)
self.do42 = nn.Dropout2d(p=0.2)
self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(128)
self.do43 = nn.Dropout2d(p=0.2)
self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv43d = nn.ConvTranspose2d(256, 128, kernel_size=3, padding=1)
self.bn43d = nn.BatchNorm2d(128)
self.do43d = nn.Dropout2d(p=0.2)
self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1)
self.bn42d = nn.BatchNorm2d(128)
self.do42d = nn.Dropout2d(p=0.2)
self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
self.bn41d = nn.BatchNorm2d(64)
self.do41d = nn.Dropout2d(p=0.2)
self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv33d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
self.bn33d = nn.BatchNorm2d(64)
self.do33d = nn.Dropout2d(p=0.2)
self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1)
self.bn32d = nn.BatchNorm2d(64)
self.do32d = nn.Dropout2d(p=0.2)
self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
self.bn31d = nn.BatchNorm2d(32)
self.do31d = nn.Dropout2d(p=0.2)
self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv22d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
self.bn22d = nn.BatchNorm2d(32)
self.do22d = nn.Dropout2d(p=0.2)
self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
self.bn21d = nn.BatchNorm2d(16)
self.do21d = nn.Dropout2d(p=0.2)
self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)
self.conv12d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
self.bn12d = nn.BatchNorm2d(16)
self.do12d = nn.Dropout2d(p=0.2)
self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1)
self.sm = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
"""Forward method."""
# Stage 1
x11 = self.do11(F.relu(self.bn11(self.conv11(x1))))
x12_1 = self.do12(F.relu(self.bn12(self.conv12(x11))))
x1p = F.max_pool2d(x12_1, kernel_size=2, stride=2)
# Stage 2
x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
x22_1 = self.do22(F.relu(self.bn22(self.conv22(x21))))
x2p = F.max_pool2d(x22_1, kernel_size=2, stride=2)
# Stage 3
x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))
x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))
x33_1 = self.do33(F.relu(self.bn33(self.conv33(x32))))
x3p = F.max_pool2d(x33_1, kernel_size=2, stride=2)
# Stage 4
x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))
x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))
x43_1 = self.do43(F.relu(self.bn43(self.conv43(x42))))
x4p = F.max_pool2d(x43_1, kernel_size=2, stride=2)
################################ Now forward pass of the second image
# Stage 1
x11 = self.do11(F.relu(self.bn11(self.conv11(x2))))
x12_2 = self.do12(F.relu(self.bn12(self.conv12(x11))))
x1p = F.max_pool2d(x12_2, kernel_size=2, stride=2)
# Stage 2
x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
x22_2 = self.do22(F.relu(self.bn22(self.conv22(x21))))
x2p = F.max_pool2d(x22_2, kernel_size=2, stride=2)
# Stage 3
x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))
x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))
x33_2 = self.do33(F.relu(self.bn33(self.conv33(x32))))
x3p = F.max_pool2d(x33_2, kernel_size=2, stride=2)
# Stage 4
x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))
x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))
x43_2 = self.do43(F.relu(self.bn43(self.conv43(x42))))
x4p = F.max_pool2d(x43_2, kernel_size=2, stride=2)
# Stage 4d
x4d = self.upconv4(x4p)
pad4 = ReplicationPad2d((0, x43_1.size(3) - x4d.size(3), 0, x43_1.size(2) - x4d.size(2)))
x4d = torch.cat((pad4(x4d), torch.abs(x43_1 - x43_2)), 1)
x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d))))
x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d))))
x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d))))
# Stage 3d
x3d = self.upconv3(x41d)
pad3 = ReplicationPad2d((0, x33_1.size(3) - x3d.size(3), 0, x33_1.size(2) - x3d.size(2)))
x3d = torch.cat((pad3(x3d), torch.abs(x33_1 - x33_2)), 1)
x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d))))
x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d))))
x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d))))
# Stage 2d
x2d = self.upconv2(x31d)
pad2 = ReplicationPad2d((0, x22_1.size(3) - x2d.size(3), 0, x22_1.size(2) - x2d.size(2)))
x2d = torch.cat((pad2(x2d), torch.abs(x22_1 - x22_2)), 1)
x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))
x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))
# Stage 1d
x1d = self.upconv1(x21d)
pad1 = ReplicationPad2d((0, x12_1.size(3) - x1d.size(3), 0, x12_1.size(2) - x1d.size(2)))
x1d = torch.cat((pad1(x1d), torch.abs(x12_1 - x12_2)), 1)
x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))
x11d = self.conv11d(x12d)
return self.sm(x11d)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
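# For example, count_parameters(SiamUnet(13, 2)) reports the number of trainable weights of
# the Siamese model when all 13 Sentinel-2 bands are used (an illustrative call, not from
# the original notebook).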
def train(n_epochs = N_EPOCHS, save = True):
t = np.linspace(1, n_epochs, n_epochs)
epoch_train_loss = 0 * t
epoch_train_accuracy = 0 * t
epoch_train_change_accuracy = 0 * t
epoch_train_nochange_accuracy = 0 * t
epoch_train_precision = 0 * t
epoch_train_recall = 0 * t
epoch_train_Fmeasure = 0 * t
epoch_test_loss = 0 * t
epoch_test_accuracy = 0 * t
epoch_test_change_accuracy = 0 * t
epoch_test_nochange_accuracy = 0 * t
epoch_test_precision = 0 * t
epoch_test_recall = 0 * t
epoch_test_Fmeasure = 0 * t
# mean_acc = 0
# best_mean_acc = 0
fm = 0
best_fm = 0
lss = 1000
best_lss = 1000
plt.figure(num=1)
plt.figure(num=2)
plt.figure(num=3)
optimizer = torch.optim.Adam(net.parameters(), weight_decay=1e-4)
# optimizer = torch.optim.Adam(net.parameters(), lr=0.0005)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
for epoch_index in tqdm(range(n_epochs)):
net.train()
        print('Epoch: ' + str(epoch_index + 1) + ' of ' + str(n_epochs))
tot_count = 0
tot_loss = 0
tot_accurate = 0
class_correct = list(0. for i in range(2))
class_total = list(0. for i in range(2))
# for batch_index, batch in enumerate(tqdm(data_loader)):
for batch in train_loader:
I1 = Variable(batch['I1'].float().cuda())
I2 = Variable(batch['I2'].float().cuda())
label = torch.squeeze(Variable(batch['label'].cuda()))
#label = F.one_hot(label.long())
### Non-cuda options for CPU running
#I1 = Variable(batch['I1'].float())
#I2 = Variable(batch['I2'].float())
#label = torch.squeeze(Variable(batch['label']))
optimizer.zero_grad()
output = net(I1, I2)
loss = criterion(output, label.long())
loss.backward()
optimizer.step()
scheduler.step()  # ExponentialLR decay (note: usually stepped once per epoch, after the batch loop)
epoch_train_loss[epoch_index], epoch_train_accuracy[epoch_index], cl_acc, pr_rec = test(train_dataset)
epoch_train_nochange_accuracy[epoch_index] = cl_acc[0]
epoch_train_change_accuracy[epoch_index] = cl_acc[1]
epoch_train_precision[epoch_index] = pr_rec[0]
epoch_train_recall[epoch_index] = pr_rec[1]
epoch_train_Fmeasure[epoch_index] = pr_rec[2]
# epoch_test_loss[epoch_index], epoch_test_accuracy[epoch_index], cl_acc, pr_rec = test(test_dataset)
epoch_test_loss[epoch_index], epoch_test_accuracy[epoch_index], cl_acc, pr_rec = test(test_dataset)
epoch_test_nochange_accuracy[epoch_index] = cl_acc[0]
epoch_test_change_accuracy[epoch_index] = cl_acc[1]
epoch_test_precision[epoch_index] = pr_rec[0]
epoch_test_recall[epoch_index] = pr_rec[1]
epoch_test_Fmeasure[epoch_index] = pr_rec[2]
# Plotting graphs of progress
plt.figure(num=1)
plt.clf()
l1_1, = plt.plot(t[:epoch_index + 1], epoch_train_loss[:epoch_index + 1], label='Train loss')
l1_2, = plt.plot(t[:epoch_index + 1], epoch_test_loss[:epoch_index + 1], label='Test loss')
plt.legend(handles=[l1_1, l1_2])
plt.grid()
# plt.gcf().gca().set_ylim(bottom = 0)
plt.gcf().gca().set_xlim(left = 0)
plt.title('Loss')
display.clear_output(wait=True)
display.display(plt.gcf())
plt.figure(num=2)
plt.clf()
l2_1, = plt.plot(t[:epoch_index + 1], epoch_train_accuracy[:epoch_index + 1], label='Train accuracy')
l2_2, = plt.plot(t[:epoch_index + 1], epoch_test_accuracy[:epoch_index + 1], label='Test accuracy')
plt.legend(handles=[l2_1, l2_2])
plt.grid()
plt.gcf().gca().set_ylim(0, 100)
# plt.gcf().gca().set_ylim(bottom = 0)
# plt.gcf().gca().set_xlim(left = 0)
plt.title('Accuracy')
display.clear_output(wait=True)
display.display(plt.gcf())
plt.figure(num=3)
plt.clf()
l3_1, = plt.plot(t[:epoch_index + 1], epoch_train_nochange_accuracy[:epoch_index + 1], label='Train accuracy: no change')
l3_2, = plt.plot(t[:epoch_index + 1], epoch_train_change_accuracy[:epoch_index + 1], label='Train accuracy: change')
l3_3, = plt.plot(t[:epoch_index + 1], epoch_test_nochange_accuracy[:epoch_index + 1], label='Test accuracy: no change')
l3_4, = plt.plot(t[:epoch_index + 1], epoch_test_change_accuracy[:epoch_index + 1], label='Test accuracy: change')
plt.legend(handles=[l3_1, l3_2, l3_3, l3_4])
plt.grid()
plt.gcf().gca().set_ylim(0, 100)
# plt.gcf().gca().set_ylim(bottom = 0)
# plt.gcf().gca().set_xlim(left = 0)
plt.title('Accuracy per class')
display.clear_output(wait=True)
display.display(plt.gcf())
plt.figure(num=4)
plt.clf()
l4_1, = plt.plot(t[:epoch_index + 1], epoch_train_precision[:epoch_index + 1], label='Train precision')
l4_2, = plt.plot(t[:epoch_index + 1], epoch_train_recall[:epoch_index + 1], label='Train recall')
l4_3, = plt.plot(t[:epoch_index + 1], epoch_train_Fmeasure[:epoch_index + 1], label='Train Dice/F1')
l4_4, = plt.plot(t[:epoch_index + 1], epoch_test_precision[:epoch_index + 1], label='Test precision')
l4_5, = plt.plot(t[:epoch_index + 1], epoch_test_recall[:epoch_index + 1], label='Test recall')
l4_6, = plt.plot(t[:epoch_index + 1], epoch_test_Fmeasure[:epoch_index + 1], label='Test Dice/F1')
plt.legend(handles=[l4_1, l4_2, l4_3, l4_4, l4_5, l4_6])
plt.grid()
plt.gcf().gca().set_ylim(0, 1)
# plt.gcf().gca().set_ylim(bottom = 0)
# plt.gcf().gca().set_xlim(left = 0)
plt.title('Precision, Recall and F-score')
display.clear_output(wait=True)
display.display(plt.gcf())
# mean_acc = (epoch_test_nochange_accuracy[epoch_index] + epoch_test_change_accuracy[epoch_index])/2
# if mean_acc > best_mean_acc:
# best_mean_acc = mean_acc
# save_str = 'net-best_epoch-' + str(epoch_index + 1) + '_acc-' + str(mean_acc) + '.pth.tar'
# torch.save(net.state_dict(), save_str)
# fm = pr_rec[2]
fm = epoch_train_Fmeasure[epoch_index]
if fm > best_fm:
best_fm = fm
save_str = 'net-best_epoch-' + str(epoch_index + 1) + '_fm-' + str(fm) + '.pth.tar'
torch.save(net.state_dict(), save_str)
lss = epoch_train_loss[epoch_index]
if lss < best_lss:
best_lss = lss
save_str = 'net-best_epoch-' + str(epoch_index + 1) + '_loss-' + str(lss) + '.pth.tar'
torch.save(net.state_dict(), save_str)
# print('Epoch loss: ' + str(tot_loss/tot_count))
if save:
im_format = 'png'
# im_format = 'eps'
plt.figure(num=1)
plt.savefig(net_name + '-01-loss.' + im_format)
plt.figure(num=2)
plt.savefig(net_name + '-02-accuracy.' + im_format)
plt.figure(num=3)
plt.savefig(net_name + '-03-accuracy-per-class.' + im_format)
plt.figure(num=4)
plt.savefig(net_name + '-04-prec-rec-fmeas.' + im_format)
out = {'train_loss': epoch_train_loss[-1],
'train_accuracy': epoch_train_accuracy[-1],
'train_nochange_accuracy': epoch_train_nochange_accuracy[-1],
'train_change_accuracy': epoch_train_change_accuracy[-1],
'test_loss': epoch_test_loss[-1],
'test_accuracy': epoch_test_accuracy[-1],
'test_nochange_accuracy': epoch_test_nochange_accuracy[-1],
'test_change_accuracy': epoch_test_change_accuracy[-1]}
print('pr_c, rec_c, f_meas, pr_nc, rec_nc')
print(pr_rec)
return out
L = 1024
N = 2
def test(dset):
net.eval()
tot_loss = 0
tot_count = 0
tot_accurate = 0
n = 2
class_correct = list(0. for i in range(n))
class_total = list(0. for i in range(n))
class_accuracy = list(0. for i in range(n))
tp = 0
tn = 0
fp = 0
fn = 0
for img_index in dset.names:
I1_full, I2_full, cm_full = dset.get_img(img_index)
s = cm_full.shape
steps0 = np.arange(0,s[0],ceil(s[0]/N))
steps1 = np.arange(0,s[1],ceil(s[1]/N))
for ii in range(N):
for jj in range(N):
xmin = steps0[ii]
if ii == N-1:
xmax = s[0]
else:
xmax = steps0[ii+1]
ymin = steps1[jj]
if jj == N-1:
ymax = s[1]
else:
ymax = steps1[jj+1]
I1 = I1_full[:, xmin:xmax, ymin:ymax]
I2 = I2_full[:, xmin:xmax, ymin:ymax]
cm = cm_full[xmin:xmax, ymin:ymax]
I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
cm = Variable(torch.unsqueeze(torch.from_numpy(1.0*cm),0).float()).cuda()
output = net(I1, I2)
loss = criterion(output, cm.long())
# print(loss)
tot_loss += loss.data * np.prod(cm.size())
tot_count += np.prod(cm.size())
_, predicted = torch.max(output.data, 1)
c = (predicted.int() == cm.data.int())
for i in range(c.size(1)):
for j in range(c.size(2)):
l = int(cm.data[0, i, j])
class_correct[l] += c[0, i, j]
class_total[l] += 1
pr = (predicted.int() > 0).cpu().numpy()
gt = (cm.data.int() > 0).cpu().numpy()
tp += np.logical_and(pr, gt).sum()
tn += np.logical_and(np.logical_not(pr), np.logical_not(gt)).sum()
fp += np.logical_and(pr, np.logical_not(gt)).sum()
fn += np.logical_and(np.logical_not(pr), gt).sum()
net_loss = tot_loss/tot_count
net_accuracy = 100 * (tp + tn)/tot_count
for i in range(n):
class_accuracy[i] = 100 * class_correct[i] / max(class_total[i],0.00001)
prec = tp / (tp + fp)
rec = tp / (tp + fn)
f_meas = 2 * prec * rec / (prec + rec)
prec_nc = tn / (tn + fn)
rec_nc = tn / (tn + fp)
pr_rec = [prec, rec, f_meas, prec_nc, rec_nc]
return net_loss, net_accuracy, class_accuracy, pr_rec
def save_test_results(dset):
for name in tqdm(dset.names):
with warnings.catch_warnings():
warnings.simplefilter('ignore')  # silence io.imsave dtype-conversion warnings
I1, I2, cm = dset.get_img(name)
I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
out = net(I1, I2)
_, predicted = torch.max(out.data, 1)
I = np.stack((255*cm, 255*np.squeeze(predicted.cpu().numpy()), 255*cm), 2).astype(np.uint8)
io.imsave(f'{net_name}-{name}.png',I)
def kappa(tp, tn, fp, fn):
N = tp + tn + fp + fn
p0 = (tp + tn) / N
pe = ((tp+fp)*(tp+fn) + (tn+fp)*(tn+fn)) / (N * N)
return (p0 - pe) / (1 - pe)
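# Worked example: tp=40, tn=45, fp=5, fn=10 gives N=100, observed agreement p0 = 0.85 and
# chance agreement pe = (45*50 + 50*55)/10000 = 0.5, so kappa = (0.85 - 0.5)/(1 - 0.5) = 0.7;
# i.e. kappa(40, 45, 5, 10) is ~0.7.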
# Full-image evaluation in L x L tiles. Renamed from test() to evaluate() so it does not
# shadow the tuple-returning test() above, which train() still relies on.
def evaluate(dset):
net.eval()
tot_loss = 0
tot_count = 0
tot_accurate = 0
n = 2
class_correct = list(0. for i in range(n))
class_total = list(0. for i in range(n))
class_accuracy = list(0. for i in range(n))
tp = 0
tn = 0
fp = 0
fn = 0
for img_index in tqdm(dset.names):
I1_full, I2_full, cm_full = dset.get_img(img_index)
s = cm_full.shape
for ii in range(ceil(s[0]/L)):
for jj in range(ceil(s[1]/L)):
xmin = L*ii
xmax = min(L*(ii+1),s[0])
ymin = L*jj
ymax = min(L*(jj+1),s[1])
I1 = I1_full[:, xmin:xmax, ymin:ymax]
I2 = I2_full[:, xmin:xmax, ymin:ymax]
cm = cm_full[xmin:xmax, ymin:ymax]
I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
cm = Variable(torch.unsqueeze(torch.from_numpy(1.0*cm),0).float()).cuda()
output = net(I1, I2)
loss = criterion(output, cm.long())
tot_loss += loss.data * np.prod(cm.size())
tot_count += np.prod(cm.size())
_, predicted = torch.max(output.data, 1)
c = (predicted.int() == cm.data.int())
for i in range(c.size(1)):
for j in range(c.size(2)):
l = int(cm.data[0, i, j])
class_correct[l] += c[0, i, j]
class_total[l] += 1
pr = (predicted.int() > 0).cpu().numpy()
gt = (cm.data.int() > 0).cpu().numpy()
tp += np.logical_and(pr, gt).sum()
tn += np.logical_and(np.logical_not(pr), np.logical_not(gt)).sum()
fp += np.logical_and(pr, np.logical_not(gt)).sum()
fn += np.logical_and(np.logical_not(pr), gt).sum()
net_loss = tot_loss/tot_count
net_loss = float(net_loss.cpu().numpy())
net_accuracy = 100 * (tp + tn)/tot_count
for i in range(n):
class_accuracy[i] = 100 * class_correct[i] / max(class_total[i],0.00001)
class_accuracy[i] = float(class_accuracy[i].cpu().numpy())
prec = tp / (tp + fp)
rec = tp / (tp + fn)
dice = 2 * prec * rec / (prec + rec)
prec_nc = tn / (tn + fn)
rec_nc = tn / (tn + fp)
pr_rec = [prec, rec, dice, prec_nc, rec_nc]
k = kappa(tp, tn, fp, fn)
return {'net_loss': net_loss,
'net_accuracy': net_accuracy,
'class_accuracy': class_accuracy,
'precision': prec,
'recall': rec,
'dice': dice,
'kappa': k}
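# Sanity check on the dice value returned above: since prec = tp/(tp+fp) and rec = tp/(tp+fn),
# dice = 2*prec*rec/(prec+rec) simplifies to 2*tp/(2*tp + fp + fn). For example tp=40, fp=5,
# fn=10 gives dice = 80/95, roughly 0.842.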
def main():
# train(), test(), evaluate() and save_test_results() above refer to the network, loaders and
# configuration constants as module-level names (this code originated as a notebook), so
# declare them global here before assigning them.
global net, net_name, criterion, train_dataset, train_loader, test_dataset, test_loader
global N_EPOCHS, FP_MODIFIER, PATCH_SIDE, NORMALISE_IMGS, TYPE, L
# Replace PATH_TO_DATASET with your path to the OSCD dataset.
PATH_TO_DATASET = '/content/drive/MyDrive/onera/'
IS_PROTOTYPE = False
# Define variables such as batch size, number of epochs, etc.
FP_MODIFIER = 1  # tuning parameter; use 1 if unsure
BATCH_SIZE = 32
N_EPOCHS = 40
PATCH_SIDE = 48
NORMALISE_IMGS = True
TRAIN_STRIDE = int(PATCH_SIDE/2) - 1
# TYPE defines the number of Sentinel bands used for training: 0-RGB | 1-RGB + IR |
# 2-All bands s.t. resolution <= 20m | 3-All bands
# We tested with both TYPE = 0 and TYPE = 1, and found RGB + IR to be more successful.
TYPE = 1
# If simply using this notebook to test a pre-trained model, set LOAD_TRAINED to True.
LOAD_TRAINED = False
# Here DATA_AUG is set to True to augment the existing dataset by applying a number of transforms to the existing data,
# such as reflections, rotations and scaling. See the next cell for details of these transforms.
DATA_AUG = True
# Carry out data augmentation with the augmentation classes defined above.
if DATA_AUG:
data_transform = tr.Compose([RandomFlip(), RandomRot()])
else:
data_transform = None
# Load the training and test datasets using the ChangeDetectionDataset class
train_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train = True, stride = TRAIN_STRIDE, transform=data_transform)
weights = torch.FloatTensor(train_dataset.weights).cuda()
print(weights)
train_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = 2)
test_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train = False, stride = TRAIN_STRIDE)
test_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = 2)
print('DATASETS OK')
# Determining the shapes of the networks given the number of Sentinel-2 bands used, defined by the TYPE.
if TYPE == 0:
net, net_name = Unet(2*3, 2), 'FC-EF'
# net, net_name = SiamUnet(3, 2), 'FC-Siam'
elif TYPE == 1:
net, net_name = Unet(2*4, 2), 'FC-EF'
# net, net_name = SiamUnet(4, 2), 'FC-Siam'
elif TYPE == 2:
net, net_name = Unet(2*10, 2), 'FC-EF'
# net, net_name = SiamUnet(10, 2), 'FC-Siam'
elif TYPE == 3:
net, net_name = Unet(2*13, 2), 'FC-EF'
# net, net_name = SiamUnet(13, 2), 'FC-Siam'
# CUDA the network to facilitate running on GPU
net.cuda()
# Options of loss functions, including non-standard ones defined in losses.py
criterion = nn.NLLLoss(weight=weights) # to be used with logsoftmax output
#criterion = DiceLoss() # second option of loss function, requires reshaping of target and input to loss function
if LOAD_TRAINED:
net.load_state_dict(torch.load('weights/unet_final_weights.pth.tar'))
print('LOAD OK')
else:
t_start = time.time()
out_dic = train()
t_end = time.time()
print(out_dic)
print('Elapsed time:')
print(t_end - t_start)
### This cell saves the weights of the trained network for future use.
if not LOAD_TRAINED:
torch.save(net.state_dict(), 'net_final.pth.tar')
print('SAVE OK')
save_test_results(test_dataset)
L = 1024
results = evaluate(test_dataset)
print(results)
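# Allow running this file as a script (the code appears to be notebook-derived, so main()
# was never invoked explicitly).
if __name__ == '__main__':
    main()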
|
#! /usr/bin/env python3
import struct
import sys
mappers = ["NROM", "MMC1", "UxROM", "CNROM", "MMC3", "MMC5", "FFE F4xxx", "AxROM", "FFE F3xxx", "MMC2", "MMC4"]  # partial list: only the first 11 iNES mapper numbers are named
def main():
dump = False
for arg in sys.argv[1:]:
if arg[0] == '-':
if arg == '-d':
dump = True
else:
raise ValueError(f"Unsupported flag: {arg}")
sys.argv.remove(arg)
if len(sys.argv) < 2:
raise ValueError(f"Usage: {sys.argv[0]} [-d] file")
print_info(sys.argv[1], dump)
def print_info(fp, dump=False):  # note: the -d/dump flag is accepted but a hex dump is not implemented here
with open(fp, 'rb') as f:
buf = f.read(16)
if len(buf) < 16:
raise ValueError(f"Unable to read 16 bytes from {fp}")
header, nb_16k_rom_banks, nb_8k_vrom_banks, b6, b7, nb_8k_ram_banks, video_mode, *padding = struct.unpack(">4s12B", buf)
if header != b'NES\x1a':
raise ValueError(f"Invalid header for nes file: {fp}: {header}")
print("%-40s%s" % ("File:", fp))
print("%-40s%s" % ("Header:", header))
# Byte 4
print("%-40s%d" % ("16KiB ROM Banks:", nb_16k_rom_banks))
# Byte 5
print("%-40s%d" % ("8KiB VROM Banks:", nb_8k_vrom_banks))
# Byte 6
print("%-40s%s" % ("Mirroring:", 'Vertical' if _get_bit_at(b6, 0) == 1 else "Horizontal"))
print("%-40s%s" % ("Battery RAM at 0x6000-0x7FFF:", bool(_get_bit_at(b6, 1))))
print("%-40s%s" % ("512 bytes trainer at 0x7000-0x71FF:", bool(_get_bit_at(b6, 2))))
print("%-40s%s" % ("Four screen VRAM layout:", bool(_get_bit_at(b6, 3))))
# Byte 7
print("%-40s%s" % ("VS-System cartridge:", bool(_get_bit_at(b7, 0))))
print("%-40s%s" % ("Reserved (must be zero):", b7 & 0b00001110))
# Bytes 6 and 7, bits 4-7
mapper = (b6 >> 4 & 0x0f) | (b7 & 0xf0)
print("%-40s%s (%d)" % ("Mapper:", mappers[mapper], mapper))
# Byte 8
print("%-40s%d" % ("8KiB RAM Banks:", nb_8k_ram_banks))
# Byte 9
print("%-40s%s" % ("Video mode:", "PAL" if _get_bit_at(video_mode, 0) else "NTSC"))
print("%-40s%s" % ("Reserved (must be zero):", video_mode & 0b11111110))
# Bytes 10-15
print("%-40s%s" % ("Reserved (must be zeroes):", padding))
def _get_bit_at(val, pos):
if not 0 <= pos <= 7:
raise ValueError(f"pos must be between 0 and 7: {pos}")
return val >> pos & 0x01
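# Example: an MMC1 (mapper 1) cartridge stores the mapper's low nibble in the high bits of
# byte 6 and the high nibble in the high bits of byte 7, so with b6 = 0x10 and b7 = 0x00:
#   (0x10 >> 4 & 0x0f) | (0x00 & 0xf0) == 1  ->  mappers[1] == "MMC1"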
if __name__ == "__main__":
main()
|
from django.urls import path
from .views import get_products, searchProducts, get_topSales, get_product
from .views import get_orders, post_orders, refund_order, orderCalculateTotal
from .views import get_categories
from .views import check_promoCode
from .views import get_carrouselPromos
from .views import postReview
urlpatterns = [
path("getProducts", get_products),
path("get_product/<product_id>", get_product),
path("getTopSales", get_topSales),
path("searchProduct/<product_name>", searchProducts),
path("getOrders", get_orders),
path("postOrders", post_orders),
path("refund_order/<order_id>", refund_order),
path("orderCalculateTotal", orderCalculateTotal),
path("getCategories", get_categories),
path("checkPromoCode", check_promoCode),
path("getCarrouselPromo", get_carrouselPromos),
path("postReview", postReview),
]
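# For reference, a minimal sketch of what one of the imported views could look like
# (hypothetical -- the real implementations live in .views and may use DRF instead):
#
#   from django.http import JsonResponse
#   from .models import Product   # hypothetical model
#
#   def get_product(request, product_id):
#       product = Product.objects.filter(pk=product_id).values().first()
#       if product is None:
#           return JsonResponse({"error": "not found"}, status=404)
#       return JsonResponse(product)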
|