import synapse.rest.admin
from synapse.rest.client.v1 import login, room
from tests import unittest
from tests.unittest import HomeserverTestCase
ONE_DAY_IN_SECONDS = 86400
class PhoneHomeTestCase(HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
]
# Override the retention time for the user_ips table because otherwise it
# gets pruned too aggressively for our R30 test.
@unittest.override_config({"user_ips_max_age": "365d"})
def test_r30_minimum_usage(self):
"""
Tests the minimum amount of interaction necessary for the R30 metric
to consider a user 'retained'.
"""
# Register a user, log it in, create a room and send a message
user_id = self.register_user("u1", "secret!")
access_token = self.login("u1", "secret!")
room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token)
self.helper.send(room_id, "message", tok=access_token)
# Check the R30 results do not count that user.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
# Advance 30 days (+ 1 second, because strict inequality causes issues if we are
# bang on 30 days later).
self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1)
# (Make sure the user isn't somehow counted by this point.)
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
# Send a message (this counts as activity)
self.helper.send(room_id, "message2", tok=access_token)
# We have to wait some time for _update_client_ips_batch to get
# called and update the user_ips table.
self.reactor.advance(2 * 60 * 60)
# *Now* the user is counted.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 1, "unknown": 1})
# Advance 29 days. The user has now not posted for 29 days.
self.reactor.advance(29 * ONE_DAY_IN_SECONDS)
# The user is still counted.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 1, "unknown": 1})
# Advance another day. The user has now not posted for 30 days.
self.reactor.advance(ONE_DAY_IN_SECONDS)
# The user is now no longer counted in R30.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
def test_r30_minimum_usage_using_default_config(self):
"""
Tests the minimum amount of interaction necessary for the R30 metric
to consider a user 'retained'.
N.B. This test does not override the `user_ips_max_age` config setting,
which defaults to 28 days.
"""
# Register a user, log it in, create a room and send a message
user_id = self.register_user("u1", "secret!")
access_token = self.login("u1", "secret!")
room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token)
self.helper.send(room_id, "message", tok=access_token)
# Check the R30 results do not count that user.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
# Advance 30 days (+ 1 second, because strict inequality causes issues if we are
# bang on 30 days later).
self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1)
# (Make sure the user isn't somehow counted by this point.)
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
# Send a message (this counts as activity)
self.helper.send(room_id, "message2", tok=access_token)
# We have to wait some time for _update_client_ips_batch to get
# called and update the user_ips table.
self.reactor.advance(2 * 60 * 60)
# *Now* the user is counted.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 1, "unknown": 1})
# Advance 27 days. The user has now not posted for 27 days.
self.reactor.advance(27 * ONE_DAY_IN_SECONDS)
# The user is still counted.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 1, "unknown": 1})
# Advance another day. The user has now not posted for 28 days.
self.reactor.advance(ONE_DAY_IN_SECONDS)
# The user is now no longer counted in R30.
# (This is because the user_ips table has been pruned, which by default
# only preserves the last 28 days of entries.)
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
def test_r30_user_must_be_retained_for_at_least_a_month(self):
"""
Tests that a newly-registered user must be retained for a whole month
before appearing in the R30 statistic, even if they post every day
during that time!
"""
# Register a user and send a message
user_id = self.register_user("u1", "secret!")
access_token = self.login("u1", "secret!")
room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token)
self.helper.send(room_id, "message", tok=access_token)
# Check the user does not contribute to R30 yet.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
for _ in range(30):
# This loop posts a message every day for 30 days
self.reactor.advance(ONE_DAY_IN_SECONDS)
self.helper.send(room_id, "I'm still here", tok=access_token)
# Notice that the user *still* does not contribute to R30!
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 0})
self.reactor.advance(ONE_DAY_IN_SECONDS)
self.helper.send(room_id, "Still here!", tok=access_token)
# *Now* the user appears in R30.
r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
self.assertEqual(r30_results, {"all": 1, "unknown": 1})
|
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2015-2016 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from comtypes import COMError
import ctypes
import UIAHandler
def createUIAMultiPropertyCondition(*dicts):
"""
A helper function that creates a complex UI Automation condition matching on various UI Automation properties with both 'and' and 'or'.
Arguments to this function are dicts whose keys are UI Automation property IDs, and whose values are lists of possible values for that property ID.
The dicts are joined with 'or', the keys in each dict are joined with 'and', and the values for each key are joined with 'or'.
For example, to create a condition that matches where the controlType is button or edit and isReadOnly is True, or where the className is 'ding', you would provide arguments of:
{UIA_ControlTypePropertyId:[UIA_ButtonControlTypeId,UIA_EditControlTypeId],UIA_Value_ValueIsReadOnly:[True]},{UIA_ClassNamePropertyId:['ding']}
"""
outerOrList=[]
for dict in dicts:
andList=[]
for key,values in dict.iteritems():
innerOrList=[]
if not isinstance(values,(list,set)):
values=[values]
for value in values:
condition=UIAHandler.handler.clientObject.createPropertyCondition(key,value)
innerOrList.append(condition)
if len(innerOrList)==0:
continue
elif len(innerOrList)==1:
condition=innerOrList[0]
else:
condition=UIAHandler.handler.clientObject.createOrConditionFromArray(innerOrList)
andList.append(condition)
if len(andList)==0:
continue
elif len(andList)==1:
condition=andList[0]
else:
condition=UIAHandler.handler.clientObject.createAndConditionFromArray(andList)
outerOrList.append(condition)
if len(outerOrList)==0:
raise ValueError("no properties")
elif len(outerOrList)==1:
condition=outerOrList[0]
else:
condition=UIAHandler.handler.clientObject.createOrConditionFromArray(outerOrList)
return condition
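# A worked example of the docstring above (illustrative only; the property and
# control-type constants are assumed to be exposed on the UIAHandler module):
#
#   condition = createUIAMultiPropertyCondition(
#       {UIAHandler.UIA_ControlTypePropertyId: [UIAHandler.UIA_ButtonControlTypeId, UIAHandler.UIA_EditControlTypeId],
#        UIAHandler.UIA_ValueIsReadOnlyPropertyId: [True]},
#       {UIAHandler.UIA_ClassNamePropertyId: ['ding']},
#   )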
def UIATextRangeFromElement(documentTextPattern,element):
"""Wraps IUIAutomationTextRange::getEnclosingElement, returning None on COMError."""
try:
childRange=documentTextPattern.rangeFromChild(element)
except COMError:
childRange=None
return childRange
def isUIAElementInWalker(element,walker):
"""
Checks if the given IUIAutomationElement exists in the given IUIAutomationTreeWalker by calling IUIAutomationTreeWalker::normalizeElement and comparing the fetched element with the given element.
"""
try:
newElement=walker.normalizeElement(element)
except COMError:
newElement=None
return newElement and UIAHandler.handler.clientObject.compareElements(element,newElement)
def getDeepestLastChildUIAElementInWalker(element,walker):
"""
Starting from the given IUIAutomationElement, uses the given IUIAutomationTreeWalker to walk down to the deepest last child, returning None if the element has no children.
"""
descended=False
while True:
lastChild=walker.getLastChildElement(element)
if lastChild:
descended=True
element=lastChild
else:
break
return element if descended else None
class UIAMixedAttributeError(ValueError):
"""Raised when a function would return a UIAutomation text attribute value that is mixed."""
pass
def getUIATextAttributeValueFromRange(range,attrib,ignoreMixedValues=False):
"""
Wraps IUIAutomationTextRange::getAttributeValue, returning UIAutomation's reservedNotSupportedValue on COMError, and raising UIAMixedAttributeError if a mixed value would be returned and ignoreMixedValues is False.
"""
try:
val=range.GetAttributeValue(attrib)
except COMError:
return UIAHandler.handler.reservedNotSupportedValue
if val==UIAHandler.handler.ReservedMixedAttributeValue:
if not ignoreMixedValues:
raise UIAMixedAttributeError
return val
def iterUIARangeByUnit(rangeObj,unit):
"""
Splits a given UI Automation text range into smaller text ranges the size of the given unit and yields them.
@param rangeObj: the UI Automation text range to split.
@type rangeObj: L{UIAHandler.IUIAutomationTextRange}
@param unit: a UI Automation text unit.
@rtype: a generator that yields L{UIAHandler.IUIAutomationTextRange} objects.
"""
tempRange=rangeObj.clone()
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,rangeObj,UIAHandler.TextPatternRangeEndpoint_Start)
endRange=tempRange.Clone()
while endRange.Move(unit,1)>0:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,endRange,UIAHandler.TextPatternRangeEndpoint_Start)
pastEnd=tempRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,rangeObj,UIAHandler.TextPatternRangeEndpoint_End)>0
if pastEnd:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,rangeObj,UIAHandler.TextPatternRangeEndpoint_End)
yield tempRange.clone()
if pastEnd:
return
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,tempRange,UIAHandler.TextPatternRangeEndpoint_End)
# Ensure that we always reach the end of the outer range, even if the units seem to stop somewhere inside
if tempRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,rangeObj,UIAHandler.TextPatternRangeEndpoint_End)<0:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,rangeObj,UIAHandler.TextPatternRangeEndpoint_End)
yield tempRange.clone()
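# Illustrative usage only (the text range and unit below are assumptions, not
# part of this module): iterate over a paragraph-sized range word by word.
#
#   for wordRange in iterUIARangeByUnit(paragraphRange, UIAHandler.TextUnit_Word):
#       print(wordRange.GetText(-1))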
def getEnclosingElementWithCacheFromUIATextRange(textRange,cacheRequest):
"""A thin wrapper around IUIAutomationTextRange3::getEnclosingElementBuildCache if it exists, otherwise IUIAutomationTextRange::getEnclosingElement and then IUIAutomationElement::buildUpdatedCache."""
if not isinstance(textRange,UIAHandler.IUIAutomationTextRange):
raise ValueError("%s is not a text range"%textRange)
try:
textRange=textRange.QueryInterface(UIAHandler.IUIAutomationTextRange3)
except (COMError,AttributeError):
e=textRange.getEnclosingElement()
if e:
e=e.buildUpdatedCache(cacheRequest)
return e
return textRange.getEnclosingElementBuildCache(cacheRequest)
class CacheableUIAElementArray(object):
def __init__(self,elementArray,cacheRequest=None):
self._elementArray=elementArray
self._cacheRequest=cacheRequest
@property
def length(self):
return self._elementArray.length if self._elementArray else 0
def getElement(self,index):
e=self._elementArray.getElement(index)
if e and self._cacheRequest:
e=e.buildUpdatedCache(self._cacheRequest)
return e
def getChildrenWithCacheFromUIATextRange(textRange,cacheRequest):
"""A thin wrapper around IUIAutomationTextRange3::getChildrenBuildCache if it exists, otherwise IUIAutomationTextRange::getChildren but wraps the result in an object that automatically calls IUIAutomationElement::buildUpdateCache on any element retreaved."""
if not isinstance(textRange,UIAHandler.IUIAutomationTextRange):
raise ValueError("%s is not a text range"%textRange)
try:
textRange=textRange.QueryInterface(UIAHandler.IUIAutomationTextRange3)
except (COMError,AttributeError):
c=textRange.getChildren()
c=CacheableUIAElementArray(c,cacheRequest)
return c
c=textRange.getChildrenBuildCache(cacheRequest)
c=CacheableUIAElementArray(c)
return c
class UIATextRangeAttributeValueFetcher(object):
def __init__(self,textRange):
self.textRange=textRange
def getValue(self,ID,ignoreMixedValues=False):
try:
val=self.textRange.getAttributeValue(ID)
except COMError:
# #7124: some text attributes are not supported in older Operating Systems
return UIAHandler.handler.reservedNotSupportedValue
if not ignoreMixedValues and val==UIAHandler.handler.ReservedMixedAttributeValue:
raise UIAMixedAttributeError
return val
class BulkUIATextRangeAttributeValueFetcher(UIATextRangeAttributeValueFetcher):
def __init__(self,textRange,IDs):
IDs=list(IDs)
self.IDsToValues={}
super(BulkUIATextRangeAttributeValueFetcher,self).__init__(textRange)
IDsArray=(ctypes.c_long*len(IDs))(*IDs)
values=textRange.GetAttributeValues(IDsArray,len(IDsArray))
self.IDsToValues={IDs[x]:values[x] for x in xrange(len(IDs))}
def getValue(self,ID,ignoreMixedValues=False):
val=self.IDsToValues[ID]
if not ignoreMixedValues and val==UIAHandler.handler.ReservedMixedAttributeValue:
raise UIAMixedAttributeError
return val
|
#! /usr/bin/python3 -W all
# -*- coding: utf-8 -*-
##
# scrape-cfr.py - convert the Code of Federal Regulations into RDF
#
usage="""
scrape-cfr.py - convert the Code of Federal Regulations into RDF
This little script converts the GPO FDsys bulk XML files into
RDF for further semantic annotation and processing. Get the data from
<http://www.gpo.gov/fdsys/bulkdata/CFR/> or let this program
download it for you.
Usage: scrape-cfr.py [options] [file [file ..]]
Arguments:
file GPO FDsys XML file
-o file output filename (default: stdout)
-d, --debug enable debugging output (twice for verbose)
"""
import sys
import getopt
import os
import os.path
import lxml.etree
import re
import string
#
# Globals.
#
flags = {'debug': False, 'verbose': False}
##
# Entry function. Parse parameters, then call the processing function.
#
def main():
ifn = None
ofn = None
# parse commandline for flags and arguments
try:
opts, args = getopt.getopt(sys.argv[1:], 'd', ['debug'])
except getopt.GetoptError:
fatal('getopt error', usage, end='')
# parse flags
for opt, arg in opts:
if opt in {'-d', '--debug'}:
if flags['debug']:
flags['verbose'] = True
flags['debug'] = True
else:
fatal('invalid flag', opt, usage)
# parse arguments
if len(args) > 0:
for arg in args:
if not ifn:
ifn = arg
elif not ofn:
ofn = arg
else:
fatal('too many files', usage)
else:
fatal('need file', usage)
# open files
try:
fin = open(ifn, 'r')
if ofn:
fout = open(ofn, 'wb')
else:
fout = sys.stdout
except IOError as e:
fatal(e)
# do it
do_it(fin, fout)
# cleanup
fin.close()
if fout is not sys.stdout:
    fout.close()
##
# Do it.
#
def do_it(fin, fout):
parser = lxml.etree.XMLParser(remove_blank_text=True, huge_tree=True)
tree = lxml.etree.parse(fin, parser)
r = tree.getroot()
assert r.tag == 'CFRDOC'
state = {'title': None, 'subtitle': None, 'chapter': None, 'subchapter': None, 'part': None}
lookup = {'enum': {}, 'title': {}}
# get org
for el in r.xpath('.//*[self::TITLE or self::SUBTITLE or self::CHAPTER or self::SUBCHAP or self::PART]'):
if el.tag in orgtypes.keys():
org = orgtypes[el.tag](el)
header, content = org
# debug(header, content)
subel = org_tup2el_r(lookup, org)
# get sections
for el in r.xpath('//SECTION'):
assert el.tag == 'SECTION'
sel, enum, title, status = new_sec(el)
if enum in lookup['enum']:
debug('section', repr(enum), repr(title))
elif status and 'reserved' in status:
warn('reserved enum not in lookup', repr(enum))
else:
warn('enum not in lookup', repr(enum), repr(title))
#
# Parse organization.
#
##
# Walk (recursively) through an org tuple, recording section enums and titles
# in the lookup table so they can be matched against SECTION elements later.
#
def org_tup2el_r(lookup, org):
assert type(org) == tuple
if len(org) == 2:
header, content = org
debug(header)
if content is not None:
for sub in content:
org_tup2el_r(lookup, sub)
elif len(org) == 1:
header, = org
debug(header)
typ, enum, title, stat = header
lookup['enum'][enum] = lookup['title'][title] = None
else:
fatal('org_tup2el_r: invalid org')
##
#
#
def cfrdoc_iter_title(el):
header = None
tel = el.find('CFRTITLE/TITLEHD/HD')
if tel is None:
warn(el, 'has no header element', repr(lxml.etree.tostring(el, encoding=str)))
else:
header = parse_comb_header(tel)
return (header, None)
##
#
#
def cfrdoc_iter_subtitle(el):
header = None
tel = el.find('HD')
if tel is None:
tel = el.find('RESERVED')
if tel is None:
warn(el, 'has no header element', repr(lxml.etree.tostring(el, encoding=str)))
else:
header = parse_comb_header(tel)
return (header, None)
##
#
#
def cfrdoc_iter_chapter(el):
header = None
tel = el.find('TOC/TOCHD/HD')
if tel is None:
tel = el.find('HD')
if tel is None:
tel = el.find('RESERVED')
if tel is None:
warn(el, 'has no header element', repr(lxml.etree.tostring(el, encoding=str)))
else:
header = parse_comb_header(tel)
return (header, None)
##
#
#
def cfrdoc_iter_subchap(el):
header = None
tel = el.find('HD')
if tel is None:
tel = el.find('RESERVED')
if tel is None:
warn(el, 'has no header element', repr(lxml.etree.tostring(el, encoding=str)))
else:
header = parse_comb_header(tel)
return (header, None)
##
#
#
def cfrdoc_iter_part(el):
# find header
tel = el.find('HD')
if tel is None:
tel = el.find('RESERVED')
# parse header
header = parse_comb_header(tel)
sectioncontent = []
sectioncur = {'SECTNO': None, 'SUBJECT': None}
sectionstatus = set()
for subel in el.xpath('CONTENTS/*'):
if subel.tag in parttypes.keys():
keyvals = parttypes[subel.tag](subel)
for key, val in keyvals:
# is reserved
if subel.tag == 'RESERVED':
sectionstatus.add('reserved')
# add SECTNO to cur
if subel.tag == 'SECTNO':
sectioncur[key] = val
# add to contents
if subel.tag == 'SUBJECT' or subel.tag == 'RESERVED':
if sectioncur['SECTNO'] != None:
# extract
typ = 'section'
enum = sectioncur['SECTNO']
title = val
if sectionstatus == set():
sectionstatus = None
item = ((typ, enum, title, sectionstatus),)
sectioncontent.append(item)
# reset
sectioncur['SECTNO'] = sectioncur['SUBJECT'] = None
sectionstatus = set()
elif val == None:
pass
else:
warn('cfrdoc_iter_part subject: None in cur', repr(sectioncur), repr(lxml.etree.tostring(el, encoding=str)))
# handle SUBPART
if subel.tag == 'SUBPART':
sectioncontent.append(val)
# handle SUBJGRP
if subel.tag == 'SUBJGRP':
for pair in val:
sectioncontent.append(pair)
else:
print('cfrdoc_iter_part skip', subel.tag)
if None not in sectioncur.values():
typ = 'section'
enum = sectioncur['SECTNO']
title = sectioncur['SUBJECT']
item = ((typ, enum, title, sectionstatus), [])
sectioncontent.append(item)
warn('cfrdoc_iter_part: added cur')
elif list(sectioncur.values()) != [None, None]:
warn('cfrdoc_iter_part: None in cur', repr(sectioncur), repr(lxml.etree.tostring(el, encoding=str)))
return (header, sectioncontent)
##
#
#
def part_iter_subpart(el):
# find header
for i,actel in enumerate(el):
if actel.tag in {'HD', 'SUBJECT', 'RESERVED'}:
break
# parse header
header = parse_comb_header(actel)
if i == len(el)-1:
return [(None, (header, []))]
sectioncontent = []
sectioncur = {'SECTNO': None, 'SUBJECT': None}
sectionstatus = set()
for subel in el[i+1:]:
if subel.tag in subparttypes.keys():
keyvals = subparttypes[subel.tag](subel)
for key, val in keyvals:
# is reserved
if subel.tag == 'RESERVED':
sectionstatus.add('reserved')
# add SECTNO to cur
if subel.tag == 'SECTNO':
sectioncur[key] = val
# add to contents
if subel.tag == 'SUBJECT' or subel.tag == 'RESERVED':
if sectioncur['SECTNO'] != None:
# extract
typ = 'section'
enum = sectioncur['SECTNO']
title = val
if sectionstatus == set():
sectionstatus = None
item = ((typ, enum, title, sectionstatus),)
sectioncontent.append(item)
# reset
sectioncur['SECTNO'] = sectioncur['SUBJECT'] = None
sectionstatus = set()
elif val == None:
pass
else:
warn('part_iter_subpart subject: None in cur', repr(sectioncur), repr(lxml.etree.tostring(el, encoding=str)))
# handle SUBJGRP
if subel.tag == 'SUBJGRP':
for pair in val:
sectioncontent.append(pair)
else:
warn('part_iter_subpart skip', subel.tag)
if None not in sectioncur.values():
typ = 'section'
enum = sectioncur['SECTNO']
title = sectioncur['SUBJECT']
item = ((typ, enum, title, sectionstatus), [])
sectioncontent.append(item)
warn('part_iter_subpart: added cur')
elif list(sectioncur.values()) != [None, None]:
warn('part_iter_subpart: None in cur', repr(sectioncur), repr(lxml.etree.tostring(el, encoding=str)))
return [(None, (header, sectioncontent))]
##
#
#
def iter_subjgrp(el):
t = ' '.join(lxml.etree.tostring(el[0], method='text', encoding=str).split())
sectioncontent = []
sectioncur = {'SECTNO': None, 'SUBJECT': None}
sectionstatus = set()
for subel in el[1:]:
if subel.tag in subparttypes.keys():
keyvals = subparttypes[subel.tag](subel)
for key, val in keyvals:
# is reserved
if subel.tag == 'RESERVED':
sectionstatus.add('reserved')
# add SECTNO to cur
if subel.tag == 'SECTNO':
sectioncur[key] = val
# add to contents
if subel.tag == 'SUBJECT' or subel.tag == 'RESERVED':
if sectioncur['SECTNO'] != None:
# extract
typ = 'section'
enum = sectioncur['SECTNO']
title = val
if sectionstatus == set():
sectionstatus = None
item = ((typ, enum, title, sectionstatus),)
sectioncontent.append(item)
# reset
sectioncur['SECTNO'] = sectioncur['SUBJECT'] = None
sectionstatus = set()
elif val == None:
pass
else:
warn('iter_subjgrp subject: None in cur', repr(sectioncur), repr(lxml.etree.tostring(el, encoding=str)))
return [(None, sectioncontent)]
##
#
#
def part_iter_sectno(el):
t = ' '.join(lxml.etree.tostring(el, method='text', encoding=str).split())
if t == '':
t = None
return [('SECTNO', t)]
##
#
#
def part_iter_subject(el):
t = ' '.join(lxml.etree.tostring(el, method='text', encoding=str).split())
if t == '':
t = None
return [('SUBJECT', t)]
##
#
#
orgtypes = {'TITLE': cfrdoc_iter_title, 'SUBTITLE': cfrdoc_iter_subtitle, 'CHAPTER': cfrdoc_iter_chapter, 'SUBCHAP': cfrdoc_iter_subchap, 'PART': cfrdoc_iter_part}
##
#
#
parttypes = {'SECTNO': part_iter_sectno, 'SUBJECT': part_iter_subject, 'RESERVED': part_iter_subject, 'SUBJGRP': iter_subjgrp, 'SUBPART': part_iter_subpart}
subparttypes = {'SECTNO': part_iter_sectno, 'SUBJECT': part_iter_subject, 'RESERVED': part_iter_subject, 'SUBJGRP': iter_subjgrp}
##
# Parse a combined header.
#
def parse_comb_header(el):
typ = enum = t = None
elt = ' '.join(lxml.etree.tostring(el, method='text', encoding=str).split())
status = set()
typs = {'title', 'subtitle', 'chapter', 'subchapter', 'part', 'subpart'}
# is reserved
if el.tag == 'RESERVED':
status.add('reserved')
if '[Reserved]' in elt:
status.add('reserved')
rets = elt.split('[Reserved]', 1)
nelt = rets[0].strip()
warn('merged new elt: reserved', repr(elt), repr(nelt))
elt = nelt
if '[RESERVED]' in elt:
status.add('reserved')
rets = elt.split('[RESERVED]', 1)
nelt = rets[0].strip()
warn('merged new elt: reserved', repr(elt), repr(nelt))
elt = nelt
# special case: 'S ubpart'
if elt[:8] == 'S ubpart':
nelt = 'Subpart' + elt[8:]
warn('merged new elt: S ubpart', repr(elt), repr(nelt))
elt = nelt
# special case: 'Supart'
if elt[:6] == 'Supart':
nelt = 'Subpart' + elt[6:]
warn('merged new elt: Supart', repr(elt), repr(nelt))
elt = nelt
# special case: 1st word merges 'Subpart' with enum
if elt[0:7] == 'Subpart' and elt[7] not in {'s',' ','—'} or elt[0:8] == 'Subparts' and elt[8] not in {' ','—'}:
if elt[0:8] == 'Subparts':
nelt = 'Subparts ' + elt[8:]
else:
nelt = 'Subpart ' + elt[7:]
warn('merged new elt: merged enum', repr(elt), repr(nelt))
elt = nelt
# normal case: contains '—'
if '—' in elt:
rets = elt.split('—',1)
assert len(rets) == 2
rets2 = rets[0].split(None,1)
t = rets[1]
if len(rets2) == 2:
typ = rets2[0].lower()
enum = rets2[1]
else:
typ = rets2[0].lower()
enum = None
# normal case: plural and contains '-'
elif '-' in elt and elt.split(None,1)[0].lower()[-1] == 's':
rets = elt.split()
typ = rets[0].lower()
enums = rets[1].split('-')
assert len(enums) == 2
enum = (enums[0], enums[1])
t = ' '.join(rets[2:])
# normal case: contains '-'
elif '-' in elt:
rets = elt.split('-',1)
assert len(rets) == 2
rets2 = rets[0].split(None,1)
t = rets[1]
if len(rets2) == 2:
typ = rets2[0].lower()
enum = rets2[1]
else:
typ = rets2[0].lower()
enum = None
# special case: is still obviously a header
elif elt.split(None,1) != [] and (elt.split(None,1)[0].lower() in typs or elt.split(None,1)[0][:-1].lower() in typs):
warn('header without hyphen', repr(elt))
rets = elt.split()
typ = rets[0].lower()
# special case: 2nd word merges enum with 1st word of description
yep = None
for i,c in enumerate(rets[1]):
if c in string.ascii_lowercase:
yep = i-1
break
if yep is not None and yep > 0:
newrets = rets[2:]
newrets.insert(0, rets[1][yep:])
enum = rets[1][:yep]
t = ' '.join(newrets)
warn('2nd word merges enum with 1st word of description', repr(enum), repr(t))
# normal special case: 'typ enum title...'
else:
desc = ' '.join(rets[2:])
if desc == '':
desc = None
enum = rets[1]
t = desc
warn('normal?', repr(typ), repr(enum), repr(t))
# unknown
else:
warn('parse_comb_header: cannot parse header', repr(elt), repr(lxml.etree.tostring(el, encoding=str)))
t = elt
# remove plural type
if typ is not None and typ[-1] == 's':
typ = typ[:-1]
warn('removed plural type', repr(typ))
# confirm typ
if typ not in typs:
warn('unknown type', repr(typ))
if t == '':
t = None
if status == set():
status = None
return typ, enum, t, status
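# Worked example of the common header form handled above: an element whose text is
# 'PART 10—GENERAL PROVISIONS' parses to typ='part', enum='10',
# t='GENERAL PROVISIONS', status=None.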
#
# Parse sections.
#
##
#
#
def new_sec(el):
enum = title = status = None
sel = lxml.etree.Element('section')
iel = lxml.etree.SubElement(sel, 'info')
enum, title, status = parse_el_info(el)
# add info
if enum:
if isinstance(enum, str):
enumel = lxml.etree.SubElement(iel, 'enum')
enumel.text = enum
elif isinstance(enum, tuple):
enumsel = lxml.etree.SubElement(iel, 'enums')
enumel0 = lxml.etree.SubElement(enumsel, 'enum')
enumel0.attrib['type'] = 'beg'
enumel0.text = enum[0]
enumel1 = lxml.etree.SubElement(enumsel, 'enum')
enumel1.attrib['type'] = 'end'
enumel1.text = enum[1]
else:
fatal('new_sec unknown enum type:', type(enum))
if title:
titel = lxml.etree.SubElement(iel, 'title')
titel.text = title
if status:
sel.attrib['status'] = ','.join(status)
# get and add text
for subpel in el.xpath('P'):
textel = lxml.etree.SubElement(sel, 'text')
text = lxml.etree.tostring(subpel, method='text', encoding=str).replace('\n', '').strip()
textel.text = text
return sel, enum, title, status
##
#
#
def parse_el_info(el):
enum = title = None
status = set()
# get number
sn = el.find('SECTNO')
if sn is None:
warn('new_sec no SECTNO:', repr(lxml.etree.tostring(el, encoding=str)))
else:
snt = ' '.join(lxml.etree.tostring(sn, method='text', encoding=str).split())
# debug('new_sec snt:', repr(snt))
# numbers
sntnew = snt.replace('§', '').strip()
if '§§' in snt:
if '—' in snt:
sntnewnew = sntnew.split('—')
assert len(sntnewnew) == 2
sntnew = (sntnewnew[0], sntnewnew[1])
elif ' through ' in snt:
sntnewnew = sntnew.split(' through ')
assert len(sntnewnew) == 2
sntnew = (sntnewnew[0], sntnewnew[1])
elif '-' in snt:
if snt.count('-') == 1:
sntnewnew = sntnew.split('-')
assert len(sntnewnew) == 2
sntnew = (sntnewnew[0], sntnewnew[1])
elif snt.count('-') == 2:
sntnewnew = '.'.join(sntnew.rsplit('-',1))
sntnewnewnew = sntnewnew.split('-')
assert len(sntnewnewnew) == 2
warn('parse_el_info sntnew converted', repr(sntnew), repr(sntnewnewnew))
sntnew = (sntnewnewnew[0], sntnewnewnew[1])
elif snt.count('-') == 3:
sntnewnew = sntnew.split('-')
assert len(sntnewnew) == 4
left = '-'.join([sntnewnew[0], sntnewnew[1]])
right = '-'.join([sntnewnew[2], sntnewnew[3]])
sntnew = (left, right)
if isinstance(sntnew, str) or len(sntnew) != 2:
warn('parse_el_info len(sntnew) != 2', repr(sntnew), repr(lxml.etree.tostring(el, encoding=str)))
if sntnew is not None and len(sntnew):
enum = sntnew
else:
warn('new_sec empty SECTNO.text:', repr(sntnew), repr(lxml.etree.tostring(el, encoding=str)))
enum = None
# special case: 'Sec.' in enum
# special case: 'Section' in enum
# special case: whitespace in enum
# get title
tel = el.find('SUBJECT')
if tel is None:
tel = el.find('HD')
if tel is None:
tel = el.find('RESERVED')
if tel is None:
warn('parse_el_info no SUBJECT or HD', repr(lxml.etree.tostring(el, encoding=str)))
t = ''
else:
t = ' '.join(lxml.etree.tostring(tel, method='text', encoding=str).split())
status.add('reserved')
else:
t = ' '.join(lxml.etree.tostring(tel, method='text', encoding=str).split())
else:
t = ' '.join(lxml.etree.tostring(tel, method='text', encoding=str).split())
# is reserved; remove '[Reserved]' and '[RESERVED]' from title and normalize
if tel.tag == 'RESERVED':
status.add('reserved')
if '[Reserved]' in t:
status.add('reserved')
rets = t.split('[Reserved]', 1)
nt = rets[0].strip()
warn('merged new t: reserved', repr(t), repr(nt))
t = nt
if '[RESERVED]' in t:
status.add('reserved')
rets = t.split('[RESERVED]', 1)
nt = rets[0].strip()
warn('merged new t: reserved', repr(t), repr(nt))
t = nt
# parse title
if enum is None:
# if the enum was accidentally part of header
rets = t.split()
try:
i = float(rets[0])
# made it
enum = rets[0]
t = ' '.join(rets[1:])
warn('parse_el_info extracted enum', repr(enum), repr(t))
except Exception:
pass
# normalize
if t == '':
t = None
if status == set():
status = None
return enum, t, status
##
#
#
def debug(*args, prefix='DEBUG:', file=sys.stdout, output=False, **kwargs):
if output or flags['verbose']:
if prefix is not None:
print(prefix, *args, file=file, **kwargs)
else:
print(*args, file=file, **kwargs)
##
# Print error info and exit.
#
def fatal(*args, prefix='FATAL:', **kwargs):
debug(*args, prefix=prefix, file=sys.stderr, output=True, **kwargs)
sys.exit(1)
##
# Print warning info.
#
def warn(*args, prefix='WARNING:', output=False, **kwargs):
if output or flags['debug']:
debug(*args, prefix=prefix, file=sys.stderr, output=True, **kwargs)
##
# Print info.
#
def info(*args, prefix='INFO:', output=False, **kwargs):
if output or flags['debug']:
debug(*args, prefix=prefix, output=True, **kwargs)
# do it
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 19:38:46 2019
@author: Scarc
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1]
Y = dataset.iloc[:, 4]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder_X = LabelEncoder()
X.iloc[: , 3] = labelEncoder_X.fit_transform(X.iloc[:,3])
# categorical_features was removed from OneHotEncoder in newer scikit-learn;
# use a ColumnTransformer to one-hot encode the "State" column (index 3) instead.
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([('state_ohe', OneHotEncoder(), [3])], remainder='passthrough', sparse_threshold=0)
X = ct.fit_transform(X)
#Avoiding the dummy var trap
X = X[:,1:]
from sklearn.model_selection import train_test_split as tts
X_train,X_test,Y_train,Y_test = tts(X,Y,test_size = 0.2,random_state = 0)
#Fitting Multiple Linear regression to the training set
from sklearn.linear_model import LinearRegression as lr
regressor = lr()
regressor.fit(X_train,Y_train)
#Predicting the test set results
Y_pred = regressor.predict(X_test)
#Building the optimal model using Backward Elimination
import statsmodels.api as sm
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)
X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = Y,exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,1,3,4,5]]
regressor_OLS = sm.OLS(endog = Y,exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,3,4,5]]
regressor_OLS = sm.OLS(endog = Y,exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,3,5]]
regressor_OLS = sm.OLS(endog = Y,exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,3]]
regressor_OLS = sm.OLS(endog = Y,exog = X_opt).fit()
regressor_OLS.summary()
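# The block above repeats the fit/inspect/drop cycle by hand. As an optional sketch
# (not part of the original script), the helper below automates backward elimination:
# it refits OLS and removes the regressor with the largest p-value until every
# remaining p-value is at or below the chosen significance level.
def backward_elimination(y, exog, significance_level=0.05):
    """Iteratively drop the least significant column of exog; return the reduced exog and final fit."""
    exog = np.asarray(exog, dtype=float)
    while True:
        model = sm.OLS(endog=y, exog=exog).fit()
        pvals = np.asarray(model.pvalues)
        if pvals.max() <= significance_level or exog.shape[1] == 1:
            return exog, model
        exog = np.delete(exog, int(np.argmax(pvals)), axis=1)

# Example (X already contains the constant column added above):
#   X_final, final_model = backward_elimination(Y, X)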
|
from django.conf.urls import url
from . import views
SLUG_RE = r'(?P<slug>[-a-zA-Z0-9_@]+)'
urlpatterns = [
url('^$', views.HomeView.as_view(), name='status.index'),
url('^team/%s/$' % SLUG_RE, views.TeamView.as_view(), name='status.team'),
url('^project/%s/$' % SLUG_RE, views.ProjectView.as_view(), name='status.project'),
url('^user/%s/$' % SLUG_RE, views.UserView.as_view(), name='status.user'),
url(r'^status/(?P<pk>\d{1,8})/$', views.StatusView.as_view(), name='status.status'),
url('^weekly/$', views.WeeklyView.as_view(), name='status.weekly'),
url('^statusize/$', views.statusize, name='status.statusize'),
url('^search/$', views.SearchView.as_view(), name='status.search'),
# profile and signin
url('^accounts/profile/$', views.ProfileView.as_view(), name='users.profile'),
url('^accounts/login/$', views.LoginView.as_view(), name='users.loginform'),
# feeds
url('^statuses.xml$', views.MainFeed(), name='status.index_feed'),
url('^user/%s.xml$' % SLUG_RE, views.UserFeed(), name='status.user_feed'),
url('^user/%s.json$' % SLUG_RE, views.UserFeedJSON.as_view(), name='status.user_feed'),
url('^team/%s.xml$' % SLUG_RE, views.TeamFeed(), name='status.team_feed'),
url('^project/%s.xml$' % SLUG_RE, views.ProjectFeed(), name='status.project_feed'),
# csp
url('^csp-violation-capture$', views.csp_violation_capture),
# robots
url('^robots\\.txt$', views.robots_txt),
]
|
from abc import ABC, abstractmethod
class istrategy(ABC):
@abstractmethod
def buildmaps(self, start, end):
pass
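# A minimal sketch (not part of the original module) of a concrete strategy that
# satisfies the interface; the class name and return value are illustrative
# assumptions only.
class directstrategy(istrategy):
    def buildmaps(self, start, end):
        # A real strategy would compute a route/map between the endpoints; this
        # placeholder just returns them to demonstrate the required signature.
        return [start, end]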
|
# terrascript/mysql/__init__.py
import terrascript
class mysql(terrascript.Provider):
pass
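# Illustrative usage only (assumes the classic terrascript workflow in which a
# Terrascript object collects providers; the connection parameters are examples):
#
#   ts = terrascript.Terrascript()
#   ts += mysql(endpoint="localhost:3306", username="root", password="secret")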
|
#
# import pytest
#
# from twisted.web.resource import NoResource
#
# from txweb.web_views import WebSite
# from txweb.http_codes import UnrenderableException
# from .helper import MockRequest
#
# def test_basic_idea():
#
# app = WebSite()
#
# @app.add("/nexus")
# class PersistentObject(object):
#
# def __init__(self):
# self._number = 0
#
#
# @app.expose("/number")
# def respond_number(self, request):
# return 1234
#
# @app.expose("/greeting")
# def render_response_says_hello(self, request):
# return "Hello"
#
#
# @app.expose("/add_one")
# def adds_to_passed_get_argument(self, request):
# """
# subviews do not need to start with render_
# """
# input = int(request.args[b'number'][0])
#
# return input + 1
#
# @app.expose("/counter")
# def increments_persistant_value(self, request):
# self._number += 1
# return self._number
#
#
# assert len(app.resource._route_map._rules) == 4
#
# number_request = MockRequest([], "/nexus/number")
# number_resource = app.getResourceFor(number_request)
#
# assert isinstance(number_resource, NoResource) is False
# expected = b"1234"
# actual = number_resource.render(number_request)
# assert actual == expected
#
# add_request = MockRequest([], "/nexus/add_one", {b"number":5})
# resource = app.getResourceFor(add_request)
# expected = b"6"
# actual = resource.render(add_request)
# assert actual == expected
#
# incrementer = MockRequest([], "/nexus/counter")
# assert app.getResourceFor(incrementer).render(incrementer) == 1 #This is a bug because NOT_DONE_YET =='s 1
# assert app.getResourceFor(incrementer).render(incrementer) == b"2"
# assert app.getResourceFor(incrementer).render(incrementer) == b"3"
#
#
#
#
# def test_throws_exception_on_inaccessible_view_class():
#
#
# app = WebSite()
#
# with pytest.raises(UnrenderableException):
# @app.add("/base")
# class Foo:
# pass
#
#
#
#
#
#
|
from application import db
from datetime import datetime
from flask import Blueprint
from flask_login import current_user
from flask import current_app as app
from flask import request, jsonify, make_response
from flask_jwt_extended import jwt_required, get_jwt_identity
from application.auth.models import User
from .models import Memory
import logging
# Blueprint Configuration
memories_bp = Blueprint('memories_bp', __name__)
@memories_bp.route('/api/memories', methods=['GET'])
@jwt_required
def getAllMemories():
response = {}
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
userMemories = Memory.query.filter(Memory.user_id == user.id).order_by(Memory.id.asc()).all()
from .models import memories_schema
response["items"] = memories_schema.dump(userMemories)
return make_response(jsonify(response), 200)
@memories_bp.route('/api/memory', methods=['POST'])
@jwt_required
def addMemory():
response = {}
data = request.get_json()
if not data:
response["message"] = "No input data provided"
return make_response(jsonify(response), 400)
try:
from .models import memory_schema
memory = memory_schema.load(data)
if memory.title and memory.description and memory.image:
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
memory.user_id=user.id
memory.uploaded = datetime.now()
# Validate friends list
existing_friends = []
for friend_name in memory.friends:
friend = User.query.filter(User.username == friend_name).first()
if friend is not None and user.is_friend(friend):
existing_friends.append(friend_name)
memory.friends = existing_friends
db.session.add(memory)
db.session.commit()
response["memory"] = memory_schema.dump(memory)
return make_response(jsonify(response), 200)
else:
response["message"] = "Incorrect request parameters"
return make_response(jsonify(response), 400)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
@memories_bp.route('/api/memories/<int:memory_id>', methods=['PUT'])
@jwt_required
def updateMemory(memory_id):
response = {}
if not request.view_args:
response["message"] = "No query parameters received"
return make_response(jsonify(response), 200)
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
from .models import Memory
memory = Memory.query.filter(Memory.id == memory_id, Memory.user_id == user.id).first()
if memory is None:
response["message"] = "Memory with current ID was not found"
return make_response(jsonify(response), 404)
data = request.get_json()
if not data:
response["message"] = "No input data provided"
return make_response(jsonify(response), 400)
try:
from .models import memory_schema
loaded_memory = memory_schema.load(data)
loaded_memory.id = memory.id
loaded_memory.uploaded = datetime.now()
db.session.merge(loaded_memory)
db.session.commit()
response["memory"] = memory_schema.dump(loaded_memory)
return make_response(jsonify(response), 200)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
@memories_bp.route('/api/memories/<int:memory_id>', methods=['DELETE'])
@jwt_required
def deleteMemory(memory_id):
response = {}
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
from .models import Memory
memory = Memory.query.filter(Memory.id == memory_id, Memory.user_id == user.id).first()
if memory is None:
response["message"] = "Memory with current ID was not found"
return make_response(jsonify(response), 404)
try:
db.session.delete(memory)
db.session.commit()
from .models import memory_schema
response["memory"] = memory_schema.dump(memory)
return make_response(jsonify(response), 200)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
@memories_bp.route('/api/memories/drafts', methods=['GET'])
@jwt_required
def getAllMemoryDrafts():
response = {}
try:
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
from .models import MemoryDraft
from .models import memories_drafts_schema
userMemoriesDrafts = MemoryDraft.query.filter(MemoryDraft.user_id == user.id).order_by(MemoryDraft.id.asc()).all()
response["items"] = memories_drafts_schema.dump(userMemoriesDrafts)
return make_response(jsonify(response), 200)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
@memories_bp.route('/api/memories/draft', methods=['POST'])
@jwt_required
def addMemoryDraft():
response = {}
data = request.get_json()
if not data:
response["message"] = "No input data provided"
return make_response(jsonify(response), 400)
try:
from .models import memory_draft_schema
memory_draft = memory_draft_schema.load(data)
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
memory_draft.user_id=user.id
# Validate friends list
existing_friends = []
for friend_name in memory_draft.friends:
friend = User.query.filter(User.username == friend_name).first()
if friend is not None and user.is_friend(friend):
existing_friends.append(friend_name)
memory_draft.friends = existing_friends
db.session.add(memory_draft)
db.session.commit()
response["memory_draft"] = memory_draft_schema.dump(memory_draft)
return make_response(jsonify(response), 200)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
@memories_bp.route('/api/memories/draft/<int:draft_id>', methods=['PUT'])
@jwt_required
def updateMemoryDraft(draft_id):
response = {}
if not request.view_args:
response["message"] = "No query parameters received"
return make_response(jsonify(response), 200)
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
from .models import MemoryDraft
memory_draft = MemoryDraft.query.filter(MemoryDraft.id == draft_id, MemoryDraft.user_id == user.id).first()
if memory_draft is None:
response["message"] = "MemoryDraft with current ID was not found"
return make_response(jsonify(response), 404)
data = request.get_json()
if not data:
response["message"] = "No input data provided"
return make_response(jsonify(response), 400)
try:
from .models import memory_draft_schema
loaded_memory_draft = memory_draft_schema.load(data)
loaded_memory_draft.id = memory_draft.id
db.session.merge(loaded_memory_draft)
db.session.commit()
response["memory_draft"] = memory_draft_schema.dump(loaded_memory_draft)
return make_response(jsonify(response), 200)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
@memories_bp.route('/api/memories/draft/<int:draft_id>', methods=['DELETE'])
@jwt_required
def deleteMemoryDraft(draft_id):
response = {}
username = get_jwt_identity()
user = User.query.filter(User.username == username).first()
from .models import MemoryDraft
memory_draft = MemoryDraft.query.filter(MemoryDraft.id == draft_id, MemoryDraft.user_id == user.id).first()
if memory_draft is None:
response["message"] = "MemoryDraft with current ID was not found"
return make_response(jsonify(response), 404)
try:
db.session.delete(memory_draft)
db.session.commit()
from .models import memory_draft_schema
response["memory_draft"] = memory_draft_schema.dump(memory_draft)
return make_response(jsonify(response), 200)
except Exception as ex:
response["message"] = "Error occured during request processing"
logging.error(ex)
return make_response(jsonify(response), 500)
|
'''
This file contains classes and functions for representing, solving, and simulating
agents who must allocate their resources among consumption, saving in a risk-free
asset (with a low return), and saving in a risky asset (with higher average return).
'''
import numpy as np
from scipy.optimize import minimize_scalar
from copy import deepcopy
from HARK import Solution, NullFunc, AgentType # Basic HARK features
from HARK.ConsumptionSaving.ConsIndShockModel import(
IndShockConsumerType, # PortfolioConsumerType inherits from it
ValueFunc, # For representing 1D value function
MargValueFunc, # For representing 1D marginal value function
utility, # CRRA utility function
utility_inv, # Inverse CRRA utility function
utilityP, # CRRA marginal utility function
utility_invP, # Derivative of inverse CRRA utility function
utilityP_inv, # Inverse CRRA marginal utility function
init_idiosyncratic_shocks # Baseline dictionary to build on
)
from HARK.ConsumptionSaving.ConsGenIncProcessModel import(
ValueFunc2D, # For representing 2D value function
MargValueFunc2D # For representing 2D marginal value function
)
from HARK.distribution import combineIndepDstns
from HARK.distribution import Lognormal, Bernoulli # Random draws for simulating agents
from HARK.interpolation import(
LinearInterp, # Piecewise linear interpolation
CubicInterp, # Piecewise cubic interpolation
LinearInterpOnInterp1D, # Interpolator over 1D interpolations
BilinearInterp, # 2D interpolator
ConstantFunction, # Interpolator-like class that returns constant value
IdentityFunction # Interpolator-like class that returns one of its arguments
)
# Define a class to represent the single period solution of the portfolio choice problem
class PortfolioSolution(Solution):
'''
A class for representing the single period solution of the portfolio choice model.
Parameters
----------
cFuncAdj : Interp1D
Consumption function over normalized market resources when the agent is able
to adjust their portfolio shares.
ShareFuncAdj : Interp1D
Risky share function over normalized market resources when the agent is able
to adjust their portfolio shares.
vFuncAdj : ValueFunc
Value function over normalized market resources when the agent is able to
adjust their portfolio shares.
vPfuncAdj : MargValueFunc
Marginal value function over normalized market resources when the agent is able
to adjust their portfolio shares.
cFuncFxd : Interp2D
Consumption function over normalized market resources and risky portfolio share
when the agent is NOT able to adjust their portfolio shares, so they are fixed.
ShareFuncFxd : Interp2D
Risky share function over normalized market resources and risky portfolio share
when the agent is NOT able to adjust their portfolio shares, so they are fixed.
This should always be an IdentityFunc, by definition.
vFuncFxd : ValueFunc2D
Value function over normalized market resources and risky portfolio share when
the agent is NOT able to adjust their portfolio shares, so they are fixed.
dvdmFuncFxd : MargValueFunc2D
Marginal value of mNrm function over normalized market resources and risky
portfolio share when the agent is NOT able to adjust their portfolio shares,
so they are fixed.
dvdsFuncFxd : MargValueFunc2D
Marginal value of Share function over normalized market resources and risky
portfolio share when the agent is NOT able to adjust their portfolio shares,
so they are fixed.
mNrmMin
'''
distance_criteria = ['vPfuncAdj']
def __init__(self,
cFuncAdj=None,
ShareFuncAdj=None,
vFuncAdj=None,
vPfuncAdj=None,
cFuncFxd=None,
ShareFuncFxd=None,
vFuncFxd=None,
dvdmFuncFxd=None,
dvdsFuncFxd=None
):
# Change any missing function inputs to NullFunc
if cFuncAdj is None:
cFuncAdj = NullFunc()
if cFuncFxd is None:
cFuncFxd = NullFunc()
if ShareFuncAdj is None:
ShareFuncAdj = NullFunc()
if ShareFuncFxd is None:
ShareFuncFxd = NullFunc()
if vFuncAdj is None:
vFuncAdj = NullFunc()
if vFuncFxd is None:
vFuncFxd = NullFunc()
if vPfuncAdj is None:
vPfuncAdj = NullFunc()
if dvdmFuncFxd is None:
dvdmFuncFxd = NullFunc()
if dvdsFuncFxd is None:
dvdsFuncFxd = NullFunc()
# Set attributes of self
self.cFuncAdj = cFuncAdj
self.cFuncFxd = cFuncFxd
self.ShareFuncAdj = ShareFuncAdj
self.ShareFuncFxd = ShareFuncFxd
self.vFuncAdj = vFuncAdj
self.vFuncFxd = vFuncFxd
self.vPfuncAdj = vPfuncAdj
self.dvdmFuncFxd = dvdmFuncFxd
self.dvdsFuncFxd = dvdsFuncFxd
class PortfolioConsumerType(IndShockConsumerType):
"""
A consumer type with a portfolio choice. This agent type has log-normal return
factors. Their problem is defined by a coefficient of relative risk aversion,
intertemporal discount factor, risk-free interest factor, and time sequences of
permanent income growth rate, survival probability, and permanent and transitory
income shock standard deviations (in logs). The agent may also invest in a risky
asset, which has a higher average return than the risk-free asset. He *might*
have age-varying beliefs about the risky return; if he does, then "true" values
of the risky asset's return distribution must also be specified.
"""
poststate_vars_ = ['aNrmNow', 'pLvlNow', 'ShareNow', 'AdjustNow']
time_inv_ = deepcopy(IndShockConsumerType.time_inv_)
time_inv_ = time_inv_ + ['AdjustPrb', 'DiscreteShareBool']
def __init__(self, cycles=1, verbose=False, quiet=False, **kwds):
params = init_portfolio.copy()
params.update(kwds)
kwds = params
# Initialize a basic consumer type
IndShockConsumerType.__init__(
self,
cycles=cycles,
verbose=verbose,
quiet=quiet,
**kwds
)
# Set the solver for the portfolio model, and update various constructed attributes
self.solveOnePeriod = solveConsPortfolio
self.update()
def preSolve(self):
AgentType.preSolve(self)
self.updateSolutionTerminal()
def update(self):
IndShockConsumerType.update(self)
self.updateRiskyDstn()
self.updateShockDstn()
self.updateShareGrid()
self.updateShareLimit()
def updateSolutionTerminal(self):
'''
Solves the terminal period of the portfolio choice problem. The solution is
trivial, as usual: consume all market resources, and put nothing in the risky
asset (because you have nothing anyway).
Parameters
----------
None
Returns
-------
None
'''
# Consume all market resources: c_T = m_T
cFuncAdj_terminal = IdentityFunction()
cFuncFxd_terminal = IdentityFunction(i_dim=0, n_dims=2)
# Risky share is irrelevant-- no end-of-period assets; set to zero
ShareFuncAdj_terminal = ConstantFunction(0.)
ShareFuncFxd_terminal = IdentityFunction(i_dim=1, n_dims=2)
# Value function is simply utility from consuming market resources
vFuncAdj_terminal = ValueFunc(cFuncAdj_terminal, self.CRRA)
vFuncFxd_terminal = ValueFunc2D(cFuncFxd_terminal, self.CRRA)
# Marginal value of market resources is marg utility at the consumption function
vPfuncAdj_terminal = MargValueFunc(cFuncAdj_terminal, self.CRRA)
dvdmFuncFxd_terminal = MargValueFunc2D(cFuncFxd_terminal, self.CRRA)
dvdsFuncFxd_terminal = ConstantFunction(0.) # No future, no marg value of Share
# Construct the terminal period solution
self.solution_terminal = PortfolioSolution(
cFuncAdj=cFuncAdj_terminal,
ShareFuncAdj=ShareFuncAdj_terminal,
vFuncAdj=vFuncAdj_terminal,
vPfuncAdj=vPfuncAdj_terminal,
cFuncFxd=cFuncFxd_terminal,
ShareFuncFxd=ShareFuncFxd_terminal,
vFuncFxd=vFuncFxd_terminal,
dvdmFuncFxd=dvdmFuncFxd_terminal,
dvdsFuncFxd=dvdsFuncFxd_terminal
)
def updateRiskyDstn(self):
'''
Creates the attributes RiskyDstn from the primitive attributes RiskyAvg,
RiskyStd, and RiskyCount, approximating the (perceived) distribution of
returns in each period of the cycle.
Parameters
----------
None
Returns
-------
None
'''
# Determine whether this instance has time-varying risk perceptions
if (type(self.RiskyAvg) is list) and (type(self.RiskyStd) is list) and (len(self.RiskyAvg) == len(self.RiskyStd)) and (len(self.RiskyAvg) == self.T_cycle):
self.addToTimeVary('RiskyAvg','RiskyStd')
elif (type(self.RiskyStd) is list) or (type(self.RiskyAvg) is list):
raise AttributeError('If RiskyAvg is time-varying, then RiskyStd must be as well, and they must both have length of T_cycle!')
else:
self.addToTimeInv('RiskyAvg','RiskyStd')
# Generate a discrete approximation to the risky return distribution if the
# agent has age-varying beliefs about the risky asset
if 'RiskyAvg' in self.time_vary:
RiskyDstn = []
for t in range(self.T_cycle):
RiskyAvgSqrd = self.RiskyAvg[t] ** 2
RiskyVar = self.RiskyStd[t] ** 2
mu = np.log(self.RiskyAvg[t] / (np.sqrt(1. + RiskyVar / RiskyAvgSqrd)))
sigma = np.sqrt(np.log(1. + RiskyVar / RiskyAvgSqrd))
RiskyDstn.append(Lognormal(mu=mu, sigma=sigma).approx(self.RiskyCount))
self.RiskyDstn = RiskyDstn
self.addToTimeVary('RiskyDstn')
# Generate a discrete approximation to the risky return distribution if the
# agent does *not* have age-varying beliefs about the risky asset (base case)
else:
RiskyAvgSqrd = self.RiskyAvg ** 2
RiskyVar = self.RiskyStd ** 2
mu = np.log(self.RiskyAvg / (np.sqrt(1. + RiskyVar / RiskyAvgSqrd)))
sigma = np.sqrt(np.log(1. + RiskyVar / RiskyAvgSqrd))
self.RiskyDstn = Lognormal(mu=mu, sigma=sigma).approx(self.RiskyCount)
self.addToTimeInv('RiskyDstn')
def updateShockDstn(self):
'''
Combine the income shock distribution (over PermShk and TranShk) with the
risky return distribution (RiskyDstn) to make a new attribute called ShockDstn.
Parameters
----------
None
Returns
-------
None
'''
if 'RiskyDstn' in self.time_vary:
self.ShockDstn = [combineIndepDstns(self.IncomeDstn[t], self.RiskyDstn[t]) for t in range(self.T_cycle)]
else:
self.ShockDstn = [combineIndepDstns(self.IncomeDstn[t], self.RiskyDstn) for t in range(self.T_cycle)]
self.addToTimeVary('ShockDstn')
# Mark whether the risky returns and income shocks are independent (they are)
self.IndepDstnBool = True
self.addToTimeInv('IndepDstnBool')
def updateShareGrid(self):
'''
Creates the attribute ShareGrid as an evenly spaced grid on [0.,1.], using
the primitive parameter ShareCount.
Parameters
----------
None
Returns
-------
None
'''
self.ShareGrid = np.linspace(0.,1.,self.ShareCount)
self.addToTimeInv('ShareGrid')
def updateShareLimit(self):
'''
Creates the attribute ShareLimit, representing the limiting lower bound of
risky portfolio share as mNrm goes to infinity.
Parameters
----------
None
Returns
-------
None
'''
if 'RiskyDstn' in self.time_vary:
self.ShareLimit = []
for t in range(self.T_cycle):
RiskyDstn = self.RiskyDstn[t]
temp_f = lambda s : -((1.-self.CRRA)**-1)*np.dot((self.Rfree + s*(RiskyDstn.X-self.Rfree))**(1.-self.CRRA), RiskyDstn.pmf)
SharePF = minimize_scalar(temp_f, bounds=(0.0, 1.0), method='bounded').x
self.ShareLimit.append(SharePF)
self.addToTimeVary('ShareLimit')
else:
RiskyDstn = self.RiskyDstn
temp_f = lambda s : -((1.-self.CRRA)**-1)*np.dot((self.Rfree + s*(RiskyDstn.X-self.Rfree))**(1.-self.CRRA), RiskyDstn.pmf)
SharePF = minimize_scalar(temp_f, bounds=(0.0, 1.0), method='bounded').x
self.ShareLimit = SharePF
self.addToTimeInv('ShareLimit')
def getRisky(self):
'''
Sets the attribute RiskyNow as a single draw from a lognormal distribution.
Uses the attributes RiskyAvgTrue and RiskyStdTrue if RiskyAvg is time-varying,
else just uses the single values from RiskyAvg and RiskyStd.
Parameters
----------
None
Returns
-------
None
'''
if 'RiskyDstn' in self.time_vary:
RiskyAvg = self.RiskyAvgTrue
RiskyStd = self.RiskyStdTrue
else:
RiskyAvg = self.RiskyAvg
RiskyStd = self.RiskyStd
RiskyAvgSqrd = RiskyAvg**2
RiskyVar = RiskyStd**2
mu = np.log(RiskyAvg / (np.sqrt(1. + RiskyVar / RiskyAvgSqrd)))
sigma = np.sqrt(np.log(1. + RiskyVar / RiskyAvgSqrd))
self.RiskyNow = Lognormal(mu, sigma).draw(1, seed=self.RNG.randint(0, 2**31-1))
def getAdjust(self):
'''
Sets the attribute AdjustNow as a boolean array of size AgentCount, indicating
whether each agent is able to adjust their risky portfolio share this period.
Uses the attribute AdjustPrb to draw from a Bernoulli distribution.
Parameters
----------
None
Returns
-------
None
'''
self.AdjustNow = Bernoulli(self.AdjustPrb).draw(self.AgentCount, seed=self.RNG.randint(0, 2**31-1))
def getRfree(self):
'''
Calculates realized return factor for each agent, using the attributes Rfree,
RiskyNow, and ShareNow. This method is a bit of a misnomer, as the return
factor is not riskless, but would more accurately be labeled as Rport. However,
this method makes the portfolio model compatible with its parent class.
Parameters
----------
None
Returns
-------
Rport : np.array
Array of size AgentCount with each simulated agent's realized portfolio
return factor. Will be used by getStates() to calculate mNrmNow, where it
will be mislabeled as "Rfree".
'''
Rport = self.ShareNow*self.RiskyNow + (1.-self.ShareNow)*self.Rfree
self.RportNow = Rport
return Rport
def initializeSim(self):
'''
Initialize the state of simulation attributes. Simply calls the same method
for IndShockConsumerType, then sets the type of AdjustNow to bool.
Parameters
----------
None
Returns
-------
None
'''
IndShockConsumerType.initializeSim(self)
self.AdjustNow = self.AdjustNow.astype(bool)
def simBirth(self,which_agents):
'''
Create new agents to replace ones who have recently died; takes draws of
initial aNrm and pLvl, as in ConsIndShockModel, then sets Share and Adjust
to zero as initial values.
Parameters
----------
which_agents : np.array
Boolean array of size AgentCount indicating which agents should be "born".
Returns
-------
None
'''
IndShockConsumerType.simBirth(self,which_agents)
self.ShareNow[which_agents] = 0.
self.AdjustNow[which_agents] = False
def getShocks(self):
'''
Draw idiosyncratic income shocks, just as for IndShockConsumerType, then draw
a single common value for the risky asset return. Also draws whether each
agent is able to update their risky asset share this period.
Parameters
----------
None
Returns
-------
None
'''
IndShockConsumerType.getShocks(self)
self.getRisky()
self.getAdjust()
def getControls(self):
'''
Calculates consumption cNrmNow and risky portfolio share ShareNow using
the policy functions in the attribute solution. These are stored as attributes.
Parameters
----------
None
Returns
-------
None
'''
cNrmNow = np.zeros(self.AgentCount) + np.nan
ShareNow = np.zeros(self.AgentCount) + np.nan
# Loop over each period of the cycle, getting controls separately depending on "age"
for t in range(self.T_cycle):
these = t == self.t_cycle
# Get controls for agents who *can* adjust their portfolio share
those = np.logical_and(these, self.AdjustNow)
cNrmNow[those] = self.solution[t].cFuncAdj(self.mNrmNow[those])
ShareNow[those] = self.solution[t].ShareFuncAdj(self.mNrmNow[those])
# Get Controls for agents who *can't* adjust their portfolio share
those = np.logical_and(these, np.logical_not(self.AdjustNow))
cNrmNow[those] = self.solution[t].cFuncFxd(self.mNrmNow[those], self.ShareNow[those])
ShareNow[those] = self.solution[t].ShareFuncFxd(self.mNrmNow[those], self.ShareNow[those])
# Store controls as attributes of self
self.cNrmNow = cNrmNow
self.ShareNow = ShareNow
# Define a non-object-oriented one period solver
def solveConsPortfolio(solution_next,ShockDstn,IncomeDstn,RiskyDstn,
LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
BoroCnstArt,aXtraGrid,ShareGrid,vFuncBool,AdjustPrb,
DiscreteShareBool,ShareLimit,IndepDstnBool):
'''
Solve the one period problem for a portfolio-choice consumer.
Parameters
----------
solution_next : PortfolioSolution
Solution to next period's problem.
ShockDstn : [np.array]
List with four arrays: discrete probabilities, permanent income shocks,
transitory income shocks, and risky returns. This is only used if the
input IndepDstnBool is False, indicating that income and return distributions
can't be assumed to be independent.
IncomeDstn : [np.array]
List with three arrays: discrete probabilities, permanent income shocks,
        and transitory income shocks. This is only used if the input IndepDstnBool
is True, indicating that income and return distributions are independent.
RiskyDstn : [np.array]
List with two arrays: discrete probabilities and risky asset returns. This
is only used if the input IndepDstnBool is True, indicating that income
and return distributions are independent.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. In this model, it is *required* to be zero.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
ShareGrid : np.array
Array of risky portfolio shares on which to define the interpolation
of the consumption function when Share is fixed.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
AdjustPrb : float
Probability that the agent will be able to update his portfolio share.
DiscreteShareBool : bool
Indicator for whether risky portfolio share should be optimized on the
continuous [0,1] interval using the FOC (False), or instead only selected
from the discrete set of values in ShareGrid (True). If True, then
vFuncBool must also be True.
ShareLimit : float
Limiting lower bound of risky portfolio share as mNrm approaches infinity.
IndepDstnBool : bool
Indicator for whether the income and risky return distributions are in-
dependent of each other, which can speed up the expectations step.
Returns
-------
solution_now : PortfolioSolution
The solution to the single period consumption-saving with portfolio choice
problem. Includes two consumption and risky share functions: one for when
the agent can adjust his portfolio share (Adj) and when he can't (Fxd).
'''
# Make sure the individual is liquidity constrained. Allowing a consumer to
# borrow *and* invest in an asset with unbounded (negative) returns is a bad mix.
if BoroCnstArt != 0.0:
raise ValueError('PortfolioConsumerType must have BoroCnstArt=0.0!')
# Make sure that if risky portfolio share is optimized only discretely, then
# the value function is also constructed (else this task would be impossible).
if (DiscreteShareBool and (not vFuncBool)):
raise ValueError('PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!')
# Define temporary functions for utility and its derivative and inverse
u = lambda x : utility(x, CRRA)
uP = lambda x : utilityP(x, CRRA)
uPinv = lambda x : utilityP_inv(x, CRRA)
n = lambda x : utility_inv(x, CRRA)
nP = lambda x : utility_invP(x, CRRA)
# Unpack next period's solution
vPfuncAdj_next = solution_next.vPfuncAdj
dvdmFuncFxd_next = solution_next.dvdmFuncFxd
dvdsFuncFxd_next = solution_next.dvdsFuncFxd
vFuncAdj_next = solution_next.vFuncAdj
vFuncFxd_next = solution_next.vFuncFxd
# Major method fork: (in)dependent risky asset return and income distributions
if IndepDstnBool: # If the distributions ARE independent...
# Unpack the shock distribution
IncPrbs_next = IncomeDstn.pmf
PermShks_next = IncomeDstn.X[0]
TranShks_next = IncomeDstn.X[1]
Rprbs_next = RiskyDstn.pmf
Risky_next = RiskyDstn.X
zero_bound = (np.min(TranShks_next) == 0.) # Flag for whether the natural borrowing constraint is zero
RiskyMax = np.max(Risky_next)
# bNrm represents R*a, balances after asset return shocks but before income.
# This just uses the highest risky return as a rough shifter for the aXtraGrid.
if zero_bound:
aNrmGrid = aXtraGrid
bNrmGrid = np.insert(RiskyMax*aXtraGrid, 0, np.min(Risky_next)*aXtraGrid[0])
else:
aNrmGrid = np.insert(aXtraGrid, 0, 0.0) # Add an asset point at exactly zero
bNrmGrid = RiskyMax*np.insert(aXtraGrid, 0, 0.0)
# Get grid and shock sizes, for easier indexing
aNrm_N = aNrmGrid.size
bNrm_N = bNrmGrid.size
Share_N = ShareGrid.size
Income_N = IncPrbs_next.size
Risky_N = Rprbs_next.size
# Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncomeDstn
bNrm_tiled = np.tile(np.reshape(bNrmGrid, (bNrm_N,1,1)), (1,Share_N,Income_N))
Share_tiled = np.tile(np.reshape(ShareGrid, (1,Share_N,1)), (bNrm_N,1,Income_N))
IncPrbs_tiled = np.tile(np.reshape(IncPrbs_next, (1,1,Income_N)), (bNrm_N,Share_N,1))
PermShks_tiled = np.tile(np.reshape(PermShks_next, (1,1,Income_N)), (bNrm_N,Share_N,1))
TranShks_tiled = np.tile(np.reshape(TranShks_next, (1,1,Income_N)), (bNrm_N,Share_N,1))
# Calculate future realizations of market resources
mNrm_next = bNrm_tiled/(PermShks_tiled*PermGroFac) + TranShks_tiled
Share_next = Share_tiled
# Evaluate realizations of marginal value of market resources next period
dvdmAdj_next = vPfuncAdj_next(mNrm_next)
if AdjustPrb < 1.:
dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
dvdm_next = AdjustPrb*dvdmAdj_next + (1.-AdjustPrb)*dvdmFxd_next # Combine by adjustment probability
else: # Don't bother evaluating if there's no chance that portfolio share is fixed
dvdm_next = dvdmAdj_next
# Evaluate realizations of marginal value of risky share next period
dvdsAdj_next = np.zeros_like(mNrm_next) # No marginal value of Share if it's a free choice!
if AdjustPrb < 1.:
dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
dvds_next = AdjustPrb*dvdsAdj_next + (1.-AdjustPrb)*dvdsFxd_next # Combine by adjustment probability
else: # Don't bother evaluating if there's no chance that portfolio share is fixed
dvds_next = dvdsAdj_next
# If the value function has been requested, evaluate realizations of value
if vFuncBool:
vAdj_next = vFuncAdj_next(mNrm_next)
if AdjustPrb < 1.:
vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
v_next = AdjustPrb*vAdj_next + (1.-AdjustPrb)*vFxd_next
else: # Don't bother evaluating if there's no chance that portfolio share is fixed
v_next = vAdj_next
else:
v_next = np.zeros_like(dvdm_next) # Trivial array
# Calculate intermediate marginal value of bank balances by taking expectations over income shocks
temp_fac_A = uP(PermShks_tiled*PermGroFac) # Will use this in a couple places
dvdb_intermed = np.sum(IncPrbs_tiled*temp_fac_A*dvdm_next, axis=2)
dvdbNvrs_intermed = uPinv(dvdb_intermed)
dvdbNvrsFunc_intermed = BilinearInterp(dvdbNvrs_intermed, bNrmGrid, ShareGrid)
dvdbFunc_intermed = MargValueFunc2D(dvdbNvrsFunc_intermed, CRRA)
# Calculate intermediate value by taking expectations over income shocks
temp_fac_B = (PermShks_tiled*PermGroFac)**(1.-CRRA) # Will use this below
if vFuncBool:
v_intermed = np.sum(IncPrbs_tiled*temp_fac_B*v_next, axis=2)
vNvrs_intermed = n(v_intermed)
vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid, ShareGrid)
vFunc_intermed = ValueFunc2D(vNvrsFunc_intermed, CRRA)
# Calculate intermediate marginal value of risky portfolio share by taking expectations
dvds_intermed = np.sum(IncPrbs_tiled*temp_fac_B*dvds_next, axis=2)
dvdsFunc_intermed = BilinearInterp(dvds_intermed, bNrmGrid, ShareGrid)
# Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn
aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N,1,1)), (1,Share_N,Risky_N))
Share_tiled = np.tile(np.reshape(ShareGrid, (1,Share_N,1)), (aNrm_N,1,Risky_N))
Rprbs_tiled = np.tile(np.reshape(Rprbs_next, (1,1,Risky_N)), (aNrm_N,Share_N,1))
Risky_tiled = np.tile(np.reshape(Risky_next, (1,1,Risky_N)), (aNrm_N,Share_N,1))
# Calculate future realizations of bank balances bNrm
Share_next = Share_tiled
Rxs = Risky_tiled - Rfree
Rport = Rfree + Share_next*Rxs
bNrm_next = Rport*aNrm_tiled
# Evaluate realizations of value and marginal value after asset returns are realized
dvdb_next = dvdbFunc_intermed(bNrm_next, Share_next)
dvds_next = dvdsFunc_intermed(bNrm_next, Share_next)
if vFuncBool:
v_next = vFunc_intermed(bNrm_next, Share_next)
else:
v_next = np.zeros_like(dvdb_next)
# Calculate end-of-period marginal value of assets by taking expectations
EndOfPrddvda = DiscFac*LivPrb*np.sum(Rprbs_tiled*Rport*dvdb_next, axis=2)
EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)
# Calculate end-of-period value by taking expectations
if vFuncBool:
EndOfPrdv = DiscFac*LivPrb*np.sum(Rprbs_tiled*v_next, axis=2)
EndOfPrdvNvrs = n(EndOfPrdv)
# Calculate end-of-period marginal value of risky portfolio share by taking expectations
EndOfPrddvds = DiscFac*LivPrb*np.sum(Rprbs_tiled*(Rxs*aNrm_tiled*dvdb_next + dvds_next), axis=2)
else: # If the distributions are NOT independent...
# Unpack the shock distribution
ShockPrbs_next = ShockDstn[0]
PermShks_next = ShockDstn[1]
TranShks_next = ShockDstn[2]
Risky_next = ShockDstn[3]
zero_bound = (np.min(TranShks_next) == 0.) # Flag for whether the natural borrowing constraint is zero
# Make tiled arrays to calculate future realizations of mNrm and Share; dimension order: mNrm, Share, shock
if zero_bound:
aNrmGrid = aXtraGrid
else:
aNrmGrid = np.insert(aXtraGrid, 0, 0.0) # Add an asset point at exactly zero
aNrm_N = aNrmGrid.size
Share_N = ShareGrid.size
Shock_N = ShockPrbs_next.size
aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N,1,1)), (1,Share_N,Shock_N))
Share_tiled = np.tile(np.reshape(ShareGrid, (1,Share_N,1)), (aNrm_N,1,Shock_N))
ShockPrbs_tiled = np.tile(np.reshape(ShockPrbs_next, (1,1,Shock_N)), (aNrm_N,Share_N,1))
PermShks_tiled = np.tile(np.reshape(PermShks_next, (1,1,Shock_N)), (aNrm_N,Share_N,1))
TranShks_tiled = np.tile(np.reshape(TranShks_next, (1,1,Shock_N)), (aNrm_N,Share_N,1))
Risky_tiled = np.tile(np.reshape(Risky_next, (1,1,Shock_N)), (aNrm_N,Share_N,1))
# Calculate future realizations of market resources
Rport = (1.-Share_tiled)*Rfree + Share_tiled*Risky_tiled
mNrm_next = Rport*aNrm_tiled/(PermShks_tiled*PermGroFac) + TranShks_tiled
Share_next = Share_tiled
# Evaluate realizations of marginal value of market resources next period
dvdmAdj_next = vPfuncAdj_next(mNrm_next)
if AdjustPrb < 1.:
dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
dvdm_next = AdjustPrb*dvdmAdj_next + (1.-AdjustPrb)*dvdmFxd_next # Combine by adjustment probability
else: # Don't bother evaluating if there's no chance that portfolio share is fixed
dvdm_next = dvdmAdj_next
# Evaluate realizations of marginal value of risky share next period
dvdsAdj_next = np.zeros_like(mNrm_next) # No marginal value of Share if it's a free choice!
if AdjustPrb < 1.:
dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
dvds_next = AdjustPrb*dvdsAdj_next + (1.-AdjustPrb)*dvdsFxd_next # Combine by adjustment probability
else: # Don't bother evaluating if there's no chance that portfolio share is fixed
dvds_next = dvdsAdj_next
# If the value function has been requested, evaluate realizations of value
if vFuncBool:
vAdj_next = vFuncAdj_next(mNrm_next)
if AdjustPrb < 1.:
vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
v_next = AdjustPrb*vAdj_next + (1.-AdjustPrb)*vFxd_next
else: # Don't bother evaluating if there's no chance that portfolio share is fixed
v_next = vAdj_next
else:
v_next = np.zeros_like(dvdm_next) # Trivial array
# Calculate end-of-period marginal value of assets by taking expectations
temp_fac_A = uP(PermShks_tiled*PermGroFac) # Will use this in a couple places
EndOfPrddvda = DiscFac*LivPrb*np.sum(ShockPrbs_tiled*Rport*temp_fac_A*dvdm_next, axis=2)
EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)
# Calculate end-of-period value by taking expectations
temp_fac_B = (PermShks_tiled*PermGroFac)**(1.-CRRA) # Will use this below
if vFuncBool:
EndOfPrdv = DiscFac*LivPrb*np.sum(ShockPrbs_tiled*temp_fac_B*v_next, axis=2)
EndOfPrdvNvrs = n(EndOfPrdv)
# Calculate end-of-period marginal value of risky portfolio share by taking expectations
Rxs = Risky_tiled - Rfree
EndOfPrddvds = DiscFac*LivPrb*np.sum(ShockPrbs_tiled*(Rxs*aNrm_tiled*temp_fac_A*dvdm_next + temp_fac_B*dvds_next), axis=2)
# Major method fork: discrete vs continuous choice of risky portfolio share
if DiscreteShareBool: # Optimization of Share on the discrete set ShareGrid
opt_idx = np.argmax(EndOfPrdv, axis=1)
Share_now = ShareGrid[opt_idx] # Best portfolio share is one with highest value
cNrmAdj_now = EndOfPrddvdaNvrs[np.arange(aNrm_N), opt_idx] # Take cNrm at that index as well
if not zero_bound:
Share_now[0] = 1. # aNrm=0, so there's no way to "optimize" the portfolio
cNrmAdj_now[0] = EndOfPrddvdaNvrs[0,-1] # Consumption when aNrm=0 does not depend on Share
else: # Optimization of Share on continuous interval [0,1]
# For values of aNrm at which the agent wants to put more than 100% into risky asset, constrain them
FOC_s = EndOfPrddvds
Share_now = np.zeros_like(aNrmGrid) # Initialize to putting everything in safe asset
cNrmAdj_now = np.zeros_like(aNrmGrid)
constrained = FOC_s[:,-1] > 0. # If agent wants to put more than 100% into risky asset, he is constrained
Share_now[constrained] = 1.0
if not zero_bound:
Share_now[0] = 1. # aNrm=0, so there's no way to "optimize" the portfolio
cNrmAdj_now[0] = EndOfPrddvdaNvrs[0,-1] # Consumption when aNrm=0 does not depend on Share
cNrmAdj_now[constrained] = EndOfPrddvdaNvrs[constrained,-1] # Get consumption when share-constrained
# For each value of aNrm, find the value of Share such that FOC-Share == 0.
# This loop can probably be eliminated, but it's such a small step that it won't speed things up much.
crossing = np.logical_and(FOC_s[:,1:] <= 0., FOC_s[:,:-1] >= 0.)
for j in range(aNrm_N):
if Share_now[j] == 0.:
try:
idx = np.argwhere(crossing[j,:])[0][0]
bot_s = ShareGrid[idx]
top_s = ShareGrid[idx+1]
bot_f = FOC_s[j,idx]
top_f = FOC_s[j,idx+1]
bot_c = EndOfPrddvdaNvrs[j,idx]
top_c = EndOfPrddvdaNvrs[j,idx+1]
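                    # alpha is the linear interpolation weight on the upper gridpoint, chosen
                    # so that the interpolated FOC is exactly zero between bot_s and top_s.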
alpha = 1. - top_f/(top_f-bot_f)
Share_now[j] = (1.-alpha)*bot_s + alpha*top_s
cNrmAdj_now[j] = (1.-alpha)*bot_c + alpha*top_c
except:
print('No optimal controls found for a=' + str(aNrmGrid[j]))
# Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio
mNrmAdj_now = aNrmGrid + cNrmAdj_now
# Construct the risky share function when the agent can adjust
if DiscreteShareBool:
mNrmAdj_mid = (mNrmAdj_now[1:] + mNrmAdj_now[:-1])/2
mNrmAdj_plus = mNrmAdj_mid*(1.+1e-12)
mNrmAdj_comb = (np.transpose(np.vstack((mNrmAdj_mid,mNrmAdj_plus)))).flatten()
mNrmAdj_comb = np.append(np.insert(mNrmAdj_comb,0,0.0), mNrmAdj_now[-1])
Share_comb = (np.transpose(np.vstack((Share_now,Share_now)))).flatten()
ShareFuncAdj_now = LinearInterp(mNrmAdj_comb, Share_comb)
else:
if zero_bound:
Share_lower_bound = ShareLimit
else:
Share_lower_bound = 1.0
Share_now = np.insert(Share_now, 0, Share_lower_bound)
ShareFuncAdj_now = LinearInterp(
np.insert(mNrmAdj_now,0,0.0),
Share_now,
intercept_limit=ShareLimit,
slope_limit=0.0)
# Construct the consumption function when the agent can adjust
cNrmAdj_now = np.insert(cNrmAdj_now, 0, 0.0)
cFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now,0,0.0), cNrmAdj_now)
# Construct the marginal value (of mNrm) function when the agent can adjust
vPfuncAdj_now = MargValueFunc(cFuncAdj_now, CRRA)
# Construct the consumption function when the agent *can't* adjust the risky share, as well
# as the marginal value of Share function
cFuncFxd_by_Share = []
dvdsFuncFxd_by_Share = []
for j in range(Share_N):
cNrmFxd_temp = EndOfPrddvdaNvrs[:,j]
mNrmFxd_temp = aNrmGrid + cNrmFxd_temp
cFuncFxd_by_Share.append(LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0), np.insert(cNrmFxd_temp, 0, 0.0)))
dvdsFuncFxd_by_Share.append(LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0), np.insert(EndOfPrddvds[:,j], 0, EndOfPrddvds[0,j])))
cFuncFxd_now = LinearInterpOnInterp1D(cFuncFxd_by_Share, ShareGrid)
dvdsFuncFxd_now = LinearInterpOnInterp1D(dvdsFuncFxd_by_Share, ShareGrid)
# The share function when the agent can't adjust his portfolio is trivial
ShareFuncFxd_now = IdentityFunction(i_dim=1, n_dims=2)
# Construct the marginal value of mNrm function when the agent can't adjust his share
dvdmFuncFxd_now = MargValueFunc2D(cFuncFxd_now, CRRA)
# If the value function has been requested, construct it now
if vFuncBool:
# First, make an end-of-period value function over aNrm and Share
EndOfPrdvNvrsFunc = BilinearInterp(EndOfPrdvNvrs, aNrmGrid, ShareGrid)
EndOfPrdvFunc = ValueFunc2D(EndOfPrdvNvrsFunc, CRRA)
# Construct the value function when the agent can adjust his portfolio
mNrm_temp = aXtraGrid # Just use aXtraGrid as our grid of mNrm values
cNrm_temp = cFuncAdj_now(mNrm_temp)
aNrm_temp = mNrm_temp - cNrm_temp
Share_temp = ShareFuncAdj_now(mNrm_temp)
v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
vNvrs_temp = n(v_temp)
vNvrsP_temp= uP(cNrm_temp)*nP(v_temp)
vNvrsFuncAdj = CubicInterp(
np.insert(mNrm_temp,0,0.0), # x_list
np.insert(vNvrs_temp,0,0.0), # f_list
np.insert(vNvrsP_temp,0,vNvrsP_temp[0])) # dfdx_list
vFuncAdj_now = ValueFunc(vNvrsFuncAdj, CRRA) # Re-curve the pseudo-inverse value function
# Construct the value function when the agent *can't* adjust his portfolio
mNrm_temp = np.tile(np.reshape(aXtraGrid, (aXtraGrid.size, 1)), (1, Share_N))
Share_temp = np.tile(np.reshape(ShareGrid, (1, Share_N)), (aXtraGrid.size, 1))
cNrm_temp = cFuncFxd_now(mNrm_temp, Share_temp)
aNrm_temp = mNrm_temp - cNrm_temp
v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
vNvrs_temp = n(v_temp)
vNvrsP_temp= uP(cNrm_temp)*nP(v_temp)
vNvrsFuncFxd_by_Share = []
for j in range(Share_N):
vNvrsFuncFxd_by_Share.append(CubicInterp(
np.insert(mNrm_temp[:,0],0,0.0), # x_list
np.insert(vNvrs_temp[:,j],0,0.0), # f_list
np.insert(vNvrsP_temp[:,j],0,vNvrsP_temp[j,0]))) #dfdx_list
vNvrsFuncFxd = LinearInterpOnInterp1D(vNvrsFuncFxd_by_Share, ShareGrid)
vFuncFxd_now = ValueFunc2D(vNvrsFuncFxd, CRRA)
else: # If vFuncBool is False, fill in dummy values
vFuncAdj_now = None
vFuncFxd_now = None
# Create and return this period's solution
return PortfolioSolution(
cFuncAdj = cFuncAdj_now,
ShareFuncAdj = ShareFuncAdj_now,
vPfuncAdj = vPfuncAdj_now,
vFuncAdj = vFuncAdj_now,
cFuncFxd = cFuncFxd_now,
ShareFuncFxd = ShareFuncFxd_now,
dvdmFuncFxd = dvdmFuncFxd_now,
dvdsFuncFxd = dvdsFuncFxd_now,
vFuncFxd = vFuncFxd_now
)
# Make a dictionary to specify a portfolio choice consumer type
init_portfolio = init_idiosyncratic_shocks.copy()
init_portfolio['RiskyAvg'] = 1.08 # Average return of the risky asset
init_portfolio['RiskyStd'] = 0.20 # Standard deviation of (log) risky returns
init_portfolio['RiskyCount'] = 5 # Number of integration nodes to use in approximation of risky returns
init_portfolio['ShareCount'] = 25 # Number of discrete points in the risky share approximation
init_portfolio['AdjustPrb'] = 1.0 # Probability that the agent can adjust their risky portfolio share each period
init_portfolio['DiscreteShareBool'] = False # Flag for whether to optimize risky share on a discrete grid only
# Adjust some of the existing parameters in the dictionary
init_portfolio['aXtraMax'] = 100 # Make the grid of assets go much higher...
init_portfolio['aXtraCount'] = 200 # ...and include many more gridpoints...
init_portfolio['aXtraNestFac'] = 1 # ...which aren't so clustered at the bottom
init_portfolio['BoroCnstArt'] = 0.0 # Artificial borrowing constraint must be turned on
init_portfolio['CRRA'] = 5.0 # Results are more interesting with higher risk aversion
init_portfolio['DiscFac'] = 0.90 # And also lower patience
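# A minimal usage sketch (an assumption, not part of the original module): init_portfolio
# is meant to parameterize the portfolio-choice agent class defined earlier in this file
# (referred to as PortfolioConsumerType in the error messages above). Assuming that class
# accepts the dictionary as keyword arguments, as other HARK consumer types do, a basic
# solve-and-inspect run would look roughly like this:
if __name__ == '__main__':
    ExampleAgent = PortfolioConsumerType(**init_portfolio)  # hypothetical constructor call
    ExampleAgent.solve()
    # Risky share chosen by an adjusting agent with market resources mNrm = 10 in period 0
    print(ExampleAgent.solution[0].ShareFuncAdj(10.0))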
|
import re, os
from template_lib.utils.plot_utils import MatPlot
def parse_logfile(args, myargs):
config = getattr(myargs.config, args.command)
matplot = MatPlot()
fig, ax = matplot.get_fig_and_ax()
if len(config.logfiles) == 1:
logfiles = config.logfiles * len(config.re_strs)
for logfile, re_str in zip(logfiles, config.re_strs):
RE_STR = re.compile(re_str)
_, step = matplot.parse_logfile_using_re(
            logfile=logfile, re_str=re.compile(r'Step (\d*)'))
(idx, val) = matplot.parse_logfile_using_re(logfile=logfile, re_str=RE_STR)
ax.plot(step, val, label=re_str)
ax.legend()
matplot.save_to_png(
fig, filepath=os.path.join(args.outdir, config.title + '.png'))
pass
|
import requests
from nba_api.stats.endpoints import commonplayerinfo, shotchartdetail, playerdashptshots
from nba_api.stats.static import players
import json as json
from Enums import Output
class NBAData:
def __init__(self):
self.FileMappings = {
Output.json: self.__getDataJson,
Output.csv: self.__getDataCsv
}
def getData(self, player: str, fileType: Output ):
playerData = players.find_players_by_full_name(player)
shotCharts = {}
for player in playerData:
shotCharts[player['full_name']] = playerdashptshots.PlayerDashPtShots(team_id=0,player_id=player['id'])
return self.FileMappings[fileType](shotCharts)
def __getDataJson(self, shotCharts) -> str:
return {key : value.overall.get_json() for key, value in shotCharts.items()}
def __getDataCsv(self, shotCharts) -> str:
return {key : value.overall.get_data_frame().to_csv() for key, value in shotCharts.items()}
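# A brief usage sketch (not part of the original class; the player name below is only
# illustrative, and the call hits the live NBA stats API). getData returns a dict keyed
# by player full name, with JSON strings or CSV strings depending on the Output member.
if __name__ == "__main__":
    charts = NBAData().getData("LeBron James", Output.json)
    for name in charts:
        print(name)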
|
import uuid
from rest_framework import status
from lego.apps.flatpages.models import Page
from lego.apps.users.models import AbakusGroup
from lego.apps.users.tests.utils import create_normal_user, create_user_with_permissions
from lego.utils.test_utils import BaseAPITestCase
def get_new_unique_page():
return {
"title": f"title-{str(uuid.uuid4())}",
"slug": f"slug-{str(uuid.uuid4())}",
"content": f"content-{str(uuid.uuid4())}",
}
def create_group(**kwargs):
return AbakusGroup.objects.create(name=str(uuid.uuid4()), **kwargs)
class PageAPITestCase(BaseAPITestCase):
fixtures = ["test_pages.yaml"]
def setUp(self):
self.pages = Page.objects.all().order_by("created_at")
def test_get_pages(self):
response = self.client.get("/api/v1/pages/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()["results"]), 4)
first = response.json()["results"][0]
self.assertEqual(first["title"], self.pages.first().title)
self.assertEqual(first["slug"], self.pages.first().slug)
self.assertFalse("content" in first)
def test_get_page_with_id(self):
slug = "webkom"
response = self.client.get("/api/v1/pages/{0}/".format(slug))
expected = self.pages.get(slug=slug)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["title"], expected.title)
self.assertEqual(response.json()["slug"], expected.slug)
self.assertEqual(response.json()["content"], expected.content)
def test_non_existing_retrieve(self):
response = self.client.get("/api/v1/pages/badslug/")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_unauthenticated(self):
slug = "webkom"
methods = ["post", "patch", "put", "delete"]
for method in methods:
call = getattr(self.client, method)
response = call("/api/v1/pages/{0}/".format(slug))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_unauthorized(self):
slug = "webkom"
methods = ["post", "patch", "put", "delete"]
user = create_normal_user()
self.client.force_authenticate(user)
for method in methods:
call = getattr(self.client, method)
response = call("/api/v1/pages/{0}/".format(slug))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_page(self):
page = {"title": "cat", "content": "hei"}
user = create_user_with_permissions("/sudo/admin/pages/")
self.client.force_authenticate(user)
response = self.client.post("/api/v1/pages/", data=page)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_list_with_keyword_permissions(self):
user = create_user_with_permissions("/sudo/admin/pages/list/")
self.client.force_authenticate(user)
response = self.client.get("/api/v1/pages/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()["results"]), 5)
def test_edit_with_object_permissions(self):
slug = "webkom"
page = self.pages.get(slug=slug)
user = create_normal_user()
group = create_group()
group.add_user(user)
group.save()
page.can_edit_groups.add(group)
self.client.force_authenticate(user)
response = self.client.patch(
"/api/v1/pages/{0}/".format(slug), get_new_unique_page()
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_edit_without_object_permissions(self):
slug = "webkom"
page = self.pages.get(slug=slug)
user = create_normal_user()
group = create_group()
page.can_edit_groups.add(group)
wrong_group = create_group()
wrong_group.add_user(user)
wrong_group.save()
self.client.force_authenticate(user)
response = self.client.patch(
"/api/v1/pages/{0}/".format(slug), get_new_unique_page()
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
from requests import get
from bs4 import BeautifulSoup
from datetime import datetime as dt
from random import shuffle
from pyquery import PyQuery
from pymongo import MongoClient
###Connect db
def connect_db(collection: str, db='Hyplate'):
conn = MongoClient(host = '127.0.0.1', port = 27017)
return conn[db][collection]
##########
def read_rss(url):
article_list = []
r = get(url)
soup = BeautifulSoup(r.content, features='xml')
articles = soup.findAll('item')
for a in articles:
try:
article_obj = {}
try:
article_obj['title'] = a.find('title').text
except:
article_obj['title'] = ''
try:
article_obj['link'] = a.find('link').text
except:
article_obj['link'] = ''
try:
date = a.find('pubDate').text[0:17]
article_obj['date'] = dt.strptime(date, '%a, %d %b %Y').date()
except:
article_obj['date'] = date
try:
p1 = a.find('content:encoded')
                p1 = p1.text.replace('&lt;', '<').replace('&gt;', '>').replace('<![CDATA[', '').replace(']]>', '')
p = PyQuery(p1)
                img = p('div img').attr('src') or p('img').attr('src')
article_obj['description'] = p('p').text()[0:200]+'...'
article_obj['img'] = img
except:
p1 = a.find('description').text
p1 = p1.replace('<![CDATA[', '')
article_obj['description'] = p1[0:200]+'...'
article_obj['img'] = '/static/assets/img/logos/infographics.png'
try:
article_obj['category'] =a.find('category').text
except:
article_obj['category'] = 'Web Scraping'
article_list.append(article_obj)
except:
print('Cannot Pass'+url)
pass
return article_list
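# A short usage sketch (the feed URL below is an illustrative assumption): read_rss
# returns a list of article dicts, which could then be stored via connect_db(), e.g.
# connect_db('articles').insert_many(article_list) with a local MongoDB running.
if __name__ == '__main__':
    article_list = read_rss('https://example.com/feed.xml')  # hypothetical RSS feed URL
    print('Fetched {} articles'.format(len(article_list)))
    for article in article_list:
        print(article['title'], article['link'])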
|
import pytest
from tests.fixtures import typical_request
from elasticpypi.handler import auth
def test_auth_can_decode_basic_auth_header_for_users():
expected = {
'principalId': 'elasticpypi',
'policyDocument': {
'Version': '2012-10-17',
'Statement': [
{
'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': ['arn:aws:execute-api:us-artic-1:1234567890:*/packages/*/*']
}
]
}
}
policy_document = auth(typical_request(), {})
assert policy_document == expected
policy_document = auth(typical_request(user='user2', password='blah'), {})
expected['principalId'] = "user2"
assert policy_document == expected
def test_get_username_and_password_form_environment():
from elasticpypi import config
del config.config['users']
config.config['username'] = 'user3'
config.config['password'] = 'blah'
expected = {
'principalId': 'user3',
'policyDocument': {
'Version': '2012-10-17',
'Statement': [
{
'Action': 'execute-api:Invoke',
'Effect': 'Allow',
'Resource': ['arn:aws:execute-api:us-artic-1:1234567890:*/packages/*/*']
}
]
}
}
policy_document = auth(typical_request(user='user3', password='blah'), {})
assert policy_document == expected
def test_auth_raises_401_when_comparison_fails():
with pytest.raises(Exception):
auth(typical_request(password='notCorrect'), {})
|
from M2Crypto.EVP import Cipher
from M2Crypto.EVP import pbkdf2
from M2Crypto.Rand import rand_bytes
g_encrypt = 1
g_decrypt = 0
g_salt1 = b"12345678"
g_salt2 = bytes("12345678", "utf8")
g_iv = b"0000000000000000"
def p_example1_hard_coded1(password, data):
key = pbkdf2(password, b"12345678", 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example2_hard_coded2(password, data):
key = pbkdf2(password, bytes("12345678", "utf8"), 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example3_local_variable1(password, data):
salt = b"12345678"
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example4_local_variable2(password, data):
salt = bytes("12345678", "utf8")
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example5_nested_local_variable1(password, data):
salt1 = b"12345678"
salt2 = salt1
salt3 = salt2
key = pbkdf2(password, salt3, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example6_nested_local_variable2(password, data):
salt1 = bytes("12345678", "utf8")
salt2 = salt1
salt3 = salt2
key = pbkdf2(password, salt3, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example_method_call(password, salt, data):
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example_nested_method_call(password, salt, data):
return p_example_method_call(password, salt, data)
def p_example7_direct_method_call1(password, data):
salt = b"12345678"
return p_example_method_call(password, salt, data)
def p_example8_direct_method_call2(password, data):
salt = bytes("12345678", "utf8")
return p_example_method_call(password, salt, data)
def p_example9_nested_method_call1(password, data):
salt = b"12345678"
return p_example_nested_method_call(password, salt, data)
def p_example10_nested_method_call2(password, data):
salt = bytes("12345678", "utf8")
return p_example_nested_method_call(password, salt, data)
def p_example11_direct_g_variable_access1(password, data):
key = pbkdf2(password, g_salt1, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example12_direct_g_variable_access2(password, data):
key = pbkdf2(password, g_salt2, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example13_indirect_g_variable_access1(password, data):
salt = g_salt1
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example14_indirect_g_variable_access2(password, data):
salt = g_salt2
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def p_example15_warning_parameter_not_resolvable(password, salt, data):
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
def n_example1_random_salt(password, data):
salt = rand_bytes(8)
key = pbkdf2(password, salt, 1000, 32)
cipher = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
cipher_text = cipher.update(data) + cipher.final()
return cipher_text
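# A small round-trip sketch (not part of the original example set): encrypt with a random
# salt as in n_example1 above, then decrypt with the same derived key using the g_decrypt
# mode flag defined at the top of this file, and check that the plaintext is recovered.
def n_example2_round_trip(password, data):
    salt = rand_bytes(8)
    key = pbkdf2(password, salt, 1000, 32)
    enc = Cipher("aes_256_ecb", key, g_iv, g_encrypt)
    cipher_text = enc.update(data) + enc.final()
    dec = Cipher("aes_256_ecb", key, g_iv, g_decrypt)
    plain_text = dec.update(cipher_text) + dec.final()
    return plain_text == data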
|
import google
import requests
from bs4 import BeautifulSoup
import re
TAG_RE = re.compile(r'<[^>]+>')
WordBank = open("WordBank.txt","w+")
def googleSearch(topic):
try:
from googlesearch import search
except ImportError:
print("No module named 'google' found")
query = topic
for j in search(query, tld="co.in", num=10, stop=25, pause=2):
print(j)
#make html object
r=requests.get(j)
c=r.content
#make soup object
soup=BeautifulSoup(c,"html.parser")
cleanSoup = soup
#clean the soup
for script in cleanSoup("script"):
script.extract()
for style in cleanSoup("style"):
style.extract()
for tag in cleanSoup(True):
tag.unwrap()
#almost there
cleanSoup = remove_tags(soup.prettify())
cleanSoup = cleanSoup.replace("\n","")
finalText = str(cleanSoup.encode("utf8"))
#save the soup for later
WordBank.write(finalText + "<|endoftext|>")
def remove_tags(text):
return TAG_RE.sub('', text)
topicIn = input("Pick A Topic. ")
googleSearch(topicIn)
input("enter to exit")
|
# Generated by Django 2.1.4 on 2018-12-11 15:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("api", "0026_merge_20181210_2232"),
("api", "0025_merge_20181210_2243"),
]
operations = []
|
test = { 'name': 'q2b',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> diabetes_mean.size\n10',
'hidden': False,
'locked': False},
{ 'code': '>>> '
'np.all(np.isclose(diabetes_mean, '
'[0]*10))\n'
'True',
'hidden': False,
'locked': False},
{ 'code': '>>> '
'np.all(np.isclose(np.zeros(10), '
'np.mean(normalized_features, '
'axis=0))) # make sure data is '
'centered at 0\n'
'True',
'hidden': False,
'locked': False},
{ 'code': '>>> -.003 < '
'np.sum(normalized_features[0]) '
'< 0.003 # make sure scaling '
'was done correctly\n'
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
import argparse
import sys
try:
parser = argparse.ArgumentParser()
parser.add_argument("square", help="display a square of a given number",
type=int)
args = parser.parse_args()
    # print the square of the user input from the command line
    print(args.square ** 2)
    # print all the sys arguments passed from the command line, including the program name
    print(sys.argv)
    # print the second argument passed from the command line; note that indexing starts from ZERO
    print(sys.argv[1])
except:
    # e = sys.exc_info()[0]
    # print(e)
    # Prompt the user to select an HTTP method from the following options:
    # GET
    # POST
    # PUT
    # DELETE
    # HEAD
    # PATCH
    # OPTIONS
    pass
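    # A minimal sketch (an assumption, not in the original script) of the prompt that the
    # comments above describe; the variable names here are illustrative only.
    methods = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH", "OPTIONS"]
    choice = input("Select an HTTP method " + str(methods) + ": ").strip().upper()
    if choice in methods:
        print("You selected: " + choice)
    else:
        print("Unknown method: " + choice)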
|
"""
Model Calibration Tools
Tools for displaying and recalibration model predictions.
Usage:
import mct
# get your model predictions and true class labels
preds, labels = ...
# display calibration of original model
fig, estimate, ci = mct.display_calibration(preds,
labels,
bandwidth=0.05)
plt.show(block=False)
# Recalibrate predictions
calibrator = mct.create_calibrator(estimate.orig, estimate.calibrated)
calibrated = calibrator(preds)
# Display recalibrated predictions
plt.figure()
mct.display_calibration(calibrated, labels, bandwidth=0.05)
plt.show()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KernelDensity
from collections import namedtuple
from scipy import interpolate
KdeResult = namedtuple('KdeResult',
'orig calibrated ici pos_intensity all_intensity')
"""Encapsulates the components of a KDE calibration.
Args:
orig (array-like of float in [0,1]): a grid of original probabilities, usually equally spaced over the domain.
calibrated (array-like of float in [0,1]): the calibrated probabilities
corresponding to those in `orig`
ici (float): the Integrated Calibration Index of `calibrated`, given
`all_intensity`
pos_intensity (array-like of float): The intensity of elements with positive
labels, computed at values in `orig`
all_intensity (array-like of float): The intensity of all elements, computed
at values in `orig`.
"""
def histograms(probs, actual, bins=100):
"""
Calculates two histograms over [0, 1] by partitioning `probs` with `actual`
and sorting each partition into `bins` sub-intervals.
"""
    actual = actual.astype(bool)
edges, step = np.linspace(0., 1., bins, retstep=True, endpoint=False)
idx = np.digitize(probs, edges) - 1
top = np.bincount(idx, weights=actual, minlength=bins)
bot = np.bincount(idx, weights=(~actual), minlength=bins)
return top, bot, edges, step
def _compute_intensity(x_values, probs, kernel, bandwidth, **kde_args):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, **kde_args)
kde.fit(probs.reshape(-1, 1))
log_density = kde.score_samples(x_values.reshape(-1, 1))
# We want the area under `intensity` to be the number of samples
intensity = np.exp(log_density) * len(probs)
return intensity
def _compute_single_calibration(x_values, probs, actual, kernel, bandwidth,
**kde_args):
positives = probs[actual == 1]
pos_intensity = _compute_intensity(x_values, positives, kernel, bandwidth)
all_intensity = _compute_intensity(x_values, probs, kernel, bandwidth,
**kde_args)
calibrated = pos_intensity / all_intensity
ici = compute_ici(x_values, calibrated, all_intensity)
return KdeResult(orig=x_values,
calibrated=calibrated,
ici=ici,
pos_intensity=pos_intensity,
all_intensity=all_intensity)
def _resample_calibration(num_iterations, x_values, probs, actual, kernel,
bandwidth, **kde_args):
calibrated = []
ici = []
pos_intensity = []
all_intensity = []
for _ in range(num_iterations):
indices = np.random.randint(probs.size, size=probs.size)
samp_probs = probs[indices]
samp_actual = actual[indices]
cal = _compute_single_calibration(x_values, samp_probs, samp_actual,
kernel, bandwidth, **kde_args)
calibrated.append(cal.calibrated)
ici.append(cal.ici)
pos_intensity.append(cal.pos_intensity)
all_intensity.append(cal.all_intensity)
return KdeResult(orig=x_values,
calibrated=np.vstack(calibrated),
ici=ici,
pos_intensity=np.vstack(pos_intensity),
all_intensity=np.vstack(all_intensity))
def create_calibrator(orig, calibrated):
"""Create a function to calibrate new predictions.
The calibration function is a linear interpolation of `calibrated` vs `orig`.
Points outside the range of `orig` are interpolated as if (0,0) and (1,1)
are included points.
Args:
orig (array-like of float): Original model predictions in [0,1]
calibrated ([type]): Calibrated versions in [0,1] of `orig`.
Returns:
f(x) -> calibrated_x: a function returning calibrated versions
`calibrated_x` of inputs `x`, where x is array-like of float
in [0,1].
"""
if orig[0] > 0:
orig = np.insert(orig, 0, 0)
calibrated = np.insert(calibrated, 0, 0)
if orig[-1] < 1:
orig = np.append(orig, 1.0)
calibrated = np.append(calibrated, 1.0)
return interpolate.interp1d(orig, calibrated, 'linear', bounds_error=True)
def compute_kde_calibration(probs,
actual,
resolution=0.01,
kernel='gaussian',
n_resamples=None,
bandwidth=0.1,
alpha=None,
**kde_args):
"""Generate a calibration curve using kernel density estimation.
The curve is generated by computing the intensity (= probability density *
number of samples) of the positive-labeled instances, and dividing that by
the intensity of all instances.
Uses bootstrap resampling to estimate the confidence intervals, if
requested.
Args:
probs (array-like of float in [0,1]): model predicted probability for
each instance.
actual (array-like of int in {0,1}): class label for each instance.
resolution (float, optional): Desired curve grid resolution.
Defaults to 0.01.
kernel (str, optional): Any valid kernel name for
sklearn.neighbors.KernelDensity. Defaults to 'gaussian'.
n_resamples (int, optional): Number of iterations of bootstrap
resampling for computing confidence intervals. If None (default),
a value is chosen such that the CIs are reasonably repeatable.
Ignored if alpha=None.
bandwidth (float, optional): Desired kernel bandwidth. Defaults to 0.1.
alpha (float, optional): Desired significance level for the confidence
intervals. Defaults to None.
**kde_args: Additional args for sklearn.neighbors.KernelDensity.
Returns:
A tuple containing:
a KdeResult of the best estimates
a KdeResult of the confidence intervals
"""
x_min = max((0, np.amin(probs) - resolution))
x_max = min((1, np.amax(probs) + resolution))
x_values = np.arange(x_min, x_max, step=resolution)
estimate = _compute_single_calibration(x_values,
probs,
actual,
kernel=kernel,
bandwidth=bandwidth,
**kde_args)
calibration_ci = None
ici_ci = None
pos_ci = None
all_ci = None
if alpha is not None:
if n_resamples is None:
# Choose a number of iterations such that there are about 50 points outside each end of the confidence interval.
n_resamples = int(100 / alpha)
samples = _resample_calibration(n_resamples, x_values, probs, actual,
kernel, bandwidth, **kde_args)
calibration_ci = np.quantile(samples.calibrated,
(alpha / 2, 1 - alpha / 2),
axis=0)
ici_ci = np.quantile(samples.ici, (alpha / 2, 1 - alpha / 2))
pos_ci = np.quantile(samples.pos_intensity, (alpha / 2, 1 - alpha / 2),
axis=0)
all_ci = np.quantile(samples.all_intensity, (alpha / 2, 1 - alpha / 2),
axis=0)
ci = KdeResult(orig=x_values,
calibrated=calibration_ci,
ici=ici_ci,
pos_intensity=pos_ci,
all_intensity=all_ci)
return (estimate, ci)
def compute_ici(orig, calibrated, all_intensity):
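    """Integrated Calibration Index: the intensity-weighted mean absolute difference
    between the calibrated and original probabilities, using `all_intensity` as weights.
    """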
ici = (np.sum(all_intensity * np.abs(calibrated - orig)) /
np.sum(all_intensity))
return ici
def plot_histograms(top, bot, edges, resolution, *, ax=None):
"""
Plots the two histograms generated by ``histograms``; the histogram of
actual negatives is plotted underneath the x axis while the histogram of
actual positives is plotted above.
"""
if ax is None:
ax = plt.gca()
ax.hlines(y=0,
xmin=0,
xmax=1,
linestyle='dashed',
color='black',
alpha=0.2)
ax.bar(edges, top, width=resolution)
ax.bar(edges, -bot, width=resolution)
# Set some sensible defaults - these can be overridden after the fact,
# since we return the axes object
ax.set_xlim((-0.05, 1.05))
ax.set_xlabel('Predicted Probability')
height = max(abs(x) for x in ax.get_ylim())
ax.set_ylim((-height, height))
ax.set_ylabel('Count')
return ax
def plot_calibration_curve(orig,
calibrated,
calibrated_ci=None,
ici=None,
ici_ci=None,
pos_intensity=None,
all_intensity=None,
*,
label=None,
ax=None):
"""
Plots a calibration curve.
"""
plot_intensities = pos_intensity is not None and all_intensity is not None
if ax is None:
ax = plt.gca()
ax.set_aspect('equal')
limits = (-0.05, 1.05)
ax.set_ylim(limits)
ax.set_xlim(limits)
ici_ci_label = ('' if ici_ci is None else
f' (ICI [{ici_ci[0]:0.3f}, {ici_ci[1]:0.3f}])')
ici_label = '' if ici is None else f' (ICI {ici:0.3f})'
ax.plot((0, 1), (0, 1), 'black', linewidth=0.2)
ax.plot(orig, calibrated, label=f'Estimated Calibration{ici_label}')
if calibrated_ci is not None:
ax.fill_between(orig,
calibrated_ci[0],
calibrated_ci[1],
color='C0',
alpha=0.3,
edgecolor='C0',
label=f'Confidence Interval{ici_ci_label}')
if plot_intensities:
# We normalize the intensities to a max of 1, so they can plot on the same y axis as the calibration curve.
pos_intensity /= all_intensity.max()
all_intensity /= all_intensity.max()
ax.plot(orig,
pos_intensity,
color='C1',
alpha=0.4,
label='Positive Intensity')
ax.plot(orig,
all_intensity,
color='C2',
alpha=0.4,
label='All Intensity')
ax.legend(loc='best')
ax.set_xlabel('Predicted Probability')
ax.set_ylabel('Actual Probability')
if label is not None:
ax.set_title(f'{label}')
return ax
def display_calibration(probs,
actual,
*,
figure=None,
bins=100,
label=None,
show_ici=True,
alpha=0.05,
n_resamples=None,
kernel='gaussian',
bandwidth=0.1,
plot_intensities=False):
"""Generates a calibration display.
The display contains by default a calibration curve with confidence intervals, an
estimate of the Integrated Calibration Index (ICI), and a histogram of
the positive and negative values.
Args:
See `compute_kde_calibration` for `probs`, `actual`, `kernel`,
`alpha`, `n_resamples`, `bandwidth`, and `plot_intensities`.
Args specific to this function are:
figure (Matplotlib figure, optional): Figure to use for plotting.
If None (default) a new figure is created.
bins (int, optional): Number of bins for value histograms. Defaults to 100.
label (string, optional): Legend label for calibration curve.
Defaults to None.
show_ici (bool, optional): If true (default), the ICI value is stated
in the legend.
Returns:
(figure, KdeResult, KdeResult): A tuple of the figure object, the
KDE estimate for the calibration curve, and the KDE estimate
for the confidence intervals.
"""
resolution = 1.0 / bins
if figure is None:
figure = plt.gcf()
ax1, ax2 = figure.subplots(
nrows=2,
ncols=1,
sharex=True,
gridspec_kw=dict(height_ratios=(3, 1)),
)
estimate, ci = compute_kde_calibration(probs,
actual,
resolution=resolution,
kernel=kernel,
bandwidth=bandwidth,
alpha=alpha)
ax1 = plot_calibration_curve(
orig=estimate.orig,
calibrated=estimate.calibrated,
calibrated_ci=ci.calibrated,
ici=estimate.ici if show_ici else None,
ici_ci=ci.ici if show_ici else None,
pos_intensity=estimate.pos_intensity if plot_intensities else None,
all_intensity=estimate.all_intensity if plot_intensities else None,
label=label,
ax=ax1)
ax1.set_xlabel('')
ax2 = plot_histograms(*histograms(probs, actual, bins=bins), ax=ax2)
ax2.set_box_aspect(1. / 3.)
ax1.xaxis.set_ticks_position('none')
figure.tight_layout()
return figure, estimate, ci
|
from binary_euler_tour import *
class BinaryLayout(BinaryEulerTour):
"""Class for computing (x,y) coordinates for each node of a binary tree."""
def __init__(self,tree):
super().__init__(tree) # must call the parent constructor
self._count = 0 # initialize count of processed nodes
def _hook_invisit(self,p,d,path):
p.element().setX(self._count) # x-coordinate serialized by count
        p.element().setY(d)                       # y-coordinate is depth
        self._count += 1                          # advance count of processed nodes
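# A brief usage note (an assumption about the imported module, which is not shown here):
# BinaryEulerTour subclasses in the textbook interface are run by calling execute() on an
# instance, after which each element of the tree carries its (x, y) layout coordinates.
#   layout = BinaryLayout(tree)   # tree elements are assumed to expose setX()/setY()
#   layout.execute()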
|
### means I need to add it to my python environment
#Indicate operating environment and import core modules
import os
location_input = input("what computer are you on? a = Ben's laptop, b = gpucluster, c = Ben's desktop, d = other")
location_dict = {'a': "C:\\Users\\BMH_work\\", 'b': "/home/heineike/",
'c': "C:\\Users\\Ben\\Documents\\", 'd':'you need to add your location to the location_dict'}
figsave_dict = {'a': "C:\\Users\\BMH_work\\Google Drive\\UCSF\\ElSamad_Lab\\PKA\\Manuscript\\" ,
'b': "/home/heineike/scratch/",
'c': "C:\\Users\\Ben\\Google Drive\\UCSF\\ElSamad_Lab\\PKA\\Manuscript\\",
'd': 'you need to add your location to the figsave dict'}
figsave_dir = figsave_dict[location_input]
home_dir = location_dict[location_input]
print("home directory is " + home_dir)
base_dir = home_dir + os.path.normpath('github/y1000plus_tools') + os.sep
print("y1000plus_tools dir is " + base_dir )
y1000plus_dir_options = {'a': base_dir + os.path.normpath('genomes/y1000plus') + os.sep,
'b':home_dir + os.path.normpath("genomes/y1000plus") + os.sep,
'c': home_dir + os.path.normpath('github/yeast_esr_expression_analysis/expression_data/promoter_phylogenies/y1000plus') + os.sep
}
y1000plus_dir = y1000plus_dir_options[location_input]
print("y1000plus data dir is " + y1000plus_dir)
import sys
if not(base_dir in sys.path):
sys.path.append(base_dir)
print("Added " + base_dir + " to path" )
yeast_esr_exp_path = home_dir + os.path.normpath('github/yeast_esr_expression_analysis') + os.sep
#io_library_path_core = io_library_path + 'core' + os.sep
if not(yeast_esr_exp_path in sys.path):
sys.path.append(yeast_esr_exp_path)
print("Added " + yeast_esr_exp_path + " to path" )
print("Importing y1000plus_tools.py")
import y1000plus_tools
y1000plus_tools.home_dir = home_dir
y1000plus_tools.base_dir = base_dir
y1000plus_tools.y1000plus_dir = y1000plus_dir
y1000plus_tools.yeast_esr_exp.base_dir = yeast_esr_exp_path
y1000plus_tools.yeast_esr_exp.data_processing_dir = yeast_esr_exp_path + os.path.normpath('expression_data') + os.sep
# Since y1000plus_tools loads io_library, just use those functions
# if not(io_library_path_core in sys.path):
# sys.path.append(io_library_path_core)
# print("Added " + io_library_path_core + " to path" )
print("importing yeast_esr_exp")
print(sys.path)
import yeast_esr_exp
yeast_esr_exp.base_dir = yeast_esr_exp_path
yeast_esr_exp.data_processing_dir = yeast_esr_exp_path + os.path.normpath('expression_data') + os.sep
print('sys.path : \n')
print(sys.path)
import copy
import shutil
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from matplotlib.gridspec import GridSpec
import seaborn as sns
## Add to std library
import pickle
import subprocess
from collections import Counter, OrderedDict
from itertools import chain
import scipy.spatial.distance as spd
#import statsmodels.graphics.gofplots as stats_graph
import scipy.cluster.hierarchy as sch
from statsmodels.distributions.empirical_distribution import ECDF
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna, IUPAC
from Bio import SeqIO
from Bio import pairwise2
from Bio import motifs
from Bio import AlignIO
from Bio import Align
import gffutils
from ete3 import Tree, SeqMotifFace, TreeStyle, add_face_to_node, RectFace, NodeStyle, TextFace, AttrFace
#ete3 is not officially supported on windows, and so must be loaded via pip:
# pip install -U https://github.com/etetoolkit/ete/archive/qt5.zip
# ref: https://groups.google.com/forum/#!topic/etetoolkit/6NblSBPij4o
#20181031: got this error message: twisted 18.7.0 requires PyHamcrest>=1.9.0, which is not installed.
# In order to view ete3 created trees on the gpucluster, you need to use a virtual X server:
### from pyvirtualdisplay import Display
### display = Display(visible=False, size=(1024, 768), color_depth=24)
### display.start()
#for scraping internet data (e.g. ncbi, YGOB)
import requests
from bs4 import BeautifulSoup
#from lxml import etree #parses xml output
|
"""
A baseline model
"""
from torch import Tensor
import torch
def buy_and_hold(price_diff: Tensor) -> list:
    """
    Performs the buy-and-hold strategy using each day's price difference.
    The daily price differences are accumulated (cumulative sum), so each element of
    the result is the total gain or loss held at that point in time, starting from 0
    before any trading day.
    Args:
        :param price_diff: The price difference for each day: the first element is
            the second day's price minus the first day's, the second element is the
            third day's minus the second day's, and so forth.
    """
    return [0] + torch.cumsum(price_diff, dim=0).view(-1).tolist()
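# A quick illustrative check (not part of the original module): with daily price
# differences [1.0, -0.5, 2.0], the accumulated holdings are [0, 1.0, 0.5, 2.5].
#   buy_and_hold(torch.tensor([1.0, -0.5, 2.0]))  ->  [0, 1.0, 0.5, 2.5]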
|
from typing import List, Optional, Dict, Callable
from mypy.types import (
Type, AnyType, UnboundType, TypeVisitor, ErrorType, FormalArgument, Void, NoneTyp,
Instance, TypeVarType, CallableType, TupleType, TypedDictType, UnionType, Overloaded,
ErasedType, TypeList, PartialType, DeletedType, UninhabitedType, TypeType, is_named_instance
)
import mypy.applytype
import mypy.constraints
# Circular import; done in the function instead.
# import mypy.solve
from mypy import messages, sametypes
from mypy.nodes import (
CONTRAVARIANT, COVARIANT,
ARG_POS, ARG_OPT, ARG_NAMED, ARG_NAMED_OPT, ARG_STAR, ARG_STAR2,
)
from mypy.maptype import map_instance_to_supertype
from mypy import experiments
TypeParameterChecker = Callable[[Type, Type, int], bool]
def check_type_parameter(lefta: Type, righta: Type, variance: int) -> bool:
if variance == COVARIANT:
return is_subtype(lefta, righta, check_type_parameter)
elif variance == CONTRAVARIANT:
return is_subtype(righta, lefta, check_type_parameter)
else:
return is_equivalent(lefta, righta, check_type_parameter)
def is_subtype(left: Type, right: Type,
type_parameter_checker: TypeParameterChecker = check_type_parameter,
*, ignore_pos_arg_names: bool = False) -> bool:
"""Is 'left' subtype of 'right'?
Also consider Any to be a subtype of any type, and vice versa. This
recursively applies to components of composite types (List[int] is subtype
of List[Any], for example).
type_parameter_checker is used to check the type parameters (for example,
A with B in is_subtype(C[A], C[B]). The default checks for subtype relation
between the type arguments (e.g., A and B), taking the variance of the
type var into account.
"""
if (isinstance(right, AnyType) or isinstance(right, UnboundType)
or isinstance(right, ErasedType)):
return True
elif isinstance(right, UnionType) and not isinstance(left, UnionType):
return any(is_subtype(left, item, type_parameter_checker,
ignore_pos_arg_names=ignore_pos_arg_names)
for item in right.items)
else:
return left.accept(SubtypeVisitor(right, type_parameter_checker,
ignore_pos_arg_names=ignore_pos_arg_names))
def is_subtype_ignoring_tvars(left: Type, right: Type) -> bool:
def ignore_tvars(s: Type, t: Type, v: int) -> bool:
return True
return is_subtype(left, right, ignore_tvars)
def is_equivalent(a: Type,
b: Type,
type_parameter_checker: TypeParameterChecker = check_type_parameter,
*,
ignore_pos_arg_names: bool = False
) -> bool:
return (
is_subtype(a, b, type_parameter_checker, ignore_pos_arg_names=ignore_pos_arg_names)
and is_subtype(b, a, type_parameter_checker, ignore_pos_arg_names=ignore_pos_arg_names))
def satisfies_upper_bound(a: Type, upper_bound: Type) -> bool:
"""Is 'a' valid value for a type variable with the given 'upper_bound'?
Same as is_subtype except that Void is considered to be a subtype of
any upper_bound. This is needed in a case like
def f(g: Callable[[], T]) -> T: ...
def h() -> None: ...
f(h)
"""
return isinstance(a, Void) or is_subtype(a, upper_bound)
class SubtypeVisitor(TypeVisitor[bool]):
def __init__(self, right: Type,
type_parameter_checker: TypeParameterChecker,
*, ignore_pos_arg_names: bool = False) -> None:
self.right = right
self.check_type_parameter = type_parameter_checker
self.ignore_pos_arg_names = ignore_pos_arg_names
# visit_x(left) means: is left (which is an instance of X) a subtype of
# right?
def visit_unbound_type(self, left: UnboundType) -> bool:
return True
def visit_error_type(self, left: ErrorType) -> bool:
return False
def visit_type_list(self, t: TypeList) -> bool:
assert False, 'Not supported'
def visit_any(self, left: AnyType) -> bool:
return True
def visit_void(self, left: Void) -> bool:
return isinstance(self.right, Void)
def visit_none_type(self, left: NoneTyp) -> bool:
if experiments.STRICT_OPTIONAL:
return (isinstance(self.right, NoneTyp) or
is_named_instance(self.right, 'builtins.object'))
else:
return not isinstance(self.right, Void)
def visit_uninhabited_type(self, left: UninhabitedType) -> bool:
return not isinstance(self.right, Void)
def visit_erased_type(self, left: ErasedType) -> bool:
return True
def visit_deleted_type(self, left: DeletedType) -> bool:
return True
def visit_instance(self, left: Instance) -> bool:
if left.type.fallback_to_any:
return True
right = self.right
if isinstance(right, TupleType) and right.fallback.type.is_enum:
return is_subtype(left, right.fallback)
if isinstance(right, Instance):
if left.type._promote and is_subtype(
left.type._promote, self.right, self.check_type_parameter,
ignore_pos_arg_names=self.ignore_pos_arg_names):
return True
rname = right.type.fullname()
if not left.type.has_base(rname) and rname != 'builtins.object':
return False
# Map left type to corresponding right instances.
t = map_instance_to_supertype(left, right.type)
return all(self.check_type_parameter(lefta, righta, tvar.variance)
for lefta, righta, tvar in
zip(t.args, right.args, right.type.defn.type_vars))
else:
return False
def visit_type_var(self, left: TypeVarType) -> bool:
right = self.right
if isinstance(right, TypeVarType) and left.id == right.id:
return True
return is_subtype(left.upper_bound, self.right)
def visit_callable_type(self, left: CallableType) -> bool:
right = self.right
if isinstance(right, CallableType):
return is_callable_subtype(
left, right,
ignore_pos_arg_names=self.ignore_pos_arg_names)
elif isinstance(right, Overloaded):
return all(is_subtype(left, item, self.check_type_parameter,
ignore_pos_arg_names=self.ignore_pos_arg_names)
for item in right.items())
elif isinstance(right, Instance):
return is_subtype(left.fallback, right,
ignore_pos_arg_names=self.ignore_pos_arg_names)
elif isinstance(right, TypeType):
# This is unsound, we don't check the __init__ signature.
return left.is_type_obj() and is_subtype(left.ret_type, right.item)
else:
return False
def visit_tuple_type(self, left: TupleType) -> bool:
right = self.right
if isinstance(right, Instance):
if is_named_instance(right, 'typing.Sized'):
return True
elif (is_named_instance(right, 'builtins.tuple') or
is_named_instance(right, 'typing.Iterable') or
is_named_instance(right, 'typing.Container') or
is_named_instance(right, 'typing.Sequence') or
is_named_instance(right, 'typing.Reversible')):
if right.args:
iter_type = right.args[0]
else:
iter_type = AnyType()
return all(is_subtype(li, iter_type) for li in left.items)
elif is_subtype(left.fallback, right, self.check_type_parameter):
return True
return False
elif isinstance(right, TupleType):
if len(left.items) != len(right.items):
return False
for l, r in zip(left.items, right.items):
if not is_subtype(l, r, self.check_type_parameter):
return False
if not is_subtype(left.fallback, right.fallback, self.check_type_parameter):
return False
return True
else:
return False
def visit_typeddict_type(self, left: TypedDictType) -> bool:
right = self.right
if isinstance(right, Instance):
return is_subtype(left.fallback, right, self.check_type_parameter)
elif isinstance(right, TypedDictType):
if not left.names_are_wider_than(right):
return False
for (_, l, r) in left.zip(right):
if not is_equivalent(l, r, self.check_type_parameter):
return False
# (NOTE: Fallbacks don't matter.)
return True
else:
return False
def visit_overloaded(self, left: Overloaded) -> bool:
right = self.right
if isinstance(right, Instance):
return is_subtype(left.fallback, right)
elif isinstance(right, CallableType) or is_named_instance(
right, 'builtins.type'):
for item in left.items():
if is_subtype(item, right, self.check_type_parameter,
ignore_pos_arg_names=self.ignore_pos_arg_names):
return True
return False
elif isinstance(right, Overloaded):
# TODO: this may be too restrictive
if len(left.items()) != len(right.items()):
return False
for i in range(len(left.items())):
if not is_subtype(left.items()[i], right.items()[i], self.check_type_parameter,
ignore_pos_arg_names=self.ignore_pos_arg_names):
return False
return True
elif isinstance(right, UnboundType):
return True
elif isinstance(right, TypeType):
# All the items must have the same type object status, so
# it's sufficient to query only (any) one of them.
# This is unsound, we don't check the __init__ signature.
return left.is_type_obj() and is_subtype(left.items()[0].ret_type, right.item)
else:
return False
def visit_union_type(self, left: UnionType) -> bool:
return all(is_subtype(item, self.right, self.check_type_parameter)
for item in left.items)
def visit_partial_type(self, left: PartialType) -> bool:
# This is indeterminate as we don't really know the complete type yet.
raise RuntimeError
def visit_type_type(self, left: TypeType) -> bool:
right = self.right
if isinstance(right, TypeType):
return is_subtype(left.item, right.item)
if isinstance(right, CallableType):
# This is unsound, we don't check the __init__ signature.
return right.is_type_obj() and is_subtype(left.item, right.ret_type)
if (isinstance(right, Instance) and
right.type.fullname() in ('builtins.type', 'builtins.object')):
# Treat builtins.type the same as Type[Any];
# treat builtins.object the same as Any.
return True
return False
def is_callable_subtype(left: CallableType, right: CallableType,
ignore_return: bool = False,
ignore_pos_arg_names: bool = False) -> bool:
"""Is left a subtype of right?"""
# If either function is implicitly typed, ignore positional arg names too
if left.implicit or right.implicit:
ignore_pos_arg_names = True
# Non-type cannot be a subtype of type.
if right.is_type_obj() and not left.is_type_obj():
return False
# A callable L is a subtype of a generic callable R if L is a
# subtype of every type obtained from R by substituting types for
# the variables of R. We can check this by simply leaving the
# generic variables of R as type variables, effectively varying
# over all possible values.
# It's okay even if these variables share ids with generic
# type variables of L, because generating and solving
# constraints for the variables of L to make L a subtype of R
# (below) treats type variables on the two sides as independent.
if left.variables:
# Apply generic type variables away in left via type inference.
left = unify_generic_callable(left, right, ignore_return=ignore_return)
if left is None:
return False
# Check return types.
if not ignore_return and not is_subtype(left.ret_type, right.ret_type):
return False
if right.is_ellipsis_args:
return True
right_star_type = None # type: Optional[Type]
right_star2_type = None # type: Optional[Type]
# Match up corresponding arguments and check them for compatibility. In
# every pair (argL, argR) of corresponding arguments from L and R, argL must
# be "more general" than argR if L is to be a subtype of R.
# Arguments are corresponding if they either share a name, share a position,
# or both. If L's corresponding argument is ambiguous, L is not a subtype of
# R.
# If left has one corresponding argument by name and another by position,
# consider them to be one "merged" argument (and not ambiguous) if they're
# both optional, they're name-only and position-only respectively, and they
# have the same type. This rule allows functions with (*args, **kwargs) to
# properly stand in for the full domain of formal arguments that they're
# used for in practice.
# Every argument in R must have a corresponding argument in L, and every
# required argument in L must have a corresponding argument in R.
done_with_positional = False
for i in range(len(right.arg_types)):
right_kind = right.arg_kinds[i]
if right_kind in (ARG_STAR, ARG_STAR2, ARG_NAMED, ARG_NAMED_OPT):
done_with_positional = True
right_required = right_kind in (ARG_POS, ARG_NAMED)
right_pos = None if done_with_positional else i
right_arg = FormalArgument(
right.arg_names[i],
right_pos,
right.arg_types[i],
right_required)
if right_kind == ARG_STAR:
right_star_type = right_arg.typ
# Right has an infinite series of optional positional arguments
# here. Get all further positional arguments of left, and make sure
# they're more general than their corresponding member in this
            # series. Also make sure left has its own infinite series of
# optional positional arguments.
if not left.is_var_arg:
return False
j = i
while j < len(left.arg_kinds) and left.arg_kinds[j] in (ARG_POS, ARG_OPT):
left_by_position = left.argument_by_position(j)
assert left_by_position is not None
# This fetches the synthetic argument that's from the *args
right_by_position = right.argument_by_position(j)
assert right_by_position is not None
if not are_args_compatible(left_by_position, right_by_position,
ignore_pos_arg_names):
return False
j += 1
continue
if right_kind == ARG_STAR2:
right_star2_type = right_arg.typ
# Right has an infinite set of optional named arguments here. Get
# all further named arguments of left and make sure they're more
# general than their corresponding member in this set. Also make
# sure left has its own infinite set of optional named arguments.
if not left.is_kw_arg:
return False
left_names = {name for name in left.arg_names if name is not None}
right_names = {name for name in right.arg_names if name is not None}
left_only_names = left_names - right_names
for name in left_only_names:
left_by_name = left.argument_by_name(name)
assert left_by_name is not None
# This fetches the synthetic argument that's from the **kwargs
right_by_name = right.argument_by_name(name)
assert right_by_name is not None
if not are_args_compatible(left_by_name, right_by_name,
ignore_pos_arg_names):
return False
continue
# Left must have some kind of corresponding argument.
left_arg = left.corresponding_argument(right_arg)
if left_arg is None:
return False
if not are_args_compatible(left_arg, right_arg, ignore_pos_arg_names):
return False
done_with_positional = False
for i in range(len(left.arg_types)):
left_kind = left.arg_kinds[i]
if left_kind in (ARG_STAR, ARG_STAR2, ARG_NAMED, ARG_NAMED_OPT):
done_with_positional = True
left_arg = FormalArgument(
left.arg_names[i],
None if done_with_positional else i,
left.arg_types[i],
left_kind in (ARG_POS, ARG_NAMED))
# Check that *args and **kwargs types match in this loop
if left_kind == ARG_STAR:
if right_star_type is not None and not is_subtype(right_star_type, left_arg.typ):
return False
continue
elif left_kind == ARG_STAR2:
if right_star2_type is not None and not is_subtype(right_star2_type, left_arg.typ):
return False
continue
right_by_name = (right.argument_by_name(left_arg.name)
if left_arg.name is not None
else None)
right_by_pos = (right.argument_by_position(left_arg.pos)
if left_arg.pos is not None
else None)
# If the left hand argument corresponds to two right-hand arguments,
# neither of them can be required.
if (right_by_name is not None
and right_by_pos is not None
and right_by_name != right_by_pos
and (right_by_pos.required or right_by_name.required)):
return False
# All *required* left-hand arguments must have a corresponding
        # right-hand argument. For optional args it does not matter.
if left_arg.required and right_by_pos is None and right_by_name is None:
return False
return True
def are_args_compatible(
left: FormalArgument,
right: FormalArgument,
ignore_pos_arg_names: bool) -> bool:
# If right has a specific name it wants this argument to be, left must
# have the same.
if right.name is not None and left.name != right.name:
# But pay attention to whether we're ignoring positional arg names
if not ignore_pos_arg_names or right.pos is None:
return False
# If right is at a specific position, left must have the same:
if right.pos is not None and left.pos != right.pos:
return False
# Left must have a more general type
if not is_subtype(right.typ, left.typ):
return False
# If right's argument is optional, left's must also be.
if not right.required and left.required:
return False
return True
def unify_generic_callable(type: CallableType, target: CallableType,
                           ignore_return: bool) -> Optional[CallableType]:
"""Try to unify a generic callable type with another callable type.
Return unified CallableType if successful; otherwise, return None.
"""
import mypy.solve
constraints = [] # type: List[mypy.constraints.Constraint]
for arg_type, target_arg_type in zip(type.arg_types, target.arg_types):
c = mypy.constraints.infer_constraints(
arg_type, target_arg_type, mypy.constraints.SUPERTYPE_OF)
constraints.extend(c)
if not ignore_return:
c = mypy.constraints.infer_constraints(
type.ret_type, target.ret_type, mypy.constraints.SUBTYPE_OF)
constraints.extend(c)
type_var_ids = [tvar.id for tvar in type.variables]
inferred_vars = mypy.solve.solve_constraints(type_var_ids, constraints)
if None in inferred_vars:
return None
msg = messages.temp_message_builder()
applied = mypy.applytype.apply_generic_arguments(type, inferred_vars, msg, context=target)
if msg.is_errors():
return None
return applied
def restrict_subtype_away(t: Type, s: Type) -> Type:
"""Return a supertype of (t intersect not s)
    Currently this just removes elements of a union type.
"""
if isinstance(t, UnionType):
new_items = [item for item in t.items if (not is_subtype(item, s)
or isinstance(item, AnyType))]
return UnionType.make_union(new_items)
else:
return t
def is_proper_subtype(t: Type, s: Type) -> bool:
"""Check if t is a proper subtype of s?
For proper subtypes, there's no need to rely on compatibility due to
Any types. Any instance type t is also a proper subtype of t.
"""
# FIX tuple types
if isinstance(t, Instance):
if isinstance(s, Instance):
if not t.type.has_base(s.type.fullname()):
return False
            def check_argument(left: Type, right: Type, variance: int) -> bool:
                if variance == COVARIANT:
                    return is_proper_subtype(left, right)
                elif variance == CONTRAVARIANT:
                    return is_proper_subtype(right, left)
                else:
                    return sametypes.is_same_type(left, right)
            # Map left type to corresponding right instances.
            t = map_instance_to_supertype(t, s.type)
            return all(check_argument(ta, ra, tvar.variance) for ta, ra, tvar in
                       zip(t.args, s.args, s.type.defn.type_vars))
        return False
else:
return sametypes.is_same_type(t, s)
def is_more_precise(t: Type, s: Type) -> bool:
"""Check if t is a more precise type than s.
    If t is a proper subtype of s, t is also more precise than s. Also, if
s is Any, t is more precise than s for any t. Finally, if t is the same
type as s, t is more precise than s.
"""
# TODO Should List[int] be more precise than List[Any]?
if isinstance(s, AnyType):
return True
if isinstance(s, Instance):
if isinstance(t, CallableType):
# Fall back to subclass check and ignore other properties of the callable.
return is_proper_subtype(t.fallback, s)
return is_proper_subtype(t, s)
return sametypes.is_same_type(t, s)
|
import os
import sys
# As this plugin is typically only sym-linked into a gerrit checkout and both os.getcwd and
# os.path.abspath follow symbolic links, they would not allow us to find the gerrit root
# directory. So we have to resort to the PWD environment variable to find the place we're
# symlinked to.
#
# We append __file__ to avoid requiring that it be run from a well-known directory.
ABS_FILE_PARTS = os.path.join(os.getenv('PWD'), __file__).split(os.sep)
PLUGIN_NAME = ABS_FILE_PARTS[-3]
GERRIT_ROOT = os.sep.join(ABS_FILE_PARTS[:-4])
sys.path = [os.sep.join([GERRIT_ROOT, 'tools'])] + sys.path
from workspace_status_release import revision
def get_plugin_revision(name):
os.chdir(os.path.join(GERRIT_ROOT, 'plugins', name))
    ret = revision(GERRIT_VERSION)
return ret
os.chdir(GERRIT_ROOT)
GERRIT_VERSION = revision()
ITS_BASE_VERSION = get_plugin_revision('its-base')
PLUGIN_RAW_VERSION = get_plugin_revision(PLUGIN_NAME)
PLUGIN_FULL_VERSION = "%s(its-base:%s)" % (PLUGIN_RAW_VERSION, ITS_BASE_VERSION)
print("STABLE_BUILD_%s_LABEL %s" % (PLUGIN_NAME.upper(), PLUGIN_FULL_VERSION))
|
import asyncio
import youtube_dl
from googleapiclient.discovery import build
from settings import YOUTUBE_API_KEY
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ytdl = youtube_dl.YoutubeDL(params=ytdl_format_options)
async def youtube_search(query: str):
loop = asyncio.get_event_loop()
youtube = await loop.run_in_executor(None, lambda: build('youtube', 'v3', developerKey=YOUTUBE_API_KEY))
search_response = await loop.run_in_executor(None, lambda: youtube.search().list(
q=query,
part='id,snippet',
maxResults=1,
type='video'
).execute())
for search_result in search_response.get('items', []):
return search_result['id']['videoId']
async def youtube_playlist(query: str):
playlist_id = query.split("list=")[1]
loop = asyncio.get_event_loop()
youtube = await loop.run_in_executor(None, lambda: build('youtube', 'v3', developerKey=YOUTUBE_API_KEY))
response = await loop.run_in_executor(None, lambda: youtube.playlistItems().list(
part="snippet",
playlistId=playlist_id,
maxResults=50
).execute())
result = []
for item in response["items"]:
result.append((item["snippet"]["resourceId"]["videoId"], item['snippet']['title']))
return result
class Video:
def __init__(self, url: str = None, title: str = None, video_id: str = None):
self.url = url
self.title = title
self.video_id = video_id
async def get_music_info(self):
if self.url is not None:
loop = asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url=self.url, download=False))
return data
elif self.title is not None:
vid_id = await youtube_search(self.title)
if not vid_id:
return
loop = asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(
url="https://www.youtube.com/watch?v=" + vid_id, download=False))
return data
elif self.video_id is not None:
loop = asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(
url="https://www.youtube.com/watch?v=" + self.video_id, download=False))
return data
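if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: it assumes a valid
    # YOUTUBE_API_KEY in settings.py and network access, resolves a track by title
    # and prints a couple of fields from the youtube_dl info dict.
    async def _demo():
        info = await Video(title="never gonna give you up").get_music_info()
        if info:
            print(info.get("title"), info.get("webpage_url"))
    asyncio.get_event_loop().run_until_complete(_demo())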
|
import logging
import os
from dotenv import load_dotenv
from telegram import Update, ForceReply, Bot
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
)
from tg_log_handler import TelegramLogsHandler
from dialog_flow import detect_intent_texts
logger = logging.getLogger(__name__)
def start(update: Update, context: CallbackContext):
user = update.effective_user
update.message.reply_markdown_v2(
fr"ะะดัะฐะฒััะฒัะนัะต {user.mention_markdown_v2()}\!",
reply_markup=ForceReply(selective=True),
)
def responds_to_messages(update: Update, context: CallbackContext):
dialogflow_query_result = detect_intent_texts(
context.bot_data["project_id"],
context.bot_data["sesion_id"],
update.message.text,
)
update.message.reply_text(dialogflow_query_result.fulfillment_text)
def main():
load_dotenv()
project_id = os.getenv("PROJECT_ID")
    session_id = os.getenv("SESSION_ID")
telegram_token = os.getenv("TELEGRAM_TOKEN")
chat_id = os.getenv("CHAT_ID")
bot_data = {
"project_id": project_id,
"sesion_id": sesion_id,
}
bot = Bot(token=telegram_token)
logging.basicConfig(format="%(levelname)s %(message)s")
logger.setLevel(logging.DEBUG)
logger.addHandler(TelegramLogsHandler(bot, chat_id))
logger.info("Telegram ะฑะพั ะทะฐะฟััะตะฝ!")
try:
updater = Updater(telegram_token)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(
MessageHandler(
Filters.text & ~Filters.command, responds_to_messages
)
)
dispatcher.bot_data = bot_data
updater.start_polling()
updater.idle()
except Exception as err:
logger.exception(f"Telegram ะฑะพั ัะฟะฐะป ั ะพัะธะฑะบะพะน: {err}")
if __name__ == "__main__":
main()
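# Example .env file read by load_dotenv() above (all values are placeholders):
#   PROJECT_ID=my-dialogflow-project
#   SESSION_ID=any-unique-session-string
#   TELEGRAM_TOKEN=123456:ABC-DEF_telegram_bot_token
#   CHAT_ID=123456789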
|
class MinStack:
def __init__(self):
self.data = []
self.datamin = []
def push(self, x):
self.data.append(x)
if not self.datamin or x <= self.datamin[-1]:
self.datamin.append(x)
def pop(self):
r = self.data.pop()
if self.datamin and self.datamin[-1] == r:
self.datamin.pop()
def top(self):
return self.data[-1]
def getMin(self):
return self.datamin[-1]
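if __name__ == "__main__":
    # Quick demonstration sketch, not part of the original snippet: datamin mirrors
    # the running minimum, so getMin() stays O(1) as elements are pushed and popped.
    s = MinStack()
    for value in (5, 3, 7, 3):
        s.push(value)
    print(s.getMin())  # 3
    s.pop()            # removes the last 3; the earlier 3 keeps the minimum at 3
    s.pop()            # removes 7
    s.pop()            # removes the remaining 3
    print(s.getMin())  # 5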
|
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/denoiser.ipynb
from sklearn.decomposition import TruncatedSVD
def denoise(data):
svd = TruncatedSVD(n_components=1, n_iter=7, random_state=0)
svd.fit(data)
pc = svd.components_
data -= data.dot(pc.T) * pc
return data
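if __name__ == "__main__":
    # Illustrative sketch, not generated from the notebook: project the first
    # principal component out of a small random matrix. Assumes numpy is available
    # alongside scikit-learn.
    import numpy as np
    rng = np.random.RandomState(0)
    vectors = rng.rand(10, 5)
    cleaned = denoise(vectors)
    print(cleaned.shape)  # (10, 5)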
|
import cPickle as pickle
import logging
from twisted.internet import defer, reactor
from jasmin.protocols.smpp.configs import SMPPClientConfig, UnknownValue
from jasmin.protocols.cli.managers import PersistableManager, Session
from jasmin.vendor.smpp.pdu.constants import addr_ton_name_map, addr_ton_value_map
from jasmin.vendor.smpp.pdu.constants import addr_npi_name_map, addr_npi_value_map
from jasmin.vendor.smpp.pdu.constants import replace_if_present_flap_name_map, replace_if_present_flap_value_map
from jasmin.vendor.smpp.pdu.constants import priority_flag_name_map, priority_flag_value_map
from jasmin.protocols.cli.protocol import str2num
# A config map between console-configuration keys and SMPPClientConfig keys.
SMPPClientConfigKeyMap = {
'cid': 'id', 'host': 'host', 'port': 'port', 'username': 'username', 'logrotate': 'log_rotate',
'password': 'password', 'systype': 'systemType', 'logfile': 'log_file', 'loglevel': 'log_level',
'bind_to': 'sessionInitTimerSecs', 'elink_interval': 'enquireLinkTimerSecs',
'res_to': 'responseTimerSecs', 'con_loss_retry': 'reconnectOnConnectionLoss', 'bind_npi': 'addressNpi',
'con_loss_delay': 'reconnectOnConnectionLossDelay', 'con_fail_delay': 'reconnectOnConnectionFailureDelay',
'pdu_red_to': 'pduReadTimerSecs', 'bind': 'bindOperation', 'bind_ton': 'addressTon',
'src_ton': 'source_addr_ton', 'src_npi': 'source_addr_npi', 'dst_ton': 'dest_addr_ton',
'addr_range': 'addressRange', 'src_addr': 'source_addr', 'proto_id': 'protocol_id',
'priority': 'priority_flag', 'validity': 'validity_period', 'ripf': 'replace_if_present_flag',
'def_msg_id': 'sm_default_msg_id', 'coding': 'data_coding', 'requeue_delay': 'requeue_delay',
'submit_throughput': 'submit_sm_throughput', 'dlr_expiry': 'dlr_expiry', 'dlr_msgid': 'dlr_msg_id_bases',
'con_fail_retry': 'reconnectOnConnectionFailure', 'dst_npi': 'dest_addr_npi',
'trx_to': 'inactivityTimerSecs', 'ssl': 'useSSL'}
# Keys to be kept in string type, as requested in #64 and #105
SMPPClientConfigStringKeys = [
'host', 'systemType', 'username', 'password', 'addressRange', 'useSSL']
# When updating a key from RequireRestartKeys, the connector needs a restart for the update to take effect
RequireRestartKeys = ['host', 'port', 'username', 'password', 'systemType']
def castOutputToBuiltInType(key, value):
'Will cast value to the correct type depending on the key'
if isinstance(value, bool):
return 'yes' if value else 'no'
if key in ['bind_npi', 'dst_npi', 'src_npi']:
return addr_npi_name_map[str(value)]
if key in ['bind_ton', 'dst_ton', 'src_ton']:
return addr_ton_name_map[str(value)]
if key == 'ripf':
return replace_if_present_flap_name_map[str(value)]
if key == 'priority':
return priority_flag_name_map[str(value)]
else:
return value
def castInputToBuiltInType(key, value):
'Will cast value to the correct type depending on the key'
try:
if key in ['bind_npi', 'dst_npi', 'src_npi']:
return addr_npi_value_map[value]
elif key in ['bind_ton', 'dst_ton', 'src_ton']:
return addr_ton_value_map[value]
elif key == 'ripf':
return replace_if_present_flap_value_map[value]
elif key == 'priority':
return priority_flag_value_map[value]
elif key in ['con_fail_retry', 'con_loss_retry', 'ssl']:
if value == 'yes':
return True
elif value == 'no':
return False
else:
raise KeyError('Boolean value must be expressed by yes or no.')
elif (key == 'loglevel' and
value not in [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]):
raise KeyError('loglevel must be numeric value of 10, 20, 30, 40 or 50.')
        elif isinstance(value, str) and value.lower() == 'none':
value = None
except KeyError:
raise UnknownValue('Unknown value for key %s: %s' % (key, value))
return value
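# For example, with the mappings above: castInputToBuiltInType('ssl', 'yes') returns True,
# castInputToBuiltInType('ssl', 'no') returns False, and any other value for a boolean key
# raises UnknownValue.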
class JCliSMPPClientConfig(SMPPClientConfig):
'Overload SMPPClientConfig with getters and setters for JCli'
PendingRestart = False
def set(self, key, value):
setattr(self, key, value)
if key in RequireRestartKeys:
self.PendingRestart = True
def getAll(self):
r = {}
for key, value in SMPPClientConfigKeyMap.iteritems():
if hasattr(self, value):
r[key] = castOutputToBuiltInType(key, getattr(self, value))
else:
# Related to #192
r[key] = 'Unknown (object is from an old Jasmin release !)'
return r
def SMPPClientConfigBuild(fCallback):
'Parse args and try to build a JCliSMPPClientConfig instance to pass it to fCallback'
def parse_args_and_call_with_instance(self, *args, **kwargs):
cmd = args[0]
arg = args[1]
# Empty line
if cmd is None:
return self.protocol.sendData()
# Initiate JCliSMPPClientConfig with sessBuffer content
if cmd == 'ok':
if len(self.sessBuffer) == 0:
return self.protocol.sendData('You must set at least connector id (cid) before saving !')
connector = {}
for key, value in self.sessBuffer.iteritems():
connector[key] = value
try:
SMPPClientConfigInstance = JCliSMPPClientConfig(**connector)
# Hand the instance to fCallback
return fCallback(self, SMPPClientConfigInstance)
except Exception, e:
return self.protocol.sendData('Error: %s' % str(e))
else:
# Unknown key
if cmd not in SMPPClientConfigKeyMap:
return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)
try:
# Buffer key for later SMPPClientConfig initiating
SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
if isinstance(arg, str) and SMPPClientConfigKey not in SMPPClientConfigStringKeys:
self.sessBuffer[SMPPClientConfigKey] = castInputToBuiltInType(cmd, str2num(arg))
else:
self.sessBuffer[SMPPClientConfigKey] = castInputToBuiltInType(cmd, arg)
except Exception, e:
return self.protocol.sendData('Error: %s' % str(e))
return self.protocol.sendData()
return parse_args_and_call_with_instance
def SMPPClientConfigUpdate(fCallback):
'''Get connector configuration and log update requests passing to fCallback
The log will be handed to fCallback when 'ok' is received'''
def log_update_requests_and_call(self, *args, **kwargs):
cmd = args[0]
arg = args[1]
# Empty line
if cmd is None:
return self.protocol.sendData()
# Pass sessBuffer as updateLog to fCallback
if cmd == 'ok':
if len(self.sessBuffer) == 0:
return self.protocol.sendData('Nothing to save')
try:
                # Initiate a volatile SMPPClientConfig instance to run through its constructor
# validation steps, this will raise an exception whenever an error is detected
configArgs = self.sessBuffer
configArgs['id'] = self.sessionContext['cid']
SMPPClientConfig(**configArgs)
except Exception, e:
return self.protocol.sendData('Error: %s' % str(e))
return fCallback(self, self.sessBuffer)
else:
# Unknown key
if cmd not in SMPPClientConfigKeyMap:
return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)
if cmd == 'cid':
return self.protocol.sendData('Connector id can not be modified !')
try:
# Buffer key for later (when receiving 'ok')
SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
if isinstance(arg, str) and SMPPClientConfigKey not in SMPPClientConfigStringKeys:
self.sessBuffer[SMPPClientConfigKey] = castInputToBuiltInType(cmd, str2num(arg))
else:
self.sessBuffer[SMPPClientConfigKey] = castInputToBuiltInType(cmd, arg)
except Exception, e:
return self.protocol.sendData('Error: %s' % str(e))
return self.protocol.sendData()
return log_update_requests_and_call
class ConnectorExist(object):
    'Check if connector cid exists before passing it to fCallback'
def __init__(self, cid_key):
self.cid_key = cid_key
def __call__(self, fCallback):
cid_key = self.cid_key
def exist_connector_and_call(self, *args, **kwargs):
opts = args[1]
cid = getattr(opts, cid_key)
if self.pb['smppcm'].getConnector(cid) is not None:
return fCallback(self, *args, **kwargs)
return self.protocol.sendData('Unknown connector: %s' % cid)
return exist_connector_and_call
class SmppCCManager(PersistableManager):
"SMPP Client Connector manager logics"
managerName = 'smppcc'
def persist(self, arg, opts):
if self.pb['smppcm'].perspective_persist(opts.profile):
self.protocol.sendData(
'%s configuration persisted (profile:%s)' % (self.managerName, opts.profile), prompt=False)
else:
self.protocol.sendData(
'Failed to persist %s configuration (profile:%s)' % (
self.managerName, opts.profile), prompt=False)
@defer.inlineCallbacks
def load(self, arg, opts):
r = yield self.pb['smppcm'].perspective_load(opts.profile)
if r:
self.protocol.sendData(
'%s configuration loaded (profile:%s)' % (self.managerName, opts.profile), prompt=False)
else:
self.protocol.sendData(
'Failed to load %s configuration (profile:%s)' % (
self.managerName, opts.profile), prompt=False)
def list(self, arg, opts):
connectors = self.pb['smppcm'].perspective_connector_list()
counter = 0
if (len(connectors)) > 0:
self.protocol.sendData("#%s %s %s %s %s" % (
'Connector id'.ljust(35),
'Service'.ljust(7),
'Session'.ljust(16),
'Starts'.ljust(6),
'Stops'.ljust(5)), prompt=False)
for connector in connectors:
counter += 1
self.protocol.sendData("#%s %s %s %s %s" % (
str(connector['id']).ljust(35),
str('started' if connector['service_status'] == 1 else 'stopped').ljust(7),
str(connector['session_state']).ljust(16),
str(connector['start_count']).ljust(6),
str(connector['stop_count']).ljust(5),
), prompt=False)
self.protocol.sendData(prompt=False)
self.protocol.sendData('Total connectors: %s' % counter)
@Session
@SMPPClientConfigBuild
@defer.inlineCallbacks
def add_session(self, SMPPClientConfigInstance):
st = yield self.pb['smppcm'].perspective_connector_add(
pickle.dumps(SMPPClientConfigInstance, pickle.HIGHEST_PROTOCOL))
if st:
self.protocol.sendData(
'Successfully added connector [%s]' % SMPPClientConfigInstance.id, prompt=False)
self.stopSession()
else:
self.protocol.sendData('Failed adding connector, check log for details')
def add(self, arg, opts):
return self.startSession(self.add_session,
annoucement='Adding a new connector: (ok: save, ko: exit)',
completitions=SMPPClientConfigKeyMap.keys())
@Session
@SMPPClientConfigUpdate
@defer.inlineCallbacks
def update_session(self, updateLog):
connector = self.pb['smppcm'].getConnector(self.sessionContext['cid'])
connectorDetails = self.pb['smppcm'].getConnectorDetails(self.sessionContext['cid'])
for key, value in updateLog.iteritems():
connector['config'].set(key, value)
if connector['config'].PendingRestart and connectorDetails['service_status'] == 1:
self.protocol.sendData(
'Restarting connector [%s] for updates to take effect ...' % self.sessionContext['cid'],
prompt=False)
st = yield self.pb['smppcm'].perspective_connector_stop(self.sessionContext['cid'])
if not st:
self.protocol.sendData('Failed stopping connector, check log for details', prompt=False)
else:
st = yield self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])
if not st:
self.protocol.sendData(
'Failed starting connector, will retry in 5 seconds', prompt=False)
# Wait before start retrial
exitDeferred = defer.Deferred()
reactor.callLater(5, exitDeferred.callback, None)
yield exitDeferred
st = yield self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])
if not st:
self.protocol.sendData('Permanently failed starting connector !', prompt=False)
self.protocol.sendData(
'Successfully updated connector [%s]' % self.sessionContext['cid'], prompt=False)
self.stopSession()
@ConnectorExist(cid_key='update')
def update(self, arg, opts):
return self.startSession(
self.update_session,
annoucement='Updating connector id [%s]: (ok: save, ko: exit)' % opts.update,
completitions=SMPPClientConfigKeyMap.keys(),
sessionContext={'cid': opts.update})
@ConnectorExist(cid_key='remove')
@defer.inlineCallbacks
def remove(self, arg, opts):
st = yield self.pb['smppcm'].perspective_connector_remove(opts.remove)
if st:
self.protocol.sendData('Successfully removed connector id:%s' % opts.remove)
else:
self.protocol.sendData('Failed removing connector, check log for details')
@ConnectorExist(cid_key='show')
def show(self, arg, opts):
connector = self.pb['smppcm'].getConnector(opts.show)
for k, v in connector['config'].getAll().iteritems():
self.protocol.sendData('%s %s' % (k, v), prompt=False)
self.protocol.sendData()
@ConnectorExist(cid_key='stop')
@defer.inlineCallbacks
def stop(self, arg, opts):
st = yield self.pb['smppcm'].perspective_connector_stop(opts.stop)
if st:
self.protocol.sendData('Successfully stopped connector id:%s' % opts.stop)
else:
self.protocol.sendData('Failed stopping connector, check log for details')
@ConnectorExist(cid_key='start')
@defer.inlineCallbacks
def start(self, arg, opts):
st = yield self.pb['smppcm'].perspective_connector_start(opts.start)
if st:
self.protocol.sendData('Successfully started connector id:%s' % opts.start)
else:
self.protocol.sendData('Failed starting connector, check log for details')
|
# Copyright 2014 Ahmed El-Hassany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import unittest
from pox.lib.addresses import EthAddr
from pox.lib.util import TimeoutError
import sts.replay_event
from sts.replay_event import AddIntent
from sts.replay_event import RemoveIntent
from sts.replay_event import CheckInvariants
from sts.replay_event import ControllerFailure
from sts.replay_event import ControllerRecovery
from sts.replay_event import LinkFailure
from sts.replay_event import LinkRecovery
from sts.replay_event import SwitchFailure
from sts.replay_event import SwitchRecovery
from sts.replay_event import NOPInput
from sts.replay_event import InvariantViolation
class ConnectToControllersTest(unittest.TestCase):
def test_proceed(self):
pass
class CheckInvariantsTest(unittest.TestCase):
def test_proceed(self):
# Arrange
name = 'mock_invariant'
check = mock.Mock(name='InvarCheck')
check.return_value = []
sts.replay_event.name_to_invariant_check = {name: check}
label = 'e1'
logical_round = 1
event_time = [1, 1]
        simulation = mock.Mock(name='Simulation')
simulation.fail_to_interactive = False
simulation.fail_to_interactive_on_persistent_violations = False
simulation.violation_tracker.persistent_violations = []
# Act
event = CheckInvariants(invariant_check_name=name, label=label,
logical_round=logical_round, event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
def test_to_json(self):
# Arrange
name = 'mock_invariant'
check = mock.Mock(name='InvarCheck')
check.return_value = []
sts.replay_event.name_to_invariant_check = {name: check}
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(invariant_name=name, invariant_check=None,
invariant_check_name=name,
legacy_invariant_check=False,
label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint="N/A")
expected['class'] = "CheckInvariants"
# Act
event = CheckInvariants(invariant_check_name=name, label=label,
logical_round=logical_round, event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
name = 'mock_invariant'
check = mock.Mock(name='InvarCheck')
check.return_value = []
sts.replay_event.name_to_invariant_check = {name: check}
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(invariant_name=name, invariant_check=None,
invariant_check_name=name,
legacy_invariant_check=False,
label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint="N/A")
json_dict['class'] = "CheckInvariants"
expected_event = CheckInvariants(invariant_check_name=name, label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = CheckInvariants.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class SwitchFailureTest(unittest.TestCase):
def test_proceed(self):
# Arrange
dpid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
switch = mock.Mock(name='Switch')
simulation.topology.switches_manager.get_switch_dpid.return_value = switch
# Act
event = SwitchFailure(dpid=dpid, label=label, logical_round=logical_round,
event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
sw_mgm = simulation.topology.switches_manager
sw_mgm.crash_switch.assert_called_once_with(switch)
def test_to_json(self):
# Arrange
dpid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(label=label, dpid=dpid, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["SwitchFailure", 1])
expected['class'] = "SwitchFailure"
# Act
event = SwitchFailure(dpid=dpid, label=label, logical_round=logical_round,
event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
dpid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(label=label, dpid=dpid, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["SwitchFailure", 1])
json_dict['class'] = "SwitchFailure"
expected_event = SwitchFailure(dpid=dpid, label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = SwitchFailure.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class SwitchRecoveryTest(unittest.TestCase):
def test_proceed(self):
# Arrange
dpid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
switch = mock.Mock(name='Switch')
sw_mgm = simulation.topology.switches_manager
sw_mgm.get_switch_dpid.return_value = switch
        def raise_error(x):
            raise TimeoutError()
# Act
event = SwitchRecovery(dpid=dpid, label=label, logical_round=logical_round,
event_time=event_time)
event2 = SwitchRecovery(dpid=dpid, label='e2', logical_round=logical_round,
event_time=event_time)
ret_val = event.proceed(simulation)
# Test timeouts
sw_mgm.recover_switch.side_effect = raise_error
timeout_event = event2.proceed(simulation)
# Assert
self.assertTrue(ret_val)
self.assertFalse(timeout_event)
sw_mgm.recover_switch.assert_called_with(switch)
def test_to_json(self):
# Arrange
dpid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(label=label, dpid=dpid, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["SwitchRecovery", 1])
expected['class'] = "SwitchRecovery"
# Act
event = SwitchRecovery(dpid=dpid, label=label, logical_round=logical_round,
event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
dpid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(label=label, dpid=dpid, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["SwitchRecovery", 1])
json_dict['class'] = "SwitchRecovery"
expected_event = SwitchRecovery(dpid=dpid, label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = SwitchRecovery.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class LinkFailureTest(unittest.TestCase):
def test_proceed(self):
# Arrange
start_dpid, end_dpid = 1, 2
start_port_no, end_port_no = 10, 20
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
sw1 = mock.Mock(name='Switch1')
sw2 = mock.Mock(name='Switch2')
sw1.ports = {start_port_no: mock.Mock(name='s1-1')}
sw2.ports = {end_port_no: mock.Mock(name='s2-1')}
link = mock.Mock(name='Link')
        def get_sw(dpid):
            return sw1 if dpid == start_dpid else sw2
simulation.topology.switches_manager.get_switch_dpid.side_effect = get_sw
simulation.topology.patch_panel.query_network_links.return_value = [link]
# Act
event = LinkFailure(start_dpid=start_dpid, start_port_no=start_port_no,
end_dpid=end_dpid, end_port_no=end_port_no, label=label,
logical_round=logical_round, event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
patch_panel = simulation.topology.patch_panel
patch_panel.sever_network_link.assert_called_once_with(link)
def test_to_json(self):
# Arrange
start_dpid, end_dpid = 1, 2
start_port_no, end_port_no = 10, 20
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(label=label, start_dpid=start_dpid,
start_port_no=start_port_no, end_dpid=end_dpid,
end_port_no=end_port_no, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["LinkFailure", start_dpid, start_port_no,
end_dpid, end_port_no])
expected['class'] = "LinkFailure"
# Act
event = LinkFailure(start_dpid=start_dpid, start_port_no=start_port_no,
end_dpid=end_dpid, end_port_no=end_port_no, label=label,
logical_round=logical_round, event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
start_dpid, end_dpid = 1, 2
start_port_no, end_port_no = 10, 20
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(label=label, start_dpid=start_dpid,
start_port_no=start_port_no, end_dpid=end_dpid,
end_port_no=end_port_no, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["LinkFailure", start_dpid, start_port_no,
end_dpid, end_port_no])
json_dict['class'] = "LinkFailure"
expected_event = LinkFailure(start_dpid=start_dpid,
start_port_no=start_port_no,
end_dpid=end_dpid,
end_port_no=end_port_no,
label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = LinkFailure.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class LinkRecoveryTest(unittest.TestCase):
def test_proceed(self):
# Arrange
start_dpid, end_dpid = 1, 2
start_port_no, end_port_no = 10, 20
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
sw1 = mock.Mock(name='Switch1')
sw2 = mock.Mock(name='Switch2')
sw1.ports = {start_port_no: mock.Mock(name='s1-1')}
sw2.ports = {end_port_no: mock.Mock(name='s2-1')}
link = mock.Mock(name='Link')
        def get_sw(dpid):
            return sw1 if dpid == start_dpid else sw2
simulation.topology.switches_manager.get_switch_dpid.side_effect = get_sw
simulation.topology.patch_panel.query_network_links.return_value = [link]
# Act
event = LinkRecovery(start_dpid=start_dpid, start_port_no=start_port_no,
end_dpid=end_dpid, end_port_no=end_port_no, label=label,
logical_round=logical_round, event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
patch_panel = simulation.topology.patch_panel
patch_panel.repair_network_link.assert_called_once_with(link)
def test_to_json(self):
# Arrange
start_dpid, end_dpid = 1, 2
start_port_no, end_port_no = 10, 20
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(label=label, start_dpid=start_dpid,
start_port_no=start_port_no, end_dpid=end_dpid,
end_port_no=end_port_no, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["LinkRecovery", start_dpid, start_port_no,
end_dpid, end_port_no])
expected['class'] = "LinkRecovery"
# Act
event = LinkRecovery(start_dpid=start_dpid, start_port_no=start_port_no,
end_dpid=end_dpid, end_port_no=end_port_no, label=label,
logical_round=logical_round, event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
start_dpid, end_dpid = 1, 2
start_port_no, end_port_no = 10, 20
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(label=label, start_dpid=start_dpid,
start_port_no=start_port_no, end_dpid=end_dpid,
end_port_no=end_port_no, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["LinkRecovery", start_dpid, start_port_no,
end_dpid, end_port_no])
json_dict['class'] = "LinkRecovery"
expected_event = LinkRecovery(start_dpid=start_dpid,
start_port_no=start_port_no,
end_dpid=end_dpid,
end_port_no=end_port_no,
label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = LinkRecovery.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class ControllerFailureTest(unittest.TestCase):
def test_proceed(self):
# Arrange
cid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
c1 = mock.Mock(name='Controller')
simulation.topology.controllers_manager.get_controller.return_value = c1
# Act
event = ControllerFailure(controller_id=cid, label=label,
logical_round=logical_round,
event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
c_mgm = simulation.topology.controllers_manager
c_mgm.crash_controller.assert_called_once_with(c1)
def test_to_json(self):
# Arrange
cid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(controller_id=cid, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["ControllerFailure", cid])
expected['class'] = "ControllerFailure"
# Act
event = ControllerFailure(controller_id=cid, label=label,
logical_round=logical_round,
event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
cid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(controller_id=cid, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["ControllerFailure", cid])
json_dict['class'] = "ControllerFailure"
expected_event = ControllerFailure(controller_id=cid, label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = ControllerFailure.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class ControllerRecoveryTest(unittest.TestCase):
def test_proceed(self):
# Arrange
cid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
c1 = mock.Mock(name='Controller')
simulation.topology.controllers_manager.get_controller.return_value = c1
# Act
event = ControllerRecovery(controller_id=cid, label=label,
logical_round=logical_round,
event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
c_mgm = simulation.topology.controllers_manager
c_mgm.recover_controller.assert_called_once_with(c1)
def test_to_json(self):
# Arrange
cid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(controller_id=cid, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["ControllerRecovery", cid])
expected['class'] = "ControllerRecovery"
# Act
event = ControllerRecovery(controller_id=cid, label=label,
logical_round=logical_round,
event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
cid = 1
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(controller_id=cid, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["ControllerRecovery", cid])
json_dict['class'] = "ControllerRecovery"
expected_event = ControllerRecovery(controller_id=cid, label=label,
logical_round=logical_round,
event_time=event_time)
# Act
event = ControllerRecovery.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class NOPEventTest(unittest.TestCase):
def test_proceed(self):
# Arrange
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
# Act
event = NOPInput(label=label, logical_round=logical_round,
event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
def test_to_json(self):
# Arrange
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["NOPInput"])
expected['class'] = "NOPInput"
# Act
event = NOPInput(label=label, logical_round=logical_round,
event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False,
fingerprint=["NOPInput"])
json_dict['class'] = "NOPInput"
expected_event = NOPInput(label=label, logical_round=logical_round,
event_time=event_time)
# Act
event = NOPInput.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class InvariantViolationTest(unittest.TestCase):
def test_proceed(self):
# Arrange
violations = ["Mock Violation"]
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
# Act
event = InvariantViolation(violations=violations, label=label,
logical_round=logical_round,
event_time=event_time, persistent=False)
ret_val = lambda: event.proceed(simulation)
invalid_violation = lambda: InvariantViolation(violations=[])
str_violations = InvariantViolation(violations=violations[0])
# Assert
self.assertRaises(ValueError, invalid_violation)
self.assertRaises(RuntimeError, ret_val)
self.assertFalse(event.persistent)
self.assertEquals(event.violations, violations)
self.assertEquals(str_violations.violations, violations)
def test_to_json(self):
# Arrange
violations = ["Mock Violation"]
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(violations=violations, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False, persistent=False,
fingerprint=["InvariantViolation"])
expected['class'] = "InvariantViolation"
# Act
event = InvariantViolation(violations=violations, label=label,
logical_round=logical_round,
event_time=event_time, persistent=False)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
violations = ["Mock Violation"]
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(violations=violations, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False, persistent=False,
fingerprint=["InvariantViolation"])
json_dict['class'] = "NOPInput"
expected_event = InvariantViolation(violations=violations, label=label,
logical_round=logical_round,
event_time=event_time, persistent=False)
# Act
event = InvariantViolation.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
class AddIntentEventTest(unittest.TestCase):
def test_proceed(self):
# Arrange
cid = 1
intent_id = 100
src_dpid, dst_dpid = 201, 202
src_port, dst_port = 301, 302
src_mac = EthAddr('00:00:00:00:00:01')
dst_mac = EthAddr('00:00:00:00:00:02')
static_path = ''
intent_type = 'SHORTEST_PATH'
intent_ip, intent_port, intent_url = '127.0.0.1', 8080, 'wm/intents'
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
c1 = mock.Mock(name='Controller')
c1.add_intent.return_value = True
simulation.topology.controllers_manager.get_controller.return_value = c1
h1 = mock.Mock(name='h1')
h1_eth1 = mock.Mock(name='h1-eth1')
h1_eth1.hw_addr = src_mac
h1.interfaces = [h1_eth1]
h2 = mock.Mock(name='h2')
h2_eth1 = mock.Mock(name='h2-eth1')
h2_eth1.hw_addr = dst_mac
h2.interfaces = [h2_eth1]
simulation.topology.hosts_manager.hosts = [h1, h2]
# Act
event = AddIntent(cid=cid, intent_id=intent_id, src_dpid=src_dpid,
dst_dpid=dst_dpid, src_port=src_port,
dst_port=dst_port, src_mac=src_mac, dst_mac=dst_mac,
static_path=static_path, intent_type=intent_type,
intent_ip=intent_ip, intent_port=intent_port,
intent_url=intent_url, label=label,
logical_round=logical_round, event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
track = simulation.topology.connectivity_tracker
track.add_connected_hosts.assert_called_once_with(h1, h1_eth1, h2, h2_eth1,
intent_id)
def test_to_json(self):
# Arrange
cid = 1
intent_id = 100
src_dpid, dst_dpid = 201, 202
src_port, dst_port = 301, 302
src_mac = str(EthAddr('00:00:00:00:00:01'))
dst_mac = str(EthAddr('00:00:00:00:00:02'))
static_path = ''
intent_type = 'SHORTEST_PATH'
intent_ip, intent_port, intent_url = '127.0.0.1', 8080, 'wm/intents'
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(cid=cid, intent_id=intent_id, src_dpid=src_dpid,
dst_dpid=dst_dpid, src_port=src_port,
dst_port=dst_port, src_mac=src_mac, dst_mac=dst_mac,
static_path=static_path, intent_type=intent_type,
intent_ip=intent_ip, intent_port=intent_port,
intent_url=intent_url, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False, request_type='AddIntent',
fingerprint=["AddIntent", cid, intent_id, src_dpid,
dst_dpid, src_port, dst_port, src_mac, dst_mac,
static_path, intent_type, intent_ip,
intent_port, intent_url])
expected['class'] = "AddIntent"
# Act
event = AddIntent(cid=cid, intent_id=intent_id, src_dpid=src_dpid,
dst_dpid=dst_dpid, src_port=src_port,
dst_port=dst_port, src_mac=src_mac, dst_mac=dst_mac,
static_path=static_path, intent_type=intent_type,
intent_ip=intent_ip, intent_port=intent_port,
intent_url=intent_url, label=label,
logical_round=logical_round, event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
cid = 1
intent_id = 100
src_dpid, dst_dpid = 201, 202
src_port, dst_port = 301, 302
src_mac = str(EthAddr('00:00:00:00:00:01'))
dst_mac = str(EthAddr('00:00:00:00:00:02'))
static_path = ''
intent_type = 'SHORTEST_PATH'
intent_ip, intent_port, intent_url = '127.0.0.1', 8080, 'wm/intents'
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(cid=cid, intent_id=intent_id, src_dpid=src_dpid,
dst_dpid=dst_dpid, src_port=src_port,
dst_port=dst_port, src_mac=src_mac, dst_mac=dst_mac,
static_path=static_path, intent_type=intent_type,
intent_ip=intent_ip, intent_port=intent_port,
intent_url=intent_url, label=label, event_time=event_time,
logical_round=logical_round, dependent_labels=[],
prunable=True, timed_out=False, request_type='AddIntent',
fingerprint=["AddIntent", cid, intent_id, src_dpid,
dst_dpid, src_port, dst_port, src_mac,
dst_mac, static_path, intent_type, intent_ip,
intent_port, intent_url])
json_dict['class'] = "AddIntent"
expected_event = AddIntent(cid=cid, intent_id=intent_id, src_dpid=src_dpid,
dst_dpid=dst_dpid, src_port=src_port,
dst_port=dst_port, src_mac=src_mac,
dst_mac=dst_mac, static_path=static_path,
intent_type=intent_type, intent_ip=intent_ip,
intent_port=intent_port, intent_url=intent_url,
label=label, logical_round=logical_round,
event_time=event_time)
# Act
event = AddIntent.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
self.assertEquals(expected_event.src_mac, src_mac)
class RemoveIntentEventTest(unittest.TestCase):
def test_proceed(self):
# Arrange
cid = 1
intent_id = 100
intent_ip, intent_port, intent_url = '127.0.0.1', 8080, 'wm/intents'
label = 'e1'
logical_round = 1
event_time = [1, 1]
simulation = mock.Mock(name='Simulation')
c1 = mock.Mock(name='Controller')
c1.remove_intent.return_value = True
simulation.topology.controllers_manager.get_controller.return_value = c1
# Act
event = RemoveIntent(cid=cid, intent_id=intent_id, intent_ip=intent_ip,
intent_port=intent_port, intent_url=intent_url,
label=label, logical_round=logical_round,
event_time=event_time)
ret_val = event.proceed(simulation)
# Assert
self.assertTrue(ret_val)
track = simulation.topology.connectivity_tracker
track.remove_policy.assert_called_once_with(intent_id)
def test_to_json(self):
# Arrange
cid = 1
intent_id = 100
request_type = 'RemoveIntent'
intent_ip, intent_port, intent_url = '127.0.0.1', 8080, 'wm/intents'
label = 'e1'
logical_round = 1
event_time = [1, 1]
expected = dict(cid=cid, intent_id=intent_id, intent_ip=intent_ip,
intent_port=intent_port, intent_url=intent_url, label=label,
event_time=event_time, logical_round=logical_round,
dependent_labels=[], prunable=True, timed_out=False,
request_type=request_type,
fingerprint=['RemoveIntent', cid, intent_id, intent_ip,
intent_port, intent_url])
expected['class'] = "RemoveIntent"
# Act
event = RemoveIntent(cid=cid, intent_id=intent_id, intent_ip=intent_ip,
intent_port=intent_port, intent_url=intent_url,
label=label, logical_round=logical_round,
event_time=event_time)
json_dump = event.to_json()
# Assert
self.assertEquals(expected, json.loads(json_dump))
def test_from_json(self):
# Arrange
cid = 1
intent_id = 100
request_type = 'RemoveIntent'
intent_ip, intent_port, intent_url = '127.0.0.1', 8080, 'wm/intents'
label = 'e1'
logical_round = 1
event_time = [1, 1]
json_dict = dict(cid=cid, intent_id=intent_id, intent_ip=intent_ip,
intent_port=intent_port, intent_url=intent_url, label=label,
event_time=event_time, logical_round=logical_round,
dependent_labels=[], prunable=True, timed_out=False,
request_type=request_type,
fingerprint=['RemoveIntent', cid, intent_id, intent_ip,
intent_port, intent_url])
json_dict['class'] = "RemoveIntent"
expected_event = RemoveIntent(cid=cid, intent_id=intent_id, intent_ip=intent_ip,
intent_port=intent_port, intent_url=intent_url,
label=label, logical_round=logical_round,
event_time=event_time)
# Act
event = RemoveIntent.from_json(json_dict)
# Assert
self.assertEquals(expected_event, event)
|
# -*- coding: utf-8 -*-
import sys
import hmac
import hashlib
import base64
import time
import json
import uuid
import requests
def is_python3():
if sys.version > '3':
return True
return False
_PROTOCOL = "https://"
_HOST = "tts.cloud.tencent.com"
_PATH = "/stream"
_ACTION = "TextToStreamAudio"
class SpeechSynthesisListener:
    '''
    response:
        Every callback receives a response dict containing a session_id field.
        on_message and on_complete also carry a data field.
        on_fail carries Code and Message fields.
        Field       Type    Description
        session_id  String  id of this request
        data        String  audio data
        Code        String  error code
        Message     String  error message
    '''
def on_message(self, response):
pass
def on_complete(self, response):
pass
def on_fail(self, response):
pass
class SpeechSynthesizer:
def __init__(self, appid, credential, voice_type, listener):
self.appid = appid
self.credential = credential
self.voice_type = voice_type
self.codec = "pcm"
self.sample_rate = 16000
self.volume = 0
self.speed = 0
self.listener = listener
def set_voice_type(self, voice_type):
self.voice_type = voice_type
def set_codec(self, codec):
self.codec = codec
def set_sample_rate(self, sample_rate):
self.sample_rate = sample_rate
def set_speed(self, speed):
self.speed = speed
def set_volume(self, volume):
self.volume = volume
def synthesis(self, text):
session_id = str(uuid.uuid1())
params = self.__gen_params(session_id, text)
signature = self.__gen_signature(params)
headers = {
"Content-Type": "application/json",
"Authorization": str(signature)
}
url = _PROTOCOL + _HOST + _PATH
r = requests.post(url, headers=headers,
data=json.dumps(params), stream=True)
data = None
response = dict()
response["session_id"] = session_id
for chunk in r.iter_content(None):
if data is None:
try:
rsp = json.loads(chunk)
response["Code"] = rsp["Response"]["Error"]["Code"]
response["Message"] = rsp["Response"]["Error"]["Message"]
self.listener.on_fail(response)
return
except:
data = chunk
response["data"] = data
self.listener.on_message(response)
continue
data = data + chunk
response["data"] = data
self.listener.on_message(response)
response["data"] = data
self.listener.on_complete(response)
def __gen_signature(self, params):
sort_dict = sorted(params.keys())
sign_str = "POST" + _HOST + _PATH + "?"
for key in sort_dict:
sign_str = sign_str + key + "=" + str(params[key]) + '&'
sign_str = sign_str[:-1]
hmacstr = hmac.new(self.credential.secret_key.encode('utf-8'),
sign_str.encode('utf-8'), hashlib.sha1).digest()
s = base64.b64encode(hmacstr)
s = s.decode('utf-8')
return s
def __sign(self, signstr, secret_key):
hmacstr = hmac.new(secret_key.encode('utf-8'),
signstr.encode('utf-8'), hashlib.sha1).digest()
s = base64.b64encode(hmacstr)
s = s.decode('utf-8')
return s
def __gen_params(self, session_id, text):
params = dict()
params['Action'] = _ACTION
params['AppId'] = int(self.appid)
params['SecretId'] = self.credential.secret_id
params['ModelType'] = 1
params['VoiceType'] = self.voice_type
params['Codec'] = self.codec
params['SampleRate'] = self.sample_rate
params['Speed'] = self.speed
params['Volume'] = self.volume
params['SessionId'] = session_id
params['Text'] = text
timestamp = int(time.time())
params['Timestamp'] = timestamp
params['Expired'] = timestamp + 24 * 60 * 60
return params
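# --- Hedged usage sketch (illustrative, not part of the original SDK file) ---
# A minimal way the classes above could be wired together, assuming a simple
# credential object exposing the `secret_id` / `secret_key` attributes that
# __gen_signature/__gen_params read. The appid, keys and voice_type values
# below are placeholders, not real credentials.
class _FileListener(SpeechSynthesisListener):
    def __init__(self, path):
        self.path = path

    def on_message(self, response):
        # Called repeatedly with the audio bytes accumulated so far.
        pass

    def on_complete(self, response):
        with open(self.path, "wb") as f:
            f.write(response["data"])

    def on_fail(self, response):
        print("TTS failed:", response.get("Code"), response.get("Message"))


class _Credential:
    def __init__(self, secret_id, secret_key):
        self.secret_id = secret_id
        self.secret_key = secret_key


if __name__ == "__main__":
    listener = _FileListener("output.pcm")
    synthesizer = SpeechSynthesizer(appid="1250000000",
                                    credential=_Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY"),
                                    voice_type=0,
                                    listener=listener)
    synthesizer.synthesis("hello world")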
|
#!/usr/bin/python3
import sys
import random
def cmdlinearg(name, default=None):
for arg in sys.argv:
if arg.startswith(name + "="):
return arg.split("=")[1]
assert default is not None
return default
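# Example invocation (hypothetical values), passing parameters as key=value pairs:
#   python3 gen.py n=100 k=5 ak=2 bk=3 av=10 bv=10 lastsmall=1 seed=42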
random.seed(int(cmdlinearg('seed', sys.argv[-1])))
n = int(cmdlinearg('n'))
k = int(cmdlinearg('k'))
ak = int(cmdlinearg('ak'))
bk = int(cmdlinearg('bk'))
av = int(cmdlinearg('av'))
bv = int(cmdlinearg('bv'))
lastsmall = int(cmdlinearg('lastsmall', '0'))
print(n, k)
li = list(range(n))
random.shuffle(li)
ar = [None] * n
br = [None] * n
for i in range(n):
ar[i] = li[i] * ak + random.randint(0, av)
br[i] = li[i] * bk + random.randint(0, bv)
if lastsmall and li[i] < n-1:
br[i] += 10
print(*ar)
print(*br)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This code is under MIT license. See the License.txt file.
This module contains the functions useful to numerically solve the model
Boris Sauterey
boris.sauterey@ens.fr
"""
import numpy as np
from Metabolisme.Energy import *
from Metabolisme.Rates import *
from PBE.Balancefun import *
#from Environment import *
#from Traits import *
from scipy.stats import truncnorm
def Step_Profile(NC,X0,traits,S,gamma,T=TS,dt = 0.01):
"""
    Computes one timestep of the profile evolution, starting from the profile (NC, X0) at time t,
    where S is the substrate concentration vector [H,C,N,G].
    Returns the profile at t+dt without modifying the
    nutrient concentrations.
    traits should be a vector with the following order:
    [rc, Vc, Qc, ks, qmax, mg, kd, mort, thresh, slope, gmax]
    for more information on these traits, see the traits module
"""
## Extraction of substrate concentrations !! Memory efficient?
for i in range(0,len(S)):
if S[i] < 1e-100:
S[i] = 1e-100
H = S[0]
C = S[1]
N = S[2]
G = S[3]
## Traits extraction !! This could be a waste of time and memory as I don't know how memory is managed when it comes to put variables in a list and then out
rc = traits[0]
Vc = traits[1]
Qc = traits[2]
ks = traits[3]
qmax = traits[4]
mg = traits[5]
kd = traits[6]
mort = traits[7]
thresh = traits[8]
slope = traits[9]
gmax = traits[10]
## Computing energetical values that are constant across x
dgcat = DeltaGcat(T,H,C,G) # Energy that a run of metabolic reaction yields
qcat = QCat(dgcat,H,C,qmax,ks) # Rate at which catabolic reaction occurs
mreq = Mreq(mg,dgcat) # Minimum rate of catabolic reaction for cell maintenance
decay = Decay(mreq,qcat,dgcat,kd) # Decay rate of the cells, that only depends on energy available in the environment
## Adjusting the delta t so that the numerical scheme remains stable
lim = np.min([H,C])
if qcat > 0:
dt = lim/(qcat*NC*1000)
else:
dt = 0.01
## Cell dynamics
dgana = DeltaGana(T,H,C,N,X0) # Energy requirements for anabolic reaction
Lam = -((dgana+dgdiss)/dgcat) # Metabolic coupling
Y = Yl(Lam) # Metabolic stochiometry
slim = Slim([H,C,N],Y[:-2]) # Limiting substrate
QMet_t = QMet(dgcat,qmax,ks,slim) # Metabolic rate
qana = QAna(dgcat,dgana,Lam,qcat,QMet_t,mreq,qmax,ks,slim) # Anabolic rate
qcat = qcat # Catabolic rates
new_cell = Gamma(thresh,slope,gmax,X0)
nNC = NC + (new_cell - decay - mort)*NC*dt # First part of time derivative addition
if nNC < 0: nNC = 0
nX0 = (X0 + qana*dt) / (1+new_cell*dt)
    return(nNC,nX0,qana,qcat,decay,mort,dt) # Note that qana, qcat, decay and mort are computed from the state at t, while nNC and nX0 are the state at t+dt
def Step_Substrates(S,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NC,qana,qcat,dt,Vc):
"""
Computes the new S substrates vector after dt
if several cell populations are competing, one should put as arguments:
Nc = sum(Nci)
    qana = sum(qana_i)
    qcat = sum(qcat_i)
"""
H = S[0]
C = S[1]
N = S[2]
G = S[3]
nH = H + (QH*(Hinf-H)+(qcat*Catabolism[0]+qana*Anabolism[0])*NC)*dt
nC = C + (QC*(Cinf-C)+(qcat*Catabolism[1]+qana*Anabolism[1])*NC)*dt
nN = N + (QN + (qcat*Catabolism[2]+qana*Anabolism[2])*NC)*dt
nG = G + (QG*(Ginf-G)+(qcat*Catabolism[3]+qana*Anabolism[3])*NC)*dt
nS = np.array([nH,nC,nN,nG])
nS[np.where(nS <= 1e-100)] = 1e-100
return(nS)
def Step_DeadBiomass(Xo,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,Nc,decay,mort,Qc,X,dt,Vc):
"""
Computes the increase in dead biomass between t and t+dt
"""
return(Xo + (-0.1*Xo + (decay+mort)*Nc*(Qc+X))*dt) #Here the term with Q can be replaced with a specific biomass sedimentation flux
def Run_Profile(init,traits,Env,sig = 0.0001,Ntot0 = 10,tmax = 100,T=TS,dt = 0.01,mu=0.005):
"""
    This function runs the profile evolution with high output volume: it computes
    and saves the whole profile evolution up to time tmax, for initial conditions init
    and a single microbial population with traits `traits`
    init should be [H0, C0, N0, G0, Xo0, NC0, X0]
"""
## Environmental conditions
Hinf = Env[0]
Cinf = Env[1]
Ninf = Env[2]
Ginf = Env[3]
QH = Env[4]
QC = Env[5]
QN = Env[6]
QG = Env[7]
## Traits
thresh = traits[7]
slope = traits[8]
gmax = traits[9]
Vc = traits[1]
Qc = traits[2]
## Calculation of constants over timescale of interest (here, the temperature is constant)
DeltaG0catT = DeltaG0(T,deltaG0Cat,deltaH0Cat)
DeltaG0anaT = DeltaG0(T,deltaG0Ana,deltaH0Ana)
## Initialization
HT = []
CT = []
NT = []
GT = []
XoT = []
NCT = []
XT = []
D = []
time = []
t=1
HT.append(init[0])
CT.append(init[1])
NT.append(init[2])
GT.append(init[3])
XoT.append(init[4])
NCT.append(init[5])
XT.append(init[6])
D.append(0)
time.append(0)
t=1
while time[t-1] < tmax:
H = HT[t-1]
C = CT[t-1]
N = NT[t-1]
G = GT[t-1]
Xo = XoT[t-1]
NC = NCT[t-1]
X0 = XT[t-1]
        # NB: `gamma` is not defined in this file; it must be provided by one of the
        # star imports above (the Environment import is currently commented out).
        nNCT,nXT,qana,qcat,decay,mort,dt = Step_Profile(NC,X0,traits,[H,C,N,G],gamma,T,dt)
NCT.append(nNCT)
XT.append(nXT)
D.append(decay+mort)
nS = Step_Substrates([H,C,N,G],Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],qana,qcat,dt,Vc)
HT.append(nS[0])
CT.append(nS[1])
NT.append(nS[2])
GT.append(nS[3])
nXo = Step_DeadBiomass(Xo,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],decay,mort,Qc,XT[t-1],dt,Vc)
XoT.append(nXo)
time.append(time[t-1] + dt)
t=t+1
return(NCT,XT,HT,CT,NT,GT,XoT,D,time)
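# --- Hedged usage sketch (commented out; illustrative only) ---
# How Run_Profile might be driven for a single population. The argument
# layouts below are read directly from the functions above; actual numerical
# values (and module-level names such as gamma, TS, dgdiss) must be supplied
# by the Metabolisme/PBE/Environment modules.
# init   = [H0, C0, N0, G0, Xo0, NC0, X0]
# traits = [rc, Vc, Qc, ks, qmax, mg, kd, mort, thresh, slope, gmax]
# Env    = [Hinf, Cinf, Ninf, Ginf, QH, QC, QN, QG]
# NCT, XT, HT, CT, NT, GT, XoT, D, t = Run_Profile(init, traits, Env, tmax=10)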
|
'''
Author: Jack Morikka.
This program is intended to take bulk collected data from an IMARIS batch
run that outputs channels (or spots) and convert it into a format ready for
LAM analysis. This bulk data consists of e.g. a Spots_1 directory which
contains .csv files such as 'Area.csv' and 'positions.csv' etc. for every
sample that was processed in the IMARIS batch. This program essentially
separates out these samples creating the same excel files for single
samples in their own unique folders with the spot name (e.g. DAPI, GFP,
MP etc.) clearly indicated in a fashion readable by LAM.
The user selects the bulk 'spot' folder e.g. Spots_1_Statistics, and
chooses an empty output folder where the new folders with .csv files will
be sent. The user then names this 'spot' e.g. DAPI, GFP or MP. The user
then runs the program. For the same bulk data the user then reruns the
program and picks another 'spot' folder e.g. Spots_2_Statistics and chooses
the SAME output folder which they selected for the first 'spot' folder.
'''
from tkinter import *
from tkinter import filedialog
import logging
import istl
class Imaris_to_lam:
def __init__(self):
# Creates the structure for the GUI with the title
self.__window = Tk()
self.__window.title('Imaris_to_LAM')
# Creates label for select spot folder selection prompt
self.__s_ij_prompt = Label(self.__window,
text='Select spot folder:') \
.grid(row=3, column=1, sticky=E)
# Creates the browse button for getting the spot folder path
Button(self.__window, text='Browse', command=self.retrieve_csv_folder) \
.grid(row=3, column=2)
# Creates the variable label for spot folder path text
self.__csv_folder = StringVar()
self.__selectij = Label(self.__window, text=self.__csv_folder.get(),
bg='white', bd=2,
textvariable=self.__csv_folder, relief='sunken')
self.__selectij.grid(row=3, column=3, columnspan=3, sticky=W)
# Creates label for select output folder prompt
self.__r_dir_prompt = Label(self.__window,
text='Select output folder:') \
.grid(row=5, column=1, sticky=E)
# Creates the browse button for getting the output folder
Button(self.__window, text='Browse', command=self.retrieve_ofolder) \
.grid(row=5, column=2)
# Creates the variable label for output folder text
self.__ofolder = StringVar()
self.__selectDir = Label(self.__window, text=self.__ofolder.get(),
bg='white', bd=2,
textvariable=self.__ofolder, relief='sunken')
self.__selectDir.grid(row=5, column=3, columnspan=3, sticky=W)
# Creates the spot name entry input field
self.__name_prompt = Label(self.__window,
text='Enter spot name '
                                        '(e.g. DAPI, GFP, MP etc.):') \
.grid(row=9, column=1)
self.__name_input = Entry(self.__window, width=5)
self.__name_input.grid(row=9, column=2, padx=5, ipadx=5)
# Creates the run button for running the simulator
Button(self.__window, text='Run', command=self.go) \
.grid(row=11, column=1, sticky=E)
# Creates button for quitting the stitcher
Button(self.__window, text='Quit', command=self.quit_func) \
.grid(row=11, column=2, sticky=W)
def retrieve_csv_folder(self):
        ''' Prompts the user to select the bulk 'spot' folder'''
selected_directory = filedialog.askdirectory()
self.__csv_folder.set(selected_directory)
def retrieve_ofolder(self):
''' Prompts the user to select an output folder'''
selected_directory = filedialog.askdirectory()
self.__ofolder.set(selected_directory)
def go(self):
''' If an input folder, output folder and spot name are selected, this
function imports the istl csv_create function to use on the bulk .csv
files to create new directories for each sample and new .csv files
for each sample to be used with LAM'''
# Checks that no fields are left blank
if self.__ofolder.get() == '' or self.__csv_folder.get() == '' or self.__name_input.get() == '':
from tkinter import messagebox
# Shows a warning message if a field is blank upon running
messagebox.showinfo("Warning", "spot.csv path or output folder not"
" selected! Or name not entered!")
else:
# Sets up a log in the chosen output folder to log any errors.
logging.basicConfig(
filename='%s/IMARIS_to_LAM.log' % self.__ofolder.get(),
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%d-%m-%Y %H:%M:%S')
try:
csv_path = str(self.__csv_folder.get())
output_folder_path = str(self.__ofolder.get())
spot = str(self.__name_input.get())
self.__window.destroy()
logging.info(
"Process started for %s" % spot)
# Calls the csv_create function from the istl.py file which
# should be in the same directory as this istl_RUN.py
istl.csv_create(csv_path, output_folder_path, spot)
logging.info(
"Process finished for %s" % spot)
except Exception as e:
logging.exception(str(e))
def quit_func(self):
self.__window.destroy()
def start(self):
self.__window.mainloop()
def main():
ui = Imaris_to_lam()
ui.start()
main()
|
##############################################################################
# Copyright (c) 2017 ZTE Corp and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from qtip.base.constant import BaseProp
from qtip.collector.parser.grep import GrepParser
class CollectorProp(BaseProp):
TYPE = 'type'
PARSERS = 'parsers'
PATHS = 'paths'
def load_parser(type_name):
if type_name == GrepParser.TYPE:
return GrepParser
else:
raise Exception("Invalid parser type: {}".format(type_name))
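# Hedged usage sketch (illustrative only): resolving a parser class from a
# collector config entry.
#
#   parser_cls = load_parser(GrepParser.TYPE)
#   assert parser_cls is GrepParser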
|
import time
import io
from flask import send_file
from flask.views import MethodView
from flask_login import current_user
from sqlalchemy import func, and_
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Font, Border, Side
from backend.plugins import db
from backend.models import Order, OrderSheet
from backend.extensions import Blueprint, roles_required
bp = Blueprint('reports',
'reports',
description='Get report like first rides assignment')
@bp.route('/firstrides/<sheet_id_or_latest>')
class FirstRides(MethodView):
@bp.response(code=200)
@bp.alt_response("NOT_FOUND", code=404)
@roles_required('view-only', 'planner', 'administrator')
def get(self, sheet_id_or_latest):
"""
Get a workbook containing a first rides report from an order sheet.
        In case 'sheet_id_or_latest' is 'latest', the most recently uploaded
order sheet will be used.
Required roles: view-only, planner, administrator
"""
# Request the order sheet for this planning
is_view_only = current_user.role == 'view-only'
order_sheet = OrderSheet.query.get_sheet_or_404(sheet_id_or_latest,
is_view_only)
sheet_id = order_sheet.id
# Get a list of truck ids and their earliest departure time
subq = db.session.query(
Order.truck_s_number,
func.min(Order.departure_time).label('mintime')
).group_by(Order.truck_s_number).filter(Order.sheet_id == sheet_id) \
.subquery()
# Get the first orders for each truck
first_orders = db.session.query(Order).join(
subq,
and_(
Order.truck_s_number == subq.c.truck_s_number,
Order.departure_time == subq.c.mintime
)
).all()
# Create a new sheet file
book = Workbook()
sheet = book.active
now = time.strftime("%d %b %Y %X")
now_save = time.strftime("%Y-%m-%d-%H-%M-%S")
# Set the column names
sheet['A1'] = now
sheet['C4'] = 'Sno'
sheet['D4'] = 'Driver Name'
sheet['E4'] = 'Truck ID'
sheet['F4'] = 'Terminal'
sheet['G4'] = 'chassis'
sheet['H4'] = 'Starting Time'
sheet['I4'] = 'Delivery Deadline'
sheet['J4'] = 'Customer'
sheet['K4'] = 'Container No.'
sheet['L4'] = 'City'
sheet['M4'] = 'Container Type'
sheet['N4'] = 'Shipping company'
sheet['O4'] = 'Remarks'
# Styling
yellow_fill = PatternFill(start_color='FFFF00',
fill_type='solid')
border = Border(left=Side(border_style='thin', color='000000'),
right=Side(border_style='thin', color='000000'),
top=Side(border_style='thin', color='000000'),
bottom=Side(border_style='thin', color='000000'))
# Set the styling to each of the column header cells
for cell in sheet.iter_cols(3, 15, 4, 4):
cell[0].fill = yellow_fill
cell[0].font = Font(bold=True)
cell[0].border = border
# Set the width of cells that contain long values
sheet.column_dimensions['D'].width = 20
sheet.column_dimensions['H'].width = 15
sheet.column_dimensions['I'].width = 15
sheet.column_dimensions['N'].width = 20
sheet.column_dimensions['O'].width = 40
# Create a row for each truck that has assigned an order
for count, order in zip(range(5, len(first_orders)+5), first_orders):
sheet.cell(row=count, column=3).value = \
order.truck.s_number # s number
sheet.cell(row=count, column=4).value = \
order.truck.others.get('Driver', '') # driver
sheet.cell(row=count, column=5).value = \
order.truck.truck_id # truck id
sheet.cell(row=count, column=6).value = \
order.inl_terminal # terminal
sheet.cell(row=count, column=7).value = \
'' # chassis?
sheet.cell(row=count, column=8).value = \
order.departure_time # dep time
sheet.cell(row=count, column=9).value = \
order.delivery_deadline # deadline
sheet.cell(row=count, column=10).value = \
order.others.get('Client', '') # client
sheet.cell(row=count, column=11).value = \
order.others.get('Container', '') # container number
sheet.cell(row=count, column=12).value = \
order.others.get('City', '') # city
sheet.cell(row=count, column=13).value = \
order.others.get('Unit type', '') # container type
sheet.cell(row=count, column=14).value = \
order.others.get('Ship. comp.', '') # shipping company
sheet.cell(row=count, column=15).value = \
order.truck.others.get('Remarks', '') # remarks
# Set the borders of each cell of the row
for i in range(3, 16):
sheet.cell(row=count, column=i).border = border
# Save the file to an io stream
filename = 'first-rides-' + now_save + '.xlsx'
file = io.BytesIO()
book.save(file)
file.seek(0)
return send_file(
file,
attachment_filename=filename,
as_attachment=True
), 200
|
class ImagesClient():
pass
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response#Return response object from API view
from rest_framework import status#List of handy HTTP status codes that we can use when returning responses from our API
from profiles_api import serializers
# Create your views here.
class HelloAPIView(APIView):
"""Test APIView"""
serializer_class= serializers.HelloSerializer
#Function for a GET HTTP request
def get(self, request, format=None):#format parameter is just best practice to include
"""Returns a list of APIView features"""
an_api_view=[
'Uses HTTP methods as function (get,post,patch,put,delete)',
"Is similar to a traditional Django View",
"Gives you the most control over your application logic",
"Is mapped manually to URLs",
]
return Response({'message':'Hello',
'an_api_view':an_api_view})#return in JSON format
def post(self, request):
"""Create a hello message with our name"""
serializer=self.serializer_class(data=request.data)#retrieve the serializer that we defined in the serializer_class attribute above. We pass in the request data to the class
if serializer.is_valid():
name=serializer.validated_data.get('name') #retrieve the name field defined in our serializer
message=f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):#Need pk to identify which object to put
"""Handle updating an object"""
return Response({'method':'PUT'})
def patch(self, request,pk=None):
"""Handle partial update of object"""
return Response({'method':"PATCH"})
    def delete(self, request, pk=None):
        """Delete an object"""
return Response({'method':'DELETE'})
from rest_framework import viewsets
class HelloViewSet(viewsets.ViewSet):
"""Test api viewset"""
serializer_class=serializers.HelloSerializer
    def list(self, request):
        """Return a hello message"""
a_view_set=[
'Uses actions (list, create, retrieve, update, partial_update)',
"Automatically maps to URLs using Routers",
"Provides more functionality with less code",
]
return Response({'message':'Hello',
'view_set':a_view_set})
def create(self, request):
"""Create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name=serializer.validated_data.get('name')
message=f'Hello {name}'
return Response({'message':message})
else:
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method':'GET'})
def update(self, request, pk=None):
"""Update"""
return Response({'http_method':'PUT'})
def partial_update(self, request, pk=None):
"""partial update"""
return Response({'http_method':'PATCH'})
def destroy(self, request, pk=None):
"""removing"""
return Response({'http_method':'DELETE'})
from profiles_api import models,permissions
from rest_framework import authentication, filters
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class=serializers.UserProfileSerializer
queryset= models.UserProfile.objects.all()
authentication_classes= (authentication.TokenAuthentication,)#Specify the authentication process we use in our API so we can verify who the user is
permission_classes=(permissions.UpdateOwnProfile,)#Specify the permission level of the authenticated user
filter_backends=(filters.SearchFilter,)
search_fields=('name', 'email',)
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
#make it visible in browsable API, the other viewsets have this by default, but not obtainauthtoken
renderer_classes= api_settings.DEFAULT_RENDERER_CLASSES
from rest_framework import permissions as rest_permissions
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handling creating, reading and updating profile feeds"""
authentication_classes = (authentication.TokenAuthentication,)
serializer_class= serializers.ProfileFeedSerializer
queryset=models.ProfileFeedItem.objects.all()
permission_classes = (
permissions.UpdateOwnStatus,#only able to update their own
rest_permissions.IsAuthenticated,#Limit viewing to authenticated user only
rest_permissions.IsAuthenticatedOrReadOnly#if not logged in, can't edit
)
def perform_create(self, serializer):#Override behavior for creating objects through a model viewset
"""Sets the user profile to the logged-in user"""
#because we have added token_authentication, the request contains info on user
serializer.save(user_profile=self.request.user)#since the serializer is a modelserializer, it has save function to save content from serializer to database
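# --- Hedged sketch (belongs in urls.py, shown here only as a comment) ---
# One way these views could be routed, assuming the app is named profiles_api;
# the route names below are illustrative, not taken from the original project.
#
# from django.urls import path, include
# from rest_framework.routers import DefaultRouter
# from profiles_api import views
#
# router = DefaultRouter()
# router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
# router.register('profile', views.UserProfileViewSet)
# router.register('feed', views.UserProfileFeedViewSet)
#
# urlpatterns = [
#     path('hello-view/', views.HelloAPIView.as_view()),
#     path('login/', views.UserLoginApiView.as_view()),
#     path('', include(router.urls)),
# ]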
|
import torch
import numpy as np
from scipy.io import loadmat
from skimage.io import imread
def default_loader(path_):
return imread(path_)
def mat_loader(path_):
return loadmat(path_)
def make_onehot(index_map, n):
# Only deals with tensors with no batch dim
old_size = index_map.size()
z = torch.zeros(n, *old_size[-2:]).type_as(index_map)
z.scatter_(0, index_map, 1)
return z
def to_tensor(arr):
if arr.ndim < 3:
return torch.from_numpy(arr)
elif arr.ndim == 3:
return torch.from_numpy(np.ascontiguousarray(np.transpose(arr, (2,0,1))))
else:
raise NotImplementedError
def to_array(tensor):
if tensor.ndimension() < 3:
return tensor.data.cpu().numpy()
elif tensor.ndimension() in (3, 4):
return np.ascontiguousarray(np.moveaxis(tensor.data.cpu().numpy(), -3, -1))
else:
raise NotImplementedError
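# --- Hedged usage sketch (illustrative only) ---
# Minimal self-check of the helpers above; shapes are chosen so that
# make_onehot's scatter_ call is valid (index_map needs a leading dim of 1).
if __name__ == "__main__":
    label_map = torch.randint(0, 3, (1, 4, 4))        # (1, H, W) integer labels
    onehot = make_onehot(label_map, 3)                # (3, H, W) one-hot tensor
    assert onehot.sum().item() == 16                  # one hot entry per pixel
    img = np.random.rand(4, 4, 3).astype(np.float32)  # HWC image
    t = to_tensor(img)                                # becomes CHW tensor
    back = to_array(t)                                # back to HWC array
    assert back.shape == img.shape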
|
from fastapi import APIRouter
from api.endpoints.routes import connections, endorse, reports, endorser_admin
endorser_router = APIRouter()
endorser_router.include_router(connections.router, prefix="/connections", tags=[])
endorser_router.include_router(endorse.router, prefix="/endorse", tags=[])
endorser_router.include_router(reports.router, prefix="/reports", tags=[])
endorser_router.include_router(endorser_admin.router, prefix="/admin", tags=[])
|
from string import lower, replace
from PIL import Image
from optparse import OptionParser
import glob
import os
from lxml import etree
from pymei.Helpers import generate_mei_id
from pymei.Components import Modules as mod, MeiDocument
from pymei.Export import meitoxml
from pymei.Import import xmltomei
from spellcheck import correct
"""
Generates mei files and outputs to ../mei_corrtxt or ../mei_uncorrtxt if the -u flag is given. Make sure that ????.html and its corresponding ????.png and ????_uncorr.mei (if it exists) are in the pwd.
If no flag is given, this script uses Peter Norvig's spelling checker to attempt to improve the quality of the hocr text output. Make sure that spellcheck.py and latin-english.txt are also in this dir. It also removes dashes from lyrics and fixes common spelling errors that are not corrected by the spell-checker.
If the flag -u (--uncorrected) is given, this script uses text from the hocr output without any correction.
"""
parser=OptionParser()
parser.add_option("-u", "--uncorrected", action="store_false", dest="corrected", default=True)
(options, args)=parser.parse_args()
# make output directory
if options.corrected:
os.system('mkdir ../mei_corrtxt')
else:
os.system('mkdir ../mei_uncorrtxt')
def getlines(hocrfile):
"""
arg: hocrfile as string (ex. '0001')
return: lines as list of dictionaries [{'bbox' : (ulx, uly, lrx, lry), 'text' : 'TEXT'}, ...]
"""
parser=etree.HTMLParser()
tree=etree.parse(hocrfile+'.html', parser)
im=Image.open(hocrfile+'.png')
l=[]
for element in tree.getroot().iter("span"):
bbox=[int(x) for x in element.attrib['title'].split()[1:]]
corrected=[bbox[0], im.size[1]-bbox[3], bbox[2], im.size[1]-bbox[1]]
d={}
d['bbox']=tuple(corrected)
d['text']=element.text
l.append(d)
return l
def force_correct(word):
"""
arg: commonly misspelt word that the spell-checker cannot catch
return: correct spelling of word
"""
if word=='unc':
return 'nunc'
elif word=='gnus':
return 'agnus'
elif word=='yrie':
return 'kyrie'
elif word=='redo':
return 'credo'
elif word=='ominus':
return 'dominus'
elif word=='remus':
return 'oremus'
elif word=='ectio':
return 'lectio'
elif word=='er':
return 'per'
elif word=='eus':
return 'deus'
elif word=='hriste':
return 'christe'
elif word=='ector':
return 'rector'
elif word=='niquo':
return 'iniquo'
elif word=='ucis':
return 'lucis'
elif word=='iliae':
return 'filiae'
elif word=='isirere':
return 'misirere'
elif word=='alva':
return 'salva'
elif word=='ripe':
return 'eripe'
else:
return word
def correct_text(line):
"""
fixes text in lines - removes dashes from lyrics, corrects spelling
"""
# check if text output should be corrected or not
if options.corrected:
# fix strange problem where 'lu-' is read as 'hb'
line['text']=replace(line['text'], 'hb', 'lu-')
# remove dashes from text
line['text']=replace(line['text'], '- ', '')
line['text']=replace(line['text'], '-', '')
# correct common spelling errors that the spell-checker cannot catch
words=line['text'].split()
words[0]=force_correct(words[0])
# correct spelling if corrected output is not 's' (short words sometimes get corrected to 's' - weird)
words=[correct(lower(word)) for word in words if correct(lower(word))!='s']
return ' '.join(words)
else:
return line['text']
def add_text_lines(hocrfile, surface, section):
"""
helper method that adds lines in hocr file to 'surface' and 'section' in mei file
"""
div=mod.div_()
div.id=generate_mei_id()
lg=mod.lg_()
lg.id=generate_mei_id()
section.add_child(div)
div.add_child(lg)
for line in getlines(hocrfile):
# for each line: make new zone and l objects, add zone to surface
zone=mod.zone_()
zone.id=generate_mei_id()
zone.ulx=line['bbox'][0]
zone.uly=line['bbox'][1]
zone.lrx=line['bbox'][2]
zone.lry=line['bbox'][3]
l=mod.l_()
l.id=generate_mei_id()
l.facs=zone.id
l.value=correct_text(line)
lg.add_child(l)
surface.add_child(zone)
def create_mei(filename):
# build new mei file
meifile=MeiDocument.MeiDocument()
mei=mod.mei_()
# header
meihead=mod.meihead_()
filedesc=mod.filedesc_()
titlestmt=mod.titlestmt_()
title=mod.title_()
pubstmt=mod.pubstmt_()
meihead.add_child(filedesc)
filedesc.add_children([titlestmt, pubstmt])
titlestmt.add_child(title)
# music - facsimile, layout, body
music=mod.music_()
facsimile=mod.facsimile_()
facsimile.id=generate_mei_id()
surface=mod.surface_()
surface.id=generate_mei_id()
graphic=mod.graphic_()
graphic.id=generate_mei_id()
graphic.attributes={'xlink:href':'%s_original_image.tiff' % (filename,)}
facsimile.add_child(surface)
surface.add_child(graphic)
layout=mod.layout_()
layout.id=generate_mei_id()
page=mod.page_()
page.id=generate_mei_id()
page.attributes={'n':filename}
layout.add_child(page)
body=mod.body_()
mdiv=mod.mdiv_()
mdiv.attributes={'type':'solesmes'}
score=mod.score_()
section=mod.section_()
pb=mod.pb_()
pb.id=generate_mei_id()
pb.attributes={'pageref':page.id}
body.add_child(mdiv)
mdiv.add_child(score)
score.add_child(section)
section.add_child(pb)
music.add_children([facsimile, layout, body])
mei.add_children([meihead, music])
meifile.addelement(mei)
return meifile
# import hocr and mei files into lists and strip extension where useful
hocrfiles=[x.split('.')[0] for x in glob.glob('????.html')]
allmeifiles=glob.glob('*.mei')
meifiles=[x.split('_')[0] for x in allmeifiles]
# for each hocr file: if corresponding mei file exists, open mei and edit - if not, create new mei
if options.corrected:
for hocrfile in hocrfiles:
output_name='%s_corr.mei' % (hocrfile,) if '%s_corr.mei' % (hocrfile,) in allmeifiles else '%s_uncorr.mei' % (hocrfile,)
meifile=xmltomei.xmltomei(output_name) if hocrfile in meifiles else create_mei(hocrfile)
surface=meifile.search('surface')[0]
section=meifile.search('section')[0]
add_text_lines(hocrfile, surface, section)
meitoxml.meitoxml(meifile, '../mei_corrtxt/%s' % (output_name,))
else:
for hocrfile in hocrfiles:
meifile=MeiDocument.MeiDocument()
mei=mod.mei_()
surface=mod.surface_()
section=mod.section_()
mei.add_children([surface, section])
add_text_lines(hocrfile, surface, section)
meifile.addelement(mei)
meitoxml.meitoxml(meifile, '../mei_uncorrtxt/%s_mei_fragment.mei' % (hocrfile,))
|
# Copyright (C) 2001,2002 Python Software Foundation
# email package unit tests
# The specific tests now live in Lib/email/test
from email.test.test_email import TestEncoders, suite
from test import support
def test_main():
#This one doesn't work on Jython
del TestEncoders.test_encode7or8bit
    s = suite()
    support.run_unittest(s)
if __name__ == '__main__':
test_main()
|
def greeting():
try:
from blessings import Terminal
term = Terminal()
print(term.green + term.bold + "Hello World!" + term.normal)
except ImportError:
print("Hello World!")
|
import re
from typing import Optional
import cryptography
import cryptography.x509
from cryptography.hazmat.backends.openssl import backend as crypto_x509_backend
from . import Rut, constants
def get_subject_rut_from_certificate_pfx(pfx_file_bytes: bytes, password: Optional[str]) -> Rut:
"""
Return the Chilean RUT stored in a digital certificate.
Original source URL: https://github.com/fyntex/fd-cl-data/blob/cfd5a716fb9b2cbd8a03fca1bacfd1b844b1337f/fd_cl_data/apps/sii_auth/models/sii_auth_credential.py#L701-L745 # noqa: E501
:param pfx_file_bytes: Digital certificate in PKCS12 format
:param password: (Optional) The password to use to decrypt the PKCS12 file
"""
(
private_key,
x509_cert,
additional_certs,
) = crypto_x509_backend.load_key_and_certificates_from_pkcs12(
data=pfx_file_bytes,
password=password.encode() if password is not None else None,
)
# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/serialization/#cryptography.hazmat.primitives.serialization.pkcs12.load_key_and_certificates # noqa: E501
subject_alt_name_ext = x509_cert.extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectAlternativeName,
)
# Search for the RUT in the certificate.
try:
results = [
x.value
for x in subject_alt_name_ext.value._general_names
if hasattr(x, 'type_id') and x.type_id == constants.SII_CERT_TITULAR_RUT_OID
]
except AttributeError as exc:
raise Exception(f'Malformed certificate extension: {subject_alt_name_ext.oid}') from exc
if not results:
raise Exception('Certificate has no RUT information')
elif len(results) > 1:
raise Exception(f'len(results) == {len(results)}')
subject_rut_raw: bytes = results[0]
subject_rut = re.sub(r'[^0-9-]', '', subject_rut_raw.decode('utf-8'))
return Rut(subject_rut)
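# Hedged usage sketch (commented out; requires a real PKCS12 file and password,
# both of which are placeholders here):
#
#   with open('certificado.p12', 'rb') as f:
#       rut = get_subject_rut_from_certificate_pfx(f.read(), password='secret')
#   print(rut)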
|
"""Batch the glove embeddings for the generator"""
from __future__ import absolute_import
from __future__ import division
import random
import time
import re
import numpy as np
from six.moves import xrange
from vocab import PAD_ID, UNK_ID
import torch
def split_by_whitespace(sentence):
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(" ", space_separated_fragment))
return [w for w in words if w]
def intstr_to_intlist(string):
"""Given a string e.g. '311 9 1334 635 6192 56 639', returns as a list of integers"""
return [int(s) for s in string.split()]
def sentence_to_token_ids(sentence, word2id):
"""Turns an already-tokenized sentence string into word indices
e.g. "i do n't know" -> [9, 32, 16, 96]
Note any token that isn't in the word2id mapping gets mapped to the id for UNK
"""
tokens = split_by_whitespace(sentence) # list of strings
ids = [word2id.get(w, UNK_ID) for w in tokens]
return tokens, ids
def padded(token_batch, batch_pad=0):
"""
Inputs:
token_batch: List (length batch size) of lists of ints.
batch_pad: Int. Length to pad to. If 0, pad to maximum length sequence in token_batch.
Returns:
    List (length batch_size) of padded lists of ints.
All are same length - batch_pad if batch_pad!=0, otherwise the maximum length in token_batch
"""
maxlen = max(map(lambda x: len(x), token_batch)) if batch_pad == 0 else batch_pad
    # Materialise as lists so the results can be indexed/reused (map() is lazy on Python 3)
    masks = [[1] * len(x) + [0] * (maxlen - len(x)) for x in token_batch]
    return [token_list + [PAD_ID] * (maxlen - len(token_list)) for token_list in token_batch], masks
def get_text_description(caption_dict, batch_keys):
g_idx = [np.random.randint(len(caption_dict[batch_keys[0]])) for i in range(len(batch_keys))]
g_text_des = [caption_dict[k][i] for k,i in zip(batch_keys, g_idx)]
return g_text_des
def get_captions_batch(batch_keys, caption_dict, word2id):
"""
Inputs:
caption_dict: filename --> caption (dictionary)
batch_keys: filenames in the batch
Returns:
batch of indices representing each sentence
"""
tokens_batch = []
raw_batch = get_text_description(caption_dict, batch_keys)
for capt in raw_batch:
tokens, ids = sentence_to_token_ids(capt, word2id)
tokens_batch.append(ids)
captions_batch, masks = padded(tokens_batch)
return captions_batch, masks
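# --- Hedged self-check (illustrative only) ---
# Demonstrates padded(): sequences are right-padded with PAD_ID and the masks
# mark the real (non-pad) positions.
if __name__ == "__main__":
    batch, masks = padded([[3, 7, 9], [5]])
    print(batch)   # [[3, 7, 9], [5, PAD_ID, PAD_ID]]
    print(masks)   # [[1, 1, 1], [1, 0, 0]]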
|
from unittest import TestCase
from configuration_py.configuration_load import _normalize_environment_label
class TestNormalizeEnvironmentLabel(TestCase):
def test_normalize_environment_label_should_return_label_for_available_environment(self):
available_environments = ['production', 'development']
label = 'development'
expected_value = label
actual_value = _normalize_environment_label(label, available_environments)
self.assertEqual(expected_value, actual_value)
def test_normalize_environment_label_should_return_label_for_short_development_environment(self):
available_environments = ['production', 'development']
label = 'dev'
expected_value = 'development'
actual_value = _normalize_environment_label(label, available_environments)
self.assertEqual(expected_value, actual_value)
def test_normalize_environment_label_should_return_label_for_short_production_environment(self):
available_environments = ['production', 'development']
label = 'prod'
expected_value = 'production'
actual_value = _normalize_environment_label(label, available_environments)
self.assertEqual(expected_value, actual_value)
def test_normalize_environment_label_should_raise_exception_if_no_such_environment_in_config(self):
available_environments = ['production']
label = 'development'
self.assertRaises(EnvironmentError, _normalize_environment_label, label, available_environments)
|
# Generated by Django 2.2.14 on 2020-07-14 09:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forms', '0003_auto_20180522_0820'),
]
operations = [
migrations.AlterField(
model_name='field',
name='help_text',
field=models.CharField(blank=True, max_length=300, verbose_name='Help text'),
),
migrations.AlterField(
model_name='field',
name='label',
field=models.CharField(max_length=300, verbose_name='Label'),
),
migrations.AlterField(
model_name='field',
name='slug',
field=models.SlugField(blank=True, default='', max_length=300, verbose_name='Slug'),
),
migrations.AlterField(
model_name='form',
name='send_email',
field=models.BooleanField(default=False, help_text='If checked, the person entering the form will be sent an email', verbose_name='Send email'),
),
migrations.AlterField(
model_name='form',
name='slug',
field=models.SlugField(editable=False, max_length=220, unique=True, verbose_name='Slug'),
),
migrations.AlterField(
model_name='form',
name='title',
field=models.CharField(max_length=220, verbose_name='Title'),
),
]
|
import argparse
import code
import sys
import traceback
from voussoirkit import interactive
from voussoirkit import pipeable
from voussoirkit import vlogging
import bringrss
def bringrepl_argparse(args):
global B
try:
B = bringrss.bringdb.BringDB.closest_bringdb()
except bringrss.exceptions.NoClosestBringDB as exc:
pipeable.stderr(exc.error_message)
pipeable.stderr('Try `bringrss_cli.py init` to create the database.')
return 1
if args.exec_statement:
exec(args.exec_statement)
B.commit()
else:
while True:
try:
code.interact(banner='', local=dict(globals(), **locals()))
except SystemExit:
pass
if len(B.savepoints) == 0:
break
print('You have uncommited changes, are you sure you want to quit?')
if interactive.getpermission():
break
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--exec', dest='exec_statement', default=None)
parser.set_defaults(func=bringrepl_argparse)
args = parser.parse_args(argv)
return args.func(args)
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
|
from pydy import *
from sympy import *
from bicycle_lib_hand import mj_params as mj_p
# Reference frames
N = NewtonianReferenceFrame('N')
(q,), (qd,) = N.declare_coords('q', 1)
lmbda=q
var('rr rrt rf rft xb zb xh zh mc md me mf mr mb mh w c')
var('xrb zrb xhf zhf')
var('l1 l2 l3 l4 lr lf ls')
var('IC22 IF11 IF22 ICD11 ICD13 ICD22 ICD33 IEF11 IEF13 IEF22 IEF33 IF11')
var('IRxx IRyy IBxx IBxz IByy IBzz IHxx IHxz IHyy IHzz IFxx IFyy')
# Subs dict and a collection list
sin_over_tan = {sin(lmbda)/tan(lmbda): cos(lmbda)}
col = [sin(lmbda), cos(lmbda)]
output_eqns = [Eq(rr, rr),
Eq(rrt, rrt),
Eq(rf, rf),
Eq(rft, rft)]
output_eqns = []
# Total mass of rear and front assemblies
mc = mr
md = mb
me = mh
# D Frame in the upright configuration
D = N.rotate('D', 2, lmbda)
# In the upright zero-steer configuration, the D and E frames are aligned
E = D
# Take N.O to be the rear contact point
# Locate all points using my parameters
CO = N.O.locate('CO', -(rr+rrt)*N[3], mass=mc)
CDO = CO.locate('CDO', l1*D[1] + l2*D[3], mass=md)
FO1 = CO.locate('FO1', lr*D[1] + ls*D[3] + lf*E[1], mass=mf)
EFO = FO1.locate('EFO', l3*E[1] + l4*E[3], mass=me)
FN1 = FO1.locate('FN1', (rf+rft)*N[3])
# Locate all points using Meijaard parameters
RO = N.O.locate('RO', -(rr + rrt)*N[3], mass=mr) # Same point as CO
BO = N.O.locate('BO', xb*N[1] + zb*N[3], mass=mb)
HO = N.O.locate('HO', xh*N[1] + zh*N[3], mass=mh)
FO2 = N.O.locate('FO2', w*N[1] - (rf+rft)*N[3], mass=mf) # Same point as FO
FN2 = N.O.locate('FN2', w*N[1])
CDO2 = N.O.locate('CDO2', xrb*N[1] + zrb*N[3])
EFO2 = N.O.locate('EFO2', xhf*N[1] + zhf*N[3])
# Convert geometric frame parameters
geom_eqs = [dot(FN1.rel(N.O)-FN2.rel(N.O), D[1]),
dot(FN1.rel(N.O)-FN2.rel(N.O), D[3]),
rf+rft-lf/sin(lmbda)-c/tan(lmbda)] # From similar triangles
soln_geom = solve(geom_eqs, [lr, lf, ls])
for l in (lr, ls, lf):
output_eqns.append(Eq(l,
collect(soln_geom[l].expand().subs(sin_over_tan), col)))
# Mass centers of Meijaard's bodies
RBO = mass_center(N.O, [RO, BO]) # Relative to rear contact point
HFO = mass_center(FN2, [HO, FO2]) # Relative to front contact point
cm_eqs = [dot(CDO.rel(N.O) - RBO, D[1]),
dot(CDO.rel(N.O) - RBO, D[3]),
dot(EFO.rel(FN1) - HFO, D[1]),
dot(EFO.rel(FN1) - HFO, D[3])]
soln_cm = {l1: -cm_eqs[0] + l1,
l2: -cm_eqs[1] + l2,
l3: -cm_eqs[2] + l3,
l4: -cm_eqs[3] + l4}
for l in (l1, l2, l3, l4):
output_eqns.append(Eq(l, soln_cm[l]))
# Test to ensure that the l1, l2, l3, l4 expressions just solved for result in
# the same mass center location as what is published by Sharp
xrbs = CDO.rel(N.O).dot(N[1])
zrbs = CDO.rel(N.O).dot(N[3])
xhfs = EFO.rel(N.O).dot(N[1])
zhfs = EFO.rel(N.O).dot(N[3])
# Below results match COM locations of Sharp 2008
#print 'xrb =', xrbs.subs(soln_cm).subs(mj_p).n()
#print 'zrb =', zrbs.subs(soln_cm).subs(mj_p).n()
#print 'xhf =', xhfs.subs(soln_cm).subs(soln_geom).subs(mj_p).n()
#print 'zhf =', zhfs.subs(soln_cm).subs(soln_geom).subs(mj_p).n()
# Distances from rear assembly mass center to rear wheel center
l1c = RO.rel(CDO).dot(D[1])
l3c = RO.rel(CDO).dot(D[3])
# Distances from rear assembly mass center to rear frame and rider mass center
l1d = BO.rel(CDO).dot(D[1])
l3d = BO.rel(CDO).dot(D[3])
# Distance from front assembly mass center to front fork/handle bar
l1e = HO.rel(EFO).dot(D[1])
l3e = HO.rel(EFO).dot(D[3])
# Distance from front assembly mass center to front wheel mass center
l1f = FO1.rel(EFO).dot(D[1])
l3f = FO1.rel(EFO).dot(D[3])
nt = {Symbol('l1c'): l1c,
Symbol('l3c'): l3c,
Symbol('l1d'): l1d,
Symbol('l3d'): l3d,
Symbol('l1e'): l1e,
Symbol('l3e'): l3e,
Symbol('l1f'): l1f,
Symbol('l3f'): l3f}
# Using Meijaard's parameters
I_C_CO = Inertia(N, [IRxx, IRyy, IRxx, 0, 0, 0])
I_D_DO = Inertia(N, [IBxx, IByy, IBzz, 0, 0, IBxz])
I_E_EO = Inertia(N, [IHxx, IHyy, IHzz, 0, 0, IHxz])
I_F_FO = Inertia(N, [IFxx, IFyy, IFxx, 0, 0, 0])
# In plane Inertia of rear wheel
I_C_CO_p = Inertia(D, [IRxx, 0, IRxx, 0, 0, 0])
# In plane Inertia of front wheel
I_F_FO_p = Inertia(E, [IFxx, 0, IFxx, 0, 0, 0])
l1c, l3c, l1d, l3d, l1e, l3e, l1f, l3f = symbols('l1c l3c l1d l3d l1e l3e l1f\
l3f')
# Position from CDO to CO
CO_rel_CDO = Vector(l1c*D[1] + l3c*D[3])
# Position from CDO to DO
DO_rel_CDO = Vector(l1d*D[1] + l3d*D[3])
# Position from EFO to EO
EO_rel_EFO = Vector(l1e*E[1] + l3e*E[3])
# Position from EFO to FO
FO_rel_EFO = Vector(l1f*E[1] + l3f*E[3])
# Inertia of a particle, of mass mc, relative to CDO
I_CO_CDO = inertia_of_point_mass(mc, CO_rel_CDO, D)
# Parallel axis theorem for rear wheel, except out of plane inertia of wheel
I_C_CDO = I_C_CO_p + I_CO_CDO
# Inertia of a particle of mass md relative to the rear assembly mass center
I_DO_CDO = inertia_of_point_mass(md, DO_rel_CDO, D)
# Parallel axis theorem for rider
I_D_CDO = I_D_DO.express(D) + I_DO_CDO
I_CD_CDO = I_C_CDO + I_D_CDO
# Inertia of a particle, of mass me, relative to EFO
I_EO_EFO = inertia_of_point_mass(me, EO_rel_EFO, E)
# Parallel axis theorem for fork handlebar assembly
I_E_EFO = I_E_EO.express(E) + I_EO_EFO
# Inertia of a particle of mass mf relative to the front assembly mass center
I_FO_EFO = inertia_of_point_mass(mf, FO_rel_EFO, E)
# Parallel axis theorem for front wheel
I_F_EFO = I_F_FO_p + I_FO_EFO
I_EF_EFO = I_E_EFO + I_F_EFO
mcd = mc + md
mef = me + mf
output_eqns.append(Eq(Symbol('mcd'), mcd))
output_eqns.append(Eq(Symbol('mef'), mef))
output_eqns.append(Eq(Symbol('IC22'), IRyy))
ICD11 = dot(D[1], dot(I_CD_CDO, D[1]))
output_eqns.append(Eq(Symbol('ICD11'), ICD11))
ICD22 = dot(D[2], dot(I_CD_CDO, D[2]))
ICD13 = dot(D[1], dot(I_CD_CDO, D[3]))
output_eqns.append(Eq(Symbol('ICD13'), ICD13))
output_eqns.append(Eq(Symbol('ICD22'), ICD22))
ICD33 = dot(D[3], dot(I_CD_CDO, D[3]))
output_eqns.append(Eq(Symbol('ICD33'), ICD33))
ICD13 = dot(D[1], dot(I_CD_CDO, D[3]))
output_eqns.append(Eq(Symbol('ICD13'), ICD13))
IEF11 = dot(D[1], dot(I_EF_EFO, D[1]))
output_eqns.append(Eq(Symbol('IEF11'), IEF11))
IEF22 = dot(D[2], dot(I_EF_EFO, D[2]))
output_eqns.append(Eq(Symbol('IEF22'), IEF22))
IEF33 = dot(D[3], dot(I_EF_EFO, D[3]))
output_eqns.append(Eq(Symbol('IEF33'), IEF33))
IEF13 = dot(D[1], dot(I_EF_EFO, D[3]))
output_eqns.append(Eq(Symbol('IEF13'), IEF13))
output_eqns.append(Eq(Symbol('IF22'), IFyy))
ops = 0
for e in output_eqns:
print e
ops += e.rhs.count_ops()
print ops
params = N.declare_parameters('rr rrt rf rft lr ls lf l1 l2 l3 l4 mcd mef IC22\
ICD11 ICD22 ICD33 ICD13 IEF11 IEF22 IEF33 IEF13 IF22 g')
input = [w, c, lmbda, rr, rrt, rf, rft, xb, zb, xh, zh, mr, mb, mh, mf, IRxx, IRyy, IBxx, IByy, IBzz, IBxz, IHxx, IHyy, IHzz,
IHxz, IFxx, IFyy]
output_string = "from __future__ import division\n"
output_string += "from math import sin, cos\n\n"
output_string += generate_function("convert_params", output_eqns, input,
nested_terms=[nt])
print output_string
stop
file = open('convert_parameters.py', 'w')
file.write(output_string)
file.close()
|
#include "TStyle.h"
from ROOT import TPad, TStyle, kWhite, kTRUE, gPad
# tdrGrid: Turns the grid lines on (true) or off (false)
def tdrGrid(tdrStyle, gridOn):
tdrStyle.SetPadGridX(gridOn);
tdrStyle.SetPadGridY(gridOn);
# fixOverlay: Redraws the axis
def fixOverlay():
gPad.RedrawAxis();
def setTDRStyle():
tdrStyle = TStyle("tdrStyle","Style for P-TDR");
# For the canvas:
tdrStyle.SetCanvasBorderMode(0);
tdrStyle.SetCanvasColor(kWhite);
tdrStyle.SetCanvasDefH(600); #Height of canvas
tdrStyle.SetCanvasDefW(600); #Width of canvas
tdrStyle.SetCanvasDefX(0); #POsition on screen
tdrStyle.SetCanvasDefY(0);
# For the Pad:
tdrStyle.SetPadBorderMode(0);
# tdrStyle.SetPadBorderSize(Width_t size = 1);
tdrStyle.SetPadColor(kWhite);
tdrStyle.SetPadGridX(False);
tdrStyle.SetPadGridY(False);
tdrStyle.SetGridColor(0);
tdrStyle.SetGridStyle(3);
tdrStyle.SetGridWidth(1);
# For the frame:
tdrStyle.SetFrameBorderMode(0);
tdrStyle.SetFrameBorderSize(1);
tdrStyle.SetFrameFillColor(0);
tdrStyle.SetFrameFillStyle(0);
tdrStyle.SetFrameLineColor(1);
tdrStyle.SetFrameLineStyle(1);
tdrStyle.SetFrameLineWidth(1);
# For the histo:
# tdrStyle.SetHistFillColor(1);
# tdrStyle.SetHistFillStyle(0);
tdrStyle.SetHistLineColor(1);
tdrStyle.SetHistLineStyle(0);
tdrStyle.SetHistLineWidth(1);
# tdrStyle.SetLegoInnerR(Float_t rad = 0.5);
# tdrStyle.SetNumberContours(Int_t number = 20);
tdrStyle.SetEndErrorSize(2);
# tdrStyle.SetErrorMarker(20);
tdrStyle.SetErrorX(0.);
tdrStyle.SetMarkerStyle(20);
#For the fit/function:
tdrStyle.SetOptFit(1);
tdrStyle.SetFitFormat("5.4g");
tdrStyle.SetFuncColor(2);
tdrStyle.SetFuncStyle(1);
tdrStyle.SetFuncWidth(1);
#For the date:
tdrStyle.SetOptDate(0);
# tdrStyle.SetDateX(Float_t x = 0.01);
# tdrStyle.SetDateY(Float_t y = 0.01);
# For the statistics box:
tdrStyle.SetOptFile(0);
tdrStyle.SetOptStat(0); # To display the mean and RMS: SetOptStat("mr");
tdrStyle.SetStatColor(kWhite);
tdrStyle.SetStatFont(42);
tdrStyle.SetStatFontSize(0.025);
tdrStyle.SetStatTextColor(1);
tdrStyle.SetStatFormat("6.4g");
tdrStyle.SetStatBorderSize(1);
tdrStyle.SetStatH(0.1);
tdrStyle.SetStatW(0.15);
# tdrStyle.SetStatStyle(Style_t style = 1001);
# tdrStyle.SetStatX(Float_t x = 0);
# tdrStyle.SetStatY(Float_t y = 0);
# Margins:
tdrStyle.SetPadTopMargin(0.05);
tdrStyle.SetPadBottomMargin(0.13);
tdrStyle.SetPadLeftMargin(0.16);
tdrStyle.SetPadRightMargin(0.02);
# For the Global title:
tdrStyle.SetOptTitle(0);
tdrStyle.SetTitleFont(42);
tdrStyle.SetTitleColor(1);
tdrStyle.SetTitleTextColor(1);
tdrStyle.SetTitleFillColor(10);
tdrStyle.SetTitleFontSize(0.05);
# tdrStyle.SetTitleH(0); # Set the height of the title box
# tdrStyle.SetTitleW(0); # Set the width of the title box
# tdrStyle.SetTitleX(0); # Set the position of the title box
# tdrStyle.SetTitleY(0.985); # Set the position of the title box
# tdrStyle.SetTitleStyle(Style_t style = 1001);
# tdrStyle.SetTitleBorderSize(2);
# For the axis titles:
tdrStyle.SetTitleColor(1, "XYZ");
tdrStyle.SetTitleFont(42, "XYZ");
tdrStyle.SetTitleSize(0.06, "XYZ");
# tdrStyle.SetTitleXSize(Float_t size = 0.02); # Another way to set the size?
# tdrStyle.SetTitleYSize(Float_t size = 0.02);
tdrStyle.SetTitleXOffset(0.9);
tdrStyle.SetTitleYOffset(1.25);
# tdrStyle.SetTitleOffset(1.1, "Y"); # Another way to set the Offset
# For the axis labels:
tdrStyle.SetLabelColor(1, "XYZ");
tdrStyle.SetLabelFont(42, "XYZ");
tdrStyle.SetLabelOffset(0.007, "XYZ");
tdrStyle.SetLabelSize(0.05, "XYZ");
# For the axis:
tdrStyle.SetAxisColor(1, "XYZ");
tdrStyle.SetStripDecimals(kTRUE);
tdrStyle.SetTickLength(0.03, "XYZ");
tdrStyle.SetNdivisions(510, "XYZ");
tdrStyle.SetPadTickX(1); # To get tick marks on the opposite side of the frame
tdrStyle.SetPadTickY(1);
# Change for log plots:
tdrStyle.SetOptLogx(0);
tdrStyle.SetOptLogy(0);
tdrStyle.SetOptLogz(0);
# Postscript options:
tdrStyle.SetPaperSize(20.,20.);
# tdrStyle.SetLineScalePS(Float_t scale = 3);
# tdrStyle.SetLineStyleString(Int_t i, const char* text);
# tdrStyle.SetHeaderPS(const char* header);
# tdrStyle.SetTitlePS(const char* pstitle);
# tdrStyle.SetBarOffset(Float_t baroff = 0.5);
# tdrStyle.SetBarWidth(Float_t barwidth = 0.5);
# tdrStyle.SetPaintTextFormat(const char* format = "g");
# tdrStyle.SetPalette(Int_t ncolors = 0, Int_t* colors = 0);
# tdrStyle.SetTimeOffset(Double_t toffset);
# tdrStyle.SetHistMinimumZero(kTRUE);
tdrStyle.cd();
return tdrStyle
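# --- Hedged usage sketch (illustrative only; assumes a working PyROOT install) ---
if __name__ == '__main__':
    from ROOT import TCanvas, TH1F
    setTDRStyle()                       # installs the style (tdrStyle.cd())
    canvas = TCanvas('c1', 'c1', 600, 600)
    hist = TH1F('h1', ';x;entries', 50, 0.0, 1.0)
    hist.FillRandom('gaus', 1000)
    hist.Draw()
    canvas.SaveAs('tdr_example.png')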
|
import os
from ice.logs import logger
from ice import model
class ConsoleBackedObjectAdapter(object):
def __init__(self, emulators):
self.emulators = emulators
def new(self, backing_store, identifier):
fullname = identifier
shortname = backing_store.get(identifier, 'nickname', fullname)
extensions = backing_store.get(identifier, 'extensions', "")
custom_roms_directory = backing_store.get(identifier, 'roms directory', "")
prefix = backing_store.get(identifier, 'prefix', "")
icon = backing_store.get(identifier, 'icon', "")
images_directory = backing_store.get(identifier, 'images directory', "")
emulator_identifier = backing_store.get(identifier, 'emulator', "")
icon = os.path.expanduser(icon)
custom_roms_directory = os.path.expanduser(custom_roms_directory)
images_directory = os.path.expanduser(images_directory)
emulator = self.emulators.find(emulator_identifier)
return model.Console(
fullname,
shortname,
extensions,
custom_roms_directory,
prefix,
icon,
images_directory,
emulator,
)
def verify(self, console):
if console.emulator is None:
logger.debug("No emulator provided for console `%s`" % console.fullname)
return False
return True
def save_in_store(self, backing_store, identifier, console):
backing_store.set(identifier, 'nickname', console.shortname)
backing_store.set(identifier, 'extensions', console.extensions)
backing_store.set(identifier, 'roms directory', console.custom_roms_directory)
backing_store.set(identifier, 'prefix', console.prefix)
backing_store.set(identifier, 'icon', console.icon)
backing_store.set(identifier, 'images directory', console.images_directory)
backing_store.set(identifier, 'emulator', console.emulator.name)
|
class Adaptor:
""" Base adaptor for pubsub event system """
    # This key should match the key in the configs that this adaptor loads from
key = None
adaptors = {}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.adaptors[cls.key] = cls
def __init__(self, config={}):
self.config = config
def connect(self):
""" Authenticate to system and cache connections """
raise NotImplementedError()
def disconnect(self):
""" Close active connections and cleanup subscribers """
raise NotImplementedError()
def subscribe(self, topic, callback):
""" Listen on a topic and pass event data to callback """
raise NotImplementedError()
def unsubscribe(self, topic):
""" Stop listening for events on a topic """
raise NotImplementedError()
def publish(self, topic, data=None):
""" Publish an event on the topic """
        raise NotImplementedError()
    # No need to override the following unless necessary
def subscribe_once(self, topic, callback):
""" Subscribe to topic for only one event """
def handle_once(data):
""" Wrapper to unsubscribe after event handled """
self.unsubscribe(topic)
if callable(callback):
# Pass data to real callback
callback(data)
return self.subscribe(topic, handle_once)
    def get_message(self):
        """ Some protocols need to initiate a poll for new messages """
pass
# Import adaptors
from . import redis, mqtt
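# --- Hedged sketch (illustrative only, not one of the real adaptors above) ---
# A minimal in-process adaptor showing how the interface is meant to be filled
# in. The 'memory' key is hypothetical; defining the subclass auto-registers
# it in Adaptor.adaptors via __init_subclass__.
class MemoryAdaptor(Adaptor):
    key = 'memory'

    def connect(self):
        # No external system to authenticate against; just set up local state.
        self.subscribers = {}

    def disconnect(self):
        self.subscribers = {}

    def subscribe(self, topic, callback):
        self.subscribers.setdefault(topic, []).append(callback)

    def unsubscribe(self, topic):
        self.subscribers.pop(topic, None)

    def publish(self, topic, data=None):
        for callback in self.subscribers.get(topic, []):
            callback(data)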
|
from typing import Callable, Dict, Tuple
import editdistance
import numpy as np
import tensorflow as tf
from text_recognizer.datasets.emnist_lines import EmnistLinesDataset
from text_recognizer.datasets.sequence import DatasetSequence
from text_recognizer.models.base import Model
from text_recognizer.networks import line_cnn_sliding_window
def loss_ignoring_blanks(target, output):
"""This is categorical crossentropy, but with targets that correspond to the padding symbol not counting."""
import tensorflow.keras.backend as K
output /= tf.reduce_sum(output, -1, True)
_epsilon = tf.convert_to_tensor(K.epsilon(), output.dtype.base_dtype)
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
prod = target * tf.log(output)
# TODO ??
loss = - tf.reduce_sum(prod, -1)
return loss
class LineModel(Model):
def __init__(self, dataset_cls: type=EmnistLinesDataset, network_fn: Callable=line_cnn_sliding_window, dataset_args: Dict=None, network_args: Dict=None):
"""Define the default dataset and network values for this model."""
super().__init__(dataset_cls, network_fn, dataset_args, network_args)
def evaluate(self, x, y, verbose=True):
sequence = DatasetSequence(x, y)
preds_raw = self.network.predict_generator(sequence)
trues = np.argmax(y, -1)
preds = np.argmax(preds_raw, -1)
pred_strings = [''.join(self.data.mapping.get(label, '') for label in pred).strip(' |_') for pred in preds]
true_strings = [''.join(self.data.mapping.get(label, '') for label in true).strip(' |_') for true in trues]
char_accuracies = [
1 - editdistance.eval(true_string, pred_string) / len(true_string)
for pred_string, true_string in zip(pred_strings, true_strings)
]
if verbose:
sorted_ind = np.argsort(char_accuracies)
print("\nLeast accurate predictions:")
for ind in sorted_ind[:5]:
print(f'True: {true_strings[ind]}')
print(f'Pred: {pred_strings[ind]}')
print("\nMost accurate predictions:")
for ind in sorted_ind[-5:]:
print(f'True: {true_strings[ind]}')
print(f'Pred: {pred_strings[ind]}')
print("\nRandom predictions:")
for ind in np.random.randint(0, len(char_accuracies), 5):
print(f'True: {true_strings[ind]}')
print(f'Pred: {pred_strings[ind]}')
mean_accuracy = np.mean(char_accuracies)
return mean_accuracy
def predict_on_image(self, image: np.ndarray) -> Tuple[str, float]:
if image.dtype == np.uint8:
image = (image / 255).astype(np.float32)
pred_raw = self.network.predict(np.expand_dims(image, 0), batch_size=1).squeeze()
pred = ''.join(self.data.mapping[label] for label in np.argmax(pred_raw, axis=-1).flatten()).strip()
conf = np.min(np.max(pred_raw, axis=-1)) # The least confident of the predictions.
return pred, conf
# def loss(self):
# return loss_ignoring_blanks
|
import pyBigWig
import os
import sys
import numpy as np
import glob
def anchor (ref, ori): # input 1d np array
ref_new=ref.copy()
ref_new.sort()
ori_new=ori.copy()
ori_new[np.argsort(ori)]=ref_new[:]
return ori_new
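# Hedged worked example: anchor() is a rank transform, replacing each value in
# `ori` with the value of the same rank in `ref` (a quantile-normalisation step):
#   anchor(np.array([0., 1., 2., 3.]), np.array([10., 40., 20., 30.]))
#   -> array([0., 3., 1., 2.])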
chr_all=['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX']
num_bp=np.array([248956422,242193529,198295559,190214555,181538259,170805979,159345973,145138636,138394717,133797422,135086622,133275309,114364328,107043718,101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,156040895])
num_bp25=[9958257, 9687742, 7931823, 7608583, 7261531, 6832240, 6373839, 5805546, 5535789, 5351897, 5403465, 5331013, 4574574, 4281749, 4079648, 3613534, 3330298, 3214932, 2344705, 2577767, 1868400, 2032739, 6241636]
chr_len={}
for i in np.arange(len(chr_all)):
chr_len[chr_all[i]]=num_bp[i]
chr_len25={}
for i in np.arange(len(chr_all)):
chr_len25[chr_all[i]]=num_bp25[i]
# number of cells used to calculate avg
assay_all=['M01','M02','M16','M17','M18','M20','M22','M29']
tmp=[4,37,25,19,25,21,33,20]
dict_assay_count={}
for i in np.arange(len(assay_all)):
dict_assay_count[assay_all[i]]=tmp[i]
# number of models
model_all=['C_D','C_E','C_F','C_G','C_H','C_I', \
'CH_D','CH_E','CH_F','CH_G','CH_I', \
'CDEH_G','CDEH_I','DEFGHI_C', \
'DGH_C','DGH_F','DGH_I', \
'DGI_C','DGI_E','DGI_F','DGI_H', \
'F_C','F_D','F_E','F_G','F_H','F_I', \
'DGHKLMN_F','DGHKLMN_I','DGHK_F','DGHK_I','DGIK_E','DGIK_F','DGIK_H']
tmp=[15,11,15,11,22,12, \
15,11,14,11,12, \
9,9,9, \
11,18,17, \
11,15,16,17, \
15,20,16,18,20,17, \
7,6,11,11,11,10,11]
dict_model_count={}
for i in np.arange(len(model_all)):
dict_model_count[model_all[i]]=tmp[i]
path0='../data_challenge/baseline_avg_final/'
os.system('mkdir -p npy')
print(sys.argv)
id_all=sys.argv[1:]
for the_id in id_all:
print(the_id)
the_assay=the_id[3:]
the_cell=the_id[:3]
bw=pyBigWig.open(path0 + 'gold_anchored_' + the_assay + '.bigwig')
w1 = 1.0; w2 = 2.0; w3 = 1.0 # weights for avg, lgbm, nn (not used in this script; only the anchored average is saved below)
for the_chr in chr_all:
print(the_chr)
## 1. stack
# 1.1 avg
avg = np.array(bw.values(the_chr, 0, chr_len25[the_chr]))
## 3.1 save npy
np.save('./npy/pred25bp_' + the_id + '_' + the_chr, avg)
###################
|
#
# Copyright (C) 2018 Codethink Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
# Tiago Gomes <tiago.gomes@codethink.co.uk>
"""
Exceptions - API for Error Handling
===================================
This module contains some Enums used in Error Handling which are useful in
testing external plugins.
"""
from enum import Enum, unique
@unique
class ErrorDomain(Enum):
"""ErrorDomain
Describes what the error is related to.
"""
PLUGIN = 1
LOAD = 2
IMPL = 3
PLATFORM = 4
SANDBOX = 5
ARTIFACT = 6
PIPELINE = 7
UTIL = 8
SOURCE = 9
ELEMENT = 10
APP = 11
STREAM = 12
VIRTUAL_FS = 13
CAS = 14
PROG_NOT_FOUND = 15
REMOTE = 16
PROFILE = 17
class LoadErrorReason(Enum):
"""LoadErrorReason
Describes the reason why a :class:`.LoadError` was raised.
"""
MISSING_FILE = 1
"""A file was not found."""
INVALID_YAML = 2
"""The parsed data was not valid YAML."""
INVALID_DATA = 3
"""Data was malformed, a value was not of the expected type, etc"""
ILLEGAL_COMPOSITE = 4
"""An error occurred during YAML dictionary composition.
This can happen by overriding a value with a new differently typed
value, or by overwriting some named value when that was not allowed.
"""
CIRCULAR_DEPENDENCY = 5
"""A circular dependency chain was detected"""
UNRESOLVED_VARIABLE = 6
"""A variable could not be resolved. This can happen if your project
has cyclic dependencies in variable declarations, or, when substituting
a string which refers to an undefined variable.
"""
UNSUPPORTED_PROJECT = 7
"""The project requires an incompatible BuildStream version"""
UNSUPPORTED_PLUGIN = 8
"""Project requires a newer version of a plugin than the one which was
loaded
"""
EXPRESSION_FAILED = 9
"""A conditional expression failed to resolve"""
USER_ASSERTION = 10
"""An assertion was intentionally encoded into project YAML"""
TRAILING_LIST_DIRECTIVE = 11
"""A list composition directive did not apply to any underlying list"""
CONFLICTING_JUNCTION = 12
"""Conflicting junctions in subprojects"""
INVALID_JUNCTION = 13
"""Failure to load a project from a specified junction"""
SUBPROJECT_INCONSISTENT = 15
"""Subproject has no ref"""
INVALID_SYMBOL_NAME = 16
"""An invalid symbol name was encountered"""
MISSING_PROJECT_CONF = 17
"""A project.conf file was missing"""
LOADING_DIRECTORY = 18
"""Try to load a directory not a yaml file"""
PROJ_PATH_INVALID = 19
"""A project path leads outside of the project directory"""
PROJ_PATH_INVALID_KIND = 20
"""A project path points to a file of the not right kind (e.g. a
socket)
"""
RECURSIVE_INCLUDE = 21
"""A recursive include has been encountered"""
CIRCULAR_REFERENCE_VARIABLE = 22
"""A circular variable reference was detected"""
PROTECTED_VARIABLE_REDEFINED = 23
"""An attempt was made to set the value of a protected variable"""
INVALID_DEPENDENCY_CONFIG = 24
"""An attempt was made to specify dependency configuration on an element
which does not support custom dependency configuration"""
LINK_FORBIDDEN_DEPENDENCIES = 25
"""A link element declared dependencies"""
CIRCULAR_REFERENCE = 26
"""A circular element reference was detected"""
BAD_ELEMENT_SUFFIX = 27
"""
This warning will be produced when an element whose name does not end in .bst
is referenced either on the command line or by another element
"""
BAD_CHARACTERS_IN_NAME = 28
"""
This warning will be produced when a target's filename contains invalid
characters.
"""
|
from django.contrib import admin
from django.utils.translation import ugettext as _
from libya_elections.admin_models import LibyaAdminModel
from libya_elections.admin_site import admin_site
from text_messages.models import MessageText
from .models import Person, RegistrationCenter, Registration, SMS, Blacklist, Whitelist,\
Office, Constituency, SubConstituency
def national_id(reg):
return reg.citizen.national_id
class BlacklistAdmin(LibyaAdminModel):
list_display = ['phone_number', 'creation_date', 'modification_date']
search_fields = ["phone_number"]
readonly_fields = ['creation_date', 'modification_date']
class PersonAdmin(LibyaAdminModel):
list_display = ['citizen']
raw_id_fields = ['citizen']
class OfficeAdmin(LibyaAdminModel):
list_display = ['id', 'name_english', 'name_arabic', 'region']
search_fields = ['id', 'name_english', 'name_arabic']
class ConstituencyAdmin(LibyaAdminModel):
list_display = ['id', 'name_english', 'name_arabic']
search_fields = ['id', 'name_english', 'name_arabic']
class SubConstituencyAdmin(LibyaAdminModel):
list_display = ['id', 'name_english', 'name_arabic']
search_fields = ['id', 'name_english', 'name_arabic']
def delete_selected_except_copied_centers(modeladmin, request, queryset):
"""Custom admin action which checks to make sure user is not trying to delete a copied center.
If a copied center is selected, user gets a warning message and no centers are deleted.
"""
copied_ids = queryset.filter(copied_by__isnull=False).values_list('center_id', flat=True)
if copied_ids:
msg = _('The following centers are copied by other centers and cannot be deleted: {}. '
'No centers were deleted.')
modeladmin.message_user(request, msg.format(copied_ids))
else:
return admin.actions.delete_selected(modeladmin, request, queryset)
class RegistrationCenterAdmin(LibyaAdminModel):
list_display = ['center_id', 'name', 'reg_open', 'office',
'constituency', 'subconstituency']
list_filter = ['reg_open', 'center_type', 'office', 'constituency', 'subconstituency']
search_fields = ["center_id", "name"]
readonly_fields = ['copied_by_these_centers']
date_hierarchy = 'creation_date'
def copied_by_these_centers(self, instance):
centers = ', '.join([str(center.center_id) for center in instance.copied_by.all()])
return centers or _("No copies")
def get_actions(self, request):
actions = super(RegistrationCenterAdmin, self).get_actions(request)
if 'delete_selected' in actions:
# Replace it with our version
actions['delete_selected'] = (
delete_selected_except_copied_centers,
'delete_selected',
_('Permanently delete selected %(verbose_name_plural)s.')
)
return actions
def get_readonly_fields(self, request, obj=None):
"""
Don't allow changes to copy centers.
"""
# Make sure we make a modifiable copy of the readonly fields to work with
readonly_fields = list(super(RegistrationCenterAdmin, self).get_readonly_fields(
request, obj))
if obj:
if obj.copy_of:
# Copy centers are not editable, so mark all fields (except 'deleted') read-only
return [field.name for field in obj._meta.local_fields
if field.editable and not field.name == 'deleted']
if obj.has_copy:
# Copied centers can't be deleted, so mark 'deleted' read-only
if 'deleted' not in readonly_fields:
readonly_fields.append('deleted')
# 'copy_of' can only be set initially, not while editing
if 'copy_of' not in readonly_fields:
readonly_fields.append('copy_of')
return readonly_fields
def has_delete_permission(self, request, obj=None):
"""Overridden to prevent deletion of RegistrationCenters that have copies."""
delete_permission = super(RegistrationCenterAdmin, self).has_delete_permission(request, obj)
if obj and isinstance(obj, RegistrationCenter):
return not obj.has_copy
else:
return delete_permission
# See
# docs.djangoproject.com/en/dev/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
# for doc on this class
class ArchivedListFilter(admin.SimpleListFilter):
title = _('archived')
parameter_name = 'arc'
def lookups(self, request, model_admin):
return (
('1', _('Yes')),
('0', _('No')),
)
def queryset(self, request, queryset):
if self.value() == '0':
return queryset.filter(archive_time=None)
if self.value() == '1':
return queryset.exclude(archive_time=None)
class RegistrationAdmin(LibyaAdminModel):
list_display = ['citizen', national_id, 'registration_center', 'archive_time']
list_display_links = [national_id]
list_filter = [ArchivedListFilter]
raw_id_fields = ['citizen', 'registration_center', 'sms']
search_fields = ["registration_center__center_id", "registration_center__name"]
class SMSAdmin(LibyaAdminModel):
list_display = ['creation_date', 'from_number', 'direction', 'to_number',
'citizen', 'carrier', 'msg_type', 'message_code', 'message']
raw_id_fields = ['citizen', 'in_response_to']
search_fields = ['from_number', 'to_number', 'carrier__name', 'msg_type', 'message']
def get_list_display(self, *args, **kwargs):
# Initialize the choices on the message_code field
# We don't do it in the model def because the values are only
# defined in the database, and we don't do it unless/until we need
# to admin the SMS model because otherwise Django migrations think
# the SMS message codes keep changing every time someone with
# different data in their database runs it. We wait until the
# admin calls get_list_display() to be sure someone is in the admin,
# since it's only in the admin that it matters at all whether these
# choices are defined.
if not SMS._meta.get_field('message_code').choices:
message_code_choices = [
(msg.number, msg.label) for msg in MessageText.objects.all()
]
SMS._meta.get_field('message_code').choices = message_code_choices
return super(SMSAdmin, self).get_list_display(*args, **kwargs)
class WhiteListAdmin(LibyaAdminModel):
list_display = ['phone_number', 'creation_date', 'modification_date']
search_fields = ["phone_number"]
readonly_fields = ['creation_date', 'modification_date']
admin_site.register(Blacklist, BlacklistAdmin)
admin_site.register(Person, PersonAdmin)
admin_site.register(Office, OfficeAdmin)
admin_site.register(Constituency, ConstituencyAdmin)
admin_site.register(SubConstituency, SubConstituencyAdmin)
admin_site.register(RegistrationCenter, RegistrationCenterAdmin)
admin_site.register(Registration, RegistrationAdmin)
admin_site.register(SMS, SMSAdmin)
admin_site.register(Whitelist, WhiteListAdmin)
|
from PIL import Image, ImageDraw
import sys
RAD = 5
def translate(value, coordMin, coordMax, mapMin, mapMax):
# Figure out how 'wide' each range is
coordSpan = coordMax - coordMin
mapSpan = mapMax - mapMin
# Convert the left range into a 0-1 range (float)
valueScaled = float(value - coordMin) / float(coordSpan)
# Convert the 0-1 range into a value in the right range.
return mapMin + (valueScaled * mapSpan)
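# Quick illustration (arbitrary values, not from the source): mapping 5 from the
# range [0, 10] onto the range [0, 100] gives translate(5, 0, 10, 0, 100) == 50.0.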
xy = [float(sys.argv[1]), float(sys.argv[2])]
imageFile = "code.jpg"
im = Image.open(imageFile)
draw = ImageDraw.Draw(im)
x = translate(xy[1], 0, 11.31, 0, im.size[0])
y = translate(xy[0], 0, 9.4, 0, im.size[1])
draw.ellipse([x-RAD, y-RAD, x+RAD, y+RAD], fill=128)
del draw
im.save('location.png')
im.show()
|
import collections
class Markov:
def __init__(self):
self.probabilties = {}
self.data = " A Pubg Tournament . A Mumbai Hackathon . An Amazing Mumbai Hacking Tournament . Amazing #MumbaiHackathon2019 . Amazing Night . A Mumbai Breakfast ."
self.markov()
self.calculate_predictions(self.probabilties)
def prob(self, probabilties, key, value):
if key not in self.probabilties:
self.probabilties[key] = []
self.probabilties[key].append(value)
def markov(self):
tokens = self.data.strip().split(" ")
for i in range(len(tokens)-1):
self.prob(self.probabilties, tokens[i], tokens[i+1])
def calculate_predictions(self,probabilties):
for key in probabilties:
self.probabilties[key] = collections.Counter(probabilties[key]).most_common()
self.probabilties[key] = [i[0] for i in probabilties[key]]
def markov_here(self,t):
print(self.probabilties)
#print(probabilties)
if t in self.probabilties:
top = self.probabilties[t][:3]
return top
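# A minimal usage sketch (not part of the original class): build the chain from the
# hard-coded sample text and query the most common follow-up words for a token.
if __name__ == "__main__":
    chain = Markov()
    # markov_here() prints the whole table, then returns up to 3 most common successors
    print(chain.markov_here("A"))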
|
import sys
import re
def decompress(data):
decompressed = ""
d = ''.join(data.split('\n'))
while len(d):
s = re.match(r'^.*?(\((\d+?)x(\d+?)\))(.*)', d)
if s is None:
decompressed += d
break
dec, marker, d = d.partition(s.group(1))
decompressed += dec
# repeat the next group(2) characters group(3) times, then move past them
segment = d[:int(s.group(2))]
decompressed += segment * int(s.group(3))
d = d[int(s.group(2)):]
return decompressed
def dec(d):
length = 0
while len(d):
s = re.match(r'^.*?(\((\d+?)x(\d+?)\))(.*)', d)
if s is None:
return length + len(d)
decomp, marker, d = d.partition(s.group(1))
length += len(decomp) + int(s.group(3)) * dec(d[:int(s.group(2))])
d = d[int(s.group(2)):]
return length
def full_decompress(data):
length = 0
d = ''.join(data.split('\n'))
return dec(d)
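# Hand-checked sanity values for the two routines above (illustrative inputs):
#   decompress("ADVENT")     == "ADVENT"      (length 6)
#   decompress("A(1x5)BC")   == "ABBBBBC"     (length 7)
#   decompress("(3x3)XYZ")   == "XYZXYZXYZ"   (length 9)
#   dec("X(8x2)(3x3)ABCY")   == 20            (fully expanded length, markers applied recursively)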
def main():
if (len(sys.argv) < 2):
print("Usage python3 %s <input>" % sys.argv[0])
exit(-1)
with open(sys.argv[1], 'r') as input:
data = input.read()
print("Char count:", len(decompress(data)))
print("Char count:", full_decompress(data))
if __name__ == '__main__':
main()
|
# coding=utf-8
import unittest,time,os
from selenium import webdriver
from shadon.log import logger
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from shadon.global_control import Global_control
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
class Publish_goods(unittest.TestCase):
'''Seller publishes a product, takes it off the shelf, and deletes it'''
def setUp(self):
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
self.driver = webdriver.Chrome(executable_path=r'C:\python36\Scripts\chromedriver.exe', options=chrome_options)
# chrome_options.set_headless()  # run Chrome headless; works on both Windows and Linux with the matching parameters
# self.driver = webdriver.Chrome(options=chrome_options)  # create the headless Chrome instance
self.base_url = "https://www.eelly.com/index.php?app=goods&act=addGoodsIndex"  # production URL
logger.info("calling setUp")
self.driver.set_window_size(1920, 1080)  # window size
self.judge = False  # records whether the script reached the assertion; if it did not, the result is set to False and the system emails the people concerned
self.Ins = Global_control()  # instantiate the imported class so that its methods can be called from this module
def login(self):
'''Log in to eelly.com successfully'''
logger.info('start calling the login method')
self.driver.get(self.base_url)
self.driver.maximize_window()
WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.ID,'account_login')))
self.driver.find_element_by_id("account_login").send_keys("molimoq")
self.driver.find_element_by_id("password").send_keys("ely@95zz")
self.driver.find_element_by_id("submit_login").click()
logger.info("login succeeded")
def publish_new_goods(self):
'''Publish a new product'''
logger.info('start calling the publish_new_goods method')
WebDriverWait(self.driver, 30,1).until(EC.visibility_of_element_located((By.CLASS_NAME, 'a-wrap1')))
# mouse = self.driver.find_element_by_xpath("html/body/div[3]/div/div/ul/li[3]/a")
# action = ActionChains(self.driver)
# action.move_to_element(mouse).perform()  # move to the element; shows "Mouse moved"
# time.sleep(2)
# self.driver.find_element_by_class_name("J_newCommodity").click()  # click the "publish new product" button
# handles = self.driver.window_handles  # get all current window handles
# self.driver.switch_to.window(handles[1])  # switch the driver to the newly opened window
try:
#self.driver.find_element_by_xpath("html/body/div[4]/div/div/a[2]").is_displayed() # ๅคๆญๆฏๅฆๅญๅจ
self.driver.find_element_by_xpath("//a[@class='anew']").click() # ๅญๅจ็นๅป้ๆฐๅผๅง
except:
logger.info("้กต้ขไธๅจ่ฏฅ้กต้ข๏ผไธ็จ็นๅป้ๆฐๅผๅง")
WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.ID,'J_goods_content')))
logger.info("ๅผๅง่พๅ
ฅๅๅๆฐๆฎ๏ผ")
#self.driver.find_element_by_xpath("//input[@id='J_goods_content']").send_keys("10086") #่พๅ
ฅ่ดงๅท
self.driver.find_element_by_id("J_goods_content").send_keys("10086") #่พๅ
ฅ่ดงๅท
self.driver.find_element_by_id("J_goods_name").send_keys("่ชๅจๅๆต่ฏ") # ่พๅ
ฅๆ ้ข
self.driver.find_element_by_id("J_stock0").send_keys("999999") # ่พๅ
ฅๅบๅญๆฐ้
self.driver.find_element_by_id("J_inventory_num").click() #ๅพ้ๅ
จ้จ็ธๅ
logger.info("ๅผๅงไธไผ ๅพ็")
WebDriverWait(self.driver, 30, 1).until(EC.visibility_of_element_located((By.ID, 'upimg_0')))
#self.driver.find_element_by_xpath("//*[starts-with(@id,'rt_rt_1c29')]").click()
#self.driver.find_element_by_name("file").send_keys(r'D:\function_test\config\dev\publish_goods_test.png') #็ปๅฏน่ทฏๅพ
case_path = os.path.dirname(__file__) + "/../config/dev" #่ทๅๅพ็็ธๅฏน่ทฏๅพ
case_path = os.path.abspath(case_path + "/publish_goods_test.png")
time.sleep(2)
self.driver.find_element_by_xpath("//input[@name='file']").is_displayed()
logger.info(case_path)
#self.driver.find_element_by_xpath("//input[@name='file']").send_keys(case_path)
try:
self.driver.find_element_by_name("file").send_keys(case_path)
except:
self.driver.find_element_by_name("file").send_keys(r'/data/web/function_test/config/dev/publish_goods_test.png')
logger.info("upload image is ok")
time.sleep(3)
self.driver.find_element_by_xpath(".//*[@id='J_step6']/div/div[3]/div/div[1]/label[2]/input").click() #ๅปๆๅบๅ
ๆจ่
# WebDriverWait(self.driver,30,2).until(EC.visibility_of_element_located((By.ID,'J_release')))
self.driver.find_element_by_xpath("//div[@id='J_release']").click();
self.driver.find_element_by_id("J_release").click() #็นๅปๅๅธๆ้ฎ
logger.info('onclick')
time.sleep(2)
WebDriverWait(self.driver,30,2).until(EC.visibility_of_element_located((By.XPATH,'html/body/div[3]/div[1]/p[1]')))
# self.result = self.driver.find_element_by_xpath("html/body/div[3]/div[1]/p[1]").text
self.result = self.driver.find_element_by_xpath("//*[@class='text_succeed']").text
logger.info(self.result)
self.assertEqual(self.result, "ๅๅธๆๅ") #ๆญ่จๆฏๅฆๆๅ
def sold_out(self):
'''Take the product off the shelf'''
logger.info('start calling the sold_out method')
self.driver.find_element_by_class_name("go_manage").click()  # click the product-management button
WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.XPATH,".//*[@id='Js_page_ul']/li[3]/a")))  # wait for the page
self.driver.find_element_by_id("foggy_search").send_keys("10086")  # enter the item number to search for
self.driver.find_element_by_id("foggy_search_button").click()  # click the search button
WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.XPATH,".//*[@id='goods_list']/tbody/tr[1]/td[2]/p")))  # wait for the search results
time.sleep(5)
self.driver.find_element_by_id("J_AllSelector").click()  # tick the select-all checkbox
self.driver.find_element_by_name("if_show").click()  # click the take-down button
logger.info("take-down succeeded")
def delete_goods(self):
'''Delete the newly added product'''
logger.info('start calling the delete_goods method')
self.base_url1 = "https://www.eelly.com/index.php?app=seller_member"  # production URL
self.driver.get(self.base_url1)
# WebDriverWait(self.driver,30,2).until(EC.visibility_of_element_located((By.XPATH,".//*[@id='goods_list']/tbody/tr/td")))
WebDriverWait(self.driver, 30, 1).until(EC.visibility_of_element_located((By.XPATH, ".//*[@id='Js_set_ul']/li[5]/a")))
time.sleep(3)
self.driver.find_element_by_xpath(".//*[@id='Js_set_ul']/li[5]/a").click() #็นๅปๅทฒไธๆถๅๅ
time.sleep(2)
WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.ID,'foggy_search')))
self.driver.find_element_by_id("foggy_search").clear()
self.driver.find_element_by_id("foggy_search").send_keys("10086") #่พๅ
ฅ่ดงๅท
self.driver.find_element_by_id("foggy_search_button").click() #็นๅปๆ็ดขๅๅ
logger.info("ๆ็ดขๅบไบไธๆถๅๅ๏ผๅๅคๅ ้ค......")
time.sleep(3)
self.driver.find_element_by_id("J_AllSelector").click() #ๅพ้ๅ
จ้ๆก
self.driver.execute_script("window.confirm = function(msg) { return true; }") # ๅ
ผๅฎนphantomjs
self.driver.find_element_by_xpath("html/body/div[4]/div[3]/div/div[3]/div[1]/div[1]/a[3]").click() #็นๅปๅ ้คๆ้ฎ
#็ฑไบphantomjsไธๆฏๆๅผน็ช๏ผๆไปฅๆ ๆณไฝฟ็จ
#alert = self.driver.switch_to_alert() #ๅๆขๅฐalertๅผนๅบๆก
#alert.accept() #็นๅป็กฎ่ฎคๆ้ฎ
logger.info("ๅ ้คๆๅ")
time.sleep(2)
WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.XPATH,".//*[@id='Js_set_ul']/li[5]/a")))
try:
self.judge = True
WebDriverWait(self.driver, 30, 1).until(EC.visibility_of_element_located((By.XPATH, "html/body/div[4]/div[3]/div/div[2]/div/span/i")))
self.result = self.driver.find_element_by_xpath("html/body/div[4]/div[3]/div/div[2]/div/span/i").text
self.assertEqual(self.result, '0')  # assert success: the product count should be 0
except AssertionError:
Global_control.Run_result = False
logger.info("ๆญ่จๅผๅธธ")
self.Ins.screen_shot() # ่ฟ่กๅคๆญ๏ผ็ๆชๅพๆไปถๅคนๆฏๅฆๅๅปบ๏ผๅๅปบๅ่ทณ่ฟ๏ผๅฆๅๅๅปบๆไปถๅคน
self.driver.get_screenshot_as_file("u"+(Global_control.Screen_path + "/" + "่กฃ่็ฝๅ ้คๅๅๅคฑ่ดฅ"+ ".png"))
raise "ๆต่ฏๅบ็ฐ้่ฏฏ๏ผ้่ฆๅ้้ฎไปถ"
def tearDown(self):
'''Close the browser'''
if self.judge != True:
logger.info("add goods test is False")
Global_control.Run_result = False  # extra check to cover the case where the script never reached the assertion and no exception was raised
self.Ins.screen_shot()  # check whether the screenshot folder exists; skip if it does, otherwise create it
self.driver.get_screenshot_as_file(Global_control.Screen_path + "/" + "eelly_publish_new_goods_failed" + ".png")
self.driver.quit()
def test_demo(self):
# every method this test case needs is invoked from here, in order
'''login: log in to eelly.com; publish_new_goods: publish a new product; sold_out: take it off the shelf; delete_goods: delete the newly added product'''
Publish_goods.login(self)
Publish_goods.publish_new_goods(self)
Publish_goods.sold_out(self)
Publish_goods.delete_goods(self)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(Publish_goods)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import os
import host
class View:
# ----------------------------------------------------------------------
def __init__(self, game):
self.game = game
# ----------------------------------------------------------------------
def makeMaps(self):
# --------------------------------------------------------
# If the map is marked as text only, make no graphical map
# --------------------------------------------------------
if self.game.map.textOnly: return
# ----------------------------------------------------------
# Get a list of all the different powers for whom we need to
# make maps. In a BLIND game, the powers see separate maps.
# ----------------------------------------------------------
if 'BLIND' in self.game.rules and self.game.phase != 'COMPLETED':
maps = [(x, '.' + y + `hash(z)`)
for x, y, z in [('MASTER', 'M', self.game.password)] +
[(x.name, x.abbrev or 'O', (x.password or self.game.password) + x.name)
for x in self.game.powers if (not x.type or x.omniscient)]]
else: maps = [(None, '')]
for viewer, pwd in maps:
# -------------------------------------------------
# Make a complete "season-by-season" PostScript map
# (putting the file into the maps subdirectory)
# -------------------------------------------------
self.makePostScriptMap(viewer, pwd)
# --------------------------------------
# Make .gif files from the last pages of
# the PostScript map that was just made.
# --------------------------------------
self.makeGifMaps(pwd)
# -------------
# Make .pdf map
# -------------
self.makePdfMaps(pwd)
# ----------------------------------------------------------------------
def makePostScriptMap(self, viewer = 0, password = ''):
import DPmap
fileName = host.gameMapDir + '/' + self.game.name + password
for ext in ['.ps', '.pdf', '.gif', '_.gif', '_.pdf']:
try: os.unlink(fileName + ext)
except: pass
map = DPmap.PostScriptMap(host.packageDir + '/' +
self.game.map.rootMapDir + '/' + self.game.map.rootMap, self.game.file('results'),
host.gameMapDir + '/' + self.game.name + password + '.ps', viewer)
os.chmod(fileName + '.ps', 0666)
self.game.error += map.error
# ----------------------------------------------------------------------
def makeGifMaps(self, password = '', pages = None):
import DPimage
# ------------------------------------------------------------------
# Make .gif files from the last page(s) of the .ps map for the game.
# ------------------------------------------------------------------
DPimage.ImageView(self.game.map, None, host.toolsDir, host.gameMapDir, host.imageResolution).extract(
self.game.name + password, pages or [-1, 0])
# ----------------------------------------------------------------------
def makePdfMaps(self, password = ''):
import DPghost
# ---------------------------------------------------------
# Make a .pdf file with the final page(s) from the .ps file
# ---------------------------------------------------------
psFileName, params = host.gameMapDir + '/' + self.game.name + password + '.ps', []
if self.game.map.papersize: params += [('sPAPERSIZE', self.game.map.papersize)]
if host.usePDFMark:
# ----------------------------------------------------------
# All maps already have their bbox altered to fit on a page.
# ----------------------------------------------------------
params += ['dDPghostPageSizeBBox', 'dDPghostUndoBBox']
# ----------------------------------------
# Add more parameters before this comment.
# ----------------------------------------
# -----------------------------------------------------------------
# (We could run psselect -_2-_1 xx.ps 2>/dev/null > tmp.ps and then
# run the ps2pdf on the tmp.ps file, but we now pdf the full game.)
# -----------------------------------------------------------------
ghost = DPghost.GhostScript(pdfFileMode = 0666, ps2pdfDir = host.toolsDir)
if host.usePDFMark:
ghost.markForms(psFileName, pdfParams=params)
else:
ghost.ps2pdf(psFileName, pdfParams=params)
|
from collections import deque
import threading
class _EventQueue:
def __init__(self) -> None:
self.__deque = deque()
self.__rlock = threading.RLock()
def clear(self):
self.__rlock.acquire()
self.__deque.clear()
self.__rlock.release()
def empty(self) -> bool:
self.__rlock.acquire()
empty = len(self.__deque) == 0
self.__rlock.release()
return empty
def pushCallback(self, fn):
self.__rlock.acquire()
self.__deque.append(fn)
self.__rlock.release()
return self
def getCallback(self):
self.__rlock.acquire()
try:
return self.__deque.popleft()
except IndexError:
return None
finally:
self.__rlock.release()
def pushHeadCallback(self, fn):
self.__rlock.acquire()
self.__deque.appendleft(fn)
self.__rlock.release()
return self
def getTailCallback(self):
self.__rlock.acquire()
try:
return self.__deque.pop()
except IndexError:
return None
finally:
self.__rlock.release()
class EventQueueManager:
__eventQueueRLock = threading.RLock()
__currentEventQueue = _EventQueue()
@staticmethod
def getCurrentEventQueue():
EventQueueManager.__eventQueueRLock.acquire()
try:
return EventQueueManager.__currentEventQueue
finally:
EventQueueManager.__eventQueueRLock.release()
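# A minimal usage sketch (illustrative only, not part of the original API surface):
# callbacks pushed to the tail run in FIFO order, while pushHeadCallback jumps the queue.
if __name__ == "__main__":
    queue = EventQueueManager.getCurrentEventQueue()
    queue.pushCallback(lambda: print("second"))
    queue.pushHeadCallback(lambda: print("first"))  # placed at the head
    while not queue.empty():
        callback = queue.getCallback()  # pops from the head
        callback()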
|
from setuptools import setup
# Based on
# https://python-packaging.readthedocs.io/en/latest/minimal.html
def readme():
with open('README.md','r') as fr:
return fr.read()
setup(name='docker_machinator',
version='0.1',
description='A tool for managing docker machines from multiple '
'workstations',
long_description=readme(),
entry_points={
'console_scripts': [
'dmachinator = docker_machinator.dmachinator:main',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Topic :: Security',
],
keywords='docker machine dmachinator secure on-disk',
url='https://github.com/realcr/docker_machinator',
author='real',
author_email='real@freedomlayer.org',
license='MIT',
packages=['docker_machinator'],
install_requires=[
'sstash',
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
zip_safe=False)
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import vas.shared.Installations
from vas.util.LinkUtils import LinkUtils
class Installations(vas.shared.Installations.Installations):
"""Used to enumerate, create, and delete tc Server installations
:ivar `vas.shared.Security.Security` security: The resource's security
"""
def __init__(self, client, location):
super(Installations, self).__init__(client, location, Installation)
class Installation(vas.shared.Installations.Installation):
"""A tc Server installation
:ivar `vas.tc_server.Groups.Group` group: The group that contains the
installation
:ivar `vas.tc_server.InstallationImages.InstallationImage` installation_image: The installation image that was used
to create the installation
:ivar list instances: The instances that are using the
installation
:ivar list runtime_versions: The versions of the tc Server
runtime that are supported by the
installation
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar `vas.tc_server.Templates.Templates` templates: The installation's templates
:ivar str version: The installation's version
"""
__templates = None
@property
def instances(self):
self.__instances = self.__instances or self._create_resources_from_links('group-instance', Instance)
return self.__instances
@property
def runtime_versions(self):
return self.__runtime_versions
@property
def templates(self):
self.__templates = self.__templates or Templates(self._client, self.__templates_location)
return self.__templates
def __init__(self, client, location):
super(Installation, self).__init__(client, location, InstallationImage, Group)
self.__runtime_versions = self._details['runtime-versions']
self.__templates_location = LinkUtils.get_link_href(self._details, 'templates')
def reload(self):
"""Reloads the installation's details from the server"""
super(Installation, self).reload()
self.__instances = None
from vas.tc_server.Groups import Group
from vas.tc_server.InstallationImages import InstallationImage
from vas.tc_server.Instances import Instance
from vas.tc_server.Templates import Templates
|
"""
This module implements Langevin Dynamics (LD) -based samplers for NNs.
Radford M Neal. โBayesian Learning for Neural Networksโ. In: PhD thesis, University of Toronto. 1995.
"""
import numpy as np
import tensorflow as tf
from models.mcmc_sampler import MCMC_sampler
class LDSampler(MCMC_sampler):
"""
Langevin Dynamics (LD) sampler for NNs.
"""
def __init__(self, **kwargs):
""" Creates a new LDSampler object. """
# set parameters restricted by LD
kwargs['seek_step_sizes'] = False
super().__init__(**kwargs)
self.sampler_type = 'LD'
def _construct_transition_step(self):
""" Constructs LD general transition step. """
initial_position = self._position
# gradients of likelihood and prior
dL = self._d_log_likelihood(initial_position)
dW = self._d_log_prior(initial_position)
# compute gradient and noise steps
gradient_step, noise_step = self._compute_ld_step_components(dL, dW)
# update position (take the step)
if self.fade_in_velocities:
noise_step *= self._burn_in_ratio
self._updated_position = initial_position - gradient_step + noise_step
def _compute_ld_step_components(self, dL, dW):
""" Computes gradient and noise components. """
# generate noise
noise_stddev = tf.sqrt(2. * self._current_step_size)
noise = tf.random_normal(self.position_shape)
noise_step = self._transpose_mul(noise, noise_stddev)
# calculate gradient step
gradient = dL + dW
gradient_step = self._transpose_mul(gradient, self._current_step_size)
self._debug_update = gradient
return gradient_step, noise_step
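# Written out, the update assembled from the two methods above is
#     theta' = theta - eps * (dL + dW) + sqrt(2 * eps) * xi,   xi ~ N(0, I)
# where eps is the current step size; the injected noise term is what separates
# Langevin dynamics from plain gradient descent.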
def _adjust_step_size(self, step_size):
""" Brings scale to that of HMC samplers. """
step_size = step_size ** 2 / 2
return step_size
class SGLDSampler(LDSampler):
"""
Stochastic Gradient Langevin Dynamics (SGLD) sampler for NNs.
http://people.ee.duke.edu/~lcarin/398_icmlpaper.pdf
"""
def __init__(self, **kwargs):
""" Creates a new SGLDSampler object. """
super().__init__(**kwargs)
self.sampler_type = 'SGLD'
# effectively LD since the likelihood part of the target function is already adjusted for the batch size
class pSGLDSampler(LDSampler):
"""
Preconditioned Stochastic Gradient Langevin Dynamics (pSGLD) sampler for NNs.
https://arxiv.org/abs/1512.07666
"""
def __init__(self, preconditioned_alpha=0.99, preconditioned_lambda=1e-05, adjust_steps=False, **kwargs):
super().__init__(**kwargs)
self.sampler_type = 'pSGLD'
self.preconditioned_alpha = preconditioned_alpha
self.preconditioned_lambda = preconditioned_lambda
self.adjust_steps = adjust_steps
def __repr__(self):
s = super().__repr__()
#s += f'Preconditioned alpha: {self.preconditioned_alpha}\n'
#s += f'Preconditioned lambda: {self.preconditioned_lambda}\n'
return s
def _create_feeds(self):
""" Adds preconditioned values to the graph. """
super()._create_feeds()
self._preconditioned_v_value = np.zeros(shape=self.position_shape, dtype=np.float32)
self._preconditioned_v = tf.placeholder(tf.float32, shape=self.position_shape)
self._feed_dict[self._preconditioned_v] = lambda: self._preconditioned_v_value
# def _adjust_step_size(self, step_size):
# """ Adjust step_size for total curvature correction effect. """
# step_size = super()._adjust_step_size(step_size)
#
# if not self.adjust_steps:
# return step_size
#
# # p_avg_effect = 1. / (self.preconditioned_lambda + self._preconditioned_v_value ** .5)
# # p_avg_effect = p_avg_effect.min() ** .5
# # p_avg_effect = 1. / p_avg_effect
# # p_avg_effect = max(1., min(p_avg_effect, 100.))
# # step_size *= p_avg_effect
#
# return step_size
# def _update_values(self, update_dict):
# """ Updates preconditioned values. """
# self._preconditioned_v_value = update_dict[self._updated_preconditioned_v]
def _compute_ld_step_components(self, dL, dW):
""" Computes gradient and noise components. """
# update average gradient
avg_gradient = dL / self.train_size
avg_gradient **= 2
# is_increase = tf.to_float(avg_gradient > self._preconditioned_v)
# is_increase = 0.00 * is_increase + (1. - is_increase)
# is_increase = 1. # TODO: currently disabled
#
# preconditioned_v = is_increase * self.preconditioned_alpha * self._preconditioned_v + \
# (1. - is_increase * self.preconditioned_alpha) * avg_gradient
preconditioned_v = self.preconditioned_alpha * self._preconditioned_v + \
(1. - self.preconditioned_alpha) * avg_gradient
self._fetch_dict['_preconditioned_v_value'] = preconditioned_v
# calculate preconditioning matrix
g = 1. / (self.preconditioned_lambda + tf.sqrt(preconditioned_v))
# generate step noise
noise_stddev = tf.sqrt(2. * self._transpose_mul(g, self._current_step_size))
noise_step = noise_stddev * tf.random_normal(self.position_shape)
# calculate gradient step
gradient = dL + dW
gradient_step = self._transpose_mul(g * gradient, self._current_step_size)
return gradient_step, noise_step
|
import matplotlib.pyplot as plt
x = [1, 2, 3, 4]
y = [1, 2, 3, 4]
plt.plot(x, y, 'o')
plt.show()
|
import argparse
def load_args():
# HyperGAN args
parser = argparse.ArgumentParser(description='param-wgan')
parser.add_argument('-z', '--z', default=128, type=int, help='latent space width')
parser.add_argument('-ze', '--ze', default=256, type=int, help='encoder dimension')
parser.add_argument('-g', '--gp', default=10, type=int, help='gradient penalty')
parser.add_argument('-b', '--batch_size', default=20, type=int)
parser.add_argument('-e', '--epochs', default=200000, type=int)
parser.add_argument('-s', '--model', default='mednet', type=str)
parser.add_argument('-d', '--dataset', default='cifar', type=str)
parser.add_argument('--beta', default=1., type=float)
parser.add_argument('--resume', default=False, type=bool)
parser.add_argument('--use_x', default=False, type=bool, help='sample from real layers')
parser.add_argument('--pretrain_e', default=False, type=bool)
parser.add_argument('--n_actions', default=6, type=int)
parser.add_argument('--use_d', default=False, type=int)
# A3C args
parser.add_argument('--env', default='PongDeterministic-v4', type=str, help='')
parser.add_argument('--env1', default='PongDeterministic-v4', type=str, help='')
parser.add_argument('--env2', default='Breakout-v0', type=str, help='')
parser.add_argument('--processes', default=1, type=int, help='')
parser.add_argument('--render', default=False, type=bool, help='')
parser.add_argument('--test', default=False, type=bool, help='')
parser.add_argument('--rnn_steps', default=20, type=int, help='')
parser.add_argument('--lr', default=1e-4, type=float, help='')
parser.add_argument('--seed', default=1, type=int, help='')
parser.add_argument('--gamma', default=0.99, type=float, help='')
parser.add_argument('--tau', default=1.0, type=float, help='')
parser.add_argument('--horizon', default=0.99, type=float, help='')
parser.add_argument('--hidden', default=256, type=int, help='')
parser.add_argument('--frame_skip', default=-1, type=int, help='')
parser.add_argument('--gpu', default=0, type=int, help='')
parser.add_argument('--exp', default='0', type=str, help='')
parser.add_argument('--scratch', default=False, type=bool, help='')
parser.add_argument('--sample', default=False, type=bool, help='')
args = parser.parse_args()
return args
|
from .suzuripy import SuzuriClient
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import logging
from typing import Dict, Any, Optional, Tuple
import requests
from ogr.abstract import CommitStatus, GitProject
from ogr.utils import RequestResponse
from packit.config import JobType, JobConfigTriggerType
from packit.config.job_config import JobConfig
from packit.config.package_config import PackageConfig
from packit.exceptions import PackitConfigException
from packit_service.config import ServiceConfig
from packit_service.constants import TESTING_FARM_INSTALLABILITY_TEST_URL
from packit_service.models import CoprBuildModel, TFTTestRunModel, TestingFarmResult
from packit_service.sentry_integration import send_to_sentry
from packit_service.service.events import EventData
from packit_service.worker.build import CoprBuildJobHelper
from packit_service.worker.result import TaskResults
logger = logging.getLogger(__name__)
class TestingFarmJobHelper(CoprBuildJobHelper):
def __init__(
self,
service_config: ServiceConfig,
package_config: PackageConfig,
project: GitProject,
metadata: EventData,
db_trigger,
job_config: JobConfig,
):
super().__init__(
service_config=service_config,
package_config=package_config,
project=project,
metadata=metadata,
db_trigger=db_trigger,
job_config=job_config,
)
self.session = requests.session()
adapter = requests.adapters.HTTPAdapter(max_retries=5)
self.insecure = False
self.session.mount("https://", adapter)
self._tft_api_url: str = ""
@property
def tft_api_url(self) -> str:
if not self._tft_api_url:
self._tft_api_url = self.service_config.testing_farm_api_url
if not self._tft_api_url.endswith("/"):
self._tft_api_url += "/"
return self._tft_api_url
@property
def fmf_url(self):
return (
self.job_config.metadata.fmf_url
or self.project.get_pr(self.metadata.pr_id).source_project.get_web_url()
)
@property
def fmf_ref(self):
if self.job_config.metadata.fmf_url:
return self.job_config.metadata.fmf_ref
return self.metadata.commit_sha
def _payload(self, build_id: int, chroot: str) -> dict:
"""
Testing Farm API: https://testing-farm.gitlab.io/api/
Currently we use the same secret to authenticate both,
packit service (when sending request to testing farm)
and testing farm (when sending notification to packit service's webhook).
We might later use a different secret for those use cases.
"""
distro, arch = self.chroot2distro_arch(chroot)
compose = self.distro2compose(distro)
fmf = {"url": self.fmf_url}
if self.fmf_ref:
fmf["ref"] = self.fmf_ref
return {
"api_key": self.service_config.testing_farm_secret,
"test": {
"fmf": fmf,
},
"environments": [
{
"arch": arch,
"os": {"compose": compose},
"artifacts": [
{
"id": f"{build_id}:{chroot}",
"type": "fedora-copr-build",
}
],
"tmt": {
"context": {"distro": distro, "arch": arch, "trigger": "commit"}
},
}
],
"notification": {
"webhook": {
"url": f"{self.api_url}/testing-farm/results",
"token": self.service_config.testing_farm_secret,
},
},
}
def _payload_install_test(self, build_id: int, chroot: str) -> dict:
"""
If the project doesn't use fmf, but still wants to run tests in TF.
TF provides 'installation test', we request it in ['test']['fmf']['url'].
We don't specify 'artifacts' as in _payload(), but 'variables'.
"""
copr_build = CoprBuildModel.get_by_build_id(build_id)
distro, arch = self.chroot2distro_arch(chroot)
compose = self.distro2compose(distro)
return {
"api_key": self.service_config.testing_farm_secret,
"test": {
"fmf": {
"url": TESTING_FARM_INSTALLABILITY_TEST_URL,
"name": "/packit/install-and-verify",
},
},
"environments": [
{
"arch": arch,
"os": {"compose": compose},
"variables": {
"REPOSITORY": f"{copr_build.owner}/{copr_build.project_name}",
},
}
],
"notification": {
"webhook": {
"url": f"{self.api_url}/testing-farm/results",
"token": self.service_config.testing_farm_secret,
},
},
}
def is_fmf_configured(self) -> bool:
if self.job_config.metadata.fmf_url is not None:
return True
try:
self.project.get_file_content(
path=".fmf/version", ref=self.metadata.commit_sha
)
return True
except FileNotFoundError:
return False
@staticmethod
def chroot2distro_arch(chroot: str) -> Tuple[str, str]:
""" Get distro and arch from chroot. """
distro, arch = chroot.rsplit("-", 1)
# https://github.com/packit/packit-service/issues/939#issuecomment-769896841
# https://github.com/packit/packit-service/pull/1008#issuecomment-789574614
distro = distro.replace("epel", "centos")
return distro, arch
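# For example (derived from the logic above): chroot2distro_arch("fedora-33-x86_64")
# returns ("fedora-33", "x86_64"), and "epel-8-x86_64" maps to ("centos-8", "x86_64")
# because of the epel -> centos substitution.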
def distro2compose(self, distro: str) -> str:
"""Create a compose string from distro, e.g. fedora-33 -> Fedora-33
https://api.dev.testing-farm.io/v0.1/composes"""
compose = distro.title().replace("Centos", "CentOS")
if compose == "CentOS-Stream":
compose = "CentOS-Stream-8"
response = self.send_testing_farm_request(endpoint="composes")
if response.status_code == 200:
# {'composes': [{'name': 'CentOS-Stream-8'}, {'name': 'Fedora-Rawhide'}]}
composes = [c["name"] for c in response.json()["composes"]]
if compose not in composes:
logger.error(f"Can't map {compose} (from {distro}) to {composes}")
return compose
def report_missing_build_chroot(self, chroot: str):
self.report_status_to_test_for_chroot(
state=CommitStatus.error,
description=f"No build defined for the target '{chroot}'.",
chroot=chroot,
)
@property
def latest_copr_build(self) -> Optional[CoprBuildModel]:
copr_builds = CoprBuildModel.get_all_by_owner_and_project(
owner=self.job_owner, project_name=self.job_project
)
if not copr_builds:
return None
return list(copr_builds)[0]
def run_testing_farm_on_all(self):
latest_copr_build = self.latest_copr_build
if not latest_copr_build:
return TaskResults(
success=False,
details={
"msg": f"No copr builds for {self.job_owner}/{self.job_project}"
},
)
failed = {}
for chroot in self.tests_targets:
result = self.run_testing_farm(
build_id=int(latest_copr_build.build_id), chroot=chroot
)
if not result["success"]:
failed[chroot] = result.get("details")
if not failed:
return TaskResults(success=True, details={})
# dict.update() returns None, so build the details dict first
details = {"msg": f"Failed testing farm targets: '{failed.keys()}'."}
details.update(failed)
return TaskResults(success=False, details=details)
def run_testing_farm(self, build_id: int, chroot: str) -> TaskResults:
if chroot not in self.tests_targets:
# Leaving here just to be sure that we will discover this situation if it occurs.
# Currently not possible to trigger this situation.
msg = f"Target '{chroot}' not defined for tests but triggered."
logger.error(msg)
send_to_sentry(PackitConfigException(msg))
return TaskResults(
success=False,
details={"msg": msg},
)
if chroot not in self.build_targets:
self.report_missing_build_chroot(chroot)
return TaskResults(
success=False,
details={
"msg": f"Target '{chroot}' not defined for build. "
"Cannot run tests without build."
},
)
self.report_status_to_test_for_chroot(
state=CommitStatus.pending,
description="Build succeeded. Submitting the tests ...",
chroot=chroot,
)
logger.info("Sending testing farm request...")
if self.is_fmf_configured():
payload = self._payload(build_id, chroot)
else:
payload = self._payload_install_test(build_id, chroot)
endpoint = "requests"
logger.debug(f"POSTing {payload} to {self.tft_api_url}{endpoint}")
req = self.send_testing_farm_request(
endpoint=endpoint,
method="POST",
data=payload,
)
logger.debug(f"Request sent: {req}")
if not req:
msg = "Failed to post request to testing farm API."
logger.debug("Failed to post request to testing farm API.")
self.report_status_to_test_for_chroot(
state=CommitStatus.error,
description=msg,
chroot=chroot,
)
return TaskResults(success=False, details={"msg": msg})
# success set check on pending
if req.status_code != 200:
# something went wrong
if req.json() and "message" in req.json():
msg = req.json()["message"]
else:
msg = f"Failed to submit tests: {req.reason}"
logger.error(msg)
self.report_status_to_test_for_chroot(
state=CommitStatus.failure,
description=msg,
chroot=chroot,
)
return TaskResults(success=False, details={"msg": msg})
# Response: {"id": "9fa3cbd1-83f2-4326-a118-aad59f5", ...}
pipeline_id = req.json()["id"]
logger.debug(
f"Submitted ({req.status_code}) to testing farm as request {pipeline_id}"
)
TFTTestRunModel.create(
pipeline_id=pipeline_id,
commit_sha=self.metadata.commit_sha,
status=TestingFarmResult.new,
target=chroot,
web_url=None,
trigger_model=self.db_trigger,
# In _payload() we ask TF to test commit_sha of fork (PR's source).
# Store original url. If this proves to work, make it a separate column.
data={"base_project_url": self.project.get_web_url()},
)
self.report_status_to_test_for_chroot(
state=CommitStatus.pending,
description="Tests have been submitted ...",
url=f"{self.tft_api_url}requests/{pipeline_id}",
chroot=chroot,
)
return TaskResults(success=True, details={})
def send_testing_farm_request(
self, endpoint: str, method: str = None, params: dict = None, data=None
) -> RequestResponse:
method = method or "GET"
url = f"{self.tft_api_url}{endpoint}"
try:
response = self.get_raw_request(
method=method, url=url, params=params, data=data
)
except requests.exceptions.ConnectionError as er:
logger.error(er)
raise Exception(f"Cannot connect to url: `{url}`.", er)
return response
def get_raw_request(
self,
url,
method="GET",
params=None,
data=None,
) -> RequestResponse:
response = self.session.request(
method=method,
url=url,
params=params,
json=data,
verify=not self.insecure,
)
try:
json_output = response.json()
except ValueError:
logger.debug(response.text)
json_output = None
return RequestResponse(
status_code=response.status_code,
ok=response.ok,
content=response.content,
json=json_output,
reason=response.reason,
)
@classmethod
def get_request_details(cls, request_id: str) -> Dict[str, Any]:
"""Testing Farm sends only request/pipeline id in a notification.
We need to get more details ourselves."""
self = cls(
service_config=ServiceConfig.get_service_config(),
package_config=PackageConfig(),
project=None,
metadata=None,
db_trigger=None,
job_config=JobConfig(
# dummy values to be able to construct the object
type=JobType.tests,
trigger=JobConfigTriggerType.pull_request,
),
)
response = self.send_testing_farm_request(
endpoint=f"requests/{request_id}", method="GET"
)
if not response or response.status_code != 200:
msg = f"Failed to get request/pipeline {request_id} details from TF. {response.reason}"
logger.error(msg)
return {}
details = response.json()
# logger.debug(f"Request/pipeline {request_id} details: {details}")
return details
|
from data_reader import *
from similarity_ideas import *
if __name__ == '__main__':
texts, sims = load_dev_texts_and_similarities()
# lsa similarity
texts_flatten = [txt for i in range(len(texts)) for txt in texts[i]]
"""
lsa_sim = lsa_sim(texts_flatten) # sim for all texts, we're only interested in the original sims
lsa_sim = np.array([lsa_sim[2*i, 2*i+1] for i in range(len(texts))])
lsa_sim = lsa_sim*5
with open('storage/predictions/lsa_sim.txt', 'w') as f:
for i in range(len(lsa_sim)):
f.write(str(lsa_sim[i])+'\n')
tfidf_sim = tf_idf_similarity(texts_flatten)
tfidf_sim = np.array([tfidf_sim[2*i, 2*i+1] for i in range(len(texts))])
tfidf_sim = tfidf_sim*5
with open('storage/predictions/tfidf_sim.txt', 'w') as f:
for i in range(len(tfidf_sim)):
f.write(str(tfidf_sim[i])+'\n')
"""
binary_sim = binary_cosine_sim_2(texts_flatten)
binary_sim = np.array([binary_sim[2*i, 2*i+1] for i in range(len(texts))])
binary_sim = binary_sim*5
with open('storage/predictions/binary_sim_2.txt', 'w') as f:
for i in range(len(binary_sim)):
f.write(str(binary_sim[i])+'\n')
"""
word2vec_similarities = word2vec_sim(texts_flatten, True, 'cosine')
word2vec_similarities = np.array([word2vec_similarities[2*i, 2*i+1] for i in range(len(texts))])
word2vec_similarities = word2vec_similarities*5
with open('storage/predictions/tfidf_weighted_word2vec_sim_cosine.txt', 'w') as f:
for i in range(len(word2vec_similarities)):
f.write(str(word2vec_similarities[i])+'\n')
word2vec_similarities = word2vec_sim(texts_flatten, False, 'cosine')
word2vec_similarities = np.array([word2vec_similarities[2*i, 2*i+1] for i in range(len(texts))])
word2vec_similarities = word2vec_similarities*5
with open('storage/predictions/avg_wv_word2vec_sim_cosine.txt', 'w') as f:
for i in range(len(word2vec_similarities)):
f.write(str(word2vec_similarities[i])+'\n')
word2vec_similarities = word2vec_sim(texts_flatten, True, 'l2')
word2vec_similarities = np.array([word2vec_similarities[2*i, 2*i+1] for i in range(len(texts))])
word2vec_similarities = word2vec_similarities*5
with open('storage/predictions/tfidf_weighted_word2vec_sim_l2.txt', 'w') as f:
for i in range(len(word2vec_similarities)):
f.write(str(word2vec_similarities[i])+'\n')
word2vec_similarities = word2vec_sim(texts_flatten, False, 'l2')
word2vec_similarities = np.array([word2vec_similarities[2*i, 2*i+1] for i in range(len(texts))])
word2vec_similarities = word2vec_similarities*5
with open('storage/predictions/avg_wv_word2vec_sim_l2.txt', 'w') as f:
for i in range(len(word2vec_similarities)):
f.write(str(word2vec_similarities[i])+'\n')
fasttext_similarities = fasttext_sim(texts_flatten, True, 'cosine')
fasttext_similarities = np.array([fasttext_similarities[2*i, 2*i+1] for i in range(len(texts))])
fasttext_similarities = fasttext_similarities*5
with open('storage/predictions/tfidf_weighted_fasttext_sim_cosine.txt', 'w') as f:
for i in range(len(fasttext_similarities)):
f.write(str(fasttext_similarities[i])+'\n')
fasttext_similarities = fasttext_sim(texts_flatten, False, 'cosine')
fasttext_similarities = np.array([fasttext_similarities[2*i, 2*i+1] for i in range(len(texts))])
fasttext_similarities = fasttext_similarities*5
with open('storage/predictions/avg_wv_fasttext_sim_cosine.txt', 'w') as f:
for i in range(len(fasttext_similarities)):
f.write(str(fasttext_similarities[i])+'\n')
fasttext_similarities = fasttext_sim(texts_flatten, True, 'l2')
fasttext_similarities = np.array([fasttext_similarities[2*i, 2*i+1] for i in range(len(texts))])
fasttext_similarities = fasttext_similarities*5
with open('storage/predictions/tfidf_weighted_fasttext_sim_l2.txt', 'w') as f:
for i in range(len(fasttext_similarities)):
f.write(str(fasttext_similarities[i])+'\n')
fasttext_similarities = fasttext_sim(texts_flatten, False, 'l2')
fasttext_similarities = np.array([fasttext_similarities[2*i, 2*i+1] for i in range(len(texts))])
fasttext_similarities = fasttext_similarities*5
with open('storage/predictions/avg_wv_fasttext_sim_l2.txt', 'w') as f:
for i in range(len(fasttext_similarities)):
f.write(str(fasttext_similarities[i])+'\n')
glove_similarities = glove_sim(texts_flatten, True, 'cosine')
glove_similarities = np.array([glove_similarities[2*i, 2*i+1] for i in range(len(texts))])
glove_similarities = glove_similarities*5
with open('storage/predictions/tfidf_weighted_glove_sim_cosine.txt', 'w') as f:
for i in range(len(glove_similarities)):
f.write(str(glove_similarities[i])+'\n')
glove_similarities = glove_sim(texts_flatten, False, 'cosine')
glove_similarities = np.array([glove_similarities[2*i, 2*i+1] for i in range(len(texts))])
glove_similarities = glove_similarities*5
with open('storage/predictions/avg_wv_glove_sim_cosine.txt', 'w') as f:
for i in range(len(glove_similarities)):
f.write(str(glove_similarities[i])+'\n')
glove_similarities = glove_sim(texts_flatten, True, 'l2')
glove_similarities = np.array([glove_similarities[2*i, 2*i+1] for i in range(len(texts))])
glove_similarities = glove_similarities*5
with open('storage/predictions/tfidf_weighted_glove_sim_l2.txt', 'w') as f:
for i in range(len(glove_similarities)):
f.write(str(glove_similarities[i])+'\n')
glove_similarities = glove_sim(texts_flatten, False, 'l2')
glove_similarities = np.array([glove_similarities[2*i, 2*i+1] for i in range(len(texts))])
glove_similarities = glove_similarities*5
with open('storage/predictions/avg_wv_glove_sim_l2.txt', 'w') as f:
for i in range(len(glove_similarities)):
f.write(str(glove_similarities[i])+'\n')
dan_sim = dan_sim(texts_flatten)
dan_sim = np.array([dan_sim[2*i, 2*i+1] for i in range(len(texts))])
dan_sim = dan_sim*5
with open('storage/predictions/dan_sim.txt', 'w') as f:
for i in range(len(dan_sim)):
f.write(str(dan_sim[i])+'\n')
"""
|
# UNIT 05.D08 - D11
# Functions. Returning Values
print('\n\n---[Slide 08]---------------------')
print('Functions - Returning Values')
def saludar():
return 'Hello everyone!'
saludar = saludar()
print('the value of the function is ', saludar)
print('\n\n---[Slide 09]---------------------')
def valor5():
return 5
print('5 plus 5 is ', valor5() + 5)
def dias_semana():
return ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
print('Weekdays: ', dias_semana()[0:5])
print('Weekend: ', dias_semana()[-2:])
print('\n Parameterized: ')
def dias_semana(habiles):
dias = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
if habiles:
return dias[0:5]
else:
return dias[-2:]
print('Weekdays: ', dias_semana(True))
print('Weekend: ', dias_semana(False))
print('\n\n---[Slide 10]---------------------')
def dias_semana():
return ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
dias = dias_semana()
print('Weekdays: ', dias[0:5])
print('Weekend: ', dias[-2:])
print('\n\n---[Slide 11]---------------------')
print('Multiple returns')
def multiples_retornos():
return 'Hello', 29, True, [1,2,3,4]
multiple = multiples_retornos()
print('Return type: ', type(multiple))
print('Returned value: ', multiple)
print('The first value is: ', multiple[0])
primero, segundo, tercero, cuarto = multiples_retornos()
print('The first value is: ', primero)
print('The second value is: ', segundo)
print('The third value is: ', tercero)
print('The fourth value is: ', cuarto)
|
import simplejson as json
import sys, re, string,os
# load the cmu dict
try:
path = os.path.join(os.path.dirname(__file__), 'cmu_dict.json')
cmu = json.load(open(path))
except:
print "Converted CMU dict not found"
sys.exit(0)
SubSyl = [
'cial',
'tia',
'cius',
'cious',
'giu', # belgium!
'ion',
'iou',
'sia$',
'.ely$', # absolutely! (but not ely!)
]
AddSyl = [
'ia',
'riet',
'dien',
'iu',
'io',
'ii',
'[aeiouym]bl$', # -Vble, plus -mble
'[aeiou]{3}', # agreeable
'^mc',
'ism$', # -isms
r'([^aeiouy])\1l$', # middle twiddle battle bottle, etc. (raw string so \1 stays a backreference)
'[^l]lien', # alien, salient [1]
'^coa[dglx].', # [2]
'[^gq]ua[^auieo]', # i think this fixes more than it breaks
'dnt$', # couldn't
]
def _guess_sy_count(word):
"""If we can't lookup the word, then guess its syllables count. This is
(heavily) based on Greg Fast's Perl module Lingua::EN::Syllables. But
the bugs are mine."""
mungedword = re.sub('e$','',word.lower())
splitword = re.split(r'[^aeiouy]+', mungedword)
splitword = [ x for x in splitword if (x != '') ] # hmm
syllables = 0
for i in SubSyl:
if re.search(i,mungedword):
syllables -= 1
for i in AddSyl:
if re.search(i,mungedword):
syllables += 1
    if len(mungedword) == 1: syllables += 1
syllables += len(splitword)
if syllables == 0: syllables = 1
return syllables
def _count_syllables(word):
    if word in cmu:
return cmu[word]
else:
return _guess_sy_count(word)
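# A candidate phrase is a haiku when its cumulative syllable counts hit the
# 5 / 12 / 17 boundaries exactly, i.e. lines of 5, 7 and 5 syllables.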
def check_string(to_check):
    haiku_form = [5, 12, 17]
    upper = to_check.upper()
    split = upper.split(' ')
    syllable_count = 0
    haiku_line = 0
    for word in split:
        syllable_count += _count_syllables(word)
        if syllable_count == haiku_form[haiku_line]:
            haiku_line += 1
            if haiku_line >= len(haiku_form):
                return True
        elif syllable_count > haiku_form[haiku_line]:
            return False
    return False
def find_haiku(to_check):
# remove punctuation
exclude = set(string.punctuation)
stripped = ''.join(ch for ch in to_check if ch not in exclude)
split = stripped.split(' ')
haiku_list = []
for i in range(0, len(split) - 2):
for j in range(i + 3, len(split) + 1):
            final = ' '.join(split[i:j])
if final and check_string(final):
haiku_list.append(final)
return haiku_list
if __name__ == '__main__':
print find_haiku('As the wind does blow Across the trees, I see the Buds blooming in May')
|
import numpy as np
import scipy.ndimage
import math
from utils import *
from math import *
import os
from scipy.ndimage import imread
from mayavi import mlab as mayalab
import skimage.measure
from multiprocessing import Pool
import shutil
import numpy
from symmetry_issue import *
from quaternionlib import *
np.set_printoptions(precision=4,suppress=True,linewidth=300)
h = 240
w = 320
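# Convert a rotation matrix to a unit quaternion in [w, x, y, z] order.
# With isprecise=True a fast trace-based branch is used (the matrix is assumed
# to be a clean rotation); otherwise the quaternion is recovered as the dominant
# eigenvector of the symmetric matrix K, with its sign normalised to w >= 0.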
def quaternion_from_matrix(matrix,isprecise=False):
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q = q[[3, 0, 1, 2]]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def load_rgb(filepath):
tmp = imread(filepath)
zoom_scale = 0.5
r = scipy.ndimage.zoom(tmp[:,:,0], zoom_scale, order=1)
g = scipy.ndimage.zoom(tmp[:,:,1], zoom_scale, order=1)
b = scipy.ndimage.zoom(tmp[:,:,2], zoom_scale, order=1)
image = np.dstack((r,g,b))
return image
def load_labeling(filepath):
label_id = np.load(filepath)['labeling']
return label_id
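# Parse a per-model pose file: each of the first three lines holds one row of the
# 3x3 rotation plus the corresponding translation component inside parentheses.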
def tran_rot(filepath):
rot = np.zeros((3,3))
tran = np.zeros((3,))
lines = [line.strip() for line in open(filepath)]
for idx, line in enumerate(lines):
tmp = str(line).split('(')[1].split(')')[0].split()
tmp = [float(x.split(',')[0]) for x in tmp]
if idx < 3:
rot[idx,:] = np.array(tmp[0:3])
tran[idx] = tmp[3]
return tran,rot
fa = open('symmetry_example.txt','a+')
def cal_transformation(top_dir):
pgm_filepath = [line for line in os.listdir(top_dir) if line.endswith('.pgm') and line.startswith('frame80')]
if len(pgm_filepath) < 1:
return
else:
pgm_filepath = pgm_filepath[0]
tmp = pgm_filepath.split('.pgm')[0].split('_')
azimuth_deg = float(tmp[2].split('azi')[1])
elevation_deg = float(tmp[3].split('ele')[1])
theta_deg = float(tmp[4].split('theta')[1])
rho = float(tmp[1].split('rho')[1])
cx, cy, cz = obj_centened_camera_pos(rho, azimuth_deg, elevation_deg)
q1 = camPosToQuaternion(cx , cy , cz)
q2 = camRotQuaternion(cx, cy , cz, theta_deg)
q = quaternionProduct(q2, q1)
R = quaternion_matrix(q)[0:3,0:3]
C = np.zeros((3,))
C[0] = cx
C[1] = cy
C[2] = cz
frame2_id = load_labeling(os.path.join(top_dir,'frame80_labeling_model_id.npz'))
frame1_id = load_labeling(os.path.join(top_dir,'frame20_labeling_model_id.npz'))
frame2_center = load_seg(os.path.join(top_dir,'frame80_labeling.npz'))
frame1_center = load_seg(os.path.join(top_dir,'frame20_labeling.npz'))
frame2_xyz_name = [line for line in os.listdir(top_dir) if line.startswith('frame80') and line.endswith('.pgm')][0]
frame1_xyz_name = [line for line in os.listdir(top_dir) if line.startswith('frame20') and line.endswith('.pgm')][0]
frame2_xyz = load_xyz(os.path.join(top_dir,frame2_xyz_name))
frame1_xyz = load_xyz(os.path.join(top_dir,frame1_xyz_name))
frame2_id_list = np.unique(frame2_id)
frame1_id_list = np.unique(frame1_id)
model_ids = [line.split('frame80_')[1] for line in os.listdir(top_dir) if line.endswith('.txt') and line.startswith('frame80')]
model_ids.sort()
transformation_rot = np.zeros((h,w,4))
transformation_rot[:,:,0] = 1
transformation_translation = np.zeros((h,w,3))
symmetry_top_dir = '/home/linshaonju/Symmetry'
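    # For every object instance visible in frame 80, compute the rigid motion that
    # carries its points back to their frame-20 pose in camera coordinates, and
    # collapse the rotation onto a canonical representative when the CAD model has
    # a rotational symmetry axis (Cn / cylinder generators).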
for instance_id in frame2_id_list:
frame2_pid = frame2_id == instance_id
frame2_pid = frame2_pid.reshape((240,320))
frame1_pid = frame1_id == instance_id
frame1_pid = frame1_pid.reshape((240,320))
if instance_id > 0:
frame1_tran, frame1_rot = tran_rot(os.path.join(top_dir,'frame20_'+model_ids[int(instance_id)-1]))
frame2_tran, frame2_rot = tran_rot(os.path.join(top_dir,'frame80_'+model_ids[int(instance_id)-1]))
R12 = frame1_rot.dot(np.linalg.inv(frame2_rot))
rot = R.T.dot(R12.dot(R))
tran = R.T.dot(frame1_tran-C) + R.T.dot(R12.dot(C-frame2_tran))
tran[2] *= -1.0
rot[0,2] *= -1.0
rot[1,2] *= -1.0
rot[2,0] *= -1.0
rot[2,1] *= -1.0
quater = quaternion_from_matrix(rot)
instance_center = np.mean(frame2_center[frame2_pid],axis=0)
tran1 = quaternion_rotation(quater,instance_center)
cate,md5 = model_ids[int(instance_id)-1].split('_')[0:2]
if cate in cate_symmetry and md5 not in cate_except[cate]:
symmetry_file = os.path.join(symmetry_top_dir,cate,md5+'.generator')
if os.path.exists(symmetry_file):
symmetry_line = [line for line in open(symmetry_file) if line.startswith('C')]
if len(symmetry_line) > 0:
print(cate+' '+md5)
print(symmetry_line)
for sline in symmetry_line:
ssline = sline.strip().split()
if len(ssline) > 1:
Cname,Cn,Rx,Ry,Rz = ssline
Cn = float(Cn)
Raxis = np.array([float(Rx),float(Ry),float(Rz)]).astype(np.float64)
Raxis = frame2_rot.dot(Raxis)
Raxis = R.T.dot(Raxis)
Raxis_norm = np.linalg.norm(Raxis)
Raxis = Raxis / Raxis_norm
Raxis[2] *= -1.0
print(Raxis)
quater,quater_3 = quaternion_shrink(quater,Raxis,Cn)
if Cn >= 20:
print("c20 quater changed!")
quater = quater_3
else:
assert 'Cylinder' in ssline
_, Rc2 = angle_axis_from_quaternion(quater)
quater,quater_3 = quaternion_shrink(quater,Rc2,2)
tran2 = quaternion_rotation(quater,instance_center)
tran = tran + tran1 - tran2
if 0:
objf20 = frame1_xyz[frame1_pid]
objf80 = frame2_xyz[frame2_pid]
p20 = objf20
p80 = objf80
if len(p20) > 0:
p80_n = quaternion_rotation(quater,p80)
p80_n = p80_n + tran
mayalab.points3d(p20[:,0],p20[:,1],p20[:,2],color=(0,1,0),mode='sphere')
mayalab.points3d(p80_n[:,0],p80_n[:,1],p80_n[:,2],color=(0,0,1),mode='sphere')
mayalab.points3d(p80[:,0],p80[:,1],p80[:,2],color=(1,0,0),mode='sphere')
mayalab.show()
transformation_translation[frame2_pid] = tran
transformation_rot[frame2_pid] = quater
transformation_file = os.path.join(top_dir,'translation.npz')
rotation_file = os.path.join(top_dir,'rotation.npz')
print(transformation_file)
np.savez(transformation_file,transl=transformation_translation)
np.savez(rotation_file,rot=transformation_rot)
def load_seg(filepath):
try:
seg = np.load(filepath)['labeling']
seg[:,:,2] *= -1.0
except:
print(filepath)
print('sth is wrong!')
return np.zeros((h,w,3))
return seg
def load_xyz(filename):
"""Return image data from a PGM file generated by blensor. """
fx = 472.92840576171875
fy = fx
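    # The PGM stores per-pixel depth from a synthetic pinhole camera: depth is
    # rescaled by value_max_range * 1.5, back-projected through fx/fy to camera-space
    # x/y, flipped vertically and downsampled by 2 to the (h, w) working size.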
with open(filename, 'rb') as f:
f.readline()
f.readline()
width_height = f.readline().strip().split()
if len(width_height) > 1:
width, height = map(int,width_height)
value_max_range = float(f.readline())
image_ = [float(line.strip()) for line in f.readlines()]
if len(image_) == height * width:
nx,ny = (width,height)
x_index = np.linspace(0,width-1,width)
y_index = np.linspace(0,height-1,height)
xx,yy = np.meshgrid(x_index,y_index)
xx -= float(width)/2
yy -= float(height)/2
xx /= fx
yy /= fy
cam_z = np.reshape(image_,(height, width))
cam_z = cam_z / value_max_range * 1.5
cam_x = xx * cam_z
cam_y = yy * cam_z
image_z = np.flipud(cam_z)
image_y = np.flipud(cam_y)
image_x = np.flipud(cam_x)
zoom_scale = 0.5
image_x = scipy.ndimage.zoom(image_x, zoom_scale, order=1)
image_y = scipy.ndimage.zoom(image_y, zoom_scale, order=1)
image_z = scipy.ndimage.zoom(image_z, zoom_scale, order=1)
image = np.dstack((image_x,image_y,image_z))
return image
return np.zeros((h,w,3))
def load_flow(top_dir):
tmp = os.path.join(top_dir,'flow.npz')
result = np.load(tmp)
result = result['flow']
return result
def load_transl(filename):
tmp = np.load(filename)['transl']
return tmp
def load_rot(filename):
tmp = np.load(filename)['rot']
return tmp
def cal_flow(top_dir,frame2_input_xyz_file, transformation_file, frame1_id_file, frame2_id_file):
frame1_id_file = load_labeling(frame1_id_file)
frame2_id_file = load_labeling(frame2_id_file)
frame1_id = np.squeeze(frame1_id_file)
frame2_id = np.squeeze(frame2_id_file)
transl = load_transl(os.path.join(transformation_file,'translation.npz'))
quater = load_rot(os.path.join(transformation_file,'rotation.npz'))
frame2_id_unique = np.unique(frame2_id)
frame1_id_unique = np.unique(frame1_id)
flow = np.zeros((h,w,3))
frame2_input_xyz = load_xyz(frame2_input_xyz_file)
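    # Rotate every frame-80 point by its per-pixel quaternion: the block below
    # expands q * p * conj(q) component-wise, and the scene flow is the rotated
    # point plus the per-pixel translation minus the original point.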
    w1, x1, y1, z1 = quater[:,:,0], quater[:,:,1], quater[:,:,2], quater[:,:,3]
x2, y2, z2 = frame2_input_xyz[:,:,0],frame2_input_xyz[:,:,1],frame2_input_xyz[:,:,2]
wm = - x1 * x2 - y1 * y2 - z1 * z2
xm = w1 * x2 + y1 * z2 - z1 * y2
ym = w1 * y2 + z1 * x2 - x1 * z2
zm = w1 * z2 + x1 * y2 - y1 * x2
x = -wm * x1 + xm * w1 - ym * z1 + zm * y1
y = -wm * y1 + ym * w1 - zm * x1 + xm * z1
z = -wm * z1 + zm * w1 - xm * y1 + ym * x1
flow = np.stack((x,y,z),axis=-1)
flow = flow + transl - frame2_input_xyz
flow_file = os.path.join(top_dir,'flow.npz')
np.savez(flow_file,flow=flow)
if 0:
post_p = frame2_input_xyz.reshape((-1,3))
p1 = flow.reshape((-1,3)) + post_p
prev_p = [line for line in os.listdir(top_dir) if line.startswith('frame20') and line.endswith('.pgm')][0]
prev_p = os.path.join(top_dir,prev_p)
prev_p = load_xyz(prev_p)
p2 = prev_p.reshape((-1,3))
mayalab.points3d(p1[:,0],p1[:,1],p1[:,2],color=(0,1,0),mode='point')
mayalab.points3d(p2[:,0],p2[:,1],p2[:,2],color=(0,0,1),mode='point')
mayalab.show()
def raw_cal_flow(total):
top_dir, frame2_input_xyz_file, frame1_id_file, frame2_id_file = total.split('#')
cal_flow(top_dir,frame2_input_xyz_file, top_dir, frame1_id_file, frame2_id_file)
def cal_score(top_dir,inputfilename,gtfilename):
xyz = load_xyz(inputfilename)[:,:,0:2]
seg = load_seg(gtfilename)[:,:,0:2]
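    # For each object instance, mark (score = 1) the up-to-300 pixels whose observed
    # x/y position lies closest to that instance's ground-truth center.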
score = np.zeros((h,w))
score_tmp = score.reshape((-1,1))
xyz_tmp = xyz.reshape((-1,2))
seg_tmp = seg.reshape((-1,2))
idx_c = np.unique(seg_tmp,axis=0)
diff = xyz_tmp - seg_tmp
diff_norm = np.linalg.norm(diff,axis=1)
for idx in idx_c:
if idx[0] != 0.0:
tmp = np.where(seg_tmp == idx)[0]
dist = diff_norm[tmp]
top_k = min(len(dist),300)
tmp_indx = dist.argsort()[:top_k]
index = tmp[tmp_indx]
score_tmp[index] = 1.0
score = score_tmp.reshape((h,w))
score_file = os.path.join(top_dir,'frame20_score.npz')
np.savez(score_file,score=score)
def load_score(score_file):
tmp = np.load(score_file)['score']
return tmp
def raw_cal_score(total):
top_dir,inputfilename, gtfilename = total.split('#')
cal_score(top_dir,inputfilename,gtfilename)
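# Per-pixel "boundary" value: for each instance, the distance from its
# (center, trajectory-end) feature to the nearest other instance's feature,
# scaled by 1/10; a lone instance gets a default of 0.02.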
def cal_boundary(top_dir):
dist_image = np.zeros((240,320,1))
filepath = os.path.join(top_dir,'frame80_labeling.npz')
if not os.path.exists(filepath):
return
if not os.path.exists(os.path.join(top_dir,'end_center.npz')):
return
seg = load_seg(filepath)
end_center = np.load(os.path.join(top_dir,'end_center.npz'))['end_center']
feat = np.zeros((240,320,6))
feat[:,:,0:3] = seg
feat[:,:,3:6] = end_center
d2_image = np.reshape(feat,(-1,6))
idx_c = np.unique(d2_image,axis=0)
idx_c = [idx_c[i] for i in xrange(len(idx_c)) if idx_c[i][0] != 0.0 and idx_c[i][1] != 0.0 and idx_c[i][2] != 0.0]
d2_list = [i for i in xrange(len(idx_c))]
if len(idx_c) == 1:
dist_image[seg[:,:,2] == idx_c[0][2]] = 0.02
elif len(idx_c) > 1:
for i_c in xrange(len(idx_c)):
dist = np.min(np.array([np.linalg.norm(idx_c[i_c] - idx_c[i]) for i in d2_list if i != i_c]))
dist_image[seg[:,:,2] == idx_c[i_c][2]] = dist / 10
boundary_file = os.path.join(top_dir,'boundary.npz')
np.savez(boundary_file,boundary=dist_image)
cateid_cate = {'02876657':1, # bottle
'02691156':2, # toy airplane
'02747177':3, # trash can
'02773838':4, # bag
'02808440':5, # bowl
'02924116':6, # toy bus
'02942699':7, # camera
'02946921':8, # can
'02954340':9, # cap
'02958343':10,# toy car
'03001627':11,# toy chair
'03046257':12,#clocks
'03085013':13,#key boards
'03211117':14,#display
'03261776':15,#earphone
'03624134':16,#knife
'03642806':17,#laptop
'03790512':18,#toy motorcycle
'03797390':19,#mug
'03948459':20,#pistol
'04074963':21,#remote control
'04401088':22,#telephone
'04530566':23,#toy boat
'04468005':24,#toy train
'04099429':25,#toy rocket
'04256520':26,#toy sofa
'03513137':27,#helmet
'04379243':28,#toy table
}
def load_boundary(boundary_file):
tmp = np.load(boundary_file)['boundary']
return tmp
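# Compute, for every frame-80 pixel, where its instance center ends up in frame 20
# (the trajectory end point). If the instance is still visible in frame 20 its mean
# frame-20 center is used directly; otherwise the frame-80 center is transformed by
# the relative camera-space pose between the two frames.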
def cal_ending_traj(top_dir):
frame2_center = load_seg(os.path.join(top_dir,'frame80_labeling.npz'))
frame1_center = load_seg(os.path.join(top_dir,'frame20_labeling.npz'))
frame2_id = load_labeling(os.path.join(top_dir,'frame80_labeling_model_id.npz'))
frame1_id = load_labeling(os.path.join(top_dir,'frame20_labeling_model_id.npz'))
end_center = np.zeros((240,320,3))
pgm_filepath = [line for line in os.listdir(top_dir) if line.endswith('.pgm') and line.startswith('frame80')]
if len(pgm_filepath) < 1:
return
else:
pgm_filepath = pgm_filepath[0]
tmp = pgm_filepath.split('.pgm')[0].split('_')
azimuth_deg = float(tmp[2].split('azi')[1])
elevation_deg = float(tmp[3].split('ele')[1])
theta_deg = float(tmp[4].split('theta')[1])
rho = float(tmp[1].split('rho')[1])
cx, cy, cz = obj_centened_camera_pos(rho, azimuth_deg, elevation_deg)
q1 = camPosToQuaternion(cx , cy , cz)
q2 = camRotQuaternion(cx, cy , cz, theta_deg)
q = quaternionProduct(q2, q1)
R = quaternion_matrix(q)[0:3,0:3]
C = np.zeros((3,))
C[0] = cx
C[1] = cy
C[2] = cz
frame2_xyz_name = [line for line in os.listdir(top_dir) if line.startswith('frame80') and line.endswith('.pgm')][0]
frame1_xyz_name = [line for line in os.listdir(top_dir) if line.startswith('frame20') and line.endswith('.pgm')][0]
frame2_xyz = load_xyz(os.path.join(top_dir,frame2_xyz_name))
frame1_xyz = load_xyz(os.path.join(top_dir,frame1_xyz_name))
frame2_id_list = np.unique(frame2_id)
frame1_id_list = np.unique(frame1_id)
model_ids = [line.split('frame80_')[1] for line in os.listdir(top_dir) if line.endswith('.txt') and line.startswith('frame80')]
model_ids.sort()
for instance_id in frame2_id_list:
frame2_pid = frame2_id == instance_id
frame2_pid = frame2_pid.reshape((240,320))
frame1_pid = frame1_id == instance_id
frame1_pid = frame1_pid.reshape((240,320))
if instance_id > 0:
if instance_id not in frame1_id_list:
frame1_tran, frame1_rot = tran_rot(os.path.join(top_dir,'frame20_'+model_ids[int(instance_id)-1]))
frame2_tran, frame2_rot = tran_rot(os.path.join(top_dir,'frame80_'+model_ids[int(instance_id)-1]))
R12 = frame1_rot.dot(np.linalg.inv(frame2_rot))
rot = R.T.dot(R12.dot(R))
tran = R.T.dot(frame1_tran-C) + R.T.dot(R12.dot(C-frame2_tran))
tran[2] *= -1.0
rot[0,2] *= -1.0
rot[1,2] *= -1.0
rot[2,0] *= -1.0
rot[2,1] *= -1.0
tmp_e = np.mean(frame2_center[frame2_pid],axis=0)
end_center[frame2_pid] = rot.dot(tmp_e) + tran
else:
tmp_e = np.mean(frame1_center[frame1_pid],axis=0)
end_center[frame2_pid] = tmp_e
ending_traj_file = os.path.join(top_dir,'end_center.npz')
np.savez(ending_traj_file,end_center=end_center)
def load_end_center(end_center_file):
tmp = np.load(end_center_file)['end_center']
return tmp
if __name__ == '__main__':
"Annotate the ground truth dataset. Follow the order
Step 1. Calculate translation and rotation
Step 2. Calculate the trajectory ending point
Step 3. Calculate the distances between trajectories and score"
top_dir = ''
num = 10000
if 1:
filelist = []
for i in xrange(0,num):
top_d = os.path.join(top_dir,str(i))
if os.path.exists(top_d):
if not os.path.exists(os.path.join(top_d,'frame80_labeling_model_id.npz')) or not os.path.exists(os.path.join(top_d,'frame20_labeling_model_id.npz')) or not os.path.exists(os.path.join(top_d,'frame80_labeling.npz')) or not os.path.exists(os.path.join(top_d,'frame20_labeling.npz')):
pass
else:
filelist.append(top_d)
flow_file = os.path.join(top_d,'rotation.npz')
print(flow_file)
flow = load_rot(flow_file)
if 0:
filelist = []
for i in xrange(0,num):
top_d = os.path.join(top_dir,str(i))
if os.path.exists(top_d):
filelist.append(top_d)
pool = Pool(100)
for i, data in enumerate(pool.imap(cal_transformation,filelist)):
print(i)
pool.close()
if 0:
filelist = []
for i in xrange(0,num):
top_d = os.path.join(top_dir,str(i))
if os.path.exists(top_d):
frame1_id_file = os.path.join(top_d,'frame20_labeling_model_id.npz')
frame2_id_file = os.path.join(top_d,'frame80_labeling_model_id.npz')
frame2_input_xyz_file = [line for line in os.listdir(top_d) if line.startswith('frame80') and line.endswith('.pgm')]
if len(frame2_input_xyz_file) > 0:
frame2_input_xyz_file = frame2_input_xyz_file[0]
frame2_input_xyz_file = os.path.join(top_d,frame2_input_xyz_file)
total = top_d + '#' + frame2_input_xyz_file + '#' +frame1_id_file + '#' + frame2_id_file
if os.path.exists(frame1_id_file) and os.path.exists(frame2_id_file):
filelist.append(total)
pool = Pool(150)
for i, data in enumerate(pool.imap(raw_cal_flow,filelist)):
print(i)
pool.close()
print("pred scene flow")
if 0:
filelist = []
for i in xrange(0,num):
top_d = os.path.join(top_dir,str(i))
if os.path.exists(top_d):
print(top_d)
filelist.append(top_d)
pool = Pool(150)
for i , data in enumerate(pool.imap(cal_ending_traj,filelist)):
print(i)
pool.close()
print("cal ending traj")
if 0:
filelist=[]
for i in xrange(0,num):
top_d = os.path.join(top_dir,str(i))
if os.path.exists(top_d):
frame2_input_xyz_file = [line for line in os.listdir(top_d) if line.startswith('frame20') and line.endswith('.pgm')]
frame2_gt_file = os.path.join(top_d,'frame20_labeling.npz')
if len(frame2_input_xyz_file) > 0:
frame2_input_xyz_file = frame2_input_xyz_file[0]
frame2_input_xyz_file = os.path.join(top_d,frame2_input_xyz_file)
total = top_d + '#' + frame2_input_xyz_file + '#' +frame2_gt_file
print(total)
filelist.append(total)
pool = Pool(100)
for i, data in enumerate(pool.imap(raw_cal_score,filelist)):
print(i)
pool.close()
if 0:
filelist = []
for i in xrange(0,num):
top_d = os.path.join(top_dir,str(i))
if os.path.exists(top_d):
filelist.append(top_d)
pool = Pool(150)
for i, data in enumerate(pool.imap(cal_boundary,filelist)):
print(i)
print(filelist[i])
pool.close()
|
import configparser
from datetime import datetime, timedelta
import twitter
def fetch_disruptions_for_sbahn_line():
result = {'s1': False, 's2': False, 's3': False, 's4': False, 's5': False, 's6': False}
timeline = _get_vvs_twitter_timeline()
# check the text for occurrences of sbahn lines
for post in timeline:
text = post.text.lower()
if 's1' in text:
result['s1'] = True
if 's2' in text:
result['s2'] = True
if 's3' in text:
result['s3'] = True
if 's4' in text:
result['s4'] = True
if 's5' in text:
result['s5'] = True
if 's6' in text or 's60' in text:
result['s6'] = True
return result
def fetch_disruption_message():
timeline = _get_vvs_twitter_timeline()
message = ''
for post in timeline:
text_lower = post.text.lower()
if 's1' in text_lower or 's2' in text_lower or 's3' in text_lower or 's4' in text_lower \
or 's5' in text_lower or 's6' in text_lower or 's60' in text_lower:
if message != '':
message += ' --- '
message += post.text
return message
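# Pull the VVS account's timeline and keep only tweets posted within the last two hours.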
def _get_vvs_twitter_timeline():
# read twitter api keys from config file
config = configparser.ConfigParser()
config.read('config.ini')
api = twitter.Api(consumer_key=config['twitter']['ConsumerKey'],
consumer_secret=config['twitter']['ConsumerSecret'],
access_token_key=config['twitter']['AccessTokenKey'],
access_token_secret=config['twitter']['AccessTokenConfig'])
timeline = api.GetUserTimeline(screen_name='VVS')
relevant_posts = []
for status in timeline:
tweet_timestamp = datetime.fromtimestamp(status.created_at_in_seconds)
limit = datetime.now() - timedelta(hours=2)
if tweet_timestamp > limit:
relevant_posts.append(status)
return relevant_posts
def is_disruption(message):
return message != ''
|
from otolite.skdash.controller import Controller, estimators
# from sklearn.tree import DecisionTreeRegressor
#
# func = DecisionTreeRegressor
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
from otolite.skdash.util import extract_name_and_default, SignatureExtractor
from py2dash.component_makers import div_list_from_func
from py2dash.component_makers import dropdown_from_list
extract_signature = SignatureExtractor(attrs=('name', 'default', 'annotation'))
undefined = extract_name_and_default(dcc.Input)[0]['default']
from otolite.skdash.controller import run_model
div_list_0 = []
# func = LinearRegression
# func = DecisionTreeRegressor
func = run_model
func_div_list = div_list_from_func(func)
if func == run_model:
func_div_list.extend([
html.Button(id='submit-button', n_clicks=0, children='Submit'),
html.Div(id='output-state')])
# app.layout = html.Div(func_div_list, style={'columnCount': 2})
class Ids:
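    """Any attribute accessed on an Ids instance is registered and returned as a
    string, so ``ids.dropdown`` both declares and yields the component id 'dropdown'."""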
def __init__(self, _attrs=()):
self._attrs = list(_attrs)
def __getattr__(self, _id):
if isinstance(_id, self.__class__):
_id = _id._id
assert isinstance(_id, str), "_id should be a string"
if _id not in self._attrs:
setattr(self, _id, _id)
self._attrs.append(_id)
return _id
def __dir__(self): # to see attr in autocompletion
return super().__dir__() + self._attrs
def __iter__(self):
yield from self._attrs
ids = Ids()
# app.layout = html.Div([
# html.Label('Learner Kind'),
# dropdown_from_list(Controller.list_learner_kinds(), id=ids.dropdown),
# html.Button(id=ids.submit_learner, n_clicks=0, children='Submit Learner'),
# html.Label('Result'),
# html.Div(id=ids.result)
# ])
#
# @app.callback(
# Output(ids.result, 'children'),
# [Input(ids.submit_learner, 'n_clicks')],
# [State(ids.dropdown, 'value')],
# )
# def update_output_div(n_clicks, input_val):
# return str(extract_signature(dict(estimators)[input_val]))
div_list_0.extend([
html.Label('Learner Kind'),
dropdown_from_list(Controller.list_learner_kinds(), id=ids.dropdown),
html.Label('Result'),
html.Div(id=ids.result)
])
# showing different input types
html_input_types = ['text', 'number', 'password', 'email', 'range', 'search', 'tel', 'url', 'hidden']
for input_type in html_input_types:
div_list_0.append(html.Label(input_type))
div_list_0.append(dcc.Input(id=input_type + '_example', name=input_type, type=input_type))
app.layout = html.Div(div_list_0)
@app.callback(
Output(ids.result, 'children'),
[Input(ids.dropdown, 'value')]
)
def update_output_div(input_val):
return str(extract_signature(dict(estimators)[input_val]))
# app.layout = html.Div([html.Label('Dropdown'),
# dcc.Dropdown(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# value='MTL')])
# print([State(x['name'], 'value') for x in extract_signature(func)])
def ensure_bool(x):
if isinstance(x, bool):
return x
else:
if isinstance(x, str):
if x.lower().startswith('t'):
return True
elif x.lower().startswith('f'):
return False
elif isinstance(x, int):
return bool(x)
raise ValueError(f"Couldn't convert to a boolean: {x}")
# if func == run_model:
# sig = extract_signature(func)
# states = [State(x['name'], 'value') for x in extract_signature(func)]
#
#
# # wrapper = app.callback(Output('output-state', 'children'),
# # [Input('submit-button', 'n_clicks')],
# # states)
# # wrapped_func = wrapper(func)
# @app.callback(Output('output-state', 'children'),
# [Input('submit-button', 'n_clicks')],
# states)
# def run_model(tmp, n_clicks, mall_name, model_name: str, xy_name: str, method: str = 'predict', return_y: bool = False):
# return_y = ensure_bool(return_y)
# return Controller(mall_name).run_model(model_name, xy_name, method=method, return_y=return_y)
# else:
# wrapped_func = func
# app.layout = html.Div([
#
# daq.ToggleSwitch(
# id='my-toggle-switch',
# value=False,
#
# ),
# html.Div(id='toggle-switch-output'),
#
# html.Label('Radio Items'),
# dcc.RadioItems(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# value='MTL'
# ),
#
# html.Label('Checkboxes'),
# dcc.Checklist(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# values=['MTL', 'SF']
# ),
#
# html.Label('Text Input'),
# dcc.Input(value='MTL', type='text'),
#
# html.Label('Slider'),
# dcc.Slider(
# min=0,
# max=9,
# marks={i: 'Label {}'.format(i) if i == 1 else str(i) for i in range(1, 6)},
# value=5,
# ),
# ], style={'columnCount': 2})
if __name__ == '__main__':
app.run_server(debug=True)
|
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import argparse
from os import path, makedirs
from multiprocessing import Pool
import os
from scipy.spatial import distance
from tqdm import tqdm
PROBE_FILE = None
PROBE = None
PROBE_O = None
METRIC = None
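# Chi-square distance between two feature vectors/histograms; eps keeps the
# denominator away from zero for empty bins.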
def chisquare(p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
bin_dists = (p - q)**2 / (p + q + np.finfo('float').eps)
return np.sum(bin_dists)
def match(a, file_list):
value = []
image_a_path = a
features_a = np.load(image_a_path)
if np.ndim(features_a) == 1:
features_a = features_a[np.newaxis, :]
for file_path in file_list:
image_b_path = file_path
if image_a_path == image_b_path:
continue
features_b = np.load(image_b_path)
if np.ndim(features_b) == 1:
features_b = features_b[np.newaxis, :]
if METRIC == 1:
score = np.mean(cosine_similarity(features_a, features_b))
elif METRIC == 2:
score = distance.euclidean(features_a, features_b)
else:
score = chisquare(features_a, features_b)
value.append(score)
result = np.mean(np.asarray(value)) #change here for mean or median
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Match Extracted Features')
parser.add_argument('-probe', '-p', help='Probe image list.', default = "../Shared/AS/stage_1/all.txt")
#parser.add_argument('-group', '-gr', help='Group name, e.g. AA')
parser.add_argument('-metric', '-m', default=1,
help='Metric to us: (1) Cosine Similarity; (2) Euclidean Distance; (3) Chi Square')
args = parser.parse_args()
METRIC = int(args.metric)
PROBE_FILE = args.probe
print("Loading files ...")
    PROBE_O = np.sort(np.loadtxt(PROBE_FILE, dtype=str))
print("Finished loading ...")
dic = {}
remain = []
for line in tqdm(PROBE_O):
subject = line.split('/')[-2]
if subject not in dic:
dic[subject] = []
dic[subject].append(line)
for folder in tqdm(dic.values()):
result = []
for x in folder:
result.append([x, match(x, folder)])
result.sort(key=lambda x: x[1])
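        # Split each subject's images at the largest jump between consecutive sorted
        # scores and keep only the images above that gap (the tighter cluster).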
gap = []
for i in range(len(result) - 1):
gap.append(result[i + 1][1] - result[i][1])
max_position = 0
maximum = 0
for i in range(len(gap)):
if gap[i] >= maximum:
max_position = i
maximum = gap[i]
for i in range(max_position+1, len(result)):
remain.append(result[i][0])
np.savetxt('../Shared/AS/stage_2/median.txt', remain, delimiter=' ', fmt='%s')
|
"""
Bombay Stock Exchange
"""
from pandas import Timestamp
from pytz import timezone
from datetime import time
from .market_calendar import MarketCalendar
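# Ad-hoc full-day closures of the exchange, 1997 through 2022; exposed via the
# adhoc_holidays property of BSEExchangeCalendar below.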
BSEClosedDay = [
Timestamp('1997-01-23', tz='UTC'),
Timestamp('1997-03-07', tz='UTC'),
Timestamp('1997-03-24', tz='UTC'),
Timestamp('1997-04-08', tz='UTC'),
Timestamp('1997-04-14', tz='UTC'),
Timestamp('1997-04-16', tz='UTC'),
Timestamp('1997-04-18', tz='UTC'),
Timestamp('1997-05-01', tz='UTC'),
Timestamp('1997-05-08', tz='UTC'),
Timestamp('1997-08-15', tz='UTC'),
Timestamp('1997-08-18', tz='UTC'),
Timestamp('1997-08-25', tz='UTC'),
Timestamp('1997-10-02', tz='UTC'),
Timestamp('1997-10-28', tz='UTC'),
Timestamp('1997-10-29', tz='UTC'),
Timestamp('1997-10-31', tz='UTC'),
Timestamp('1997-12-25', tz='UTC'),
Timestamp('1998-04-09', tz='UTC'),
Timestamp('1998-04-14', tz='UTC'),
Timestamp('1998-04-28', tz='UTC'),
Timestamp('1998-12-25', tz='UTC'),
Timestamp('1999-01-01', tz='UTC'),
Timestamp('1999-01-20', tz='UTC'),
Timestamp('1999-01-26', tz='UTC'),
Timestamp('1999-03-02', tz='UTC'),
Timestamp('1999-03-18', tz='UTC'),
Timestamp('1999-03-25', tz='UTC'),
Timestamp('1999-03-29', tz='UTC'),
Timestamp('1999-04-02', tz='UTC'),
Timestamp('1999-04-14', tz='UTC'),
Timestamp('1999-04-27', tz='UTC'),
Timestamp('1999-04-30', tz='UTC'),
Timestamp('1999-09-13', tz='UTC'),
Timestamp('1999-10-19', tz='UTC'),
Timestamp('1999-11-08', tz='UTC'),
Timestamp('1999-11-10', tz='UTC'),
Timestamp('1999-11-23', tz='UTC'),
Timestamp('1999-12-31', tz='UTC'),
Timestamp('2000-01-26', tz='UTC'),
Timestamp('2000-03-17', tz='UTC'),
Timestamp('2000-03-20', tz='UTC'),
Timestamp('2000-04-14', tz='UTC'),
Timestamp('2000-04-21', tz='UTC'),
Timestamp('2000-05-01', tz='UTC'),
Timestamp('2000-08-15', tz='UTC'),
Timestamp('2000-09-01', tz='UTC'),
Timestamp('2000-10-02', tz='UTC'),
Timestamp('2000-12-25', tz='UTC'),
Timestamp('2001-01-01', tz='UTC'),
Timestamp('2001-01-26', tz='UTC'),
Timestamp('2001-03-06', tz='UTC'),
Timestamp('2001-04-05', tz='UTC'),
Timestamp('2001-04-13', tz='UTC'),
Timestamp('2001-05-01', tz='UTC'),
Timestamp('2001-08-15', tz='UTC'),
Timestamp('2001-08-22', tz='UTC'),
Timestamp('2001-10-02', tz='UTC'),
Timestamp('2001-10-26', tz='UTC'),
Timestamp('2001-11-16', tz='UTC'),
Timestamp('2001-11-30', tz='UTC'),
Timestamp('2001-12-17', tz='UTC'),
Timestamp('2001-12-25', tz='UTC'),
Timestamp('2002-03-25', tz='UTC'),
Timestamp('2002-03-29', tz='UTC'),
Timestamp('2002-05-01', tz='UTC'),
Timestamp('2002-08-15', tz='UTC'),
Timestamp('2002-09-10', tz='UTC'),
Timestamp('2002-10-02', tz='UTC'),
Timestamp('2002-10-15', tz='UTC'),
Timestamp('2002-11-06', tz='UTC'),
Timestamp('2002-11-19', tz='UTC'),
Timestamp('2002-12-25', tz='UTC'),
Timestamp('2003-02-13', tz='UTC'),
Timestamp('2003-03-14', tz='UTC'),
Timestamp('2003-03-18', tz='UTC'),
Timestamp('2003-04-14', tz='UTC'),
Timestamp('2003-04-18', tz='UTC'),
Timestamp('2003-05-01', tz='UTC'),
Timestamp('2003-08-15', tz='UTC'),
Timestamp('2003-10-02', tz='UTC'),
Timestamp('2003-11-26', tz='UTC'),
Timestamp('2003-12-25', tz='UTC'),
Timestamp('2004-01-01', tz='UTC'),
Timestamp('2004-01-26', tz='UTC'),
Timestamp('2004-02-02', tz='UTC'),
Timestamp('2004-03-02', tz='UTC'),
Timestamp('2004-04-09', tz='UTC'),
Timestamp('2004-04-14', tz='UTC'),
Timestamp('2004-04-26', tz='UTC'),
Timestamp('2004-10-13', tz='UTC'),
Timestamp('2004-10-22', tz='UTC'),
Timestamp('2004-11-15', tz='UTC'),
Timestamp('2004-11-26', tz='UTC'),
Timestamp('2005-01-21', tz='UTC'),
Timestamp('2005-01-26', tz='UTC'),
Timestamp('2005-03-25', tz='UTC'),
Timestamp('2005-04-14', tz='UTC'),
Timestamp('2005-07-28', tz='UTC'),
Timestamp('2005-08-15', tz='UTC'),
Timestamp('2005-09-07', tz='UTC'),
Timestamp('2005-10-12', tz='UTC'),
Timestamp('2005-11-03', tz='UTC'),
Timestamp('2005-11-04', tz='UTC'),
Timestamp('2005-11-15', tz='UTC'),
Timestamp('2006-01-11', tz='UTC'),
Timestamp('2006-01-26', tz='UTC'),
Timestamp('2006-02-09', tz='UTC'),
Timestamp('2006-03-15', tz='UTC'),
Timestamp('2006-04-06', tz='UTC'),
Timestamp('2006-04-11', tz='UTC'),
Timestamp('2006-04-14', tz='UTC'),
Timestamp('2006-05-01', tz='UTC'),
Timestamp('2006-08-15', tz='UTC'),
Timestamp('2006-10-02', tz='UTC'),
Timestamp('2006-10-24', tz='UTC'),
Timestamp('2006-10-25', tz='UTC'),
Timestamp('2006-12-25', tz='UTC'),
Timestamp('2007-01-01', tz='UTC'),
Timestamp('2007-01-26', tz='UTC'),
Timestamp('2007-01-30', tz='UTC'),
Timestamp('2007-02-16', tz='UTC'),
Timestamp('2007-03-27', tz='UTC'),
Timestamp('2007-04-06', tz='UTC'),
Timestamp('2007-05-01', tz='UTC'),
Timestamp('2007-05-02', tz='UTC'),
Timestamp('2007-08-15', tz='UTC'),
Timestamp('2007-10-02', tz='UTC'),
Timestamp('2007-12-21', tz='UTC'),
Timestamp('2007-12-25', tz='UTC'),
Timestamp('2008-03-06', tz='UTC'),
Timestamp('2008-03-20', tz='UTC'),
Timestamp('2008-03-21', tz='UTC'),
Timestamp('2008-04-14', tz='UTC'),
Timestamp('2008-04-18', tz='UTC'),
Timestamp('2008-05-01', tz='UTC'),
Timestamp('2008-05-19', tz='UTC'),
Timestamp('2008-08-15', tz='UTC'),
Timestamp('2008-09-03', tz='UTC'),
Timestamp('2008-10-02', tz='UTC'),
Timestamp('2008-10-09', tz='UTC'),
Timestamp('2008-10-30', tz='UTC'),
Timestamp('2008-11-13', tz='UTC'),
Timestamp('2008-11-27', tz='UTC'),
Timestamp('2008-12-09', tz='UTC'),
Timestamp('2008-12-25', tz='UTC'),
Timestamp('2009-01-08', tz='UTC'),
Timestamp('2009-01-26', tz='UTC'),
Timestamp('2009-02-23', tz='UTC'),
Timestamp('2009-03-10', tz='UTC'),
Timestamp('2009-03-11', tz='UTC'),
Timestamp('2009-04-03', tz='UTC'),
Timestamp('2009-04-07', tz='UTC'),
Timestamp('2009-04-10', tz='UTC'),
Timestamp('2009-04-14', tz='UTC'),
Timestamp('2009-04-30', tz='UTC'),
Timestamp('2009-05-01', tz='UTC'),
Timestamp('2009-09-21', tz='UTC'),
Timestamp('2009-09-28', tz='UTC'),
Timestamp('2009-10-02', tz='UTC'),
Timestamp('2009-10-13', tz='UTC'),
Timestamp('2009-10-19', tz='UTC'),
Timestamp('2009-11-02', tz='UTC'),
Timestamp('2009-12-25', tz='UTC'),
Timestamp('2009-12-28', tz='UTC'),
Timestamp('2010-01-01', tz='UTC'),
Timestamp('2010-01-26', tz='UTC'),
Timestamp('2010-02-12', tz='UTC'),
Timestamp('2010-03-01', tz='UTC'),
Timestamp('2010-03-24', tz='UTC'),
Timestamp('2010-04-02', tz='UTC'),
Timestamp('2010-04-14', tz='UTC'),
Timestamp('2010-09-10', tz='UTC'),
Timestamp('2010-11-17', tz='UTC'),
Timestamp('2010-12-17', tz='UTC'),
Timestamp('2011-01-26', tz='UTC'),
Timestamp('2011-03-02', tz='UTC'),
Timestamp('2011-04-12', tz='UTC'),
Timestamp('2011-04-14', tz='UTC'),
Timestamp('2011-04-22', tz='UTC'),
Timestamp('2011-08-15', tz='UTC'),
Timestamp('2011-08-31', tz='UTC'),
Timestamp('2011-09-01', tz='UTC'),
Timestamp('2011-10-06', tz='UTC'),
Timestamp('2011-10-27', tz='UTC'),
Timestamp('2011-11-07', tz='UTC'),
Timestamp('2011-11-10', tz='UTC'),
Timestamp('2011-12-06', tz='UTC'),
Timestamp('2012-01-26', tz='UTC'),
Timestamp('2012-02-20', tz='UTC'),
Timestamp('2012-03-08', tz='UTC'),
Timestamp('2012-04-05', tz='UTC'),
Timestamp('2012-04-06', tz='UTC'),
Timestamp('2012-05-01', tz='UTC'),
Timestamp('2012-08-15', tz='UTC'),
Timestamp('2012-08-20', tz='UTC'),
Timestamp('2012-09-19', tz='UTC'),
Timestamp('2012-10-02', tz='UTC'),
Timestamp('2012-10-24', tz='UTC'),
Timestamp('2012-11-14', tz='UTC'),
Timestamp('2012-11-28', tz='UTC'),
Timestamp('2012-12-25', tz='UTC'),
Timestamp('2013-03-27', tz='UTC'),
Timestamp('2013-03-29', tz='UTC'),
Timestamp('2013-04-19', tz='UTC'),
Timestamp('2013-04-24', tz='UTC'),
Timestamp('2013-05-01', tz='UTC'),
Timestamp('2013-08-09', tz='UTC'),
Timestamp('2013-08-15', tz='UTC'),
Timestamp('2013-09-09', tz='UTC'),
Timestamp('2013-10-02', tz='UTC'),
Timestamp('2013-10-16', tz='UTC'),
Timestamp('2013-11-04', tz='UTC'),
Timestamp('2013-11-15', tz='UTC'),
Timestamp('2013-12-25', tz='UTC'),
Timestamp('2014-02-27', tz='UTC'),
Timestamp('2014-03-17', tz='UTC'),
Timestamp('2014-04-08', tz='UTC'),
Timestamp('2014-04-14', tz='UTC'),
Timestamp('2014-04-18', tz='UTC'),
Timestamp('2014-04-24', tz='UTC'),
Timestamp('2014-05-01', tz='UTC'),
Timestamp('2014-07-29', tz='UTC'),
Timestamp('2014-08-15', tz='UTC'),
Timestamp('2014-08-29', tz='UTC'),
Timestamp('2014-10-02', tz='UTC'),
Timestamp('2014-10-03', tz='UTC'),
Timestamp('2014-10-06', tz='UTC'),
Timestamp('2014-10-15', tz='UTC'),
Timestamp('2014-10-24', tz='UTC'),
Timestamp('2014-11-04', tz='UTC'),
Timestamp('2014-11-06', tz='UTC'),
Timestamp('2014-12-25', tz='UTC'),
Timestamp('2015-01-26', tz='UTC'),
Timestamp('2015-02-17', tz='UTC'),
Timestamp('2015-03-06', tz='UTC'),
Timestamp('2015-04-02', tz='UTC'),
Timestamp('2015-04-03', tz='UTC'),
Timestamp('2015-04-14', tz='UTC'),
Timestamp('2015-05-01', tz='UTC'),
Timestamp('2015-09-17', tz='UTC'),
Timestamp('2015-09-25', tz='UTC'),
Timestamp('2015-10-02', tz='UTC'),
Timestamp('2015-10-22', tz='UTC'),
Timestamp('2015-11-12', tz='UTC'),
Timestamp('2015-11-25', tz='UTC'),
Timestamp('2015-12-25', tz='UTC'),
Timestamp('2016-01-26', tz='UTC'),
Timestamp('2016-03-07', tz='UTC'),
Timestamp('2016-03-24', tz='UTC'),
Timestamp('2016-03-25', tz='UTC'),
Timestamp('2016-04-14', tz='UTC'),
Timestamp('2016-04-15', tz='UTC'),
Timestamp('2016-04-19', tz='UTC'),
Timestamp('2016-07-06', tz='UTC'),
Timestamp('2016-08-15', tz='UTC'),
Timestamp('2016-09-05', tz='UTC'),
Timestamp('2016-09-13', tz='UTC'),
Timestamp('2016-10-11', tz='UTC'),
Timestamp('2016-10-12', tz='UTC'),
Timestamp('2016-10-31', tz='UTC'),
Timestamp('2016-11-14', tz='UTC'),
Timestamp('2017-01-26', tz='UTC'),
Timestamp('2017-02-24', tz='UTC'),
Timestamp('2017-03-13', tz='UTC'),
Timestamp('2017-04-04', tz='UTC'),
Timestamp('2017-04-14', tz='UTC'),
Timestamp('2017-05-01', tz='UTC'),
Timestamp('2017-06-26', tz='UTC'),
Timestamp('2017-08-15', tz='UTC'),
Timestamp('2017-08-25', tz='UTC'),
Timestamp('2017-10-02', tz='UTC'),
Timestamp('2017-10-20', tz='UTC'),
Timestamp('2017-12-25', tz='UTC'),
Timestamp('2018-01-26', tz='UTC'),
Timestamp('2018-02-13', tz='UTC'),
Timestamp('2018-03-02', tz='UTC'),
Timestamp('2018-03-29', tz='UTC'),
Timestamp('2018-03-30', tz='UTC'),
Timestamp('2018-05-01', tz='UTC'),
Timestamp('2018-08-15', tz='UTC'),
Timestamp('2018-08-22', tz='UTC'),
Timestamp('2018-09-13', tz='UTC'),
Timestamp('2018-09-20', tz='UTC'),
Timestamp('2018-10-02', tz='UTC'),
Timestamp('2018-10-18', tz='UTC'),
Timestamp('2018-11-08', tz='UTC'),
Timestamp('2018-11-23', tz='UTC'),
Timestamp('2018-12-25', tz='UTC'),
Timestamp('2019-01-26', tz='UTC'),
Timestamp('2019-03-02', tz='UTC'),
Timestamp('2019-03-04', tz='UTC'),
Timestamp('2019-03-21', tz='UTC'),
Timestamp('2019-04-17', tz='UTC'),
Timestamp('2019-04-19', tz='UTC'),
Timestamp('2019-04-29', tz='UTC'),
Timestamp('2019-05-01', tz='UTC'),
Timestamp('2019-06-05', tz='UTC'),
Timestamp('2019-08-12', tz='UTC'),
Timestamp('2019-08-15', tz='UTC'),
Timestamp('2019-09-02', tz='UTC'),
Timestamp('2019-09-10', tz='UTC'),
Timestamp('2019-10-02', tz='UTC'),
Timestamp('2019-10-08', tz='UTC'),
Timestamp('2019-10-21', tz='UTC'),
Timestamp('2019-10-28', tz='UTC'),
Timestamp('2019-11-12', tz='UTC'),
Timestamp('2019-12-25', tz='UTC'),
Timestamp('2020-02-21', tz='UTC'),
Timestamp('2020-03-10', tz='UTC'),
Timestamp('2020-04-02', tz='UTC'),
Timestamp('2020-04-06', tz='UTC'),
Timestamp('2020-04-10', tz='UTC'),
Timestamp('2020-04-14', tz='UTC'),
Timestamp('2020-05-01', tz='UTC'),
Timestamp('2020-07-31', tz='UTC'),
Timestamp('2020-10-02', tz='UTC'),
Timestamp('2020-11-16', tz='UTC'),
Timestamp('2020-11-30', tz='UTC'),
Timestamp('2020-12-25', tz='UTC'),
Timestamp('2021-01-26', tz='UTC'), # Republic Day
Timestamp('2021-03-11', tz='UTC'), # Maha Shivaratri
Timestamp('2021-03-29', tz='UTC'), # Holi
Timestamp('2021-04-02', tz='UTC'), # Good Friday
Timestamp('2021-04-14', tz='UTC'), # Dr.Baba Saheb Ambedkar Jayanti
Timestamp('2021-04-21', tz='UTC'), # Ram Navami
Timestamp('2021-05-13', tz='UTC'), # Id-ul-Fitr
Timestamp('2021-07-21', tz='UTC'), # Id-al-Adha
Timestamp('2021-08-19', tz='UTC'), # Ashura
Timestamp('2021-09-10', tz='UTC'), # Ganesh Chaturthi
Timestamp('2021-10-15', tz='UTC'), # Vijaya Dashami
Timestamp('2021-11-04', tz='UTC'), # Diwali/Laxmi Puja. muhurat trading day
Timestamp('2021-11-05', tz='UTC'), # Diwali/Laxmi Puja
Timestamp('2021-11-19', tz='UTC'), # Guru Nanak Jayanti
Timestamp('2022-01-26', tz='UTC'), # Republic Day
Timestamp('2022-03-01', tz='UTC'), # Maha Shivaratri
Timestamp('2022-03-18', tz='UTC'), # Holi
Timestamp('2022-04-14', tz='UTC'), # Dr.Baba Saheb Ambedkar Jayanti
Timestamp('2022-04-15', tz='UTC'), # Good Friday
Timestamp('2022-05-03', tz='UTC'), # Id-ul-Fitr
Timestamp('2022-08-09', tz='UTC'), # Moharram
Timestamp('2022-08-15', tz='UTC'), # Independence Day
Timestamp('2022-08-31', tz='UTC'), # Ganesh Chaturthi
Timestamp('2022-10-05', tz='UTC'), # Vijaya Dashami
Timestamp('2022-10-24', tz='UTC'), # Diwali/Laxmi Puja. muhurat trading day
Timestamp('2022-10-26', tz='UTC'), # Diwali-Balipratipada
Timestamp('2022-11-08', tz='UTC'), # Guru Nanak Jayanti
]
class BSEExchangeCalendar(MarketCalendar):
"""
Exchange calendar for the Bombay Stock Exchange (BSE, XBOM).
Open Time: 9:15 AM, Asia/Calcutta
Close Time: 3:30 PM, Asia/Calcutta
Due to the complexity around the BSE holidays, we are hardcoding a list
    of holidays back to 1997, and forward through 2022. There are no known
early closes or late opens.
"""
aliases = ['BSE']
regular_market_times = {
"market_open": ((None, time(9, 15)),),
"market_close": ((None, time(15, 30)),)
}
@property
def name(self):
return "BSE"
@property
def tz(self):
return timezone('Asia/Calcutta')
@property
def adhoc_holidays(self):
return BSEClosedDay
|
from tkinter import *
# create root widget
root = Tk()
def my_click():
my_label = Label(root, text='I just clicked a button!')
my_label.pack()
# create button
my_button = Button(root, text="Click Me!", command=my_click, fg='blue', bg='red')
# pack
my_button.pack()
# mainloop
root.mainloop()
|
import pytest
from django.core.management import call_command
from datahub.company.test.factories import CompanyFactory
from datahub.core.constants import Country
from datahub.core.postcode_constants import CountryPostcodeReplacement
from datahub.core.test_utils import has_reversion_comment, has_reversion_version
from datahub.dbmaintenance.resolvers.company_address import CompanyAddressResolver
pytestmark = pytest.mark.django_db
def setup_us_company_with_all_addresses(post_code):
"""Sets up US Company for tests"""
return CompanyFactory(
address_town='New York',
address_country_id=Country.united_states.value.id,
address_postcode=post_code,
address_area_id=None,
registered_address_town='New York',
registered_address_country_id=Country.united_states.value.id,
registered_address_postcode=post_code,
registered_address_area_id=None,
uk_region_id=None,
archived=False,
duns_number='123456789',
)
def setup_us_company_with_address_only(post_code):
"""Sets up US Company with address only for tests"""
return CompanyFactory(
address_town='New York',
address_country_id=Country.united_states.value.id,
address_postcode=post_code,
address_area_id=None,
registered_address_town='',
registered_address_country_id=None,
registered_address_postcode='',
registered_address_area_id=None,
uk_region_id=None,
archived=False,
duns_number='123456789',
)
def setup_us_company_with_registered_address_only(post_code):
"""Sets up US Company with registered address only for tests"""
return CompanyFactory(
registered_address_town='New York',
registered_address_country_id=Country.united_states.value.id,
registered_address_postcode=post_code,
registered_address_area_id=None,
address_town='',
address_country_id=None,
address_postcode='',
address_area_id=None,
uk_region_id=None,
archived=False,
duns_number='123456789',
)
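# The cases below mix malformed US ZIP codes that the command should normalise
# (e.g. '1 0402' -> '10402') with non-US or free-text values that must pass
# through unchanged.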
@pytest.mark.parametrize(
'post_code, expected_result',
[
('1 0402', '10402'),
('123456789', '123456789'),
('8520 7402', '07402'),
('CA90025', '90025'),
        ('NY 10174 – 4099', '10174 – 4099'),
('NY 10174 - 4099', '10174 - 4099'),
('MC 5270 3800', '03800'),
('K1C1T1', 'K1C1T1'),
('NY 1004', 'NY 1004'),
('YO22 4PT', 'YO22 4PT'),
('RH175NB', 'RH175NB'),
('WA 6155', 'WA 6155'),
('BT12 6RE', 'BT12 6RE'),
('M2 4JB', 'M2 4JB'),
('CA USA', 'CA USA'),
('n/a', 'n/a'),
('MN5512', 'MN5512'),
('BB12 7DY', 'BB12 7DY'),
('PO6 3EZ', 'PO6 3EZ'),
('Nw1 2Ew', 'Nw1 2Ew'),
('WC1R 5NR', 'WC1R 5NR'),
('BH12 4NU', 'BH12 4NU'),
('CT 6506', 'CT 6506'),
('ME9 0NA', 'ME9 0NA'),
('DY14 0QU', 'DY14 0QU'),
('12345', '12345'),
('12345-1234', '12345-1234'),
('12345 - 1234', '12345 - 1234'),
('0 12345', '01234'),
],
)
def test_command_regex_generates_the_expected_postcode_substitution(
post_code,
expected_result,
):
"""
Test regex efficiently without connecting to a database
:param post_code: POSTCODE format good and bad
:param expected_result: regular expression substituted value using the
Command pattern
"""
resolver = CompanyAddressResolver(
country_id=None,
revision_comment=None,
zip_states=None,
postcode_replacement=CountryPostcodeReplacement.united_states.value,
)
actual_result = resolver.format_postcode(post_code)
assert actual_result == expected_result
@pytest.mark.parametrize(
'post_code, area_code',
[
('00589', 'NY'),
('01012', 'MA'),
('02823', 'RI'),
],
)
def test_us_company_with_unique_zips_generates_valid_address_area(
post_code,
area_code,
):
"""
Test postcode is fixed for the purpose of admin area
generation with valid zip codes format
:param post_code: POSTCODE good
:param area_code: Area Code to be generated from Command
"""
company = setup_us_company_with_all_addresses(post_code)
assert company.address_area is None
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.address_area is not None
assert company.address_area.area_code == area_code
assert company.address_postcode == post_code
@pytest.mark.parametrize(
'post_code, area_code',
[
('030121234', 'NH'),
('03912', 'ME'),
('04946', 'ME'),
('05067-1234', 'VT'),
],
)
def test_us_company_with_address_data_only_will_generate_address_area(
post_code,
area_code,
):
"""
Test postcode fixes and area generation with address area data
:param post_code: POSTCODE good
:param area_code: Area Code to be generated from Command
"""
company = setup_us_company_with_address_only(post_code)
assert company.address_area is None
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.address_area is not None
assert company.address_area.area_code == area_code
assert company.address_postcode == post_code
@pytest.mark.parametrize(
'post_code, area_code',
[
('05512', 'MA'),
('05612-1234', 'VT'),
('060123456', 'CT'),
('07045', 'NJ'),
],
)
def test_us_company_with_unique_zips_generates_the_valid_registered_address_area(
post_code,
area_code,
):
"""
Test registered address postcode fixes and area generation a
couple of valid Zip Codes using the real DB
:param post_code: POSTCODE good
:param area_code: Area Code to be generated from Command
"""
company = setup_us_company_with_all_addresses(post_code)
assert company.registered_address_area is None
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.registered_address_area is not None
assert company.registered_address_area.area_code == area_code
assert company.registered_address_postcode == post_code
@pytest.mark.parametrize(
'post_code, area_code',
[
('10057', 'NY'),
('15078', 'PA'),
('19789-4567', 'DE'),
('20067', 'DC'),
],
)
def test_us_company_with_registered_address_data_only_will_generate_registered_address_area(
post_code,
area_code,
):
"""
Test registered address data only creates data expected
:param post_code: POSTCODE good
:param area_code: Area Code to be generated from Command
"""
company = setup_us_company_with_registered_address_only(post_code)
assert company.registered_address_area is None
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.registered_address_area is not None
assert company.registered_address_area.area_code == area_code
assert company.registered_address_postcode == post_code
@pytest.mark.parametrize(
'post_code, expected_result',
[
('1 0402', '10402'),
('8520 7402', '07402'),
('CA90025', '90025'),
        ('NY 10174 – 4099', '10174 – 4099'),
('NY 10174 - 4099', '10174 - 4099'),
('NY 123456789', '123456789'),
],
)
def test_command_fixes_invalid_postcodes_in_all_post_code_fields(
post_code,
expected_result,
):
"""
Test Patterns that need fixing in all postcode fields
:param post_code: Invalid Postcode Format
:param expected_result: The expected result of the fix
"""
company = setup_us_company_with_all_addresses(post_code)
assert company.address_postcode == post_code
assert company.registered_address_postcode == post_code
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.address_postcode == expected_result
assert company.registered_address_postcode == expected_result
@pytest.mark.parametrize(
'post_code, expected_result',
[
('A1B 4H7', 'A1B 4H7'),
('MA 02 111', 'MA 02 111'),
('PO Box 2900', 'PO Box 2900'),
('5 Westheimer Road', '5 Westheimer Road'),
('CA USA', 'CA USA'),
('n/a', 'n/a'),
('VA 2210', 'VA 2210'),
('tbc', 'tbc'),
],
)
def test_command_leaves_invalid_postcodes_in_original_state_with_no_area(
post_code,
expected_result,
):
"""
Test edge cases are preserved
:param post_code: Invalid Postcode Format
:param expected_result: The expected result of the fix
"""
company = setup_us_company_with_all_addresses(post_code)
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.address_postcode == expected_result
assert company.registered_address_postcode == expected_result
assert company.address_area is None
assert company.registered_address_area is None
@pytest.mark.parametrize(
'post_code, expected_result',
[
('1 0402', '10402'),
('8520 7402', '07402'),
('CA90025', '90025'),
],
)
def test_audit_log(post_code, expected_result):
"""
Verify auditable versions of the code are retained
:param post_code: Invalid Postcode Format
:param expected_result: The expected result of the fix
"""
company = setup_us_company_with_all_addresses(post_code)
call_command('fix_us_company_address')
company.refresh_from_db()
assert company.address_postcode == expected_result
assert company.registered_address_postcode == expected_result
assert has_reversion_version(company)
assert has_reversion_comment('US Area and postcode Fix.')
@pytest.mark.parametrize(
'post_code, expected_result',
[
('1 0402', '10402'),
('123456789', '123456789'),
('8520 7402', '07402'),
('CA90025', '90025'),
],
)
def test_audit_does_not_continue_creating_revisions(post_code, expected_result):
"""
Verify auditable versions of the code are retained
:param post_code: Invalid Postcode Format
:param expected_result: The expected result of the fix
"""
company = setup_us_company_with_all_addresses(post_code)
call_command('fix_us_company_address')
company.refresh_from_db()
assert has_reversion_version(company, 1)
assert company.address_postcode == expected_result
assert company.registered_address_postcode == expected_result
call_command('fix_us_company_address')
company.refresh_from_db()
assert has_reversion_version(company, 1)
assert company.address_postcode == expected_result
assert company.registered_address_postcode == expected_result
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 02.08.2017 10:04
:Licence MIT
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class Single(Rule):
rule = ([0], [1])
class TwoRight(Rule):
rule = ([0], [1, 2])
class ThreeLeft(Rule):
rule = ([0, 1, 'a'], [2])
class Multiple(Rule):
rule = ([0, 1, 2], [3, 4])
class FromRuleComputeRulesTest(TestCase):
def test_rules_single(self):
r = Single.rules
self.assertIsInstance(r, list)
self.assertEqual(len(r), 1)
self.assertEqual(r[0], Single.rule)
self.assertIsInstance(r[0], tuple)
self.assertEqual(r[0][0], [0])
self.assertEqual(r[0][1], [1])
self.assertEqual(r[0][0][0], 0)
self.assertEqual(r[0][1][0], 1)
def test_rules_twoRight(self):
r = TwoRight.rules
self.assertIsInstance(r, list)
self.assertEqual(len(r), 1)
self.assertEqual(r[0], TwoRight.rule)
self.assertIsInstance(r[0], tuple)
self.assertEqual(r[0][0], [0])
self.assertEqual(r[0][1], [1, 2])
self.assertEqual(r[0][0][0], 0)
self.assertEqual(r[0][1][0], 1)
self.assertEqual(r[0][1][1], 2)
def test_rules_threeLeft(self):
r = ThreeLeft.rules
self.assertIsInstance(r, list)
self.assertEqual(len(r), 1)
self.assertEqual(r[0], ThreeLeft.rule)
self.assertIsInstance(r[0], tuple)
self.assertEqual(r[0][0], [0, 1, 'a'])
self.assertEqual(r[0][1], [2])
self.assertEqual(r[0][0][0], 0)
self.assertEqual(r[0][0][1], 1)
self.assertEqual(r[0][0][2], 'a')
self.assertEqual(r[0][1][0], 2)
def test_rules_multiple(self):
r = Multiple.rules
self.assertIsInstance(r, list)
self.assertEqual(len(r), 1)
self.assertEqual(r[0], Multiple.rule)
self.assertIsInstance(r[0], tuple)
self.assertEqual(r[0][0], [0, 1, 2])
self.assertEqual(r[0][1], [3, 4])
self.assertEqual(r[0][0][0], 0)
self.assertEqual(r[0][0][1], 1)
self.assertEqual(r[0][0][2], 2)
self.assertEqual(r[0][1][0], 3)
self.assertEqual(r[0][1][1], 4)
if __name__ == '__main__':
main()
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
del a
del b, c
|
"""
Contains exceptions used by pylectio.
"""
class LectioError(Exception):
"""
A general exception used as a base for other, more specific, exceptions.
"""
class NotLoggedInError(LectioError):
"""
An exception raised when the user tries to do an action that requires
authentication when not authenticated.
"""
class SessionClosedError(LectioError):
"""
An exception raised when the user tries to interact with a closed
``Session``.
"""
class AuthenticationError(LectioError):
"""
An exception raised when the authentication failed.
"""
class ScrapingError(LectioError):
"""
An exception raised when scraping failed.
"""
|
import numpy as np
import pandas as pd
import sys
class Sudoku():
def __init__(self, problem):
self.problem = problem
self.board = pd.DataFrame(np.reshape([int(char) for char in problem],(9,9)))
# self.lastSearch = None
# self.prevCells = dict()
self.count=0
# def isComplete(self, remainDict):
# for value in remainDict.values():
# if sum(value) > 0:
# return False
# return True
def getSquare(self,num):
if num in range(3): return range(3)
elif num in range(3,6): return range(3,6)
else: return range(6,9)
def isinSquare(self,num,row,col):
        if (self.board.loc[self.getSquare(row),self.getSquare(col)] == num).any().any():
return True
else: return False
def isValid(self,num,row,col):
if (self.board.loc[:,col]==num).any():
return False
elif (self.board.loc[row,:]==num).any():
return False
elif self.isinSquare(num,row,col):
return False
return True
def nextPos(self,pos=None):
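        """Scan the board row by row for the next empty (0) cell at or after ``pos``;
        return None when no empty cell remains."""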
if pos is None:
row = col = 0
elif pos[1] == self.board.shape[1]:
row = pos[0]+1
col = 0
elif pos == (self.board.shape[0]+1, self.board.shape[1]+1):
return None
else:
row = pos[0]
col = pos[1]+1
for r in range(row,self.board.shape[0]):
for c in range(col, self.board.shape[1]):
if self.board.loc[r,c] == 0:
return r,c
col = 0
return None
# def prevPos(self):
# pass
# if self.lastSearch is None or self.lastSearch == (0,0):
# print("is none")
# return None
# elif self.lastSearch[1] == 0:
# print("is beginning")
# row = self.lastSearch[0]-1
# col = self.board.shape[1]
# else:
# row = self.lastSearch[0]
# col = self.lastSearch[1]-1
# print("normal {} {}".format(row,col))
# for r in range(row,-1,-1):
# for c in range(col,-1,-1):
# if self.board.loc[r,c] == 0:
# return (r,c)
# col =self.board.shape[1]
# return None
def findValid(self,pos,startnum=1):
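        """Depth-first backtracking: try digits 1-9 in the empty cell at ``pos``;
        on a valid placement recurse to the next empty cell, otherwise reset the
        cell to 0 and backtrack."""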
if pos==None:
return True
row = pos[0]
col = pos[1]
for i in range(startnum,10):
self.count+=1
if self.isValid(i, row,col):
self.board.loc[row,col] = i
# print("change {},{} to: {}".format(row,col,i))
if self.findValid(self.nextPos(pos)):
return True
self.board.loc[row,col] = 0
return False
def run(self):
self.findValid(self.nextPos())
# for row in range(9):
# prevcol = 0
# for col in range(9):
# if self.board.loc[col,row] == 0:
# for i in range(1,10):
# self.count+=1
# if self.isValid(i, row,col):
# self.board.loc[col,row] = i
# break
# if self.board.loc[col,row] == 0:
# prevcol = col
# try:
# if self.count == self.countmax:
# self.printBoard()
# sys.exit(0)
# remain = self.getRemaining()
# if(self.isComplete(remain)):
# self.printBoard()
# sys.exit(0)
# else:
# self.updateBoard(self.getRemaining())
# self.count +=1;
# self.run()
# except KeyboardInterrupt:
# print("Program Interrupted!")
# def getRemaining(self):
# remain = dict()
# for row in range(self.board.shape[0]):
# for col in range(self.board.shape[1]):
# if self.board.loc[col,row] != 0: remain["{}{}".format(row,col)] = [0]
# else:
# remain["{}{}".format(row,col)] = [x for x in range(1,10)
# if x not in (y for y in self.board.loc[:,row]) and
# x not in(z for z in self.board.loc[col,:])]
# return remain
# def updateBoard(self, remain):
# pass
# for key in remain.keys():
# if len(remain[key]) == 1 and remain[key][0] != 0:
# self.board.loc[int(key[1]),int(key[0])] = remain[key][0]
def printSudoku(self):
print("-"*25)
count=0
rowcount=0
for nums in self.board.values:
for num in nums:
count+=1
if num == 0:
num = " "
if count < 9 and count%3 ==1:
print("| {} ".format(num),end="")
elif count==9:
print("{} |".format(num))
count=0
rowcount+=1
if rowcount == 3:
print("-"*25)
rowcount=0
else:
print("{} ".format(num),end="")
if __name__ == "__main__":
with open("../res/problem1.txt",'r') as f:
problems = f.read().splitlines()
for problem in problems:
game = Sudoku(problem)
print("START")
game.printSudoku()
game.run()
print("END")
game.printSudoku()
|
from transitions.extensions import GraphMachine
class GraphMixin(GraphMachine):
def _init_graphviz_engine(self, use_pygraphviz):
Graph = super(GraphMixin, self)._init_graphviz_engine(use_pygraphviz)
class TweakedGraph(Graph):
_TRANSITION_CHECK = self._TRANSITION_CHECK
def _transition_label(self, tran):
if tran.get('trigger') == self._TRANSITION_CHECK:
return ''
else:
return super(TweakedGraph, self)._transition_label(tran)
return TweakedGraph
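# Hedged usage sketch (assumes the ``transitions`` diagram extras are installed; the
# subclass below is illustrative -- _TRANSITION_CHECK is expected to be supplied by the
# concrete machine class, since GraphMixin itself does not define it):
#
#   class DemoMachine(GraphMixin):
#       _TRANSITION_CHECK = 'check'
#
#   model = type('Model', (), {})()
#   machine = DemoMachine(model=model, states=['idle', 'busy'], initial='idle',
#                         transitions=[
#                             {'trigger': 'check', 'source': 'idle', 'dest': 'idle'},
#                             {'trigger': 'start', 'source': 'idle', 'dest': 'busy'},
#                         ])
#   model.get_graph().draw('demo.png', prog='dot')  # the 'check' self-loop gets no label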
|
#!/usr/bin/env python3
import subprocess
import sys
import time
subprocess.call([sys.executable, '-m', 'pip', 'install','--quiet' , 'requests'])
import json
import requests
import os
auth_token = sys.argv[6]
app_dir = sys.argv[7]
templateset_filename_prefix = app_dir + "/config/matcher/template_"
json_file_suffix = ".json"
headers = {'Content-type': 'application/json', 'Authorization':'Bearer ' + auth_token}
def main():
template_scenario = sys.argv[1]
gateway = sys.argv[2]
customer = sys.argv[3]
app = sys.argv[4]
template_version_temp_file = sys.argv[5]
print(template_scenario + " " + gateway + " " + customer + " " + app)
templateset_filename = templateset_filename_prefix + template_scenario + json_file_suffix
# defining the api-endpoint
save_template_endpoint = "https://" + gateway + "/api/as/saveTemplateSet/" + customer + "/" + app
template_version = register_templates_from_file(templateset_filename, save_template_endpoint, customer, app)
    if template_version is None:
print("Cannot write template version to file")
else:
with open(template_version_temp_file, "w") as f:
f.write(template_version)
print("Written template version: " + template_version + " to file " + template_version_temp_file)
def register_templates_from_file(templateset_filename, api_endpoint, customer, app):
try:
with open(templateset_filename, encoding='utf-8', errors='ignore') as json_data:
template_as_dict = json.load(json_data, strict=False)
# Override previous invalid timestamp
template_as_dict["timestamp"] = time.time()
r = requests.post(url=api_endpoint, json=template_as_dict, headers=headers)
print("Registered template json for " + customer + \
" :: " + app)
print(api_endpoint)
print(r.json())
response_json = r.json()
print("Parsed json response")
print("Got Response :: " + response_json["Message"])
template_version = response_json["templateSetVersion"]
return template_version
except json.JSONDecodeError as e:
print("Cannot serialize templateSet: " + templateset_filename + " JSON to object ")
print(e)
except Exception as e:
print("Exception occurred for Customer " + customer + " App :: " + app)
print(e)
if __name__ == "__main__":
main()
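# Hedged usage note: the script reads its inputs positionally (inferred from the
# sys.argv indices above); the file name below is illustrative, not the project's actual one.
#
#   python register_templates.py <template_scenario> <gateway> <customer> <app> \
#       <template_version_temp_file> <auth_token> <app_dir>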
|
import os
import numpy as np
from scipy import sparse
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.data
from uncurl.state_estimation import initialize_means_weights
from nn_utils import BatchDataset, loss_function, ElementWiseLayer,\
IdentityLayer
# A multi-encoder architecture for batch effect correction
EPS = 1e-10
def multibatch_loss(w_out, batches, n_batches):
"""
Args:
w_out (tensor): shape is (cells, k)
batches (array or tensor): values in [0, n_batches), length=cells
n_batches (int): number of batches
"""
# TODO
# sum(w_out[batches==i].mean() - w_out[batches==0].mean() for i in range(0, n_batches))
if n_batches <= 1:
return 0
batch_0_mean = w_out[batches==0].mean(0)
return sum((torch.abs(w_out[batches==i].mean(0) - batch_0_mean)).sum() for i in range(1, n_batches))
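# Worked toy example of the penalty above (illustrative comment, not part of the module):
# with two batches, the loss is the summed absolute difference between the per-cluster
# mean weights of each later batch and batch 0.
#
#   w = torch.tensor([[0.9, 0.1], [0.8, 0.2], [0.1, 0.9]])
#   b = torch.tensor([0, 0, 1])
#   multibatch_loss(w, b, 2)  # |0.1 - 0.85| + |0.9 - 0.15| = 1.5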
class WEncoderMultibatch(nn.Module):
def __init__(self, genes, k,
num_batches=1,
use_reparam=True,
use_batch_norm=True,
hidden_units=400,
hidden_layers=1,
use_shared_softmax=False):
"""
The W Encoder generates W from the data. The MultiBatch encoder has multiple encoder
layers for different batches.
"""
super(WEncoderMultibatch, self).__init__()
self.genes = genes
self.k = k
self.num_batches = num_batches
self.use_batch_norm = use_batch_norm
self.use_reparam = use_reparam
self.hidden_units = hidden_units
self.hidden_layers = hidden_layers
self.use_shared_softmax = use_shared_softmax
# TODO: set multi-batches
self.encoder_layers = nn.ModuleList()
for batch in range(self.num_batches):
encoder = []
fc1 = nn.Linear(genes, hidden_units)
encoder.append(fc1)
if use_batch_norm:
bn1 = nn.BatchNorm1d(hidden_units)
encoder.append(bn1)
for i in range(hidden_layers - 1):
layer = nn.Linear(hidden_units, hidden_units)
encoder.append(layer)
if use_batch_norm:
encoder.append(nn.BatchNorm1d(hidden_units))
encoder.append(nn.ReLU(True))
if not use_shared_softmax:
encoder.append(nn.Linear(hidden_units, k))
seq = nn.Sequential(*encoder)
self.encoder_layers.append(seq)
if use_shared_softmax:
self.fc21 = nn.Linear(hidden_units, k)
# TODO: this won't work if use_shared_softmax is False
if self.use_reparam:
self.fc22 = nn.Linear(hidden_units, genes)
def forward(self, x, batches):
"""
x is a data batch
batches is a vector of integers with the same length as x,
indicating the batch from which each data point originates.
"""
outputs = []
inverse_indices = np.zeros(x.shape[0], dtype=int)
num_units = 0
for i in range(self.num_batches):
batch_index_i = (batches == i)
output = x[batch_index_i, :]
if len(output) == 0:
continue
indices = batch_index_i.nonzero().flatten()
inverse_indices[indices] = range(num_units, num_units + output.shape[0])
num_units += output.shape[0]
output = self.encoder_layers[i](output)
outputs.append(output)
total_output = torch.cat(outputs)
total_output = total_output[inverse_indices]
if self.use_shared_softmax:
total_output = self.fc21(total_output)
        if self.use_reparam:
            return F.softmax(total_output, dim=1), self.fc22(total_output)
        else:
            return F.softmax(total_output, dim=1), None
class WDecoder(nn.Module):
def __init__(self, genes, k, use_reparam=True, use_batch_norm=True):
"""
The W Decoder takes M*W, and returns X.
"""
super(WDecoder, self).__init__()
self.fc_dec1 = nn.Linear(genes, 400)
#self.fc_dec2 = nn.Linear(400, 400)
self.fc_dec3 = nn.Linear(400, genes)
def forward(self, x):
output = F.relu(self.fc_dec1(x))
output = F.relu(self.fc_dec3(output))
return output
class UncurlNetW(nn.Module):
def __init__(self, genes, k, M, use_decoder=True,
use_reparam=True,
use_m_layer=True,
use_batch_norm=True,
use_multibatch_encoder=True,
use_multibatch_loss=True,
use_shared_softmax=True,
multibatch_loss_weight=0.5,
hidden_units=400,
hidden_layers=1,
num_batches=1,
loss='poisson',
**kwargs):
"""
This is an autoencoder architecture that learns a mapping from
the data to W.
Args:
genes (int): number of genes
k (int): latent dim (number of clusters)
M (array): genes x k matrix
use_decoder (bool): whether or not to use a decoder layer
use_reparam (bool): whether or not to use reparameterization trick
use_m_layer (bool): whether or not to treat M as a differentiable linear layer
use_batch_norm (bool): whether or not to use batch norm in the encoder
hidden_units (int): number of hidden units in encoder
hidden_layers (int): number of hidden layers in encoder
loss (str): 'poisson', 'l1', or 'mse' - specifies loss function.
"""
super(UncurlNetW, self).__init__()
self.genes = genes
self.k = k
# M is the output of UncurlNetM?
self.M = M
self.use_decoder = use_decoder
self.use_reparam = use_reparam
self.use_batch_norm = use_batch_norm
self.use_m_layer = use_m_layer
self.use_multibatch_encoder = use_multibatch_encoder
self.use_multibatch_loss = use_multibatch_loss
self.multibatch_loss_weight = multibatch_loss_weight
self.loss = loss.lower()
self.num_batches = num_batches
if use_multibatch_encoder:
self.encoder = WEncoderMultibatch(genes, k, num_batches,
use_reparam, use_batch_norm,
hidden_units=hidden_units, hidden_layers=hidden_layers,
use_shared_softmax=use_shared_softmax)
else:
from deep_uncurl_pytorch import WEncoder
self.encoder = WEncoder(genes, k,
use_reparam, use_batch_norm,
hidden_units=hidden_units, hidden_layers=hidden_layers)
if use_m_layer:
self.m_layer = nn.Linear(k, genes, bias=False)
self.m_layer.weight.data = M#.transpose(0, 1)
if self.use_decoder:
self.decoder = WDecoder(genes, k, use_reparam, use_batch_norm)
else:
self.decoder = None
# batch correction layers
# batch 0 is always the identity layer
self.correction_layers = nn.ModuleList()
self.correction_layers.append(IdentityLayer())
for b in range(num_batches - 1):
correction = ElementWiseLayer(self.genes)
#correction = IdentityLayer()
self.correction_layers.append(correction)
def encode(self, x, batch):
# returns two things: mu and logvar
if self.use_multibatch_encoder:
return self.encoder(x, batch)
else:
return self.encoder(x)
def decode(self, x):
return self.decoder(x)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def apply_correction(self, x, batches):
"""
Applies the batch correction layers...
"""
# TODO: add a linear correction to w rather than do whatever this is.
outputs = []
inverse_indices = np.zeros(x.shape[0], dtype=int)
num_units = 0
for i in range(0, self.num_batches):
batch_index_i = (batches == i)
output = x[batch_index_i, :]
if len(output) == 0:
continue
indices = batch_index_i.nonzero().flatten()
inverse_indices[indices] = range(num_units, num_units + output.shape[0])
num_units += output.shape[0]
output = self.correction_layers[i](output)
outputs.append(output)
total_output = torch.cat(outputs)
total_output = total_output[inverse_indices]
return total_output
def forward(self, x, batch=None):
if batch is None:
batch = torch.zeros(x.shape[0], dtype=torch.int)
w, logvar = self.encode(x, batch)
# should be a matrix-vector product
mu = w
if self.use_m_layer:
mu = self.m_layer(w) + EPS
else:
mu = torch.matmul(self.M, w) + EPS
# apply batch correction
mu = self.apply_correction(mu, batch)
if self.use_reparam:
z = self.reparameterize(mu, logvar)
if self.use_decoder:
return self.decode(z), mu, logvar
else:
return z, mu, logvar
else:
if self.use_decoder:
return self.decode(mu), w
else:
return mu, w
def clamp_m(self):
"""
makes all the entries of self.m_layer non-negative.
"""
w = self.m_layer.weight.data
w[w<0] = 0
self.m_layer.weight.data = w
def train_batch(self, x, optim, batches=None):
"""
Trains on a data batch, with the given optimizer...
"""
optim.zero_grad()
if self.use_reparam:
output, mu, logvar = self.forward(x, batches)
output += EPS
loss = loss_function(output, x, mu, logvar)
loss.backward()
else:
output, w = self.forward(x, batches)
output += EPS
if self.loss == 'poisson':
loss = F.poisson_nll_loss(output, x, log_input=False, full=True, reduction='sum')
elif self.loss == 'l1':
loss = F.l1_loss(output, x, reduction='sum')
elif self.loss == 'mse':
loss = F.mse_loss(output, x, reduction='sum')
if self.use_multibatch_loss:
loss += self.multibatch_loss_weight*multibatch_loss(w, batches, self.num_batches)
loss.backward()
optim.step()
self.clamp_m()
return loss.item()
    def get_w(self, X, batches=None):
        """
        X is a dense array or tensor of shape gene x cell.
        batches, if given as a list or array, is converted to a LongTensor so the
        multibatch encoder can index cells by batch id.
        """
        self.eval()
        X_tensor = torch.tensor(X.T, dtype=torch.float32)
        if batches is not None and not torch.is_tensor(batches):
            batches = torch.tensor(batches, dtype=torch.long)
        encode_results = self.encode(X_tensor, batches)
        return encode_results[0].detach()
#data_loader = torch.utils.data.DataLoader(X.T,
# batch_size=X.shape[1],
# shuffle=False)
def get_m(self):
return self.m_layer.weight.data
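# Reading aid for UncurlNetW.forward (shapes inferred from the code above):
#   x: (cells_in_minibatch, genes), batch: (cells_in_minibatch,) integer batch ids
#   encoder -> w: (cells_in_minibatch, k), softmax-normalized over k
#   m_layer = Linear(k, genes, bias=False) with weight M -> mu: (cells_in_minibatch, genes)
#   apply_correction rescales mu per batch; with use_reparam=False the return is (mu, w)
#   (or (decode(mu), w) when a decoder is used).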
class UncurlNet(object):
def __init__(self, X=None, k=10, batches=None, genes=0, cells=0, initialization='tsvd', init_m=None, **kwargs):
"""
UncurlNet can be initialized in two ways:
- initialize using X, a genes x cells data matrix
- initialize using genes, cells, init_m (when X is not available)
Args:
X: data matrix (can be dense np array or sparse), of shape genes x cells
k (int): number of clusters (latent dimensionality)
initialization (str): see uncurl.initialize_means_weights
"""
if X is not None:
self.X = X
self.genes = X.shape[0]
self.cells = X.shape[1]
# TODO: change default initialization??? random initialization???
if batches is not None and len(batches) == self.cells:
batches = np.array(batches)
# only select batch 0?
X = X[:, batches==0]
M, W = initialize_means_weights(X, k, initialization=initialization)
self.M = torch.tensor(M, dtype=torch.float32)
else:
self.X = None
self.genes = genes
self.cells = cells
self.M = torch.tensor(init_m, dtype=torch.float32)
self.k = k
# initialize M and W using uncurl's initialization
self.w_net = UncurlNetW(self.genes, self.k, self.M, **kwargs)
# TODO: set device (cpu or gpu), optimizer, # of threads
def get_w(self, data):
return self.w_net.get_w(data)
def get_m(self):
return self.w_net.get_m()
def load(self, path):
"""
loads an UncurlNetW object from file.
"""
# TODO
w_net = torch.load(path)
self.w_net = w_net
def save(self, path):
"""
Saves a model to a path...
"""
# TODO: save only model parameters, or save the whole model?
torch.save(self.w_net, path)
def preprocess(self):
"""
Preprocesses the data, converts self.X into a tensor.
"""
from scipy import sparse
if sparse.issparse(self.X):
self.X = sparse.coo_matrix(self.X)
values = self.X.data
indices = np.vstack((self.X.row, self.X.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
self.X = torch.sparse.FloatTensor(i, v, torch.Size(self.X.shape))
else:
self.X = torch.tensor(self.X, dtype=torch.float32)
def pre_train_encoder(self, X=None, batches=None, n_epochs=20, lr=1e-3, weight_decay=0, disp=True,
device='cpu', log_interval=1, batch_size=0):
"""
pre-trains the encoder for w_net - fixing M.
"""
# sets the network to train mode
self.w_net.train()
for param in self.w_net.encoder.parameters():
param.requires_grad = True
for param in self.w_net.correction_layers.parameters():
param.requires_grad = True
for param in self.w_net.m_layer.parameters():
param.requires_grad = False
self._train(X, batches, n_epochs, lr, weight_decay, disp, device, log_interval,
batch_size)
def train_m(self, X=None, batches=None, n_epochs=20, lr=1e-3, weight_decay=0, disp=True,
device='cpu', log_interval=1, batch_size=0):
"""
trains only the m layer.
"""
self.w_net.train()
for param in self.w_net.encoder.parameters():
param.requires_grad = False
for param in self.w_net.correction_layers.parameters():
param.requires_grad = False
for param in self.w_net.m_layer.parameters():
param.requires_grad = True
self._train(X, batches, n_epochs, lr, weight_decay, disp, device, log_interval,
batch_size)
def train_model(self, X=None, batches=None, n_epochs=20, lr=1e-3, weight_decay=0, disp=True,
device='cpu', log_interval=1, batch_size=0):
"""
trains the entire model.
"""
self.w_net.train()
for param in self.w_net.encoder.parameters():
param.requires_grad = True
for param in self.w_net.correction_layers.parameters():
param.requires_grad = True
for param in self.w_net.m_layer.parameters():
param.requires_grad = True
self._train(X, batches, n_epochs, lr, weight_decay, disp, device, log_interval,
batch_size)
def train_1(self, X=None, batches=None, n_encoder_epochs=20, n_model_epochs=50, **params):
"""
Trains the model, first fitting the encoder and then fitting both M and
the encoder.
"""
self.pre_train_encoder(X, batches, n_epochs=n_encoder_epochs, **params)
self.train_model(X, batches, n_epochs=n_model_epochs, **params)
def train_alternating(self, X=None, batches=None, n_outer_iters=10, n_inner_epochs=10, **params):
"""
Trains the model using alternating minimization, first fitting the W encoder
and then fitting M.
"""
for i in range(n_outer_iters):
self.pre_train_encoder(X, batches, n_epochs=n_inner_epochs, **params)
self.train_model(X, batches, n_epochs=n_inner_epochs, **params)
def _train(self, X=None, batches=None, n_epochs=20, lr=1e-3, weight_decay=0, disp=True,
device='cpu', log_interval=1, batch_size=0):
"""
trains the w_net...
Args:
X (array): genes x cells
batches (array or list): list of batch indices for each cell
n_epochs: number of epochs to train for
lr (float): learning rate
weight_decay (float)
disp (bool): whether or not to display outputs
device (str): cpu or gpu
log_interval: how often to print log
            batch_size: default is 100 when not given
"""
        if X is not None:
            self.X = X
        if batch_size == 0:
            batch_size = 100
            #batch_size = max(100, int(self.X.shape[1]/20))
        # use self.X so the stored matrix is used when X is not passed in
        dataset = BatchDataset(self.X.T, batches)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=True)
#optimizer = torch.optim.SparseAdam(lr=lr, weight_decay=weight_decay)
optimizer = torch.optim.Adam(params=self.w_net.parameters(),
lr=lr, weight_decay=weight_decay)
for epoch in range(n_epochs):
train_loss = 0.0
for batch_idx, data in enumerate(data_loader):
data, b = data
data = data.to(device)
loss = self.w_net.train_batch(data, optimizer, b)
if disp and (batch_idx % log_interval == 0):
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(data_loader.dataset),
100. * batch_idx / len(data_loader),
loss / len(data)))
train_loss += loss
if disp:
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(data_loader.dataset)))
def get_mw(self, data):
"""
Returns a numpy array representing MW.
"""
# gets MW for data denoising and imputation
m = self.get_m()
w = self.get_w(data).transpose(1, 0)
mw = torch.matmul(m, w)
return mw.numpy()
if __name__ == '__main__':
import uncurl
from uncurl.state_estimation import objective
from uncurl.preprocessing import cell_normalize, log1p
import scipy.io
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
import pandas as pd
table_seqwell = pd.read_table('../uncurl_test_datasets/batch_effects_seurat/IntegratedAnalysis_ExpressionMatrices/pbmc_SeqWell.expressionMatrix.txt.gz')
table_10x = pd.read_table('../uncurl_test_datasets/batch_effects_seurat/IntegratedAnalysis_ExpressionMatrices/pbmc_10X.expressionMatrix.txt.gz')
genes_seqwell = table_seqwell.index
genes_10x = table_10x.index
genes_set = set(genes_seqwell).intersection(genes_10x)
genes_list = list(genes_set)
data_seqwell = table_seqwell.loc[genes_list].values
data_10x = table_10x.loc[genes_list].values
batch_list = [0]*data_seqwell.shape[1]
batch_list += [1]*data_10x.shape[1]
data_total = np.hstack([data_seqwell, data_10x])
X_log_norm = log1p(cell_normalize(data_total)).astype(np.float32)
net1 = UncurlNet(X_log_norm, 10,
batches=batch_list,
use_reparam=False, use_decoder=False,
use_batch_norm=True,
hidden_layers=2,
hidden_units=400,
num_batches=2,
loss='mse')
net1.train_1(X_log_norm, batch_list, log_interval=10,
batch_size=500)
# TODO: test clustering?
w = net1.w_net.get_w(X_log_norm, batch_list)
# TODO: compare to non-multibatch, run tsne, ...
|
from django.contrib.auth.models import User
from webpos.models import Item, Bill, BillItem
def commit_bill(output, reqdata, user):
billhd = Bill(customer_name=reqdata['customer_name'],
server=User.objects.get(pk=user.id).username)
billitms = []
reqquants = reqdata['items']
dbitms = Item.objects.filter(name__in=reqquants.keys())
for dbitm in dbitms:
reqitem = reqquants[dbitm.name]
quant = reqitem['qty']
notes = reqitem['notes']
db_quant = dbitm.quantity
if db_quant is not None:
newquant = db_quant - quant
if newquant < 0:
output['errors'].append((dbitm.name, dbitm.quantity))
else:
if output['errors']:
continue
output['total'] += dbitm.price * quant
billitms.append(BillItem(item=dbitm, quantity=quant,
category=dbitm.category,
item_price=dbitm.price,
note=notes))
dbitm.quantity = newquant
else:
output['total'] += dbitm.price * quant
billitms.append(BillItem(item=dbitm, quantity=quant,
category=dbitm.category,
item_price=dbitm.price,
note=notes))
if output['errors']:
output['total'] = 0
output['customer_id'] = None
output['errors'] = dict(output['errors'])
return output, None
else:
output['errors'] = dict(output['errors'])
if output['total'] < 0:
output['total'] = 0
billhd.total = output['total']
billhd.customer_id = output['customer_id']
billhd.save()
output['date'] = billhd.date
output['bill_id'] = billhd.id
for billitm, dbitm in zip(billitms, dbitms):
billitm.bill = billhd
billitm.save()
dbitm.save()
return output, billhd
def undo_bill(billid, user):
bill = Bill.objects.get(pk=billid)
if not bill.is_committed():
return 'Bill has already been deleted!'
for billitem in bill.billitem_set.all():
if billitem.item.quantity is not None:
billitem.item.quantity += billitem.quantity
billitem.item.save()
bill.deleted_by = user.username
bill.save()
    return 'Bill #{} deleted!'.format(billid)
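# Hedged sketch of the dict shapes commit_bill works with (inferred from the code above;
# the item names and values are illustrative):
#
#   reqdata = {'customer_name': 'Alice',
#              'items': {'Espresso': {'qty': 2, 'notes': 'no sugar'}}}
#   output = {'errors': [], 'total': 0, 'customer_id': None}
#   output, bill = commit_bill(output, reqdata, request.user)
#   # success: output gains 'bill_id' and 'date', and 'total' holds the billed amount;
#   # stock shortage: 'errors' becomes {item name: remaining quantity} and bill is None.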
|
# Awakened Spirit (57461)
haku = 9130081
sm.removeEscapeButton()
sm.setSpeakerID(haku)
sm.setBoxChat()
sm.sendNext("Kanna, our spiritual power is returning. My fur is buzzing.")
sm.sendNext("I think I can maintain my original form now! "
"Look out Nobunaga, Haku and Kanna are gonna fox you up!")
sm.sendNext("Oh... But I don't have a way to store the spiritual energy. "
"We need Mouri Takamoto's help. Could you ask him? Pretty please?")
sm.startQuest(parentID)
|
# FIXME make dynamic
PACKAGE_NAME = 'ctshed'
FILE_ENCODING = 'utf-8'
DEFAULT_IMAGE = 'debian:stable'
BIN_DIRECTORY = '~/bin'
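# One hedged way to address the FIXME above (illustrative, not the project's chosen approach;
# it would need ``import os`` and ``import sys`` at the top of this module):
#
#   PACKAGE_NAME = __package__ or os.path.basename(os.path.dirname(os.path.abspath(__file__)))
#   FILE_ENCODING = sys.getdefaultencoding()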
|
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientProxyConnectionError
from aiohttp import web
from util.config import BaseConfig
from nonebot.adapters.onebot.v11 import MessageSegment
import nonebot
import socket
import asyncio
import os
class Config(BaseConfig):
__file__ = "dog"
timeout: float = 15
warn: bool = True
dog_gif_only: bool = False
proxy: str | None = None
CONFIG = Config.load()
WARN_STR = "๏ผ็ฟป่ฏ๏ผAPIๅจๅฝๅค๏ผ่ฏฅๅฝไปค็ผๆ
ขๆๅบ้ๆฏๆญฃๅธธ็ฐ่ฑก๏ผ" if CONFIG.warn else ""
CAT_API = "https://aws.random.cat/meow"
cat = nonebot.on_command("ๅต", aliases={"ๅตๅต", "meow"})
cat.__cmd__ = ["ๅต", "ๅตๅต", "meow"]
cat.__brief__ = "ๅตๅตๅต๏ผๅตๅตใ"
cat.__doc__ = "ๅต๏ผๅตๅตใ" + WARN_STR
@cat.handle()
async def handle_cat():
async with ClientSession() as http:
url = "่ทๅURLๅบ้"
try:
response = await http.get(CAT_API, proxy=CONFIG.proxy)
url = (await response.json())["file"]
response = await http.get(url, proxy=CONFIG.proxy)
img = await asyncio.wait_for(response.read(), CONFIG.timeout)
except ClientProxyConnectionError:
await cat.finish("ไปฃ็่ฟๆฅๅคฑ่ดฅ")
except asyncio.TimeoutError:
await cat.finish("ไธ่ฝฝ่ถ
ๆถ๏ผ" + url)
except:
await cat.finish("ไธ่ฝฝๅบ้๏ผ" + url)
await cat.finish(MessageSegment.image(img))
CAT_GIF_API = "https://edgecats.net"
cat_gif = nonebot.on_command("喵呜")
cat_gif.__cmd__ = "喵呜"
cat_gif.__brief__ = "Meow, me--o--w~"
cat_gif.__doc__ = "Sends a random cat GIF from edgecats.net." + WARN_STR
@cat_gif.handle()
async def handle_cat_gif():
async with ClientSession() as http:
try:
response = await http.get(CAT_GIF_API, proxy=CONFIG.proxy)
data = await asyncio.wait_for(response.read(), CONFIG.timeout)
except ClientProxyConnectionError:
await cat_gif.finish("ไปฃ็่ฟๆฅๅคฑ่ดฅ")
except asyncio.TimeoutError:
await cat_gif.finish("ไธ่ฝฝ่ถ
ๆถ")
except:
await cat_gif.finish("ไธ่ฝฝๅบ้")
await cat_gif.finish(MessageSegment.image(data))
DOG_API = "https://random.dog/woof.json?filter=gif,mp4,webm"
dog = nonebot.on_command("ๆฑช", aliases={"ๆฑชๆฑช", "woof"})
dog.__cmd__ = ["ๆฑช", "ๆฑชๆฑช", "woof"]
dog.__brief__ = "ๆฑช๏ผๆฑชๆฑช๏ผๆฑชๆฑชๆฑช๏ผ"
dog.__doc__ = "ๆฑชๆฑช๏ผๆฑชๆฑชใ" + WARN_STR
@dog.handle()
async def handle_dog():
async with ClientSession() as http:
url = "่ทๅURLๅบ้"
try:
response = await http.get(DOG_API, proxy=CONFIG.proxy)
url = (await response.json())["url"]
response = await http.get(url, proxy=CONFIG.proxy)
img = await asyncio.wait_for(response.read(), CONFIG.timeout)
except ClientProxyConnectionError:
await dog.finish("ไปฃ็่ฟๆฅๅคฑ่ดฅ")
except asyncio.TimeoutError:
await dog.finish("ไธ่ฝฝ่ถ
ๆถ๏ผ" + url)
except:
await dog.finish("ไธ่ฝฝๅบ้๏ผ" + url)
await dog.finish(MessageSegment.image(img))
DOG_GIF_API = "https://random.dog/woof.json?include=gif"
if not CONFIG.dog_gif_only:
DOG_GIF_API += ",mp4,webm"
dog_gif = nonebot.on_command("ๆฑชๅท")
dog_gif.__cmd__ = "ๆฑชๅท"
dog_gif.__brief__ = "ๆฑช๏ผๆฑช๏ผๆฑชๅท๏ฝ"
dog_gif.__doc__ = "ๆฑชๆฑชโฆโฆๅๅท๏ผ" + WARN_STR
@dog_gif.handle()
async def handle_dog_gif():
async with ClientSession() as http:
url = "่ทๅURLๅบ้"
from loguru import logger
try:
response = await http.get(DOG_GIF_API, proxy=CONFIG.proxy)
url = (await response.json())["url"]
response = await http.get(url, proxy=CONFIG.proxy)
mime = response.content_type
logger.info("start download")
img = await asyncio.wait_for(response.read(), CONFIG.timeout)
logger.info("download finish")
except ClientProxyConnectionError:
await dog_gif.finish("ไปฃ็่ฟๆฅๅคฑ่ดฅ")
except asyncio.TimeoutError:
logger.info("download timeout")
await dog_gif.finish("ไธ่ฝฝ่ถ
ๆถ๏ผ" + url)
except:
await dog_gif.finish("ไธ่ฝฝๅบ้๏ผ" + url)
ext = os.path.splitext(url.lower())[1]
if ext in (".mp4", ".webm"):
await send_video(ext, mime, img)
await dog_gif.finish()
await dog_gif.finish(MessageSegment.image(img))
# go-cqhttp v1.0.0-rc1 errors when a video is sent via a file link, so this workaround is used instead
async def send_video(ext: str, mime: str, vid: bytes):
async def handler(_: web.Request):
return web.Response(body=vid, content_type=mime)
server = web.Server(handler)
runner = web.ServerRunner(server)
await runner.setup()
with socket.socket() as s:
s.bind(("", 0))
port = s.getsockname()[1]
site = web.TCPSite(runner, "localhost", port)
await site.start()
try:
await dog_gif.send(MessageSegment.video(f"http://127.0.0.1:{port}/video{ext}"))
finally:
await site.stop()
|
import pytest
import re
import loja
@pytest.mark.parametrize('atributo', ['_Produto__nome', '_Produto__preco'])
def test_cria_produto(atributo):
try:
prod = loja.Produto('Jogo online', 99)
except Exception:
raise AssertionError('Erro no construtor da classe Produto')
else:
mensagens_atributos = {'_Produto__nome': 'Nรฃo criou o atributo privado nome',
'_Produto__preco':'Nรฃo criou o atributo privado preco'}
assert hasattr(prod, atributo), mensagens_atributos[atributo]
@pytest.mark.parametrize('nome', ['Jogo', 'Microsoft Office'])
def test_produto_atributo_nome(nome):
try:
prod = loja.Produto(nome, 100)
assert prod._Produto__nome == nome
except Exception:
raise AssertionError('Erro ao inicializar o atributo privado nome na classe Produto')
@pytest.mark.parametrize('nome', ['Jogo', 'Microsoft Office'])
def test_produto_property_nome(nome):
try:
prod = loja.Produto(nome, 100)
assert prod.nome == nome
except Exception:
raise AssertionError('Erro no valor da property nome na classe Produto')
@pytest.mark.parametrize('preco', [100, 100.5])
def test_produto_preco_valido(preco):
try:
tipo = 'int' if isinstance(preco, int) else 'float' if isinstance(preco, float) else ''
prod = loja.Produto('Jogo online', preco)
except Exception:
raise AssertionError('Erro ao criar Produto com preรงo do tipo {0}'.format(tipo))
def test_cria_produto_nome_vazio():
try:
prod = loja.Produto('', 30)
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError para Produto criado com nome vazio')
else:
raise AssertionError('Produto criado com nome vazio')
def test_produto_setter_nome_vazio():
try:
valor_inicial = 'abcdef'
prod = loja.Produto(valor_inicial, 30)
prod.nome = ''
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError no setter nome da classe Produto quando o nome รฉ vazio')
assert hasattr(prod, "_Produto__nome"), "A classe Produto nรฃo possui o atributo privado nome"
assert prod._Produto__nome == valor_inicial, 'Nรฃo deve ser permitido alterar o valor do atributo privado nome quando o setter nome recebe uma string vazia'
@pytest.mark.parametrize('preco', ["", []])
def test_cria_produto_preco_invalido(preco):
try:
prod = loja.Produto('Jogo online', preco)
except TypeError:
pass
except Exception:
raise AssertionError('Erro diferente de TypeError para Produto criado com preรงo que nรฃo รฉ int nem float')
else:
raise AssertionError('Produto criado com preรงo invรกlido')
@pytest.mark.parametrize('preco', [-1, -3.0])
def test_cria_produto_preco_negativo(preco):
try:
prod = loja.Produto('Jogo online', preco)
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError para Produto criado com preรงo negativo')
else:
raise AssertionError('Produto criado com preรงo negativo')
@pytest.mark.parametrize('preco', ["", []])
def test_produto_setter_preco_invalido(preco):
try:
valor_inicial = 100
prod = loja.Produto('Jogo online', valor_inicial)
prod.preco = preco
except TypeError:
pass
except Exception:
raise AssertionError('Erro diferente de TypeError no setter do preรงo quando o novo_preco nรฃo รฉ int nem float')
assert hasattr(prod, "_Produto__preco"), "A classe Produto nรฃo possui o atributo privado preco"
assert prod._Produto__preco == valor_inicial, "O atributo privado preco nรฃo pode ter o seu valor inicial alterado caso o novo_preco seja invรกlido"
@pytest.mark.parametrize('preco', [-1, -3.0])
def test_produto_setter_preco_negativo(preco):
try:
valor_inicial = 100
prod = loja.Produto('Jogo online', valor_inicial)
prod.preco = preco
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError no setter do preรงo quando o novo_preco รฉ negativo')
assert hasattr(prod, "_Produto__preco"), "A classe Produto nรฃo possui o atributo privado preco"
assert prod._Produto__preco == valor_inicial, "O atributo privado preco nรฃo pode ter o seu valor inicial alterado caso o novo_preco seja negativo"
@pytest.mark.parametrize('preco', [1, 30])
def test_produto_metodo_calcular_preco_com_frete(preco):
try:
prod = loja.Produto('Jogo online', preco)
except Exception:
raise AssertionError('Erro ao instanciar um Produto')
assert prod.calcular_preco_com_frete() == preco, "O mรฉtodo calcular_preco_com_frete() deve retornar o preรงo do Produto"
@pytest.mark.parametrize('atributo', ['_ProdutoFisico__peso'])
def test_cria_produtoFisico(atributo):
try:
prod = loja.ProdutoFisico('Cadeira', 99, 1000)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoFisico')
else:
mensagens_atributos = {'_ProdutoFisico__peso': 'Nรฃo criou o atributo privado peso'}
assert hasattr(prod, atributo), mensagens_atributos[atributo]
def test_produtoFisico_heranca():
try:
prod = loja.ProdutoFisico('Cadeira', 99, 1000)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoFisico')
assert isinstance(prod, loja.Produto) and isinstance(prod, loja.ProdutoFisico), 'A classe ProdutoFisico deve herdar da classe Produto'
def test_produtoFisico_caracteristicas_herdadas():
try:
nome, preco = 'Cadeira', 99
prod = loja.ProdutoFisico(nome, preco, 1000)
dict_attrs_classe = vars(loja.ProdutoFisico)
dict_attrs_obj = vars(prod)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoFisico')
assert ('_Produto__nome' in dict_attrs_obj) and ('_Produto__preco' in dict_attrs_obj) and ('_ProdutoFisico__nome' not in dict_attrs_obj) and ('_ProdutoFisico__preco' not in dict_attrs_obj), 'A classe ProdutoFisico nรฃo deve possuir os atributos privados nome e preco'
assert ('nome' not in dict_attrs_classe) and ('preco' not in dict_attrs_classe), 'A classe ProdutoFisico deve herdar as properties da classe Produto'
assert prod.nome == nome and prod.preco == preco, 'As properties herdadas pela classe ProdutoFisico nรฃo possuem valores vรกlidos'
@pytest.mark.parametrize('nome,preco,peso', [('Copo',5,100)])
def test_cria_produtoFisico_inicializado_corretamente(nome, preco, peso):
try:
prod = loja.ProdutoFisico(nome, preco, peso)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoFisico')
assert hasattr(prod, "_Produto__nome"), "A classe Produto nรฃo possui o atributo privado nome"
assert hasattr(prod, "_Produto__preco"), "A classe Produto nรฃo possui o atributo privado preco"
assert hasattr(prod, "_ProdutoFisico__peso"), "A classe ProdutoFisico nรฃo possui o atributo privado peso"
assert prod._Produto__nome == nome and prod._Produto__preco == preco and prod._ProdutoFisico__peso == peso, 'A classe ProdutoFisico deve inicializar seus atributos e os atributos da super classe corretamente'
@pytest.mark.parametrize('peso', [1000, 3500])
def test_produtoFisico_property_peso(peso):
try:
prod = loja.ProdutoFisico('Cadeira', 99, peso)
assert prod.peso == peso
except Exception:
raise AssertionError('Erro no valor da property peso na classe ProdutoFisico')
@pytest.mark.parametrize('peso', ["", []])
def test_cria_produtoFisico_peso_invalido(peso):
try:
prod = loja.ProdutoFisico('Cadeira', 99, peso)
except TypeError:
pass
except Exception:
raise AssertionError('Erro diferente de TypeError para ProdutoFisico criado com peso que nรฃo รฉ int')
else:
raise AssertionError('ProdutoFisico criado com peso invรกlido')
@pytest.mark.parametrize('peso', [-1000])
def test_cria_produtoFisico_peso_nao_positivo(peso):
try:
prod = loja.ProdutoFisico('Cadeira', 99, peso)
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError para ProdutoFisico criado com peso negativo ou igual a zero')
else:
raise AssertionError('ProdutoFisico criado com peso negativo ou igual a zero')
@pytest.mark.parametrize('peso', ["", []])
def test_produtoFisico_setter_peso_invalido(peso):
try:
valor_inicial = 100
prod = loja.ProdutoFisico('Cadeira', 99, valor_inicial)
        prod.peso = peso
except TypeError:
pass
except Exception:
raise AssertionError('Erro diferente de TypeError no setter do peso quando o novo_peso nรฃo รฉ int')
assert hasattr(prod, "_ProdutoFisico__peso"), "A classe ProdutoFisico nรฃo possui o atributo privado peso"
assert prod._ProdutoFisico__peso == valor_inicial, "O atributo privado peso nรฃo pode ter o seu valor inicial alterado caso o novo_peso seja invรกlido"
@pytest.mark.parametrize('peso', [0, -100])
def test_produtoFisico_setter_peso_nao_positivo(peso):
try:
valor_inicial = 100
prod = loja.ProdutoFisico('Cadeira', 99, valor_inicial)
        prod.peso = peso
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError no setter do peso quando o novo_peso รฉ negativo ou igual a zero')
assert hasattr(prod, "_ProdutoFisico__peso"), "A classe ProdutoFisico nรฃo possui o atributo privado peso"
assert prod._ProdutoFisico__peso == valor_inicial, "O atributo privado peso nรฃo pode ter o seu valor inicial alterado caso o novo_peso seja negativo ou igual a zero"
@pytest.mark.parametrize('preco,peso,total', [(100,5000,125), (300,9500,347.5)])
def test_produtoFisico_metodo_calcular_preco_com_frete(preco, peso, total):
try:
prod = loja.ProdutoFisico('Cadeira', preco, peso)
except Exception:
raise AssertionError('Erro ao instanciar um ProdutoFisico')
assert prod.calcular_preco_com_frete() == total, "O mรฉtodo calcular_preco_com_frete() nรฃo calculou o preรงo com frete do ProdutoFisico corretamente"
@pytest.mark.parametrize('peso,peso_kg', [(1000,1.0), (9500,9.5)])
def test_produtoFisico_metodo_peso_em_kg(peso, peso_kg):
try:
prod = loja.ProdutoFisico('Cadeira', 99, peso)
except Exception:
raise AssertionError('Erro ao instanciar um ProdutoFisico')
assert prod.peso_em_kg() == peso_kg, "O mรฉtodo peso_em_kg() nรฃo calculou o peso em kg do ProdutoFisico corretamente"
@pytest.mark.parametrize('atributo', ['_ProdutoEletronico__tensao', '_ProdutoEletronico__tempo_garantia'])
def test_cria_produtoEletronico(atributo):
try:
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, 127, 12)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoEletronico')
else:
mensagens_atributos = {'_ProdutoEletronico__tensao': 'Nรฃo criou o atributo privado tensao',
'_ProdutoEletronico__tempo_garantia': 'Nรฃo criou o atributo privado tempo_garantia'}
assert hasattr(prod, atributo), mensagens_atributos[atributo]
def test_produtoEletronico_heranca():
try:
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, 127, 12)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoEletronico')
assert isinstance(prod, loja.ProdutoFisico) and isinstance(prod, loja.ProdutoEletronico), 'A classe ProdutoEletronico deve herdar da classe ProdutoFisico'
def test_produtoEletronico_caracteristicas_herdadas():
try:
nome, preco, peso = 'Geladeira', 4500, 29000
prod = loja.ProdutoEletronico(nome, preco, peso, 127, 12)
dict_attrs_classe = vars(loja.ProdutoEletronico)
dict_attrs_obj = vars(prod)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoEletronico')
assert ('_Produto__nome' in dict_attrs_obj) and ('_Produto__preco' in dict_attrs_obj) and ('_ProdutoFisico__peso' in dict_attrs_obj) and ('_ProdutoEletronico__nome' not in dict_attrs_obj) and ('_ProdutoEletronico__preco' not in dict_attrs_obj) and ('_ProdutoEletronico__peso' not in dict_attrs_obj), 'A classe ProdutoEletronico nรฃo deve possuir os atributos privados nome, preco e peso'
assert ('nome' not in dict_attrs_classe) and ('preco' not in dict_attrs_classe) and ('peso' not in dict_attrs_classe), 'A classe ProdutoEletronico deve herdar as properties da classe ProdutoFisico'
assert prod.nome == nome and prod.preco == preco and prod.peso == peso, 'As properties herdadas pela classe ProdutoEletronico nรฃo possuem valores vรกlidos'
@pytest.mark.parametrize('nome,preco,peso,tensao,tempo_garantia', [('Cafeteira',300,1500,127,6), ('Geladeira',3500,25000,220,12), ('Televisao',4000,8500,0,24)])
def test_cria_produtoEletronico_inicializado_corretamente(nome, preco, peso, tensao, tempo_garantia):
try:
prod = loja.ProdutoEletronico(nome, preco, peso, tensao, tempo_garantia)
except Exception:
raise AssertionError('Erro no construtor da classe ProdutoEletronico')
assert hasattr(prod, "_Produto__nome"), "A classe Produto nรฃo possui o atributo privado nome"
assert hasattr(prod, "_Produto__preco"), "A classe Produto nรฃo possui o atributo privado preco"
assert hasattr(prod, "_ProdutoFisico__peso"), "A classe ProdutoFisico nรฃo possui o atributo privado peso"
assert hasattr(prod, "_ProdutoEletronico__tensao"), "A classe ProdutoEletronico nรฃo possui o atributo privado tensao"
assert hasattr(prod, "_ProdutoEletronico__tempo_garantia"), "A classe ProdutoEletronico nรฃo possui o atributo privado tempo_garantia"
assert prod._Produto__nome == nome and prod._Produto__preco == preco and prod._ProdutoFisico__peso == peso and prod._ProdutoEletronico__tensao == tensao and prod._ProdutoEletronico__tempo_garantia == tempo_garantia, 'A classe ProdutoEletronico deve inicializar seus atributos e os atributos da super classe corretamente'
@pytest.mark.parametrize('meses', [9, 12])
def test_produtoEletronico_property_tempo_garantia(meses):
try:
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, 127, meses)
assert prod.tempo_garantia == meses
except Exception:
raise AssertionError('Erro no valor da property tempo_garantia na classe ProdutoEletronico')
@pytest.mark.parametrize('tensao', [0, 127, 220])
def test_produtoEletronico_property_tensao(tensao):
try:
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, tensao, 12)
assert prod.tensao == tensao
except Exception:
raise AssertionError('Erro no valor da property tensao na classe ProdutoEletronico')
@pytest.mark.parametrize('tensao', ["", []])
def test_cria_produtoEletronico_tensao_tipo_invalido(tensao):
try:
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, tensao, 12)
except TypeError:
pass
except Exception:
raise AssertionError('Erro diferente de TypeError para ProdutoEletronico criado com tensao que nรฃo รฉ int')
else:
raise AssertionError('ProdutoEletronico criado com tensao com tipo invรกlido')
@pytest.mark.parametrize('tensao', [-1000, 7, 260])
def test_cria_produtoEletronico_tensao_valor_invalido(tensao):
try:
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, tensao, 12)
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError para ProdutoEletronico criado com tensao com valor diferente de 0, 127 ou 220')
else:
raise AssertionError('ProdutoEletronico criado com tensao com valor invรกlido')
@pytest.mark.parametrize('tensao', ["", []])
def test_produtoEletronico_setter_tensao_tipo_invalido(tensao):
try:
valor_inicial = 127
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, valor_inicial, 12)
prod.tensao = tensao
except TypeError:
pass
except Exception:
raise AssertionError('Erro diferente de TypeError no setter da tensao quando nova_tensao nรฃo รฉ int')
assert hasattr(prod, "_ProdutoEletronico__tensao"), "A classe ProdutoEletronico nรฃo possui o atributo privado tensao"
assert prod._ProdutoEletronico__tensao == valor_inicial, "O atributo privado tensao nรฃo pode ter o seu valor inicial alterado caso o nova_tensao seja invรกlida"
@pytest.mark.parametrize('tensao', [-1000, 7, 260])
def test_produtoEletronico_setter_tensao_valor_invalido(tensao):
try:
valor_inicial = 127
prod = loja.ProdutoEletronico('Geladeira', 5000, 35000, valor_inicial, 12)
prod.tensao = tensao
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError no setter da tensao quando a nova_tensao possui valor diferente de 0, 127 ou 220')
assert hasattr(prod, "_ProdutoEletronico__tensao"), "A classe ProdutoEletronico nรฃo possui o atributo privado tensao"
assert prod._ProdutoEletronico__tensao == valor_inicial, "O atributo privado tensao nรฃo pode ter o seu valor inicial alterado caso a nova_tensao seja diferente de 0, 127 ou 220"
@pytest.mark.parametrize('preco,peso,total', [(100,5000,126.25), (300,9000,348.45)])
def test_produtoEletronico_metodo_calcular_preco_com_frete(preco, peso, total):
try:
prod = loja.ProdutoEletronico('Geladeira', preco, peso, 127, 12)
except Exception:
raise AssertionError('Erro ao instanciar um ProdutoEletronico')
assert prod.calcular_preco_com_frete() == total, "O mรฉtodo calcular_preco_com_frete() nรฃo calculou o preรงo com frete do ProdutoEletronico corretamente"
@pytest.mark.parametrize('atributo', ['_Ebook__autor', '_Ebook__numero_paginas'])
def test_cria_ebook(atributo):
try:
prod = loja.Ebook('Aprenda Python', 20, 'Joao Silva', 130)
except Exception:
raise AssertionError('Erro no construtor da classe Ebook')
else:
mensagens_atributos = {'_Ebook__autor': 'Nรฃo criou o atributo privado autor',
'_Ebook__numero_paginas': 'Nรฃo criou o atributo privado numero_paginas'}
assert hasattr(prod, atributo), mensagens_atributos[atributo]
def test_ebook_heranca():
try:
prod = loja.Ebook('Aprenda Python', 20, 'Joao Silva', 130)
except Exception:
raise AssertionError('Erro no construtor da classe Ebook')
assert isinstance(prod, loja.Produto) and isinstance(prod, loja.Ebook), 'A classe Ebook deve herdar da classe Produto'
def test_ebook_caracteristicas_herdadas():
try:
nome, preco = 'Aprenda Python', 20
prod = loja.Ebook(nome, preco, 'Joao Silva', 130)
dict_attrs_classe = vars(loja.Ebook)
dict_attrs_obj = vars(prod)
except Exception:
raise AssertionError('Erro no construtor da classe Ebook')
assert ('_Produto__nome' in dict_attrs_obj) and ('_Produto__preco' in dict_attrs_obj) and ('_Ebook__nome' not in dict_attrs_obj) and ('_Ebook__preco' not in dict_attrs_obj), 'A classe Ebook nรฃo deve possuir os atributos privados nome e preco'
assert ('nome' not in dict_attrs_classe) and ('preco' not in dict_attrs_classe), 'A classe Ebook deve herdar as properties da classe Produto'
assert prod.nome == nome and prod.preco == preco, 'As properties herdadas pela classe Ebook nรฃo possuem valores vรกlidos'
@pytest.mark.parametrize('nome,preco,autor,numero_paginas', [('Aprendendo Python',30,'Joao Santos',150), ('Learning Java',250,'John da Silva',810)])
def test_cria_ebook_inicializado_corretamente(nome, preco, autor, numero_paginas):
try:
prod = loja.Ebook(nome, preco, autor, numero_paginas)
except Exception:
raise AssertionError('Erro no construtor da classe Ebook')
assert hasattr(prod, "_Produto__nome"), "A classe Produto nรฃo possui o atributo privado nome"
assert hasattr(prod, "_Produto__preco"), "A classe Produto nรฃo possui o atributo privado preco"
assert hasattr(prod, "_Ebook__autor"), "A classe Ebook nรฃo possui o atributo privado autor"
assert hasattr(prod, "_Ebook__numero_paginas"), "A classe Ebook nรฃo possui o atributo privado numero_paginas"
assert prod._Produto__nome == nome and prod._Produto__preco == preco and prod._Ebook__autor == autor and prod._Ebook__numero_paginas == numero_paginas, 'A classe Ebook deve inicializar seus atributos e os atributos da super classe corretamente'
@pytest.mark.parametrize('nome,autor', [('Aprendendo Python', 'Joao Santos'), ('Learning Java','John da Silva')])
def test_ebook_property_nome_exibicao(nome, autor):
try:
prod = loja.Ebook(nome, 30, autor, 100)
saida_esperada = '%s (%s)' % (nome, autor)
temp = prod.nome_exibicao
temp = re.sub(r'\s+', ' ', temp)
temp = re.sub(r'[(]\s+', '(', temp)
temp = re.sub(r'\s+[)]', ')', temp).strip()
assert temp.upper() == saida_esperada.upper()
except Exception:
raise AssertionError('Erro no valor da property nome_exibicao na classe Ebook')
@pytest.mark.parametrize('numero_paginas', [100, 564])
def test_ebook_property_numero_paginas(numero_paginas):
try:
prod = loja.Ebook('Aprenda Python', 30, 'Joao da Silva', numero_paginas)
assert prod.numero_paginas == numero_paginas
except Exception:
raise AssertionError('Erro no valor da property numero_paginas na classe Ebook')
@pytest.mark.parametrize('numero_paginas', [0, -1])
def test_cria_ebook_numero_paginas_nao_positivo(numero_paginas):
try:
prod = loja.Ebook('Aprenda Python', 30, 'Joao da Silva', numero_paginas)
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError para Ebook criado com numero_paginas negativo ou igual a zero')
else:
raise AssertionError('Ebook criado com numero_paginas negativo ou igual a zero')
@pytest.mark.parametrize('numero_paginas', [0, -1])
def test_ebook_setter_numero_paginas_nao_positivo(numero_paginas):
try:
valor_inicial = 100
prod = loja.Ebook('Aprenda Python', 30, 'Joao da Silva', valor_inicial)
prod.numero_paginas = numero_paginas
except ValueError:
pass
except Exception:
raise AssertionError('Erro diferente de ValueError no setter do numero_paginas quando o valor nรฃo รฉ positivo')
assert hasattr(prod, "_Ebook__numero_paginas"), "A classe Ebook nรฃo possui o atributo privado numero_paginas"
assert prod._Ebook__numero_paginas == valor_inicial, "O atributo privado numero_paginas nรฃo pode ter o seu valor inicial alterado caso o valor seja negativo ou igual a zero"
if __name__ == "__main__":
pytest.main()
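# Hedged sketch of the interface these tests exercise (inferred from the assertions above;
# the real ``loja`` module may differ in details):
#
#   class Produto:
#       private ``__nome`` (non-empty str) and ``__preco`` (non-negative int/float),
#       exposed via ``nome``/``preco`` properties with validating setters;
#       ``calcular_preco_com_frete()`` returns the bare price.
#   class ProdutoFisico(Produto):
#       adds private ``__peso`` in grams (positive int), ``peso_em_kg()`` == peso / 1000,
#       and shipping of 5 per kg: calcular_preco_com_frete() == preco + 5 * peso_em_kg().
#   class ProdutoEletronico(ProdutoFisico):
#       adds ``tensao`` (0, 127 or 220) and ``tempo_garantia``; the freight test values
#       match the physical freight plus a 1% surcharge.
#   class Ebook(Produto):
#       adds ``autor`` and positive ``numero_paginas``; ``nome_exibicao`` == "nome (autor)".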
|
import mock
from django.test import TestCase, RequestFactory
from django.http import HttpResponse
from shared_schema_tenants.middleware import TenantMiddleware, get_tenant
from shared_schema_tenants.helpers.tenants import create_tenant, set_current_tenant
from shared_schema_tenants.exceptions import TenantNotFoundError
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class TenantMiddlewareTests(TestCase):
@mock.patch('shared_schema_tenants.middleware.TenantMiddleware.process_request')
@mock.patch('shared_schema_tenants.middleware.TenantMiddleware.process_response')
    def test_calls_process_request_and_process_response(self, process_response, process_request):
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'), HTTP_HOST='test.localhost:8000')
response = HttpResponse()
TenantMiddleware(lambda r: response).__call__(request)
process_request.assert_called_once()
process_response.assert_called_once()
@mock.patch('shared_schema_tenants.middleware.get_tenant')
def test_process_request_adds_tenant_to_request(self, get_tenant):
tenant = create_tenant(name='test', slug='test', extra_data={}, domains=['test.localhost:8000'])
get_tenant.return_value = tenant
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'), HTTP_HOST='test.localhost:8000')
response = HttpResponse()
request = TenantMiddleware(lambda r: response).process_request(request)
self.assertEqual(request.tenant.slug, tenant.slug)
get_tenant.assert_called_once()
def test_call_returns_correct_response(self):
tenant = create_tenant(name='test', slug='test', extra_data={}, domains=['test.localhost:8000'])
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'), HTTP_HOST='test.localhost:8000')
response = HttpResponse()
processed_response = TenantMiddleware(lambda r: response).__call__(request)
self.assertEqual(response, processed_response)
class GetTenantTests(TestCase):
def test_with_correct_domain(self):
tenant = create_tenant(name='test', slug='test', extra_data={}, domains=['test.localhost:8000'])
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'), HTTP_HOST='test.localhost:8000')
retrieved_tenant = get_tenant(request)
self.assertEqual(retrieved_tenant, tenant)
def test_with_http_header(self):
tenant = create_tenant(name='test', slug='test', extra_data={}, domains=['test.localhost:8000'])
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'), **{'HTTP_TENANT_SLUG': tenant.slug})
retrieved_tenant = get_tenant(request)
self.assertEqual(retrieved_tenant, tenant)
def test_with_unexistent_tenant_in_http_header(self):
create_tenant(name='test', slug='test', extra_data={}, domains=['test.localhost:8000'])
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'), **{'HTTP_TENANT_SLUG': 'unexistent'})
with self.assertRaises(TenantNotFoundError):
get_tenant(request)
def test_with_previously_set_tenant(self):
tenant = create_tenant(name='test', slug='test', extra_data={}, domains=['test.localhost:8000'])
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'))
set_current_tenant(tenant.slug)
retrieved_tenant = get_tenant(request)
self.assertEqual(retrieved_tenant, tenant)
def test_with_nothing(self):
factory = RequestFactory()
request = factory.get(reverse('shared_schema_tenants:tenant_list'))
retrieved_tenant = get_tenant(request)
self.assertEqual(retrieved_tenant, None)
|
import requests
import re
# Start values of nothing...
#nothing = 12345
nothing = 16044 / 2
regex = re.compile(r"and the next nothing is (\d+)")
while True:
try:
rsp = requests.get('http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=%d' % nothing)
if rsp.status_code == 200:
match = regex.search(rsp.text)
if not match:
print('ended with: ', rsp.text)
break
            num = match.group(1)
            nothing = int(num)
print(rsp.text)
except:
break
|
"""
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Alexander Wolfe.
"""
########################################################################
# done: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
########################################################################
########################################################################
# done: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
#
#######################################################################
import rosegraphics as rg
window = rg.TurtleWindow()
window.tracer(2)
turtone = rg.SimpleTurtle()
turtone.pen = rg.Pen('red', 1)
turttwo = rg.SimpleTurtle()
turttwo.pen = rg.Pen('blue', 1)
turttwo.speed = 20
turtone.speed = 20
for k in range(20):
turtone.draw_regular_polygon(5,30)
turtone.pen_up()
turtone.right(90)
turtone.forward(5)
turtone.left(90)
turtone.pen_down()
for k in range(40):
turttwo.draw_circle(150)
turttwo.pen_up()
turttwo.left(90)
turttwo.forward(5)
turttwo.right(90)
turttwo.pen_down()
window.close_on_mouse_click()
|
import os
from PyQt5.QtCore import QTimer, QTime, Qt, QSettings
from PyQt5.QtGui import QPixmap
from PyQt5.QtMultimedia import QSound
from PyQt5.QtWidgets import (
QWidget,
QProgressBar,
QPushButton,
QLabel,
QVBoxLayout,
QHBoxLayout,
QDialog,
QSizePolicy,
QMessageBox,
QFormLayout,
QAction,
QSlider
)
from center_window import CenterWindow
from config import (
QSS,
RING_SOUND_PATH,
POMODORO_MARK_PATH,
LOGGED_TIME_DIR
)
SHORT_BREAK = 'short'
LONG_BREAK = 'long'
POMODORO = 'pomodoro'
class BreakDialog(QDialog):
def __init__(self, cur_break):
super(BreakDialog, self).__init__()
self.setWindowTitle('Break')
self.cur_break = cur_break
self.another_break = SHORT_BREAK if self.cur_break == LONG_BREAK else LONG_BREAK
self.main_box = QVBoxLayout()
self.setLayout(self.main_box)
self.message_label = QLabel(
'It is time for a {} break.'.format(self.cur_break)
)
self.message_label.setAlignment(Qt.AlignCenter)
self.btn_box = QHBoxLayout()
self.another_break_btn = QPushButton(
'Start {} break'.format(self.another_break)
)
self.another_break_btn.clicked.connect(self.another_break_btn_click)
self.skip_break_btn = QPushButton('Skip break')
self.skip_break_btn.clicked.connect(self.skip_break_btn_click)
self.main_box.addWidget(self.message_label)
self.btn_box.addWidget(self.another_break_btn)
self.btn_box.addWidget(self.skip_break_btn)
self.main_box.addLayout(self.btn_box)
def exec(self):
super(BreakDialog, self).exec()
return self.cur_break
def another_break_btn_click(self):
self.cur_break = self.another_break
self.accept()
def skip_break_btn_click(self):
self.cur_break = POMODORO
self.accept()
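# Usage note (illustrative): BreakDialog.exec() blocks until a button is pressed and
# returns the break type the user picked, or POMODORO when the break is skipped, e.g.
#
#   chosen = BreakDialog(SHORT_BREAK).exec()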
class Settings(QWidget):
def __init__(self, pomodoro_window):
super().__init__()
self.setStyleSheet(QSS)
self.setWindowTitle('Settings')
self.pomodoro_window = pomodoro_window
self.main_box = QFormLayout()
self.setLayout(self.main_box)
self.setMinimumWidth(500)
self.pomodoro_time = self.pomodoro_window.time_dict[POMODORO].minute()
self.long_break_time = self.pomodoro_window.time_dict[LONG_BREAK].minute()
self.short_break_time = self.pomodoro_window.time_dict[SHORT_BREAK].minute()
self.pomodoro_label = QLabel()
self.pomodoro_label.setAlignment(Qt.AlignRight)
self.long_label = QLabel()
self.long_label.setAlignment(Qt.AlignRight)
self.short_label = QLabel()
self.short_label.setAlignment(Qt.AlignRight)
self.pomodoro_slider = QSlider(Qt.Horizontal)
self.pomodoro_slider.setRange(1, 45)
self.pomodoro_slider.valueChanged.connect(self.pomodoro_change)
self.pomodoro_slider.setValue(self.pomodoro_time)
self.long_break_slider = QSlider(Qt.Horizontal)
self.long_break_slider.valueChanged.connect(self.long_change)
self.long_break_slider.setRange(1, 30)
self.long_break_slider.setValue(self.long_break_time)
self.short_break_slider = QSlider(Qt.Horizontal)
self.short_break_slider.valueChanged.connect(self.short_change)
self.short_break_slider.setRange(1, 10)
self.short_break_slider.setValue(self.short_break_time)
self.main_box.addRow(QLabel('Pomodoro duration'), self.pomodoro_label)
self.main_box.addRow(self.pomodoro_slider)
self.main_box.setSpacing(5)
self.main_box.addRow(QLabel('Long break duration'), self.long_label)
self.main_box.addRow(self.long_break_slider)
self.main_box.addRow(QLabel('Short break duration'), self.short_label)
self.main_box.addRow(self.short_break_slider)
def pomodoro_change(self, minutes):
self.pomodoro_label.setText('{} min'.format(minutes))
self.pomodoro_window.update_time_from_settings(minutes, POMODORO)
def long_change(self, minutes):
self.long_label.setText('{} min'.format(minutes))
self.pomodoro_window.update_time_from_settings(minutes, LONG_BREAK)
def short_change(self, minutes):
self.short_label.setText('{} min'.format(minutes))
self.pomodoro_window.update_time_from_settings(minutes, SHORT_BREAK)
    def closeEvent(self, event):
        event.ignore()
        self.hide()
class PomodoroWindow(CenterWindow):
def __init__(self, controller, issue_key, issue_title, tray_icon):
super().__init__()
self.center()
self.setStyleSheet(QSS)
self.controller = controller
self.tray_icon = tray_icon
if not os.path.exists(LOGGED_TIME_DIR):
os.mkdir(LOGGED_TIME_DIR)
self.LOG_PATH = os.path.join(
LOGGED_TIME_DIR, '{}.txt'.format(issue_key)
)
self.setWindowTitle('Pomodoro Timer')
self.settings = QSettings('Spherical', 'Jira Quick Reporter')
pomodoro_settings = int(self.settings.value(POMODORO, 25))
long_break_settings = int(self.settings.value(LONG_BREAK, 15))
short_break_settings = int(self.settings.value(SHORT_BREAK, 5))
self.time_dict = dict(
short=QTime(0, short_break_settings, 0),
long=QTime(0, long_break_settings, 0),
pomodoro=QTime(0, pomodoro_settings, 0)
)
self.issue_key = issue_key
self.issue_title = issue_title
self.pomodoros_count = 0
self.current_time_name = POMODORO
self.is_active_timer = False
self.logged_time = QTime(0, 0, 0)
self.time = self.time_dict[POMODORO]
self.time_in_seconds = QTime(0, 0, 0).secsTo(self.time)
self.timer_box = QVBoxLayout()
self.main_box = QHBoxLayout()
self.setLayout(self.main_box)
self.issue_label = QLabel(
'{}: {}'.format(self.issue_key, self.issue_title)
)
self.issue_label.setAlignment(Qt.AlignCenter)
self.issue_label.setObjectName('issue_label')
self.issue_label.setWordWrap(True)
self.issue_label.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Fixed
)
self.pbar = QProgressBar()
self.pbar.setRange(0, self.time_in_seconds)
self.pbar.setValue(0)
self.pbar.setTextVisible(False)
self.timer = QTimer()
self.timer.timeout.connect(self.handle_timer)
self.time_label = QLabel()
self.time_label.setObjectName('time_label')
self.time_label.setText(self.time.toString('mm:ss'))
self.time_label.setAlignment(Qt.AlignCenter)
self.btns_box = QHBoxLayout()
self.start_btn = QPushButton('Start')
self.start_btn.clicked.connect(self.toggle_timer)
self.stop_btn = QPushButton('Stop')
self.stop_btn.clicked.connect(self.toggle_timer)
self.logwork_btn = QPushButton('Log work')
self.logwork_btn.clicked.connect(
lambda: self.controller.open_time_log(issue_key)
)
self.logwork_btn.setEnabled(False)
self.btns_box.addWidget(self.start_btn)
self.btns_box.addWidget(self.stop_btn)
self.btns_box.addWidget(self.logwork_btn)
self.pomodoros_box = QHBoxLayout()
self.pomodoros_box.setSpacing(5)
self.pomodoros_count_label = QLabel()
self.pomodoros_count_label.setObjectName('pomodoros_count')
self.timer_box.addWidget(self.issue_label)
self.timer_box.addStretch()
self.timer_box.addWidget(self.time_label)
self.timer_box.addWidget(self.pbar, Qt.AlignCenter)
self.timer_box.addLayout(self.btns_box)
self.timer_box.addLayout(self.pomodoros_box)
self.timer_box.addStretch()
self.main_box.addLayout(self.timer_box)
self.action_show_time = QAction(self)
self.action_show_time.setEnabled(False)
self.action_open_timer = QAction('Open timer', self)
self.action_open_timer.triggered.connect(self.show)
self.action_quit_timer = QAction('Quit timer', self)
self.action_quit_timer.triggered.connect(self.quit)
self.action_settings = QAction('Settings', self)
self.settings_window = Settings(self)
self.action_settings.triggered.connect(self.settings_window.show)
self.action_reset = QAction('Reset timer', self)
self.action_reset.triggered.connect(self.reset_timer)
self.action_start_timer = QAction('Start', self)
self.action_start_timer.triggered.connect(self.toggle_timer)
self.action_stop_timer = QAction('Stop', self)
self.action_stop_timer.triggered.connect(self.toggle_timer)
self.action_log_work = QAction('Log work', self)
self.action_log_work.triggered.connect(
lambda: self.controller.open_time_log(issue_key)
)
self.action_log_work.setEnabled(False)
self.tray_icon.contextMenu().addSeparator()
self.tray_icon.contextMenu().addAction(self.action_show_time)
self.action_show_time.setText(self.time.toString('mm:ss'))
self.tray_icon.contextMenu().addAction(self.action_open_timer)
self.tray_icon.contextMenu().addAction(self.action_settings)
self.tray_icon.contextMenu().addAction(self.action_quit_timer)
self.tray_icon.contextMenu().addSeparator()
self.tray_icon.contextMenu().addAction(self.action_start_timer)
self.tray_icon.contextMenu().addAction(self.action_stop_timer)
self.tray_icon.contextMenu().addAction(self.action_reset)
self.tray_icon.contextMenu().addAction(self.action_log_work)
def log_work_if_file_exists(self):
if os.path.exists(self.LOG_PATH):
reply = QMessageBox.question(
self,
'Warning',
                'You have not logged your work.\nDo you want to log it?',
QMessageBox.Yes | QMessageBox.No
)
if reply == QMessageBox.Yes:
self.controller.open_time_log(self.issue_key)
else:
os.remove(self.LOG_PATH)
def update_time_from_settings(self, minutes, time_name):
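        """
        Called when a Settings slider changes: update the stored duration for
        time_name and, if that timer is currently displayed, resize the running
        countdown (or finish it early if the new duration has already elapsed).
        """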
if self.current_time_name != time_name:
self.time_dict[time_name].setHMS(0, minutes, 0)
elif not self.is_active_timer:
self.time_dict[time_name].setHMS(0, minutes, 0)
self.update_timer()
elif self.time_dict[time_name].minute() > minutes:
spent_time_seconds = self.time.secsTo(self.time_dict[time_name])
if minutes <= spent_time_seconds // 60:
self.stop_timer()
QSound.play(RING_SOUND_PATH)
self.set_timer()
else:
time_diff = self.time_dict[time_name].minute() - minutes
self.change_timer(minutes, -time_diff)
elif self.time_dict[time_name].minute() < minutes:
time_diff = minutes - self.time_dict[time_name].minute()
self.change_timer(minutes, time_diff)
def change_timer(self, minutes, time_diff):
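        # resize the current countdown in place: shift the remaining time by
        # the slider delta and rescale the progress bar to the new duration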
self.time_dict[self.current_time_name].setHMS(0, minutes, 0)
self.time = self.time.addSecs(time_diff * 60)
self.time_in_seconds = minutes * 60
self.pbar.setMaximum(self.time_in_seconds)
self.time_label.setText(self.time.toString('mm:ss'))
self.action_show_time.setText(self.time.toString('mm:ss'))
def handle_timer(self):
"""
Updates timer label and progress bar every second
until time is over
"""
value = self.pbar.value()
if value < self.time_in_seconds:
value += 1
self.pbar.setValue(value)
self.time = self.time.addSecs(-1)
self.time_label.setText(self.time.toString('mm:ss'))
self.action_show_time.setText(self.time.toString('mm:ss'))
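            # another full minute has elapsed; add it to the logged time file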
if not value % 60:
self.log_time()
else:
self.stop_timer()
QSound.play(RING_SOUND_PATH)
if self.current_time_name != POMODORO:
self.tray_icon.showMessage(
'Pomodoro',
'Your break is over',
msecs=2000)
self.set_timer()
def update_timer(self):
self.time_in_seconds = QTime(0, 0, 0).secsTo(self.time)
self.pbar.setMaximum(self.time_in_seconds)
self.pbar.setValue(0)
self.time_label.setText(self.time.toString('mm:ss'))
self.action_show_time.setText(self.time.toString('mm:ss'))
def set_pomodoro_timer(self):
self.is_active_timer = False
self.current_time_name = POMODORO
self.time = self.time_dict[POMODORO]
self.update_timer()
def set_pomodoro_count(self):
"""
Set pomodoro mark and number of past pomodoros
"""
self.clear_pomodoros()
label = QLabel()
pixmap = QPixmap(POMODORO_MARK_PATH)
label.setPixmap(pixmap)
self.pomodoros_box.addWidget(self.pomodoros_count_label)
self.pomodoros_count_label.setSizePolicy(
QSizePolicy.Fixed, QSizePolicy.Expanding
)
self.pomodoros_box.addWidget(label)
self.pomodoros_count_label.setText(str(self.pomodoros_count))
def set_pomodoro_img(self):
label = QLabel()
pixmap = QPixmap(POMODORO_MARK_PATH)
label.setPixmap(pixmap)
if self.pomodoros_count > 1:
self.pomodoros_box.itemAt(
self.pomodoros_count - 2
).widget().setSizePolicy(
QSizePolicy.Fixed, QSizePolicy.Expanding
)
self.pomodoros_box.addWidget(label)
def clear_pomodoros(self):
for _ in range(self.pomodoros_box.count()):
self.pomodoros_box.itemAt(0).widget().setParent(None)
def toggle_timer(self):
sender = self.sender().text()
if sender in ['Start', 'Resume']:
self.start_timer()
elif sender == 'Pause':
self.pause_timer()
else:
self.stop_timer()
self.set_pomodoro_timer()
def log_time(self):
self.logged_time = self.logged_time.addSecs(60)
with open(self.LOG_PATH, 'w') as log_file:
log_file.write(self.logged_time.toString('h:m'))
def start_timer(self):
self.is_active_timer = True
# change style before a break
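        # (calling setStyleSheet() forces Qt to re-polish the widget, so the
        # objectName-based rules in the global QSS are re-applied)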
if self.current_time_name != POMODORO:
self.issue_label.setObjectName('issue_label_break')
self.issue_label.setStyleSheet('issue_label_break')
self.pbar.setObjectName('break')
self.pbar.setStyleSheet('break')
self.stop_btn.hide()
self.start_btn.setText('Stop')
self.action_start_timer.setEnabled(False)
else:
self.tray_icon.showMessage(
'Pomodoro',
'Focus on your task',
msecs=2000
)
self.start_btn.setText('Pause')
self.action_start_timer.setText('Pause')
self.logwork_btn.setEnabled(False)
self.action_log_work.setEnabled(False)
self.timer.start(1000)
def stop_timer(self):
self.timer.stop()
self.is_active_timer = False
self.start_btn.setText('Start')
self.action_start_timer.setText('Start')
self.logwork_btn.setEnabled(True)
self.action_log_work.setEnabled(True)
if self.current_time_name != POMODORO:
self.stop_btn.show()
self.action_start_timer.setEnabled(True)
# change style after a break
self.issue_label.setObjectName('issue_label')
self.issue_label.setStyleSheet('issue_label')
self.pbar.setObjectName('')
self.pbar.setStyleSheet('')
def pause_timer(self):
self.timer.stop()
self.start_btn.setText('Resume')
self.action_start_timer.setText('Resume')
self.logwork_btn.setEnabled(True)
self.action_log_work.setEnabled(True)
def reset_timer(self):
self.logwork_btn.setEnabled(False)
self.action_log_work.setEnabled(False)
self.stop_timer()
self.pomodoros_count = 0
self.logged_time.setHMS(0, 0, 0)
self.clear_pomodoros()
self.set_pomodoro_timer()
if os.path.exists(self.LOG_PATH):
os.remove(self.LOG_PATH)
def set_pomodoro_mark(self):
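        # show one tomato image per completed pomodoro for the first four,
        # then collapse them into a single counter label from the fifth onwards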
if self.pomodoros_count < 5:
self.set_pomodoro_img()
elif self.pomodoros_count == 5:
self.set_pomodoro_count()
else:
self.pomodoros_count_label.setText(
str(self.pomodoros_count)
)
def set_timer(self):
"""
        Decide which timer (pomodoro or break) runs next.
"""
# if pomodoro time's up
if self.current_time_name == POMODORO:
self.pomodoros_count += 1
self.set_pomodoro_mark()
# if four pomodoros have completed
if not self.pomodoros_count % 4:
self.current_time_name = LONG_BREAK
else:
self.current_time_name = SHORT_BREAK
dialog = BreakDialog(self.current_time_name)
# close dialog after 4 seconds
QTimer.singleShot(4000, dialog.close)
# get break name (short, long or skip) from dialog
self.current_time_name = dialog.exec()
if self.current_time_name != POMODORO:
self.time = self.time_dict[self.current_time_name]
self.update_timer()
self.start_timer()
return
# if break time's up
self.set_pomodoro_timer()
def quit(self):
if os.path.exists(self.LOG_PATH):
reply = QMessageBox.question(
self,
'Warning',
                'You did not log your work.\nAre you sure you want to exit?',
                QMessageBox.Yes | QMessageBox.No
)
if reply == QMessageBox.No:
return False
self.settings.setValue(
POMODORO, self.time_dict[POMODORO].minute()
)
self.settings.setValue(
LONG_BREAK, self.time_dict[LONG_BREAK].minute()
)
self.settings.setValue(
SHORT_BREAK, self.time_dict[SHORT_BREAK].minute()
)
self.settings.sync()
self.setAttribute(Qt.WA_DeleteOnClose, True)
self.close()
return True
def closeEvent(self, event):
if self.testAttribute(Qt.WA_DeleteOnClose):
self.controller.pomodoro_view = None
event.accept()
else:
event.ignore()
self.hide()
|
# -*- coding:utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Samsung SDS Co., LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import pycassa
from datetime import datetime, timedelta
from pycassa import (types, create_index_clause, create_index_expression, EQ,
GT, GTE, LT, LTE)
import struct
import json
import pickle
from collections import OrderedDict
from synaps import flags
from synaps import log as logging
from synaps import utils
from synaps import exception
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def pack_dimensions(dimensions):
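    # canonical JSON encoding (keys sorted) so equal dimension dicts always
    # serialize to the same string, which the indexed lookups rely on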
return json.dumps(OrderedDict(sorted(dimensions.items())))
class Cassandra(object):
STATISTICS = ["Sum", "SampleCount", "Average", "Minimum", "Maximum"]
def __init__(self, keyspace=None):
self.STATISTICS_TTL = FLAGS.get('statistics_ttl')
self.ARCHIVE = map(lambda x: int(x) * 60,
FLAGS.get('statistics_archives'))
if not keyspace:
keyspace = FLAGS.get("cassandra_keyspace", "synaps_test")
serverlist = FLAGS.get("cassandra_server_list")
# max_retries -1 means unlimited retries
self.pool = pycassa.ConnectionPool(keyspace, server_list=serverlist,
max_retries= -1)
self.cf_metric = pycassa.ColumnFamily(self.pool, 'Metric')
self.scf_stat_archive = pycassa.ColumnFamily(self.pool, 'StatArchive')
self.cf_metric_alarm = pycassa.ColumnFamily(self.pool, 'MetricAlarm')
self.cf_alarm_history = pycassa.ColumnFamily(self.pool, 'AlarmHistory')
def delete_metric_alarm(self, alarm_key):
try:
self.cf_metric_alarm.remove(alarm_key)
except pycassa.NotFoundException:
LOG.info(_("alarm key %s is not deleted" % alarm_key))
def _describe_alarms_by_names(self, project_id, alarm_names):
for alarm_name in alarm_names:
expr_list = [
pycassa.create_index_expression("project_id", project_id),
pycassa.create_index_expression("alarm_name", alarm_name)
]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
for k, v in items:
yield k, v
def describe_alarms(self, project_id, action_prefix=None,
alarm_name_prefix=None, alarm_names=None,
max_records=100, next_token=None, state_value=None):
"""
params:
project_id: string
action_prefix: TODO: not implemented yet.
alarm_name_prefix: string
alarm_names: string list
max_records: integer
next_token: string (uuid type)
state_value: string (OK | ALARM | INSUFFICIENT_DATA)
"""
if alarm_names:
return self._describe_alarms_by_names(project_id, alarm_names)
next_token = uuid.UUID(next_token) if next_token else ''
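        # start_key='' means "begin at the first indexed row"; a uuid resumes
        # the scan from the token returned with a previous page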
expr_list = []
prj_expr = create_index_expression("project_id", project_id)
expr_list.append(prj_expr)
if alarm_name_prefix:
expr_s = create_index_expression("alarm_name", alarm_name_prefix,
GTE)
expr_e = create_index_expression("alarm_name",
utils.prefix_end(alarm_name_prefix),
LT)
expr_list.append(expr_s)
expr_list.append(expr_e)
if state_value:
expr = create_index_expression("state_value", state_value)
expr_list.append(expr)
LOG.info("expr %s" % expr_list)
index_clause = create_index_clause(expr_list=expr_list,
start_key=next_token,
count=max_records)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
return items
def describe_alarms_for_metric(self, project_id, namespace, metric_name,
dimensions=None, period=None,
statistic=None, unit=None):
metric_key = self.get_metric_key(project_id, namespace, metric_name,
dimensions)
if not metric_key:
raise exception.InvalidParameterValue("no metric")
expr_list = [create_index_expression("metric_key", metric_key)]
if period:
expr = create_index_expression("period", int(period))
expr_list.append(expr)
if statistic:
expr = create_index_expression("statistic", statistic)
expr_list.append(expr)
if unit:
expr = create_index_expression("unit", unit)
expr_list.append(expr)
LOG.info("expr %s" % expr_list)
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
return items
def describe_alarm_history(self, project_id, alarm_name=None,
end_date=None, history_item_type=None,
max_records=100, next_token=None,
start_date=None):
"""
params:
project_id: string
alarm_name: string
end_date: datetime
history_item_type: string (ConfigurationUpdate | StateUpdate |
Action)
max_records: integer
next_token: string (uuid type)
start_date: datetime
"""
next_token = uuid.UUID(next_token) if next_token else ''
expr_list = [
pycassa.create_index_expression("project_id", project_id),
]
if alarm_name:
expr = create_index_expression("alarm_name", alarm_name)
expr_list.append(expr)
if end_date:
expr = create_index_expression("timestamp", end_date, LTE)
expr_list.append(expr)
if start_date:
expr = create_index_expression("timestamp", start_date, GTE)
expr_list.append(expr)
if history_item_type:
expr = create_index_expression("history_item_type",
history_item_type)
expr_list.append(expr)
index_clause = pycassa.create_index_clause(expr_list=expr_list,
start_key=next_token,
count=max_records)
items = self.cf_alarm_history.get_indexed_slices(index_clause)
return items
def get_metric_alarm_key(self, project_id, alarm_name):
expr_list = [
pycassa.create_index_expression("project_id", project_id),
pycassa.create_index_expression("alarm_name", alarm_name)
]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
for k, v in items:
return k
return None
def get_metric_alarm(self, alarm_key):
ret = None
try:
ret = self.cf_metric_alarm.get(alarm_key)
except pycassa.NotFoundException:
pass
return ret
def get_metric_key(self, project_id, namespace, metric_name, dimensions):
dimensions = pack_dimensions(dimensions)
expr_list = [
pycassa.create_index_expression("project_id", project_id),
pycassa.create_index_expression("name", metric_name),
pycassa.create_index_expression("namespace", namespace),
pycassa.create_index_expression("dimensions", dimensions)
]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric.get_indexed_slices(index_clause)
for k, v in items:
return k
return None
def get_metric_key_or_create(self, project_id, namespace, metric_name,
dimensions, unit='None'):
# get metric key
key = self.get_metric_key(project_id, namespace, metric_name,
dimensions)
# or create metric
if not key:
key = uuid.uuid4()
json_dim = pack_dimensions(dimensions)
columns = {'project_id': project_id, 'namespace': namespace,
'name': metric_name, 'dimensions': json_dim,
'unit': unit}
self.cf_metric.insert(key=key, columns=columns)
return key
def get_metric_statistics(self, project_id, namespace, metric_name,
start_time, end_time, period, statistics,
dimensions=None):
def get_stat(key, super_column, column_start, column_end):
stat = {}
count = (column_end - column_start).total_seconds() / 60
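            # the archive stores (presumably) one datapoint per minute, so the
            # number of minutes in the window bounds the column count to fetch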
try:
stat = self.scf_stat_archive.get(key,
super_column=super_column,
column_start=column_start,
column_finish=column_end,
column_count=count)
except pycassa.NotFoundException:
LOG.debug("not found %s %s %s %s" % (key, super_column,
column_start,
column_end))
return stat
# get metric key
key = self.get_metric_key(project_id, namespace, metric_name,
dimensions)
# or return {}
if not key:
return {}
statistics = map(utils.to_ascii, statistics)
stats = map(lambda x: get_stat(key, x, start_time, end_time),
statistics)
return stats
def get_metric_statistics_for_key(self, key, time_idx):
def get_stat(key, super_column, column_start, column_end):
stat = {}
try:
stat = self.scf_stat_archive.get(key,
super_column=super_column,
column_start=column_start,
column_finish=column_end,
column_count=1440)
except pycassa.NotFoundException:
LOG.debug("not found %s %s %s %s" % (key, super_column,
column_start,
column_end))
return stat
if not key:
return {}
stats = map(lambda x: get_stat(key, x, time_idx, time_idx),
self.STATISTICS)
return stats
def get_metric_unit(self, metric_key):
try:
metric = self.cf_metric.get(key=metric_key)
except pycassa.NotFoundException:
return "None"
return metric.get('unit', "None")
def insert_stat(self, metric_key, stat, ttl=None):
ttl = ttl if ttl else self.STATISTICS_TTL
self.scf_stat_archive.insert(metric_key, stat, ttl=ttl)
def insert_alarm_history(self, key, column):
self.cf_alarm_history.insert(key, column, ttl=self.STATISTICS_TTL)
def update_alarm_state(self, alarmkey, state, reason, reason_data,
timestamp):
state_info = {'state_value': state, 'state_reason': reason,
'state_reason_data': reason_data,
'state_updated_timestamp':timestamp}
self.cf_metric_alarm.insert(alarmkey, state_info)
def list_metrics(self, project_id, namespace=None, metric_name=None,
dimensions=None, next_token=""):
def to_dict(v):
return {'project_id': v['project_id'],
'dimensions': json.loads(v['dimensions']),
'name': v['name'],
'namespace': v['namespace']}
def check_dimension(item):
if isinstance(dimensions, dict):
def to_set(d):
return set(d.items())
l_set = to_set(dimensions)
r_set = to_set(json.loads(item['dimensions']))
return l_set.issubset(r_set)
return True
next_token = uuid.UUID(next_token) if next_token else ''
expr_list = [pycassa.create_index_expression("project_id",
project_id), ]
if namespace:
expr = pycassa.create_index_expression("namespace", namespace)
expr_list.append(expr)
if metric_name:
expr = pycassa.create_index_expression("name", metric_name)
expr_list.append(expr)
if dimensions:
packed_dimensions = pack_dimensions(dimensions)
expr = pycassa.create_index_expression("dimensions",
packed_dimensions)
expr_list.append(expr)
index_clause = pycassa.create_index_clause(expr_list,
start_key=next_token,
count=501)
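        # count=501 fetches one row beyond a 500-item page, presumably so the
        # extra key can be handed back to the caller as the next page's token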
items = self.cf_metric.get_indexed_slices(index_clause)
metrics = ((k, to_dict(v)) for k, v in items)
return metrics
def load_metric_data(self, metric_key):
try:
data = self.cf_metric_archive.get(metric_key, column_count=1440)
except pycassa.NotFoundException:
data = {}
return data
def load_statistics(self, metric_key, start, finish):
def get_stat(statistic):
datapoints = self.scf_stat_archive.get(metric_key,
super_column=statistic,
column_start=start,
column_finish=finish)
return statistic, datapoints
try:
stat = dict([get_stat(statistic)
for statistic in self.STATISTICS])
except pycassa.NotFoundException:
stat = {}
return stat
def load_alarms(self, metric_key):
expr_list = [
pycassa.create_index_expression("metric_key", metric_key),
]
index_clause = pycassa.create_index_clause(expr_list)
try:
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
except pycassa.NotFoundException:
items = {}
return items
def put_metric_alarm(self, alarm_key, metricalarm):
"""
        Create or update the given MetricAlarm row in the database.
"""
self.cf_metric_alarm.insert(key=alarm_key, columns=metricalarm)
return alarm_key
def restructed_stats(self, stat):
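        # regroup {statistic: {timestamp: value}} into a list of
        # (timestamp, {statistic: value}) pairs; the reduce() below checks
        # that every statistic carries exactly the same set of timestamps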
def get_stat(timestamp):
ret = {}
for key in stat.keys():
ret[key] = stat[key][timestamp]
return ret
ret = []
timestamps = reduce(lambda x, y: x if x == y else None,
map(lambda x: x.keys(), stat.values()))
for timestamp in timestamps:
ret.append((timestamp, get_stat(timestamp)))
return ret
@staticmethod
def syncdb(keyspace=None):
"""
        Check the Cassandra database schema and create the required
        keyspace, column families and super column families if missing.
"""
if not keyspace:
keyspace = FLAGS.get("cassandra_keyspace", "synaps_test")
serverlist = FLAGS.get("cassandra_server_list")
replication_factor = FLAGS.get("cassandra_replication_factor")
manager = pycassa.SystemManager(server=serverlist[0])
strategy_options = {'replication_factor':replication_factor}
        # check the keyspace and create it if it does not exist
LOG.info(_("cassandra syncdb is started for keyspace(%s)" % keyspace))
if keyspace not in manager.list_keyspaces():
LOG.info(_("cassandra keyspace %s does not exist.") % keyspace)
manager.create_keyspace(keyspace, strategy_options=strategy_options)
LOG.info(_("cassandra keyspace %s is created.") % keyspace)
else:
property = manager.get_keyspace_properties(keyspace)
            # check strategy_options and alter the keyspace if they differ
if not (strategy_options == property.get('strategy_options')):
manager.alter_keyspace(keyspace,
strategy_options=strategy_options)
LOG.info(_("cassandra keyspace strategy options is updated - %s"
% str(strategy_options)))
        # check column families
column_families = manager.get_keyspace_column_families(keyspace)
if 'Metric' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='Metric',
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
column_validation_classes={
'project_id': pycassa.UTF8_TYPE,
'name': pycassa.UTF8_TYPE,
'namespace': pycassa.UTF8_TYPE,
'unit': pycassa.UTF8_TYPE,
'dimensions': pycassa.UTF8_TYPE
}
)
manager.create_index(keyspace=keyspace, column_family='Metric',
column='project_id',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='name',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='namespace',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='dimensions',
value_type=types.UTF8Type())
if 'StatArchive' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='StatArchive', super=True,
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
comparator_type=pycassa.ASCII_TYPE,
subcomparator_type=pycassa.DATE_TYPE,
default_validation_class=pycassa.DOUBLE_TYPE
)
if 'MetricAlarm' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='MetricAlarm',
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
column_validation_classes={
'metric_key': pycassa.LEXICAL_UUID_TYPE,
'project_id': pycassa.UTF8_TYPE,
'actions_enabled': pycassa.BOOLEAN_TYPE,
'alarm_actions': pycassa.UTF8_TYPE,
'alarm_arn': pycassa.UTF8_TYPE,
'alarm_configuration_updated_timestamp': pycassa.DATE_TYPE,
'alarm_description': pycassa.UTF8_TYPE,
'alarm_name': pycassa.UTF8_TYPE,
'comparison_operator': pycassa.UTF8_TYPE,
'dimensions':pycassa.UTF8_TYPE,
'evaluation_periods':pycassa.INT_TYPE,
'insufficient_data_actions': pycassa.UTF8_TYPE,
'metric_name':pycassa.UTF8_TYPE,
'namespace':pycassa.UTF8_TYPE,
'ok_actions':pycassa.UTF8_TYPE,
'period':pycassa.INT_TYPE,
'state_reason':pycassa.UTF8_TYPE,
'state_reason_data':pycassa.UTF8_TYPE,
'state_updated_timestamp':pycassa.DATE_TYPE,
'state_value':pycassa.UTF8_TYPE,
'statistic':pycassa.UTF8_TYPE,
'threshold':pycassa.DOUBLE_TYPE,
'unit':pycassa.UTF8_TYPE
}
)
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='project_id',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='metric_key',
value_type=types.LexicalUUIDType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='alarm_name',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='state_updated_timestamp',
value_type=types.DateType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='alarm_configuration_updated_timestamp',
value_type=types.DateType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='state_value',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='period',
value_type=types.IntegerType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='statistic',
value_type=types.UTF8Type())
if 'AlarmHistory' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='AlarmHistory',
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
column_validation_classes={
'project_id': pycassa.UTF8_TYPE,
'alarm_key': pycassa.LEXICAL_UUID_TYPE,
'alarm_name': pycassa.UTF8_TYPE,
'history_data': pycassa.UTF8_TYPE,
'history_item_type': pycassa.UTF8_TYPE,
'history_summary': pycassa.UTF8_TYPE,
'timestamp': pycassa.DATE_TYPE,
}
)
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='project_id',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='alarm_key',
value_type=types.LexicalUUIDType())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='alarm_name',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='history_item_type',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='timestamp',
value_type=types.DateType())
LOG.info(_("cassandra syncdb has finished"))
|
#!/usr/bin/env python3
import cv2
""" Open an image """
img = cv2.imread("../images/python_logo.png")
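# cv2.imread() returns None instead of raising when the file cannot be read,
# so a quick sanity check avoids a confusing error from cv2.imshow()
assert img is not None, "could not read ../images/python_logo.png"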
cv2.imshow("My Image", img)
cv2.waitKey(0)  # waits the given time in ms for a key press; 0 means wait forever
""" Print info about image dimensions """
print(f"Shape of img: {img.shape}")
print(f"Size (no of px) of img: {img.size}")
print(f"Dtype of img: {img.dtype}")
""" Crop out a region-of-interest within the entire image """
roi = img[300:400, 400:500]
cv2.imshow("ROI", roi)
cv2.waitKey(0)
""" Save ROI """
cv2.imwrite("./images/roi.png", roi)
|
import mlflow
import os, shutil
#from mlflow_export_import.common.dump_run import dump_run
from mlflow_export_import.run.export_run import RunExporter
from mlflow_export_import.run.import_run import RunImporter
from mlflow_export_import.experiment.export_experiment import ExperimentExporter
from mlflow_export_import.experiment.import_experiment import ExperimentImporter
from mlflow_export_import.run.copy_run import RunCopier
from mlflow_export_import.experiment.copy_experiment import ExperimentCopier
from utils_test import create_experiment, dump_tags
from sklearn_utils import create_sklearn_model
from compare_utils import *
# == Setup
client = mlflow.tracking.MlflowClient()
#mlflow.sklearn.autolog()
output = "out"
mlmodel_fix = True
# == Common
def create_simple_run():
exp = create_experiment()
max_depth = 4
model = create_sklearn_model(max_depth)
with mlflow.start_run(run_name="my_run") as run:
mlflow.log_param("max_depth",max_depth)
mlflow.log_metric("rmse",.789)
mlflow.set_tag("my_tag","my_val")
mlflow.sklearn.log_model(model, "model")
with open("info.txt", "w") as f:
f.write("Hi artifact")
mlflow.log_artifact("info.txt")
mlflow.log_artifact("info.txt","dir2")
mlflow.log_metric("m1", 0.1)
return exp, run
def init_output_dir():
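    # start from a clean output tree so artifacts exported by a previous test
    # cannot leak into the comparison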
if os.path.exists(output):
shutil.rmtree(output)
os.makedirs(output)
os.makedirs(os.path.join(output,"run1"))
os.makedirs(os.path.join(output,"run2"))
# == Export/import Run tests
def init_run_test(exporter, importer, verbose=False):
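    # export a freshly created run to `output`, import it into a new
    # "<experiment>_imported" experiment and return both runs for comparison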
init_output_dir()
exp, run = create_simple_run()
exporter.export_run(run.info.run_id, output)
experiment_name = f"{exp.name}_imported"
res = importer.import_run(experiment_name, output)
if verbose: print("res:",res)
run1 = client.get_run(run.info.run_id)
run2 = client.get_run(res[0])
if verbose: dump_runs(run1, run2)
return run1, run2
def test_run_basic():
run1, run2 = init_run_test(RunExporter(), RunImporter(mlmodel_fix=mlmodel_fix, import_mlflow_tags=True))
compare_runs(client, output, run1, run2)
def test_run_no_import_mlflow_tags():
run1, run2 = init_run_test(RunExporter(), RunImporter(mlmodel_fix=mlmodel_fix, import_mlflow_tags=False))
compare_run_no_import_mlflow_tags(client, output, run1, run2)
def test_run_import_metadata_tags():
run1, run2 = init_run_test(RunExporter(export_metadata_tags=True), RunImporter(mlmodel_fix=mlmodel_fix, import_metadata_tags=True, import_mlflow_tags=True), verbose=False)
compare_run_import_metadata_tags(client, output, run1, run2)
# == Export/import Experiment tests
def init_exp_test(exporter, importer, verbose=False):
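    # same flow as init_run_test, but the whole experiment is exported and
    # re-imported instead of a single run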
init_output_dir()
exp, run = create_simple_run()
run1 = client.get_run(run.info.run_id)
exporter.export_experiment(exp.name, output)
experiment_name = f"{exp.name}_imported"
importer.import_experiment(experiment_name, output)
exp2 = client.get_experiment_by_name(experiment_name)
infos = client.list_run_infos(exp2.experiment_id)
run2 = client.get_run(infos[0].run_id)
if verbose: dump_runs(run1, run2)
return run1, run2
def test_exp_basic():
run1, run2 = init_exp_test(ExperimentExporter(), ExperimentImporter(), True)
compare_runs(client, output, run1, run2)
def test_exp_no_import_mlflow_tags():
run1, run2 = init_exp_test(ExperimentExporter(), ExperimentImporter(import_mlflow_tags=False))
compare_run_no_import_mlflow_tags(client, output, run1, run2)
def test_exp_import_metadata_tags():
run1, run2 = init_exp_test(ExperimentExporter(export_metadata_tags=True), ExperimentImporter(import_metadata_tags=True), verbose=False)
compare_run_import_metadata_tags(client, output, run1, run2)
# == Copy run tests
def init_run_copy_test(copier, verbose=False):
init_output_dir()
exp, run = create_simple_run()
run1 = client.get_run(run.info.run_id)
dst_experiment_name = f"{exp.name}_copy_run"
copier.copy_run(run1.info.run_id, dst_experiment_name)
exp2 = client.get_experiment_by_name(dst_experiment_name)
infos = client.list_run_infos(exp2.experiment_id)
run2 = client.get_run(infos[0].run_id)
if verbose: dump_runs(run1, run2)
return run1, run2
def test_copy_run_basic():
run1, run2 = init_run_copy_test(RunCopier(client, client), verbose=False)
compare_runs(client, output, run1, run2)
def test_copy_run_import_metadata_tags():
run1, run2 = init_run_copy_test(RunCopier(client, client, export_metadata_tags=True))
compare_run_import_metadata_tags(client, output, run1, run2)
# == Copy experiment tests
def init_exp_copy_test(copier, verbose=False):
init_output_dir()
exp, run = create_simple_run()
run1 = client.get_run(run.info.run_id)
dst_experiment_name = f"{exp.name}_copy_exp"
copier.copy_experiment(exp.name, dst_experiment_name)
exp2 = client.get_experiment_by_name(dst_experiment_name)
infos = client.list_run_infos(exp2.experiment_id)
run2 = client.get_run(infos[0].run_id)
if verbose: dump_runs(run1, run2)
return run1, run2
def test_copy_exp_basic():
run1, run2 = init_exp_copy_test(ExperimentCopier(client, client), verbose=False)
compare_runs(client, output, run1, run2)
def test_copy_exp_import_metadata_tags():
run1, run2 = init_exp_copy_test(ExperimentCopier(client, client, export_metadata_tags=True))
compare_run_import_metadata_tags(client, output, run1, run2)
|