hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
383255aadb0a4f30a2dc93b8d3cdf2942c4e0940 | 888 | py | Python | routes/huc.py | ua-snap/data-api | a4d34858b70b90a1dc70bd65a0fc95e45a82fbdf | [
"MIT"
] | null | null | null | routes/huc.py | ua-snap/data-api | a4d34858b70b90a1dc70bd65a0fc95e45a82fbdf | [
"MIT"
] | 109 | 2021-07-30T01:29:16.000Z | 2022-03-30T17:49:00.000Z | routes/huc.py | ua-snap/data-api | a4d34858b70b90a1dc70bd65a0fc95e45a82fbdf | [
"MIT"
] | null | null | null | import asyncio
from flask import (
abort,
Blueprint,
Response,
render_template,
request,
current_app as app,
)
# local imports
from . import routes
from luts import huc8_gdf
huc_api = Blueprint("huc_api", __name__)
@routes.route("/huc/")
@routes.route("/huc/abstract/")
@routes.route("/huc/huc8")
@routes.route("/huc/huc8/<huc8_id>")
def run_fetch_huc_poly(huc8_id):
"""Run the async IEM data requesting for a single point
and return data as json
Args:
huc8_id (int): HUC-8 ID
Returns:
GeoJSON of the HUC-8 polygon
Notes:
example request: http://localhost:5000/huc/huc8/19070506
"""
poly = huc8_gdf.loc[[huc8_id]]
poly_geojson = poly.to_json()
return poly_geojson
| 18.5 | 64 | 0.670045 | import asyncio
from flask import (
abort,
Blueprint,
Response,
render_template,
request,
current_app as app,
)
# local imports
from . import routes
from luts import huc8_gdf
huc_api = Blueprint("huc_api", __name__)
@routes.route("/huc/")
@routes.route("/huc/abstract/")
def huc_about():
return render_template("huc/abstract.html")
@routes.route("/huc/huc8")
def huc8_about():
return render_template("huc/huc8.html")
@routes.route("/huc/huc8/<huc8_id>")
def run_fetch_huc_poly(huc8_id):
"""Run the async IEM data requesting for a single point
and return data as json
Args:
huc8_id (int): HUC-8 ID
Returns:
GeoJSON of the HUC-8 polygon
Notes:
example request: http://localhost:5000/huc/huc8/19070506
"""
poly = huc8_gdf.loc[[huc8_id]]
poly_geojson = poly.to_json()
return poly_geojson
| 83 | 0 | 44 |
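# A minimal client sketch for the run_fetch_huc_poly route above (not part of
# routes/huc.py): it issues the example request from the docstring, assuming the
# dev server is running on localhost:5000 and the third-party `requests` package
# is installed.
import requests

resp = requests.get("http://localhost:5000/huc/huc8/19070506")
huc8_geojson = resp.json()  # GeoJSON FeatureCollection for the HUC-8 polygon
print(huc8_geojson["type"])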
196be76e941f8559099b1eec1c19e1897f52498b | 4,936 | py | Python | python/snapy/netsnmp/unittests/test_netsnmp.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/snapy/netsnmp/unittests/test_netsnmp.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/snapy/netsnmp/unittests/test_netsnmp.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | # snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import time
from twisted.trial import unittest
from snapy.netsnmp.unittests import TestCase
from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID
class Result(object):
"""Container for async results"""
value = None
| 29.915152 | 77 | 0.601904 | # snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import time
from twisted.trial import unittest
from snapy.netsnmp.unittests import TestCase
from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID
class Result(object):
"""Container for async results"""
value = None
def set_result(value, result):
result.value = value
class TestSessionV1(TestCase):
version = "1"
bulk = False
basics = [
(OID(".1.3.6.1.4.2.1.1"), 1),
(OID(".1.3.6.1.4.2.1.2"), -1),
(OID(".1.3.6.1.4.2.1.3"), 1),
(OID(".1.3.6.1.4.2.1.4"), "test value"),
]
def setUpSession(self, address):
self.session = Session(
version=self.version,
community="public",
peername=address,
_use_bulk=self.bulk)
self.session.open()
def tearDownSession(self):
self.session.close()
def test_sget(self):
result = self.session.sget([x for x,v in self.basics])
self.assertEquals(result, self.basics)
return self.finishGet()
def test_get_small(self):
result = Result()
self.session.get([x for x,v in self.basics], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishGet()
def test_get_big(self):
oids = []
for i in xrange(1, 100):
oids.append(OID((1,3,6,1,4,2,4,i)))
result = Result()
self.session.get(oids, set_result, result)
self.session.wait()
result = dict(result.value)
for oid in oids:
assert oid in result
assert result[oid] == "data data data data"
return self.finishGet()
def test_walk_tree(self):
result = Result()
self.session.walk([".1.3.6.1.4.2.1"], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishWalk()
def test_walk_leaf(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result)
self.session.wait()
self.assertEquals(result.value, [(oid, 1)])
return self.finishGet()
def test_walk_strict(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result, strict=True)
self.session.wait()
self.assertEquals(result.value, [])
return self.finishStrictWalk()
def test_sysDescr(self):
result = self.session.sget([OID("SNMPv2-MIB::sysDescr.0")])
self.assert_(result)
self.assertIsInstance(result[0][1], str)
self.assert_(len(result[0][1]) > 0)
return self.finishGet()
class TestSessionV2c(TestSessionV1):
version = "2c"
def test_hrSystemDate(self):
# This is a special string that gets formatted using the
# MIB's DISPLAY-HINT value. Also, strip off everything
# other than the date and hour to avoid a race condition.
# And one more quirk, these dates are not zero padded
# so we must format the date manually, whee...
now = time.localtime()
now = "%d-%d-%d,%d" % (now[0], now[1], now[2], now[3])
result = self.session.sget([OID(".1.3.6.1.2.1.25.1.2.0")])
self.assert_(result)
value = result[0][1].split(':', 1)[0]
self.assertEquals(value, now)
return self.finishGet()
class TestSessionV2cBulk(TestSessionV2c):
bulk = True
class TestTimeoutsV1(unittest.TestCase):
version = "1"
def setUp(self):
self.session = Session(
version=self.version,
community="public",
peername="udp:127.0.0.1:9",
retries=0, timeout=0.1)
self.session.open()
def test_sget(self):
self.assertRaises(SnmpError, self.session.sget, [".1.3.6.1.4.2.1.1"])
def test_get(self):
result = Result()
self.session.get([".1.3.6.1.4.2.1.1"], set_result, result)
self.session.wait()
assert isinstance(result.value, SnmpTimeout)
def tearDown(self):
self.session.close()
class TestTimeoutsV2c(TestTimeoutsV1):
version = "2c"
class TestOID(unittest.TestCase):
def test_oid_name(self):
oid = OID("1.3.6.1.2.1.1.1.0")
self.assertEquals(oid, OID("SNMPv2-MIB::sysDescr.0"))
self.assertEquals(oid, OID("sysDescr.0"))
| 3,214 | 793 | 188 |
77065ca8eeb28083cb6ab38da2d7882ed69c641e | 49 | py | Python | tests/__init__.py | UltiRequiem/python-playground | cb17138df7ce5a21f107ded5a97019491d37c8b2 | [
"MIT"
] | 4 | 2021-08-15T03:20:04.000Z | 2021-09-24T03:26:23.000Z | tests/__init__.py | UltiRequiem/python-playground | cb17138df7ce5a21f107ded5a97019491d37c8b2 | [
"MIT"
] | null | null | null | tests/__init__.py | UltiRequiem/python-playground | cb17138df7ce5a21f107ded5a97019491d37c8b2 | [
"MIT"
] | 3 | 2021-08-15T03:20:05.000Z | 2021-08-18T13:26:51.000Z | """
This tells Python that this is a module.
"""
| 12.25 | 40 | 0.653061 | """
This tells Python that this is a module.
"""
| 0 | 0 | 0 |
285e5badd7bc28f47f98206c35e143077e5c010b | 6,966 | py | Python | python_redux/create_store.py | ebrakke/python-redux | 744fbdafe2a546a000285256ffd492add9adeeeb | [
"MIT"
] | 32 | 2016-07-08T17:17:31.000Z | 2021-07-07T02:06:57.000Z | python_redux/create_store.py | ebrakke/python-redux | 744fbdafe2a546a000285256ffd492add9adeeeb | [
"MIT"
] | 2 | 2016-08-09T15:18:18.000Z | 2016-10-26T12:17:52.000Z | python_redux/create_store.py | ebrakke/python-redux | 744fbdafe2a546a000285256ffd492add9adeeeb | [
"MIT"
] | 4 | 2017-04-14T12:07:12.000Z | 2018-11-06T15:16:57.000Z | ACTION_TYPES = {
'INIT': '@@redux/INIT'
}
"""
* Creates a Redux store that holds the state tree.
* The only way to change the data in the store is to call `dispatch()` on it.
*
* There should only be a single store in your app. To specify how different
* parts of the state tree respond to actions, you may combine several reducers
* into a single reducer function by using `combineReducers`.
*
* @param {Function} reducer A function that returns the next state tree, given
* the current state tree and the action to handle.
*
* @param {any} [preloadedState] The initial state. You may optionally specify it
* to hydrate the state from the server in universal apps, or to restore a
* previously serialized user session.
* If you use `combineReducers` to produce the root reducer function, this must be
* an object with the same shape as `combineReducers` keys.
*
* @param {Function} enhancer The store enhancer. You may optionally specify it
* to enhance the store with third-party capabilities such as middleware,
* time travel, persistence, etc. The only store enhancer that ships with Redux
* is `applyMiddleware()`.
*
* @returns {Store} A Redux store that lets you read the state, dispatch actions
* and subscribe to changes.
""" | 38.7 | 107 | 0.737439 | ACTION_TYPES = {
'INIT': '@@redux/INIT'
}
"""
* Creates a Redux store that holds the state tree.
* The only way to change the data in the store is to call `dispatch()` on it.
*
* There should only be a single store in your app. To specify how different
* parts of the state tree respond to actions, you may combine several reducers
* into a single reducer function by using `combineReducers`.
*
* @param {Function} reducer A function that returns the next state tree, given
* the current state tree and the action to handle.
*
* @param {any} [preloadedState] The initial state. You may optionally specify it
* to hydrate the state from the server in universal apps, or to restore a
* previously serialized user session.
* If you use `combineReducers` to produce the root reducer function, this must be
* an object with the same shape as `combineReducers` keys.
*
* @param {Function} enhancer The store enhancer. You may optionally specify it
* to enhance the store with third-party capabilities such as middleware,
* time travel, persistence, etc. The only store enhancer that ships with Redux
* is `applyMiddleware()`.
*
* @returns {Store} A Redux store that lets you read the state, dispatch actions
* and subscribe to changes.
"""
def create_store(reducer=None, preloaded_state=None, enhancer=None):
if hasattr(preloaded_state, '__call__') and enhancer is None:
enhancer = preloaded_state
preloaded_state = None
if enhancer is not None:
if not hasattr(enhancer, '__call__'):
raise Exception('Expected the enhancer to be a function')
return enhancer(create_store)(reducer, preloaded_state)
if not hasattr(reducer, '__call__'):
raise Exception('Expected the reducer to be a function')
current_reducer = reducer
current_state = preloaded_state
current_listeners = []
next_listeners = current_listeners
is_dispatching = False
def ensure_can_mutate_next_listeners():
nonlocal next_listeners, current_listeners
if next_listeners == current_listeners:
next_listeners = [c for c in current_listeners]
"""
* Reads the state tree managed by the store.
*
* @returns {any} The current state tree of your application.
"""
def get_state():
nonlocal current_state
return current_state
"""
* Adds a change listener. It will be called any time an action is dispatched,
* and some part of the state tree may potentially have changed. You may then
* call `getState()` to read the current state tree inside the callback.
*
* You may call `dispatch()` from a change listener, with the following
* caveats:
*
* 1. The subscriptions are snapshotted just before every `dispatch()` call.
* If you subscribe or unsubscribe while the listeners are being invoked, this
* will not have any effect on the `dispatch()` that is currently in progress.
* However, the next `dispatch()` call, whether nested or not, will use a more
* recent snapshot of the subscription list.
*
* 2. The listener should not expect to see all state changes, as the state
* might have been updated multiple times during a nested `dispatch()` before
* the listener is called. It is, however, guaranteed that all subscribers
* registered before the `dispatch()` started will be called with the latest
* state by the time it exits.
*
* @param {Function} listener A callback to be invoked on every dispatch.
* @returns {Function} A function to remove this change listener.
"""
def subscribe(listener=None):
nonlocal next_listeners
if not hasattr(listener, '__call__'):
raise Exception('Expected listener to be a function')
is_subscribed = True
ensure_can_mutate_next_listeners()
next_listeners.append(listener)
def unsubscribe():
nonlocal is_subscribed
if not is_subscribed:
return
is_subscribed = False
ensure_can_mutate_next_listeners()
index = next_listeners.index(listener)
del next_listeners[index]
return unsubscribe
"""
* Dispatches an action. It is the only way to trigger a state change.
*
* The `reducer` function, used to create the store, will be called with the
* current state tree and the given `action`. Its return value will
* be considered the **next** state of the tree, and the change listeners
* will be notified.
*
* The base implementation only supports plain object actions. If you want to
* dispatch a Promise, an Observable, a thunk, or something else, you need to
* wrap your store creating function into the corresponding middleware. For
* example, see the documentation for the `redux-thunk` package. Even the
* middleware will eventually dispatch plain object actions using this method.
*
* @param {Object} action A plain object representing what changed. It is
* a good idea to keep actions serializable so you can record and replay user
* sessions, or use the time travelling `redux-devtools`. An action must have
* a `type` property which may not be `undefined`. It is a good idea to use
* string constants for action types.
*
* @returns {Object} For convenience, the same action object you dispatched.
*
* Note that, if you use a custom middleware, it may wrap `dispatch()` to
* return something else (for example, a Promise you can await).
"""
def dispatch(action=None):
nonlocal is_dispatching, current_state, current_listeners, next_listeners
if not type(action) == dict:
raise Exception('Actions must be plain dictionaries. Consider adding middleware to change this')
if action.get('type') is None:
raise Exception('Actions may not have an undefined "type" property.\n Have you misspelled a constants?')
if is_dispatching:
raise Exception('Reducers may not dispatch actions')
try:
is_dispatching = True
current_state = current_reducer(current_state, action)
finally:
is_dispatching = False
listeners = current_listeners = next_listeners
for l in listeners:
l()
return action
"""
* Replaces the reducer currently used by the store to calculate the state.
*
* You might need this if your app implements code splitting and you want to
* load some of the reducers dynamically. You might also need this if you
* implement a hot reloading mechanism for Redux.
*
* @param {Function} nextReducer The reducer for the store to use instead.
* @returns {void}
"""
def replace_reducer(next_reducer=None):
nonlocal current_reducer
if not hasattr(next_reducer, '__call__'):
raise Exception('Expected next_reducer to be a function')
current_reducer = next_reducer
dispatch({ 'type': ACTION_TYPES['INIT'] })
# TODO: Figure out how to add the observables
# When a store is created, an "INIT" action is dispatched so that every
# reducer returns their initial state. This effectively populates
# the initial state tree.
dispatch({ 'type': ACTION_TYPES['INIT'] })
return {
'dispatch': dispatch,
'subscribe': subscribe,
'get_state': get_state,
'replace_reducer': replace_reducer
} | 5,686 | 0 | 22 |
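# A minimal usage sketch, assuming only the create_store defined above; the
# _counter reducer and the 'INCREMENT' action shape are illustrative, not part
# of the library's API.
def _counter(state, action):
    """Example reducer: counts 'INCREMENT' actions, starting from 0."""
    state = 0 if state is None else state
    if action.get('type') == 'INCREMENT':
        return state + 1
    return state

_store = create_store(_counter)
_unsubscribe = _store['subscribe'](lambda: None)  # listeners take no arguments
_store['dispatch']({'type': 'INCREMENT'})
assert _store['get_state']() == 1
_unsubscribe()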
e8e6bd70369d9b4d22fc312ee9111d08452ada88 | 4,377 | py | Python | anicon.py | NevGi-cpu/anicon | a9092a9bdf5524dd3f4ed8da760637bc7764134f | [
"MIT"
] | null | null | null | anicon.py | NevGi-cpu/anicon | a9092a9bdf5524dd3f4ed8da760637bc7764134f | [
"MIT"
] | null | null | null | anicon.py | NevGi-cpu/anicon | a9092a9bdf5524dd3f4ed8da760637bc7764134f | [
"MIT"
] | null | null | null | from warnings import filterwarnings
from PIL import Image, ImageOps
from jikanpy import Jikan
from requests import get
from time import sleep
import re
import os
print('''Run this in your anime folder
For help, info and memes, check out
https://github.com/notdedsec/anicon
''')
sleep(1)
jikan = Jikan()
filterwarnings("ignore")
folderlist = next(os.walk('.'))[1]
if folderlist is None or len(folderlist) == 0:
    # In case the file is placed inside an innermost directory which contains only files and no other folders, this list will be empty.
    # Thus adding the current directory path as an element of the list.
    folderlist = [os.getcwd()]
automode = True if input('Use AutoMode? Y/N : ').upper() == 'Y' else False
for folder in folderlist:
name = getname(folder)
# Extracting the name of the folder without the path and then performing search for the same. This will be the name of the anime
# episode, thus instead of performing a search for the directory path, now performing a search for the directory name.
name = name.rpartition('\\')[2].strip()
iconname = name.replace(' ', '_')
jpgfile = folder + '\\' + iconname + '.jpg'
icofile = folder + '\\' + iconname + '.ico'
if os.path.isfile(icofile):
print('An icon is already present. Delete the older icon and `desktop.ini` file before applying a new icon')
continue
link, Type = getartwork(name)
try:
icon = createicon(folder, link)
except:
print('Ran into an error. Blame the dev :(')
continue
f = open(folder + "\\desktop.ini","w+")
f.write("[.ShellClassInfo]\nConfirmFileOp=0\n")
f.write("IconResource={},0".format(icofile.replace(folder, "").strip("\\")))
f.write("\nIconFile={}\nIconIndex=0".format(icofile.replace(folder, "").strip("\\")))
if Type is not None and len(Type) > 0:
# If the result has a type, then using this as the infotip for the desktop icon.
f.write("\nInfoTip={}".format(Type))
# Closing the output stream. All the text will be written into `desktop.ini` file only when the output is being closed.
f.close()
# Not marking the `desktop.ini` file as a system file. This will make sure that the file can be seen if display hidden items is enabled.
os.system('attrib +r \"{}\\{}\"'.format(os.getcwd(), folder))
os.system('attrib +h \"{}\\desktop.ini\"'.format(folder))
os.system('attrib +h \"{}\"'.format(icon))
| 33.159091 | 148 | 0.587846 | from warnings import filterwarnings
from PIL import Image, ImageOps
from jikanpy import Jikan
from requests import get
from time import sleep
import re
import os
print('''Run this in your anime folder
For help, info and memes, check out
https://github.com/notdedsec/anicon
''')
sleep(1)
jikan = Jikan()
filterwarnings("ignore")
folderlist = next(os.walk('.'))[1]
if folderlist is None or len(folderlist) == 0:
    # In case the file is placed inside an innermost directory which contains only files and no other folders, this list will be empty.
    # Thus adding the current directory path as an element of the list.
    folderlist = [os.getcwd()]
automode = True if input('Use AutoMode? Y/N : ').upper() == 'Y' else False
def getname(name: str) -> str:
lastwords = ['bd', 's0', '480p', '720p', '1080p']
wordstoremove = ['bluray', 'x265', 'x264', 'hevc', 'hi10p', 'avc', '10bit', 'dual', 'audio', 'eng', 'english', 'subbed', 'sub', 'dubbed', 'dub']
name = name.lower().replace('_', ' ').replace('.', ' ')
for word in wordstoremove:
name = name.replace(word, '')
name = re.sub(r"(?<=\[)(.*?)(?=\])", '', name)
name = re.sub(r"(?<=\()(.*?)(?=\))", '', name)
name = name.replace('()', '').replace('[]', '')
for word in lastwords:
rexstr = "(?<=" + word + ")(?s)(.*$)"
name = re.sub(rexstr, '', name).replace(word, '')
return(name.strip())
def getartwork(name: str) -> tuple:
results = jikan.search('anime', name, parameters={'type': 'tv'})
print('\n' + name.title(), end = '')
counter = 1
for result in results['results']:
if automode:
print(' - ' + result['title'])
ch = 1
break
else:
print('\n' + str(counter) + ' - ' + result['title'], end = '')
if counter == 5:
break
counter += 1
if not automode:
ch = input('\n>')
if ch == '':
ch = 1
return (results['results'][int(ch)-1]['image_url'], results['results'][int(ch)-1]['type'])
def createicon(folder: str, link: str):
art = get(link)
open(jpgfile, 'wb').write(art.content)
img = Image.open(jpgfile)
img = ImageOps.expand(img, (69, 0, 69, 0), fill=0)
img = ImageOps.fit(img, (300,300)).convert("RGBA")
datas = img.getdata()
newData = []
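    # Make the pure-black padding added by ImageOps.expand fully transparent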
for item in datas:
if item[0] == 0 and item[1] == 0 and item[2] == 0:
newData.append((0, 0, 0, 0))
else:
newData.append(item)
img.putdata(newData)
os.remove(jpgfile)
img.save(icofile)
img.close()
return(icofile)
for folder in folderlist:
name = getname(folder)
# Extracting the name of the folder without the path and then performing search for the same. This will be the name of the anime
# episode, thus instead of performing a search for the directory path, now performing a search for the directory name.
name = name.rpartition('\\')[2].strip()
iconname = name.replace(' ', '_')
jpgfile = folder + '\\' + iconname + '.jpg'
icofile = folder + '\\' + iconname + '.ico'
if os.path.isfile(icofile):
print('An icon is already present. Delete the older icon and `desktop.ini` file before applying a new icon')
continue
link, Type = getartwork(name)
try:
icon = createicon(folder, link)
except:
print('Ran into an error. Blame the dev :(')
continue
f = open(folder + "\\desktop.ini","w+")
f.write("[.ShellClassInfo]\nConfirmFileOp=0\n")
f.write("IconResource={},0".format(icofile.replace(folder, "").strip("\\")))
f.write("\nIconFile={}\nIconIndex=0".format(icofile.replace(folder, "").strip("\\")))
if Type is not None and len(Type) > 0:
# If the result has a type, then using this as the infotip for the desktop icon.
f.write("\nInfoTip={}".format(Type))
# Closing the output stream. All the text will be written into `desktop.ini` file only when the output is being closed.
f.close()
# Not marking the `desktop.ini` file as a system file. This will make sure that the file can be seen if display hidden items is enabled.
os.system('attrib +r \"{}\\{}\"'.format(os.getcwd(), folder))
os.system('attrib +h \"{}\\desktop.ini\"'.format(folder))
os.system('attrib +h \"{}\"'.format(icon))
| 1,848 | 0 | 69 |
2d7c8c3e7f12b4918d898b8410558a4f529cb1b7 | 495 | py | Python | cafeteria/images/migrations/0005_auto_20190608_2217.py | devGW/PostApp | fda99fbc838ee809b1ff28445d66cbd48e2b5016 | [
"MIT"
] | null | null | null | cafeteria/images/migrations/0005_auto_20190608_2217.py | devGW/PostApp | fda99fbc838ee809b1ff28445d66cbd48e2b5016 | [
"MIT"
] | 1 | 2019-08-31T01:07:46.000Z | 2019-09-01T15:10:42.000Z | cafeteria/images/migrations/0005_auto_20190608_2217.py | devGW/PostApp | fda99fbc838ee809b1ff28445d66cbd48e2b5016 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-06-08 13:17
from django.db import migrations
| 20.625 | 47 | 0.547475 | # Generated by Django 2.2.1 on 2019-06-08 13:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0004_auto_20190607_2340'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='Image',
new_name='image',
),
migrations.RenameField(
model_name='like',
old_name='Image',
new_name='image',
),
]
| 0 | 389 | 23 |
93368abbf091c6ddf0ece731e948fa5ed0e5673d | 2,741 | py | Python | pylith/meshio/DataWriterHDF5Ext.py | cehanagan/pylith | cf5c1c34040460a82f79b6eb54df894ed1b1ee93 | [
"MIT"
] | 93 | 2015-01-08T16:41:22.000Z | 2022-02-25T13:40:02.000Z | pylith/meshio/DataWriterHDF5Ext.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
] | 277 | 2015-02-20T16:27:35.000Z | 2022-03-30T21:13:09.000Z | pylith/meshio/DataWriterHDF5Ext.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
] | 71 | 2015-03-24T12:11:08.000Z | 2022-03-03T04:26:02.000Z | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pythia.pyre/meshio/DataWriterHDF5Ext.py
#
# @brief Python object for writing finite-element data to HDF5 file
# with datasets stored in external binary files.
from .DataWriter import DataWriter
from .meshio import DataWriterHDF5Ext as ModuleDataWriterHDF5Ext
class DataWriterHDF5Ext(DataWriter, ModuleDataWriterHDF5Ext):
"""
@brief Python object for writing finite-element data to HDF5 file
with datasets stored in external binary files.
FACTORY: data_writer
"""
import pythia.pyre.inventory
filename = pythia.pyre.inventory.str("filename", default="")
filename.meta['tip'] = "Name of HDF5 file."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="datawriterhdf5"):
"""Constructor.
"""
DataWriter.__init__(self, name)
return
def preinitialize(self):
"""Initialize writer.
"""
DataWriter.preinitialize(self)
return
def setFilename(self, outputDir, simName, label):
"""Set filename from default options and inventory. If filename is given in inventory, use it,
otherwise create filename from default options.
"""
filename = self.filename or DataWriter.mkfilename(outputDir, simName, label, "h5")
self.mkpath(filename)
ModuleDataWriterHDF5Ext.filename(self, filename)
return
def close(self):
"""Close writer.
"""
ModuleDataWriterHDF5Ext.close(self)
# Only write Xdmf file on proc 0
from pylith.mpi.Communicator import mpi_comm_world
comm = mpi_comm_world()
if not comm.rank:
from .Xdmf import Xdmf
xdmf = Xdmf()
xdmf.write(ModuleDataWriterHDF5Ext.hdf5Filename(
self), verbose=False)
return
# PRIVATE METHODS /////////////////////////////////////////////////////
def _createModuleObj(self):
"""Create handle to C++ object."""
ModuleDataWriterHDF5Ext.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def data_writer():
"""Factory associated with DataWriter.
"""
return DataWriterHDF5Ext()
# End of file
| 29.473118 | 102 | 0.59066 | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pythia.pyre/meshio/DataWriterHDF5Ext.py
#
# @brief Python object for writing finite-element data to HDF5 file
# with datasets stored in external binary files.
from .DataWriter import DataWriter
from .meshio import DataWriterHDF5Ext as ModuleDataWriterHDF5Ext
class DataWriterHDF5Ext(DataWriter, ModuleDataWriterHDF5Ext):
"""
@brief Python object for writing finite-element data to HDF5 file
with datasets stored in external binary files.
FACTORY: data_writer
"""
import pythia.pyre.inventory
filename = pythia.pyre.inventory.str("filename", default="")
filename.meta['tip'] = "Name of HDF5 file."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="datawriterhdf5"):
"""Constructor.
"""
DataWriter.__init__(self, name)
return
def preinitialize(self):
"""Initialize writer.
"""
DataWriter.preinitialize(self)
return
def setFilename(self, outputDir, simName, label):
"""Set filename from default options and inventory. If filename is given in inventory, use it,
otherwise create filename from default options.
"""
filename = self.filename or DataWriter.mkfilename(outputDir, simName, label, "h5")
self.mkpath(filename)
ModuleDataWriterHDF5Ext.filename(self, filename)
return
def close(self):
"""Close writer.
"""
ModuleDataWriterHDF5Ext.close(self)
# Only write Xdmf file on proc 0
from pylith.mpi.Communicator import mpi_comm_world
comm = mpi_comm_world()
if not comm.rank:
from .Xdmf import Xdmf
xdmf = Xdmf()
xdmf.write(ModuleDataWriterHDF5Ext.hdf5Filename(
self), verbose=False)
return
# PRIVATE METHODS /////////////////////////////////////////////////////
def _createModuleObj(self):
"""Create handle to C++ object."""
ModuleDataWriterHDF5Ext.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def data_writer():
"""Factory associated with DataWriter.
"""
return DataWriterHDF5Ext()
# End of file
| 0 | 0 | 0 |
63cbb36cd3f5d7cfecfe9d1002837f0045e786ca | 1,844 | py | Python | src/neopixel_timer_chaser.py | tobiasbp/micropython | d1c3108f1ab81c70e048eb21b57f3b2f43571b90 | [
"MIT"
] | null | null | null | src/neopixel_timer_chaser.py | tobiasbp/micropython | d1c3108f1ab81c70e048eb21b57f3b2f43571b90 | [
"MIT"
] | null | null | null | src/neopixel_timer_chaser.py | tobiasbp/micropython | d1c3108f1ab81c70e048eb21b57f3b2f43571b90 | [
"MIT"
] | null | null | null | # A timer based LED chaser using addressable LEDs of type WS2812/NeoPixel
# Tested on an ESP32 running MicroPython
import machine, neopixel
from machine import Timer
# Pin to use for controlling LEDs
LED_STRIP_PIN = 14
# Number of LEDs on LED strip
LED_STRIP_LENGTH = 42
# How many times a second (HZ) to advance the LED
LED_ADVANCE_RATE_HZ = 20
# The color of the lit LED
LED_COLOR = (100, 0, 0)
# How many times a second (HZ) to refresh LEDs
LED_REFRESH_RATE_HZ = 20
# The speed of fading. Higher values = faster fades with less granularity
LED_FADE_SPEED = 15
# Frequency of lowering the intensity of the LEDS
LED_FADE_RATE_HZ = LED_REFRESH_RATE_HZ
# Create a neopixels object representing a strip of LEDS
np = neopixel.NeoPixel(machine.Pin(LED_STRIP_PIN), LED_STRIP_LENGTH)
# Index of the currently lit LED
led_index = 0
def advance(timer):
"""
Advance lit LED
"""
global np, led_index, LED_COLOR
# Light LED
np[led_index] = LED_COLOR
# Set index of next LED
if led_index == np.n - 1:
led_index = 0
else:
led_index += 1
def fade(timer):
"""
Lower intensity for all LEDs on strip
"""
global np, LED_FADE_SPEED
for i in range(np.n):
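        # Reduce each color channel by the fade step, clamping at zero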
np[i] = [
v - int(LED_FADE_SPEED) if v > int(LED_FADE_SPEED) else 0 for v in np[i]
]
# Timer for advancing the lit LED
timer_advance = Timer(0)
timer_advance.init(
period=int(1000 / LED_ADVANCE_RATE_HZ),
mode=Timer.PERIODIC,
callback=advance,
)
# Timer for fading out LEDs
timer_fade = Timer(1)
timer_fade.init(
period=int(1000 / LED_FADE_RATE_HZ),
mode=Timer.PERIODIC,
callback=fade,
)
# Timer for updating the LED strip
timer_refresh = Timer(2)
timer_refresh.init(
period=int(1000 / LED_REFRESH_RATE_HZ),
mode=Timer.PERIODIC,
callback=lambda t: np.write(),
)
| 24.263158 | 84 | 0.699566 | # A timer based LED chaser using addressable LEDs of type WS2812/NeoPixel
# Tested on an ESP32 running MicroPython
import machine, neopixel
from machine import Timer
# Pin to use for controlling LEDs
LED_STRIP_PIN = 14
# Number of LEDs on LED strip
LED_STRIP_LENGTH = 42
# How many times a second (HZ) to advance the LED
LED_ADVANCE_RATE_HZ = 20
# The color of the lit LED
LED_COLOR = (100, 0, 0)
# How many times a second (HZ) to refresh LEDs
LED_REFRESH_RATE_HZ = 20
# The speed of fading. Higher values = faster fades with less granularity
LED_FADE_SPEED = 15
# Frequency of lowering the intensity of the LEDS
LED_FADE_RATE_HZ = LED_REFRESH_RATE_HZ
# Create a neopixels object representing a strip of LEDS
np = neopixel.NeoPixel(machine.Pin(LED_STRIP_PIN), LED_STRIP_LENGTH)
# Index of the currently lit LED
led_index = 0
def advance(timer):
"""
Advance lit LED
"""
global np, led_index, LED_COLOR
# Light LED
np[led_index] = LED_COLOR
# Set index of next LED
if led_index == np.n - 1:
led_index = 0
else:
led_index += 1
def fade(timer):
"""
Lower intensity for all LEDs on strip
"""
global np, LED_FADE_SPEED
for i in range(np.n):
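        # Reduce each color channel by the fade step, clamping at zero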
np[i] = [
v - int(LED_FADE_SPEED) if v > int(LED_FADE_SPEED) else 0 for v in np[i]
]
# Timer for advancing the lit LED
timer_advance = Timer(0)
timer_advance.init(
period=int(1000 / LED_ADVANCE_RATE_HZ),
mode=Timer.PERIODIC,
callback=advance,
)
# Timer for fading out LEDs
timer_fade = Timer(1)
timer_fade.init(
period=int(1000 / LED_FADE_RATE_HZ),
mode=Timer.PERIODIC,
callback=fade,
)
# Timer for updating the LED strip
timer_refresh = Timer(2)
timer_refresh.init(
period=int(1000 / LED_REFRESH_RATE_HZ),
mode=Timer.PERIODIC,
callback=lambda t: np.write(),
)
| 0 | 0 | 0 |
26208baa0b4c094c9044ed8a1870a25f4fc0b1cf | 36,599 | py | Python | robot-resources/libraries/pybot_jrouter.py | Juniper/nita-robot | 4c7e2514b10f22619532e4368a80b6ebd0545f85 | [
"Apache-2.0"
] | 8 | 2020-12-09T18:37:56.000Z | 2022-01-17T00:50:40.000Z | robot-resources/libraries/pybot_jrouter.py | Juniper/nita-robot | 4c7e2514b10f22619532e4368a80b6ebd0545f85 | [
"Apache-2.0"
] | null | null | null | robot-resources/libraries/pybot_jrouter.py | Juniper/nita-robot | 4c7e2514b10f22619532e4368a80b6ebd0545f85 | [
"Apache-2.0"
] | 2 | 2021-01-04T10:58:55.000Z | 2021-06-09T17:43:22.000Z | #!/usr/bin/env python3
# ********************************************************
#
# Project: nita-webapp
#
# Copyright (c) Juniper Networks, Inc., 2021. All rights reserved.
# Notice and Disclaimer: This code is licensed to you under the Apache 2.0 License (the "License"). You may not use this code except in compliance with the License. This code is not an official Juniper product. You can obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.html
#
# SPDX-License-Identifier: Apache-2.0
#
# Third-Party Code: This code may depend on other components under separate copyright notice and license terms. Your use of the source code for those components is subject to the terms and conditions of the respective license as noted in the Third-Party source code file.
#
# ********************************************************
# xml specific
from lxml import etree
from lxml.builder import E
import xml.etree.ElementTree as ET
import xml.dom.minidom
import lxml
# stdlib
from io import StringIO
import re
import subprocess as sub
from subprocess import Popen, PIPE
from subprocess import check_call
import os
import sys
import pdb
import errno
import time
from datetime import datetime
from datetime import date, timedelta
from time import sleep
from pprint import pprint
import logging
import hashlib
from socket import error as SocketError
import errno
import signal
from itertools import *
import csv
import tempfile
#third-party
import xmltodict
import yaml
import paramiko
# import ncclient.transport.errors as NcErrors
# import ncclient.operations.errors as TError
import jinja2
import csv
from select import select
import ftplib
import logging.handlers
# junos-ez
from jnpr.junos.utils.scp import SCP
from jnpr.junos.utils.fs import FS
from jnpr.junos.exception import *
from jnpr.junos.utils.config import Config
from jnpr.junos.utils.sw import SW
from jnpr.junos.utils.start_shell import StartShell
from jnpr.junos.factory import loadyaml
from jnpr.junos.op.routes import RouteTable
from jnpr.junos import Device
from jnpr.junos import *
# Robot libraries
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.OperatingSystem import OperatingSystem
from robot.api import logger
# Global Variables
timestamp = datetime.now().strftime("%Y-%m-%d")
timestamp2 = datetime.now().strftime("%Y-%m-%d-%H-%M-%S.%f")[:-3]
timestamp3 = datetime.now().strftime("%H_%M_%S")
timestamp4 = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
# Global variables for shell connection
_SHELL_PROMPT = '% '
_JUNOS_PROMPT = '> '
_BASH_PROMPT = '?'
_SELECT_WAIT = 0.1
_RECVSZ = 1024
| 44.095181 | 442 | 0.564606 | #!/usr/bin/env python3
# ********************************************************
#
# Project: nita-webapp
#
# Copyright (c) Juniper Networks, Inc., 2021. All rights reserved.
# Notice and Disclaimer: This code is licensed to you under the Apache 2.0 License (the "License"). You may not use this code except in compliance with the License. This code is not an official Juniper product. You can obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.html
#
# SPDX-License-Identifier: Apache-2.0
#
# Third-Party Code: This code may depend on other components under separate copyright notice and license terms. Your use of the source code for those components is subject to the terms and conditions of the respective license as noted in the Third-Party source code file.
#
# ********************************************************
# xml specific
from lxml import etree
from lxml.builder import E
import xml.etree.ElementTree as ET
import xml.dom.minidom
import lxml
# stdlib
from io import StringIO
import re
import subprocess as sub
from subprocess import Popen, PIPE
from subprocess import check_call
import os
import sys
import pdb
import errno
import time
from datetime import datetime
from datetime import date, timedelta
from time import sleep
from pprint import pprint
import logging
import hashlib
from socket import error as SocketError
import errno
import signal
from itertools import *
import csv
import tempfile
#third-party
import xmltodict
import yaml
import paramiko
# import ncclient.transport.errors as NcErrors
# import ncclient.operations.errors as TError
import jinja2
import csv
from select import select
import ftplib
import logging.handlers
# junos-ez
from jnpr.junos.utils.scp import SCP
from jnpr.junos.utils.fs import FS
from jnpr.junos.exception import *
from jnpr.junos.utils.config import Config
from jnpr.junos.utils.sw import SW
from jnpr.junos.utils.start_shell import StartShell
from jnpr.junos.factory import loadyaml
from jnpr.junos.op.routes import RouteTable
from jnpr.junos import Device
from jnpr.junos import *
# Robot libraries
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.OperatingSystem import OperatingSystem
from robot.api import logger
# Global Variables
timestamp = datetime.now().strftime("%Y-%m-%d")
timestamp2 = datetime.now().strftime("%Y-%m-%d-%H-%M-%S.%f")[:-3]
timestamp3 = datetime.now().strftime("%H_%M_%S")
timestamp4 = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
# Global variables for shell connection
_SHELL_PROMPT = '% '
_JUNOS_PROMPT = '> '
_BASH_PROMPT = '?'
_SELECT_WAIT = 0.1
_RECVSZ = 1024
class ContinuableError(RuntimeError):
ROBOT_CONTINUE_ON_FAILURE = True
class FatalError(RuntimeError):
ROBOT_EXIT_ON_FAILURE = True
class pybot_jrouter(object):
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LISTENER_API_VERSION = 2
# -----------------------------------------------------------------------
# CONSTRUCTOR
# -----------------------------------------------------------------------
def __init__(self, **kvargs):
# Setting credentials
self.user = kvargs['user']
self.target = kvargs['target']
self.password = kvargs['password']
try:
self.port = int(kvargs['port'])
except KeyError:
self.port = 22
except ValueError as v_error:
print("WARN Variable value problems: %s" %v_error)
raise ContinuableError("Variable value problems: %s" %v_error)
self.ROBOT_LIBRARY_LISTENER = self
self._conn = Device(user=self.user, host=self.target, password=self.password, port=self.port, gather_facts=False)
# -----------------------------------------------------------------------
# FUNCTIONS START HERE
# -----------------------------------------------------------------------
def open_connection(self):
try:
self._conn.open(auto_probe=10)
self._conn.timeout = 120*120
return self
except ConnectError as c_error:
print("WARN Connection problems %s target: %s port: %s" % (c_error, self.target, self.port))
raise ContinuableError("Connection problems %s target: %s port: %s" %(c_error, self.target, self.port))
def close_connection(self):
try:
self._conn.close()
return self
except ConnectError as c_error:
print("WARN Connection problems %s" %c_error)
raise ContinuableError("Connection problems %s" %c_error)
def load_configuration_from_file(self, synchronize=True, overwrite=False, **kvargs):
"""
Function that load configuration on router from a file
path : where the configuration file is located
format: possible values 'set' or 'xml' or 'bracket' (so far only format 'set' is supported)
"""
#overwrite = kvargs.pop ('overwrite',False)
args = dict(data='')
args.update(kvargs)
if overwrite:
return self.load_configuration(overwrite=True, **args)
else:
return self.load_configuration(overwrite=False, **args)
def load_configuration_from_template(self,
commit_comment='pybot_jrouter_load_configuration_from_template',
format='set',
conf_mode='exclusive',
overwrite=False,
merge=False,
synchronize=True,
force_synchronize=False,
full=False,
print_conf_diff=False,
print_conf_detail=False,
**kvargs):
"""
# General Options:
# - format can be conf/set/xml
# - print_conf_diff will print the diff if desired
# - print_conf_detail will print the committed configuration if desired
# Configuration Options:
# - conf_mode: can be exclusive/private/dynamic/batch
# Load Options:
# - overwrite: Determines if the contents completely replace the existing configuration. Default is False
# - merge: If set to True will set the load-config action to merge the default load-config action is 'replace'
# Commit Options:
# - kvargs synchronize: Default True. On dual control plane systems, requests that the candidate configuration on one control plane be copied to the other control plane, checked for correct syntax, and committed on both Routing Engines. Default is True.
# - kvargs force_synchronize: Default False. On dual control plane systems, forces the candidate configuration on one control plane to be copied to the other control plane. Default is False
# - kvargs full: Default False. When True requires all the daemons to check and evaluate the new configuration
"""
print("*INFO* Host %s|load_configuration_from_template|General Options: format=%s, print_conf_diff=%s, print_conf_detail=%s" % (self.target, format, print_conf_diff, print_conf_detail))
synchronize = True
force_synchronize = False
full = False
if 'force_synchronize' in kvargs.keys():
force_synchronize = kvargs['force_synchronize']
if 'synchronize' in kvargs.keys():
synchronize = kvargs['synchronize']
if 'full' in kvargs.keys():
full = kvargs['full']
##Hidden variable: DO NOT USE DO NOT EDIT
#It will be set to True ONLY if routers.load_configuration_from_template_in_parallel is executed
if 'parallel' in kvargs.keys():
parallel = kvargs['parallel']
else:
parallel = False
if force_synchronize:
synchronize = True
print("*INFO* Host %s|load_configuration_from_template|Force Synchronized Commit requested" % (self.target))
if synchronize:
print("*INFO* Host %s|load_configuration_from_template|Synchronized Commit requested" % (self.target))
if full:
print("*INFO* Host %s|load_configuration_from_template|Commit Full requested" % (self.target))
print("*INFO* Host %s|load_configuration_from_template|Load Options: merge=%s, overwrite=%s" % (self.target, merge, overwrite))
if format == 'set' and overwrite:
overwrite = False
print("*WARN* Host %s|load_configuration_from_template|Not possible to override the configuration with format=set. Overwriting disabled." % (self.target))
yaml_file = kvargs['template_vars']
# Loading files and rendering
        with open(yaml_file) as yaml_fh:
            myvars = yaml.safe_load(yaml_fh)
loadResp = ''
commitResp = False
print("*INFO* Host %s|load_configuration_from_template|Initializing variables. template_path=%s." % (self.target, kvargs['jinja2_file']))
try:
#Unlock/close configuration is managed by Config context manager
with Config(self._conn, mode=conf_mode) as candidate:
begin_load_datetime = datetime.now()
if overwrite:
loadResp = candidate.load(template_path=kvargs['jinja2_file'], template_vars=myvars, format=format, overwrite=True, merge=merge)
else:
loadResp = candidate.load(template_path=kvargs['jinja2_file'], template_vars=myvars, format=format, overwrite=False, merge=merge)
finish_load_datetime = datetime.now()
if loadResp.find("ok") is None:
print("*WARN* Host %s|load_configuration_from_template|Load Response did not throw an exception but something unexpected occurred: %s" % (self.target, etree.tostring(loadResp)))
return False
if print_conf_diff:
try:
print('*INFO* Host %s|load_configuration_from_template|DIFF configuration to be committed %s' % (self.target, candidate.diff()))
except lxml.etree.XMLSyntaxError as error:
print("*WARN* Host %s|load_configuration_from_template|Unable to retrieve the DIFF configuration to be committed. Printout will be skipped, trying to commit....: %s" % (self.target, error))
begin_commit_datetime = datetime.now()
if print_conf_detail:
try:
#begin_commit_datetime=datetime.now()
commitResp = candidate.commit(comment=commit_comment, sync=synchronize, force_sync=force_synchronize, full=full, detail=True)
#finish_commit_datetime=datetime.now()
print('*INFO* Host %s|load_configuration_from_template|Configuration to be committed: %s' % (self.target, etree.tostring(commitResp)))
except lxml.etree.XMLSyntaxError as error:
print("*WARN* Host %s|load_configuration_from_template|Unable to retrieve the committed configuration. Printout will be skipped, check the node or try again with print_conf_detail=False....: %s" % (self.target, error))
return False
else:
begin_commit_datetime = datetime.now()
commitResp = candidate.commit(comment=commit_comment, sync=synchronize, force_sync=force_synchronize, full=full, detail=False)
finish_commit_datetime = datetime.now()
except LockError as lockError:
print("*WARN* Host %s|load_configuration_from_template|Problems locking configuration: %s" % (self.target, lockError))
if parallel:
return "Exception %s:" %lockError
else:
raise FatalError("Host %s|load_configuration_from_template|Unable to lock configuration.....exiting: %s" % (self.target, lockError))
except (RpcError, RpcTimeoutError) as rpcError:
#warnings severity is already being ignored in the Config context manager
print("*WARN* Host %s|load_configuration_from_template|Problems opening configuration: %s" % (self.target, rpcError))
if parallel:
return "Exception %s:" %rpcError
else:
raise FatalError("Host %s|load_configuration_from_template|Unable to open configuration.....exiting: %s" % (self.target, rpcError))
except ConfigLoadError as configError:
print("*WARN* Host %s|load_configuration_from_template|Template %s|Problems loading the configuration: %s.....exiting" % (self.target, kvargs['jinja2_file'], configError))
if print_conf_diff:
try:
print('*INFO* Host %s|load_configuration_from_template|DIFF configuration to be committed %s' % (self.target, candidate.diff()))
except lxml.etree.XMLSyntaxError as error:
print("*WARN* Host %s|load_configuration_from_template|Unable to retrieve the DIFF configuration to be committed. Printout will be skipped, trying to commit....: %s" % (self.target, error))
if parallel:
return "Exception %s:" %configError
else:
raise FatalError("Host %s|load_configuration_from_template|Template %s|Unable to load the configuration.....exiting: %s" % (self.target, kvargs['jinja2_file'], configError))
except CommitError as commitError:
print("*WARN* Host %s|load_configuration_from_template|Template %s|Problems committing the configuration: %s.....exiting" % (self.target, kvargs['jinja2_file'], commitError))
if parallel:
return "Exception %s:" %commitError
else:
raise FatalError("Host %s|load_configuration_from_template|Template %s|Unable to commit the configuration.....exiting: %s" % (self.target, kvargs['jinja2_file'], commitError))
except UnlockError as unlockError:
print("*WARN* Host %s|load_configuration_from_template|Problems unlocking the configuration: %s.....exiting" % (self.target, unlockError))
if parallel:
return "Exception %s:" %unlockError
else:
raise FatalError("Host %s|load_configuration_from_template|Unable to unlock the configuration.....exiting: %s" % (self.target, unlockError))
except Exception as error:
            if 'Opening and ending tag mismatch: routing-engine ' in str(error):
print('*INFO* Host %s|load_configuration_from_template|%s' %(self.target, error))
pass
return True
else:
print("*WARN* Host %s|load_configuration_from_template|An unhandled exception occurred: %s.....exiting" % (self.target, error))
if parallel:
return "Exception %s:" %error
else:
raise FatalError("Host %s|load_configuration_from_template|Unhandled Exception occurred.....exiting: %s" % (self.target, error))
diff_load_time = finish_load_datetime - begin_load_datetime
diff_commit_time = finish_commit_datetime - begin_commit_datetime
total_time = finish_commit_datetime - finish_load_datetime
print('*INFO* Host %s|load_configuration_from_template|Configuration successfully committed|Template: %s|Load Time: %s|Commit Time: %s| Total Time: %s' % (self.target, kvargs['jinja2_file'], self.pretty_time_delta(diff_load_time.seconds), self.pretty_time_delta(diff_commit_time.seconds), self.pretty_time_delta(total_time.seconds)))
return True
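    # A minimal usage sketch for the method above (host, credentials, and file
    # paths are illustrative assumptions, not part of this library):
    #
    #   router = pybot_jrouter(user='lab', target='10.0.0.1',
    #                          password='lab123', port=22)
    #   router.open_connection()
    #   router.load_configuration_from_template(
    #       jinja2_file='templates/bgp.j2',
    #       template_vars='vars/bgp.yml',
    #       format='set',
    #       print_conf_diff=True)
    #   router.close_connection()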
def load_configuration(self, commit_comment='__JRouter__', path=None, overwrite=False, **kvargs):
"""
Function that load configuration on router
**kvargs format:set|text|xml
data: data to load in the router
"""
data = kvargs['data']
format = kvargs['format']
if ((format == "set") or (format == "xml") or (format == "conf") or (format == "text") or (format == "txt")):
# Checking if this attribute was already attached to Device
# This is required if we are going to change configuration several times
if hasattr(self._conn, "candidate"):
pass
else:
self._conn.bind(candidate=Config)
try:
self._conn.candidate.lock()
except LockError as l_error:
print("*WARN* Problems locking configuration: %s" % (l_error))
raise FatalError("Problems locking configuration,exiting...")
return False
try:
if ((data == "") and (path != None)):
if overwrite:
self._conn.candidate.load(path=path, format=format, overwrite=True) # Load configuration from file
else:
self._conn.candidate.load(path=path, format=format)
else:
if overwrite:
self._conn.candidate.load(data, format=format, overwrite=True) # Load configuration from file
else:
self._conn.candidate.load(data, format=format)
except ConfigLoadError as error:
# hack to avoid return an error whenever config load get a warning
if error.rpc_error['severity'] == 'warning':
print("*INFO* Problems loading configuration: %s" % (error.rpc_error['message']))
except lxml.etree.XMLSyntaxError as error:
print("*WARN* Problems loading configuration: %s" % (error))
raise FatalError("Problems loading configuration,exiting...")
            print('*INFO* Configuration to be committed %s' % (self._conn.candidate.diff()))
try:
self._conn.candidate.commit(comment=commit_comment)
self._conn.candidate.unlock()
return True
except (CommitError, LockError) as err:
#print err
self._conn.candidate.rollback()
print("*WARN* Problems commiting configuration: %s" % (err))
raise FatalError("Error commiting configuration, exiting....")
else:
raise FatalError("Expected result is True but was False,test will go on")
return False
def jsnap(self, **kvargs):
variables = BuiltIn().get_variables()
# Assigning kvargs
section = None
if 'section' in kvargs.keys():
section = kvargs['section']
tag = kvargs['tag']
snaptype = kvargs['snaptype']
test = kvargs['test']
mode = kvargs['mode']
output_directory = variables['${path}']
test_case = variables['${testname}']
tmp_dir = tempfile.mkdtemp(prefix='_')
dirpath = output_directory + "/" + tmp_dir + "/" + test_case.replace(" ","_") + "/" + self.target + "/snapshot/"
#PARENT_ROOT=os.path.abspath(os.path.join(self.logdir, os.pardir))
#GRANDPA=os.path.abspath(os.path.join(PARENT_ROOT, os.pardir))
if not os.path.exists(dirpath):
os.makedirs(dirpath, mode=0o777)
timestamp = datetime.now().strftime("%Y-%m-%d")
if snaptype == "snap":
if section:
cmd = 'jsnap --'+ snaptype + " " + timestamp + '_'+ tag + ' -l ' + self.user + ' -p ' + self.password + ' -t ' + self.target + ' -s' + section + ' ' + test
else:
cmd = 'jsnap --'+ snaptype + " " + timestamp + '_'+ tag + ' -l ' + self.user + ' -p ' + self.password + ' -t ' + self.target + ' ' + test
print("Executing: %s" %cmd)
            jsnap_command = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE, shell=True, cwd=dirpath, universal_newlines=True)
output, errors = jsnap_command.communicate()
if ((("Exiting." in errors)) or("Unable to connect to device: " in errors) or ("jsnap: not found" in errors) or ("appears to be missing" in output)):
print(output)
print(errors)
raise FatalError("Unable to execute jsnap.....exiting")
else:
return True
print(output, errors)
return True
elif snaptype == "snapcheck":
if section:
cmd = 'jsnap --'+ snaptype + " " + timestamp + '_'+ tag + ' -l ' + self.user + ' -p ' + self.password + ' -t ' + self.target + ' -s' + section + ' ' + test
else:
cmd = 'jsnap --'+ snaptype + " " + timestamp + '_'+ tag + ' -l ' + self.user + ' -p ' + self.password + ' -t ' + self.target + ' ' + test
print("Executing: %s" %cmd)
            jsnap_command = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE, shell=True, cwd=dirpath, universal_newlines=True)
output, errors = jsnap_command.communicate()
print(output)
print(errors)
if ((("Exiting." in errors)) or("Unable to connect to device: " in errors) or ("jsnap: not found" in errors) or ("appears to be missing" in output)):
print(output)
print(errors)
raise FatalError("Unable to execute jsnap.....exiting")
else:
if mode == "strict":
if "ERROR" in output or "ERROR" in errors:
raise FatalError("ERROR found in jsnap mode strict.....exiting")
else:
return True
else:
return True
elif snaptype == "check":
if section:
cmd_check = 'jsnap --'+ snaptype + " " + timestamp + '_pre' + ',' + timestamp + '_post' + ' -l ' + self.user + ' -p ' + self.password + ' -t ' + self.target + ' -s' + section + ' ' + test
else:
cmd_check = 'jsnap --'+ snaptype + " " + timestamp + '_pre' + ',' + timestamp + '_post' + ' -l ' + self.user + ' -p ' + self.password + ' -t ' + self.target + ' ' + test
print("Executing: %s" %cmd_check)
            jsnap_command = sub.Popen(cmd_check, stdout=sub.PIPE, stderr=sub.PIPE, shell=True, cwd=dirpath, universal_newlines=True)
output, errors = jsnap_command.communicate()
print(output)
print(errors)
if ((("Exiting." in errors)) or("Unable to connect to device: " in errors) or ("jsnap: not found" in errors) or ("appears to be missing" in output)):
print(output)
print(errors)
raise FatalError("Unable to execute jsnap.....exiting")
else:
if mode == "strict":
if "ERROR" in output or "ERROR" in errors:
raise FatalError("ERROR found in jsnap mode strict.....exiting")
else:
return True
else:
return True
else:
raise FatalError("Invalid snap type.....exiting")
def rescue_configuration(self, **kvargs):
"""
Function that issues Save/Load a Rescue Configuration
"""
if 'action' in kvargs.keys():
action = kvargs['action']
# Saving rescue configuration
if action == 'save':
try:
self._conn.rpc.request_save_rescue_configuration()
except RpcError as err:
rpc_error = err.__repr__()
print(rpc_error)
return self
# Checking if this attribute was already attached to Device
if hasattr(self._conn, "candidate"):
pass
else:
self._conn.bind(candidate=Config)
# Locking configuration
try:
self._conn.candidate.lock()
except LockError as l_error:
print("*WARN* Problems locking configuration: %s" % (l_error))
raise FatalError("Unable to lock configuration.....exiting")
# Loading rescue configuration
if action == 'load':
try:
self._conn.rpc.load_configuration({'rescue': 'rescue'})
except RpcError as err:
rpc_error = err.__repr__()
print(rpc_error)
except ConfigLoadError as error:
print("*WARN* Problems loading configuration: %s" % (error))
raise FatalError("Unable to load configuration.....exiting")
        print('*INFO* Configuration to be committed %s' % (self._conn.candidate.diff()))
try:
self._conn.candidate.commit(comment='loading rescue configuration')
self._conn.candidate.unlock()
return True
except (CommitError, LockError) as err:
print(err)
raise FatalError("Unable to commit or unlock configuration......exiting")
def commands_executor(self, **kvargs):
"""
Function that issues commands
"""
# Getting built-in variables
variables = BuiltIn().get_variables()
regex = ''
xpath = ''
if 'xpath' in kvargs.keys():
xpath = kvargs['xpath']
if 'regex' in kvargs.keys():
regex = kvargs['regex']
command = kvargs['command']
format = kvargs['format']
output_directory = variables['${path}']
root_dir = variables['${OUTPUT_DIR}']
test_case = variables['${testname}']
if format == "text":
tmp_dir = tempfile.mkdtemp(prefix='_')
            if output_directory is None:
dirpath = "/collector/" + tmp_dir + "/" + timestamp + "/"
else:
dirpath = output_directory + "/" + tmp_dir + "/" + test_case.replace(" ", "_") + "/commands"
# Create directory if does not exist
if not os.path.exists(dirpath):
os.makedirs(dirpath, mode=0o777)
if regex:
try:
cmd_to_execute = self._conn.rpc.cli(command)
except RpcError as err:
rpc_error = err.__repr__()
print(xmltodict.parse(rpc_error)['rpc-error']['error-message'])
raise FatalError("Error executing RPC,exiting...")
operations = command.split("|")[1:]
result_tmp = cmd_to_execute.text
lines = result_tmp.strip().split('\n')
for operation in operations:
if re.search("count", operation, re.IGNORECASE):
print('*INFO* Count: %s lines' % len(lines))
return len(lines)
match = re.search('match "?(.*)"?', operation, re.IGNORECASE)
if match:
regex = match.group(1).strip()
lines_filtered = []
for line in lines:
if re.search(regex, line, re.IGNORECASE):
lines_filtered.append(line)
lines = lines_filtered
match = re.search('except "?(.*)"?', operation, re.IGNORECASE)
if match:
regex = match.group(1).strip()
lines_filtered = []
for line in lines:
if re.search(regex, line, re.IGNORECASE):
pass
else:
lines_filtered.append(line)
lines = lines_filtered
text_matches = re.search(regex, cmd_to_execute.text, re.MULTILINE)
if text_matches:
print(text_matches.groups())
return text_matches.groups()
else:
print("Executing: %s" %command)
try:
cmd_to_execute = self._conn.rpc.cli(command)
except RpcError as err:
rpc_error = err.__repr__()
print(xmltodict.parse(rpc_error)['rpc-error']['error-message'])
raise FatalError("Error executing RPC,exiting...")
#print type(cmd_to_execute)
if isinstance(cmd_to_execute, bool):
return True
else:
cmd_clean = command.replace(" ", "_").replace('_"', '_').replace('"_', '_').replace('"', '').replace("/", "_")
filename = timestamp2 + '_'+ self.target + "_" + cmd_clean + "." + "txt"
path = os.path.join(dirpath, filename).replace(root_dir, '.')
print("Saving file as: %s" %path)
print('*HTML* <a href="%s" target="_blank">%s</a>' % (path, path))
try:
with open(path, 'w') as file_to_save:
file_to_save.write(cmd_to_execute.text)
return True
except IOError as err:
print(err.errno, err.strerror)
raise FatalError("Error opening File, exiting...")
elif format == "xml":
if xpath:
print("Executing: %s [%s]" %(command, xpath))
try:
cmd_to_execute = self._conn.rpc.cli(command, format='xml')
xml_result = etree.tostring(cmd_to_execute)
except RpcError as err:
rpc_error = err.__repr__()
print(xmltodict.parse(rpc_error)['rpc-error']['error-message'])
raise FatalError("Error executing RPC, exiting...")
                matches = cmd_to_execute.xpath(xpath)
                if not matches:
                    raise FatalError("XPATH malformed, exiting...")
                xpath_result = matches[0].text.strip()
                print(xpath_result)
                return xpath_result
else:
try:
cmd_to_execute = self._conn.rpc.cli(command, format='xml')
xml_result = etree.tostring(cmd_to_execute)
except RpcError as err:
rpc_error = err.__repr__()
print(xmltodict.parse(rpc_error)['rpc-error']['error-message'])
raise FatalError("Error executing RPC, exiting...")
return xml_result
else:
raise FatalError("Format not valid, exiting...")
def save_config_to_file(self, **kvargs):
directory = kvargs['directory'] + '/' + timestamp4
print("*INFO* Saving current configuration...")
file_obj = StartShell(self._conn)
file_obj.open()
got = file_obj.run("cli -c 'show configuration | save " + directory + "_config.txt' ")
file_obj.close()
print("*INFO* %s" % (got))
return got[-2].split("'")[1]
def rollback(self, commit_comment='__JRouter__', **kvargs):
"""
Function that performs rollback
rollback_num = number
"""
rollback_num = kvargs['rollback_num']
        try:
            rollback_num = int(rollback_num)
        except (TypeError, ValueError):
            raise FatalError("Sorry. 'rollback_num' must be an integer.")
        if rollback_num > 50:
            raise FatalError("Sorry. 'rollback_num' must be lower than 50")
if hasattr(self._conn, "candidate"):
pass
else:
self._conn.bind(candidate=Config)
try:
self._conn.candidate.lock()
except LockError as l_error:
print("*WARN* Problems locking configuration: %s" % (l_error))
raise FatalError("Unable to lock configuration... exiting")
try:
print("Rolling back configuration....")
self._conn.candidate.rollback(rollback_num)
self._conn.candidate.commit(comment=commit_comment)
self._conn.candidate.unlock()
return True
except RpcError as err:
rpc_error = err.__repr__()
raise FatalError(xmltodict.parse(rpc_error)['rpc-error']['error-message'])
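    # A minimal usage sketch; the instance name `router` is hypothetical:
    #
    #   router.rollback(rollback_num=1)   # return to the previous commit
    #   router.rollback('maintenance window', rollback_num=0)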
def switchover(self):
"""
        Function to perform RE switchover
"""
# We need to verify that backup RE is ready before proceed
b_slot = self.get_slot('backup')
b_state = self._conn.rpc.get_route_engine_information(slot=b_slot)
state = b_state.findtext('route-engine/mastership-state')
if (state != "backup"):
raise FatalError("Backup RE is not ready")
try:
self.open_connection()
print('Executing switchover to complete the SW upgrade !!!')
switchover_cmd = self._conn.cli("request chassis routing-engine master switch no-confirm", format='xml', warning=False)
self.close_connection()
except ConnectError as c_error:
raise FatalError(c_error)
# except TError.TimeoutExpiredError as Terr:
# print Terr
# pass
# except NcErrors.SessionCloseError as Serr:
# print Serr
# pass
except SocketError as S_err:
print(S_err)
pass
except ConnectClosedError as CC_error:
print(CC_error)
pass
sleep(60)
try:
# WA for dealing with in band connections
print("Re-opening connection.......")
self._conn.open(auto_probe=900)
return True
except ConnectError as c_error:
raise FatalError(c_error)
def get_routing_table(self, **kvargs):
"""
Function that gathers the routing table from a device. It returns the whole routing table if no route is specified.
        If route is specified, next_hop can also be specified, and the routing-table nexthop output will be compared against it.
"""
try:
tbl = RouteTable(self._conn)
except ConnectError as c_error:
raise FatalError(c_error)
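        # PyEZ table views are refreshed in place: tbl.get() and
        # tbl.get(route) repopulate the same table object, so the loop
        # below iterates the result of the most recent get().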
complete_rt = tbl.get()
if 'route' in kvargs.keys():
route = kvargs['route']
print('route', route)
if route != 'None':
single_rt = tbl.get(route)
# Routing Table dictionary
rt = {}
for item in tbl:
# Remove "RouteTableView:" from item = RouteTableView:0.0.0.0/0
destination = str(item).split(":")[1]
rt[destination] = [item.nexthop, item.age, item.via, item.protocol]
return rt
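    # A minimal usage sketch; the instance name `router` is hypothetical:
    #
    #   rt = router.get_routing_table()                  # whole table
    #   rt = router.get_routing_table(route='10.0.0.0/8')
    #   nexthop, age, via, protocol = rt['10.0.0.0/8']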
def pretty_time_delta(self, seconds):
sign_string = '-' if seconds < 0 else ''
seconds = abs(int(seconds))
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '%s%dd%dh%dm%ds' % (sign_string, days, hours, minutes, seconds)
elif hours > 0:
return '%s%dh%dm%ds' % (sign_string, hours, minutes, seconds)
elif minutes > 0:
return '%s%dm%ds' % (sign_string, minutes, seconds)
else:
return '%s%ds' % (sign_string, seconds)
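    # Worked examples (90061 s = 1 day + 1 hour + 1 minute + 1 second):
    #
    #   self.pretty_time_delta(90061)  ->  '1d1h1m1s'
    #   self.pretty_time_delta(3661)   ->  '1h1m1s'
    #   self.pretty_time_delta(-61)    ->  '-1m1s'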
def get_config(self, xml_filter=None):
if xml_filter is None:
cnf = self._conn.rpc.get_config()
else:
# Should user wants to filter out configuration and extract only a piece of it
# e.g. SNMP
#cnf = dev.rpc.get_config(filter_xml=etree.XML('<snmp></snmp>'))
# interfaces
#cnf = dev.rpc.get_config(filter_xml=etree.XML('<configuration><interfaces/></configuration>'))
cnf = self._conn.rpc.get_config(filter_xml=etree.XML(xml_filter))
#print etree.tostring(cnf)
        config = etree.tostring(cnf)
print(config)
return config
| 7,432 | 26,123 | 69 |
5cd7c5f922a241072fc7a9059182d39d3ad7a58d | 1,610 | py | Python | SemanticCube.py | AndresGalaviz/Riper | 70c78259f98ba8f25d4ad6a365a6b494b3ee22f2 | [
"MIT"
] | 1 | 2021-06-22T07:38:39.000Z | 2021-06-22T07:38:39.000Z | SemanticCube.py | AndresGalaviz/Riper | 70c78259f98ba8f25d4ad6a365a6b494b3ee22f2 | [
"MIT"
] | 2 | 2016-11-14T21:09:07.000Z | 2018-02-08T21:01:07.000Z | SemanticCube.py | AndresGalaviz/Riper | 70c78259f98ba8f25d4ad6a365a6b494b3ee22f2 | [
"MIT"
] | null | null | null |
from collections import defaultdict
operMap = {
'+' : 0,
'-' : 1,
'*' : 2,
'/' : 3,
'%' : 4,
'=' : 5,
'<' : 6,
'>' : 7,
'<=' : 8,
'>=' : 9,
'!=' : 10,
'==' : 11,
'&&' : 12,
'||' : 13,
'console' : 14,
'+*' : 15}
#Additional
semanticCube = {}
# Return -1 if not possible
semanticCube = defaultdict(lambda: -1, semanticCube)
# Arithmetic
# int _ int : _
# float _ float : _
# int _ float : _
# float _ int : _
for i in range(0,4):
semanticCube[i,0,0] = 0
semanticCube[i,1,1] = 1
semanticCube[i,0,1] = 1
semanticCube[i,1,0] = 1
semanticCube[15,0,0] = 0
# = a a : a
for i in range(0,4):
semanticCube[5, i, i] = i
# = int float : int ; = float int : float (assignment keeps the target's type)
semanticCube[5, 0, 1] = 0
semanticCube[5, 1, 0] = 1
# % is always integer
semanticCube[4,0,0] = 0
semanticCube[4,1,1] = 0
semanticCube[4,0,1] = 0
semanticCube[4,1,0] = 0
# "string1" + "string2" = "string1string2"
semanticCube[0,2,2] = 2
#Comparison
# int|float_int|float = bool
for i in range(0,2):
for j in range(0,2):
for k in range(6,12):
semanticCube[k,i,j] = 3
for k in range(6,12):
semanticCube[k,2,2] = 3
#HigherExpression
for i in range(12,14):
semanticCube[i,3,3] = 3
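# Illustrative sanity checks on the cube (type codes used above: 0 = int,
# 1 = float, 2 = string, 3 = bool; -1 means the combination is not allowed):
assert semanticCube[operMap['+'], 0, 1] == 1    # int + float : float
assert semanticCube[operMap['%'], 1, 1] == 0    # % always yields int
assert semanticCube[operMap['+'], 2, 2] == 2    # string concatenation
assert semanticCube[operMap['=='], 0, 1] == 3   # comparisons yield bool
assert semanticCube[operMap['&&'], 0, 0] == -1  # logical ops need bools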
 | 22.676056 | 67 | 0.471429 |
from collections import defaultdict
operMap = {
'+' : 0,
'-' : 1,
'*' : 2,
'/' : 3,
'%' : 4,
'=' : 5,
'<' : 6,
'>' : 7,
'<=' : 8,
'>=' : 9,
'!=' : 10,
'==' : 11,
'&&' : 12,
'||' : 13,
'console' : 14,
'+*' : 15}
#Additional
semanticCube = {}
# Return -1 if not possible
semanticCube = defaultdict(lambda: -1, semanticCube)
# Arithmetic
# int _ int : _
# float _ float : _
# int _ float : _
# float _ int : _
for i in range(0,4):
semanticCube[i,0,0] = 0
semanticCube[i,1,1] = 1
semanticCube[i,0,1] = 1
semanticCube[i,1,0] = 1
semanticCube[15,0,0] = 0
# = a a : a
for i in range(0,4):
semanticCube[5, i, i] = i
# = int float : int ; = float int : float (assignment keeps the target's type)
semanticCube[5, 0, 1] = 0
semanticCube[5, 1, 0] = 1
# % is always integer
semanticCube[4,0,0] = 0
semanticCube[4,1,1] = 0
semanticCube[4,0,1] = 0
semanticCube[4,1,0] = 0
# "string1" + "string2" = "string1string2"
semanticCube[0,2,2] = 2
#Comparison
# int|float_int|float = bool
for i in range(0,2):
for j in range(0,2):
for k in range(6,12):
semanticCube[k,i,j] = 3
for k in range(6,12):
semanticCube[k,2,2] = 3
#HigherExpression
for i in range(12,14):
semanticCube[i,3,3] = 3
def SearchSemanticCube(operator, operandOne, operandTwo):
    return semanticCube[operMap[operator], operandOne, operandTwo]
 | 104 | 0 | 23
4c0522a7d8ac663b9787c83c087c0a0943adba83 | 24,624 | py | Python | pyspark/bigdl/util/common.py | qiuxin2012/BigDL | e3cd7499c0f850eb003163df8f090e7e92841ad0 | [
"Apache-2.0"
] | null | null | null | pyspark/bigdl/util/common.py | qiuxin2012/BigDL | e3cd7499c0f850eb003163df8f090e7e92841ad0 | [
"Apache-2.0"
] | 1 | 2017-09-19T06:18:17.000Z | 2017-09-19T06:18:17.000Z | pyspark/bigdl/util/common.py | qiuxin2012/BigDL | e3cd7499c0f850eb003163df8f090e7e92841ad0 | [
"Apache-2.0"
] | 1 | 2022-02-23T07:56:23.000Z | 2022-02-23T07:56:23.000Z |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import ListConverter, JavaArray, JavaList, JavaMap, MapConverter
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
import numpy as np
import threading
import tempfile
from bigdl.util.engine import get_bigdl_classpath, is_spark_below_2_2
INTMAX = 2147483647
INTMIN = -2147483648
DOUBLEMAX = 1.7976931348623157E308
if sys.version >= '3':
long = int
unicode = str
class EvaluatedResult():
"""
A testing result used to benchmark the model quality.
"""
def __init__(self, result, total_num, method):
"""
:param result: the validation result. i.e: top1 accuracy percentage.
:param total_num: the total processed records.
:param method: the validation method. i.e: Top1Accuracy
"""
self.result = result
self.total_num = total_num
self.method = method
class JTensor(object):
"""
A wrapper to easy our work when need to pass or return Tensor to/from Scala.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> np.random.seed(123)
>>>
"""
def __init__(self, storage, shape, bigdl_type="float", indices=None):
"""
:param storage: values in this tensor
:param shape: shape of this tensor
:param bigdl_type: numeric type
:param indices: if indices is provided, means this is a SparseTensor;
if not provided, means this is a DenseTensor
"""
if isinstance(storage, bytes) and isinstance(shape, bytes):
self.storage = np.frombuffer(storage, dtype=get_dtype(bigdl_type))
self.shape = np.frombuffer(shape, dtype=np.int32)
else:
self.storage = np.array(storage, dtype=get_dtype(bigdl_type))
self.shape = np.array(shape, dtype=np.int32)
if indices is None:
self.indices = None
elif isinstance(indices, bytes):
self.indices = np.frombuffer(indices, dtype=np.int32)
else:
assert isinstance(indices, np.ndarray), \
"indices should be a np.ndarray, not %s, %s" % (type(a_ndarray), str(indices))
self.indices = np.array(indices, dtype=np.int32)
self.bigdl_type = bigdl_type
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert a ndarray to a DenseTensor which would be used in Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> print(result)
JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float
>>> result
JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
@classmethod
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
        Convert three ndarrays to a SparseTensor which would be used in Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
Present a dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray non-zero elements in this SparseTensor
:param i_ndarray zero-based indices for non-zero element
i_ndarray's shape should be (shape.size, a_ndarray.size)
And the i-th non-zero elements indices is i_ndarray[:, 1]
:param shape shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> result
JTensor: storage: [ 1. 2. 3. 4. 5. 6.], shape: [10] ,indices [1 2 3 4 5 6], float
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
        As a SparseTensor may generate a very big ndarray, we don't support this function for SparseTensor.
:return: a ndarray
"""
assert self.indices is None, "sparseTensor to ndarray is not supported"
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape) # noqa
class RNG():
"""
generate tensor data with seed
"""
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'Sample',
'EvaluatedResult',
'JTensor',
'JActivity'
]
def redire_spark_logs(bigdl_type="float", log_path=os.getcwd()+"/bigdl.log"):
"""
Redirect spark logs to the specified path.
:param bigdl_type: "double" or "float"
:param log_path: the file path to be redirected to; the default file is under the current workspace named `bigdl.log`.
"""
callBigDlFunc(bigdl_type, "redirectSparkLogs", log_path)
def show_bigdl_info_logs(bigdl_type="float"):
"""
Set BigDL log level to INFO.
:param bigdl_type: "double" or "float"
"""
callBigDlFunc(bigdl_type, "showBigDlInfoLogs")
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
:param numSlices:
:return:
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
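# A minimal sketch, assuming an active SparkContext and arrays whose first
# dimension is the batch dimension:
#
#   import numpy as np
#   x = np.random.random((10, 3))          # 10 samples, 3 features each
#   y = np.random.randint(0, 2, (10, 1))   # 10 labels
#   sample_rdd = to_sample_rdd(x, y)
#   sample_rdd.first()                     # -> Sample(features=..., labels=...)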
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
        # Might have a threading issue, but we can't add _lock here
        # as it's not an RLock in spark1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
jinstance = JavaCreator.instance(bigdl_type=bigdl_type).value
sc = get_spark_context()
api = getattr(jinstance, name)
return callJavaFunc(sc, api, *args)
def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
result = func(*args)
return _java2py(sc, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(sc, x) for x in obj],
sc._gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(sc, value)
obj = MapConverter().convert(result, sc._gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
raise Exception("Unsupported activation type: %s" % activation_name)
    if activation_id:
activation.set_name(activation_id)
return activation
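# A minimal sketch of the name -> layer mapping (lookup is case-insensitive):
#
#   relu = get_activation_by_name("relu")            # a bigdl ReLU layer
#   tanh = get_activation_by_name("Tanh", "my_tanh") # named instance
#   get_activation_by_name("gelu")                   # raises: unsupported type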
if __name__ == "__main__":
_test()
 | 34.977273 | 136 | 0.625487 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import ListConverter, JavaArray, JavaList, JavaMap, MapConverter
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
import numpy as np
import threading
import tempfile
from bigdl.util.engine import get_bigdl_classpath, is_spark_below_2_2
INTMAX = 2147483647
INTMIN = -2147483648
DOUBLEMAX = 1.7976931348623157E308
if sys.version >= '3':
long = int
unicode = str
class SingletonMixin(object):
_lock = threading.RLock()
_instance = None
@classmethod
def instance(cls,
bigdl_type="float"):
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = cls(bigdl_type)
return cls._instance
class JavaCreator(SingletonMixin):
__creator_class="com.intel.analytics.bigdl.python.api.PythonBigDL"
@classmethod
def get_creator_class(cls):
with JavaCreator._lock:
return JavaCreator.__creator_class
@classmethod
def set_creator_class(cls, cclass):
with JavaCreator._lock:
JavaCreator.__creator_class = cclass
JavaCreator._instance = None
def __init__(self, bigdl_type):
sc = get_spark_context()
jclass = getattr(sc._jvm, JavaCreator.get_creator_class())
if bigdl_type == "float":
self.value = getattr(jclass, "ofFloat")()
elif bigdl_type == "double":
self.value = getattr(jclass, "ofDouble")()
else:
raise Exception("Not supported bigdl_type: %s" % bigdl_type)
class JavaValue(object):
def jvm_class_constructor(self):
name = "create" + self.__class__.__name__
print("creating: " + name)
return name
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, self.jvm_class_constructor(), *args)
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString()
class EvaluatedResult():
"""
A testing result used to benchmark the model quality.
"""
def __init__(self, result, total_num, method):
"""
:param result: the validation result. i.e: top1 accuracy percentage.
:param total_num: the total processed records.
:param method: the validation method. i.e: Top1Accuracy
"""
self.result = result
self.total_num = total_num
self.method = method
def __reduce__(self):
return (EvaluatedResult, (self.result, self.total_num, self.method))
def __str__(self):
return "Evaluated result: %s, total_num: %s, method: %s" % (
self.result, self.total_num, self.method)
def get_dtype(bigdl_type):
# Always return float32 for now
return "float32"
class Configuration(object):
__bigdl_jars = [get_bigdl_classpath()]
@staticmethod
def add_extra_jars(jars):
"""
Add extra jars to classpath
:param jars: a string or a list of strings as jar paths
"""
import six
if isinstance(jars, six.string_types):
jars = [jars]
Configuration.__bigdl_jars += jars
@staticmethod
def add_extra_python_modules(packages):
"""
Add extra python modules to sys.path
:param packages: a string or a list of strings as python package paths
"""
import six
if isinstance(packages, six.string_types):
packages = [packages]
for package in packages:
sys.path.insert(0, package)
@staticmethod
def get_bigdl_jars():
return Configuration.__bigdl_jars
class JActivity(object):
def __init__(self, value):
self.value = value
class JTensor(object):
"""
A wrapper to easy our work when need to pass or return Tensor to/from Scala.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> np.random.seed(123)
>>>
"""
def __init__(self, storage, shape, bigdl_type="float", indices=None):
"""
:param storage: values in this tensor
:param shape: shape of this tensor
:param bigdl_type: numeric type
:param indices: if indices is provided, means this is a SparseTensor;
if not provided, means this is a DenseTensor
"""
if isinstance(storage, bytes) and isinstance(shape, bytes):
self.storage = np.frombuffer(storage, dtype=get_dtype(bigdl_type))
self.shape = np.frombuffer(shape, dtype=np.int32)
else:
self.storage = np.array(storage, dtype=get_dtype(bigdl_type))
self.shape = np.array(shape, dtype=np.int32)
if indices is None:
self.indices = None
elif isinstance(indices, bytes):
self.indices = np.frombuffer(indices, dtype=np.int32)
else:
assert isinstance(indices, np.ndarray), \
"indices should be a np.ndarray, not %s, %s" % (type(a_ndarray), str(indices))
self.indices = np.array(indices, dtype=np.int32)
self.bigdl_type = bigdl_type
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert a ndarray to a DenseTensor which would be used in Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> print(result)
JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float
>>> result
JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
@classmethod
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
        Convert three ndarrays to a SparseTensor which would be used in Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
Present a dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray non-zero elements in this SparseTensor
:param i_ndarray zero-based indices for non-zero element
i_ndarray's shape should be (shape.size, a_ndarray.size)
And the i-th non-zero elements indices is i_ndarray[:, 1]
:param shape shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> result
JTensor: storage: [ 1. 2. 3. 4. 5. 6.], shape: [10] ,indices [1 2 3 4 5 6], float
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
        As a SparseTensor may generate a very big ndarray, we don't support this function for SparseTensor.
:return: a ndarray
"""
assert self.indices is None, "sparseTensor to ndarray is not supported"
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape) # noqa
def __reduce__(self):
if self.indices is None:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type)
else:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type, self.indices.tostring())
def __str__(self):
return self.__repr__()
def __repr__(self):
indices = "" if self.indices is None else " ,indices %s" % str(self.indices)
return "JTensor: storage: %s, shape: %s%s, %s" % (str(self.storage), str(self.shape), indices, self.bigdl_type)
class Sample(object):
def __init__(self, features, labels, bigdl_type="float"):
"""
User should always use Sample.from_ndarray to construct Sample.
:param features: a list of JTensors
:param labels: a list of JTensors
:param bigdl_type: "double" or "float"
"""
self.feature = features[0]
self.features = features
self.label = labels[0]
self.bigdl_type = bigdl_type
self.labels = labels
@classmethod
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
Convert a ndarray of features and labels to Sample, which would be used in Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.util.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> sample_back = callBigDlFunc("float", "testSample", sample)
>>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray())
>>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray())
>>> print(sample)
Sample: features: [JTensor: storage: [[ 0.69646919 0.28613934 0.22685145]
[ 0.55131477 0.71946895 0.42310646]], shape: [2 3], float], labels: [JTensor: storage: [[ 0.98076421 0.68482971 0.48093191]
[ 0.39211753 0.343178 0.72904968]], shape: [2 3], float],
"""
if isinstance(features, np.ndarray):
features = [features]
else:
assert all(isinstance(feature, np.ndarray) for feature in features), \
"features should be a list of np.ndarray, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
assert all(isinstance(label, np.ndarray) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
@classmethod
def from_jtensor(cls, features, labels, bigdl_type="float"):
"""
Convert a sequence of JTensor to Sample, which would be used in Java side.
:param features: an JTensor or a list of JTensor
:param labels: an JTensor or a list of JTensor or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> data = np.random.uniform(0, 1, (6)).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> feature0 = JTensor.sparse(data, indices, shape)
>>> feature1 = JTensor.from_ndarray(np.random.uniform(0, 1, (2, 3)).astype("float32"))
>>> sample = Sample.from_jtensor([feature0, feature1], 1)
"""
if isinstance(features, JTensor):
features = [features]
else:
assert all(isinstance(feature, JTensor) for feature in features), \
"features should be a list of JTensor, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [JTensor.from_ndarray(np.array(labels))]
elif isinstance(labels, JTensor):
labels = [labels]
else:
assert all(isinstance(label, JTensor) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=features,
labels=labels,
bigdl_type=bigdl_type)
def __reduce__(self):
return Sample, (self.features, self.labels, self.bigdl_type)
def __str__(self):
return "Sample: features: %s, labels: %s," % (self.features, self.labels)
def __repr__(self):
return "Sample: features: %s, labels: %s" % (self.features, self.labels)
class RNG():
"""
generate tensor data with seed
"""
def __init__(self, bigdl_type="float"):
self.bigdl_type = bigdl_type
def set_seed(self, seed):
callBigDlFunc(self.bigdl_type, "setModelSeed", seed)
def uniform(self, a, b, size):
return callBigDlFunc(self.bigdl_type, "uniform", a, b, size).to_ndarray() # noqa
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'Sample',
'EvaluatedResult',
'JTensor',
'JActivity'
]
def init_engine(bigdl_type="float"):
callBigDlFunc(bigdl_type, "initEngine")
def redire_spark_logs(bigdl_type="float", log_path=os.getcwd()+"/bigdl.log"):
"""
Redirect spark logs to the specified path.
:param bigdl_type: "double" or "float"
:param log_path: the file path to be redirected to; the default file is under the current workspace named `bigdl.log`.
"""
callBigDlFunc(bigdl_type, "redirectSparkLogs", log_path)
def show_bigdl_info_logs(bigdl_type="float"):
"""
Set BigDL log level to INFO.
:param bigdl_type: "double" or "float"
"""
callBigDlFunc(bigdl_type, "showBigDlInfoLogs")
def get_bigdl_conf():
bigdl_conf_file = "spark-bigdl.conf"
bigdl_python_wrapper = "python-api.zip"
def load_conf(conf_str):
return dict(line.split() for line in conf_str.split("\n") if
"#" not in line and line.strip())
for p in sys.path:
if bigdl_conf_file in p and os.path.isfile(p):
with open(p) if sys.version_info < (3,) else open(p, encoding='latin-1') as conf_file: # noqa
return load_conf(conf_file.read())
if bigdl_python_wrapper in p and os.path.isfile(p):
import zipfile
with zipfile.ZipFile(p, 'r') as zip_conf:
if bigdl_conf_file in zip_conf.namelist():
content = zip_conf.read(bigdl_conf_file)
if sys.version_info >= (3,):
content = str(content, 'latin-1')
return load_conf(content)
return {}
def to_list(a):
if type(a) is list:
return a
return [a]
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
:param numSlices:
:return:
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
def extend_spark_driver_cp(sparkConf, path):
original_driver_classpath = ":" + sparkConf.get("spark.driver.extraClassPath") \
if sparkConf.contains("spark.driver.extraClassPath") else ""
sparkConf.set("spark.driver.extraClassPath", path + original_driver_classpath)
def create_spark_conf():
bigdl_conf = get_bigdl_conf()
sparkConf = SparkConf()
sparkConf.setAll(bigdl_conf.items())
if not is_spark_below_2_2():
for jar in Configuration.get_bigdl_jars():
extend_spark_driver_cp(sparkConf, jar)
# add content in PYSPARK_FILES in spark.submit.pyFiles
# This is a workaround for current Spark on k8s
python_lib = os.environ.get('PYSPARK_FILES', None)
if python_lib:
existing_py_files = sparkConf.get("spark.submit.pyFiles")
if existing_py_files:
sparkConf.set(key="spark.submit.pyFiles", value="%s,%s" % (python_lib, existing_py_files))
else:
sparkConf.set(key="spark.submit.pyFiles", value=python_lib)
return sparkConf
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
        # Might have a threading issue, but we can't add _lock here
        # as it's not an RLock in spark1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
def get_spark_sql_context(sc):
if "getOrCreate" in SQLContext.__dict__:
return SQLContext.getOrCreate(sc)
else:
return SQLContext(sc) # Compatible with Spark1.5.1
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
jinstance = JavaCreator.instance(bigdl_type=bigdl_type).value
sc = get_spark_context()
api = getattr(jinstance, name)
return callJavaFunc(sc, api, *args)
def _java2py(sc, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = sc._jvm.SerDe.javaToPython(r)
return RDD(jrdd, sc)
if clsName == 'DataFrame':
return DataFrame(r, get_spark_sql_context(sc))
if clsName == 'Dataset':
return DataFrame(r, get_spark_sql_context(sc))
if clsName in _picklable_classes:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList, JavaMap)):
try:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(
r)
except Py4JJavaError:
pass # not pickable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
result = func(*args)
return _java2py(sc, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(sc, x) for x in obj],
sc._gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(sc, value)
obj = MapConverter().convert(result, sc._gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
def create_tmp_path():
tmp_file = tempfile.NamedTemporaryFile(prefix="bigdl")
tmp_file.close()
return tmp_file.name
def text_from_path(path):
sc = get_spark_context()
return sc.textFile(path).collect()[0]
def get_local_file(a_path):
if not is_distributed(a_path):
return a_path
path, data = get_spark_context().binaryFiles(a_path).collect()[0]
local_file_path = create_tmp_path()
with open(local_file_path, 'w') as local_file:
local_file.write(data)
return local_file_path
def is_distributed(path):
return "://" in path
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
raise Exception("Unsupported activation type: %s" % activation_name)
    if activation_id:
activation.set_name(activation_id)
return activation
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import layer
globs = layer.__dict__.copy()
sc = SparkContext(master="local[2]", appName="test common utility")
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| 6,514 | 5,140 | 759 |
f7cb9be055f18ca194ecae99d1870fe80ea55f5b | 1,109 | py | Python | tests/functional_tests/run_functional_tests.py | EuroPOND/deformetrica | 29cb3a4ecd40d6a19b3ee2a1c5827c4ebab54062 | [
"BSD-3-Clause"
] | 1 | 2020-10-27T07:30:56.000Z | 2020-10-27T07:30:56.000Z | tests/functional_tests/run_functional_tests.py | EuroPOND/deformetrica | 29cb3a4ecd40d6a19b3ee2a1c5827c4ebab54062 | [
"BSD-3-Clause"
] | null | null | null | tests/functional_tests/run_functional_tests.py | EuroPOND/deformetrica | 29cb3a4ecd40d6a19b3ee2a1c5827c4ebab54062 | [
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import unittest
from functional_tests.data.atlas.skulls.run import AtlasSkulls
from functional_tests.data.atlas.brain_structures.run import AtlasBrainStructures
from functional_tests.data.atlas.digits.run import AtlasDigits
from functional_tests.data.regression.skulls.run import RegressionSkulls
TEST_MODULES = [AtlasSkulls, AtlasBrainStructures, AtlasDigits, RegressionSkulls]
if __name__ == '__main__':
main()
 | 28.435897 | 96 | 0.717764 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import unittest
from functional_tests.data.atlas.skulls.run import AtlasSkulls
from functional_tests.data.atlas.brain_structures.run import AtlasBrainStructures
from functional_tests.data.atlas.digits.run import AtlasDigits
from functional_tests.data.regression.skulls.run import RegressionSkulls
TEST_MODULES = [AtlasSkulls, AtlasBrainStructures, AtlasDigits, RegressionSkulls]
def setup_conda_env():
path_to_environment_file = os.path.normpath(
os.path.join(os.path.abspath(__file__), '../../../environment.yml'))
cmd = 'hostname && ' \
'if [ -f ~/.profile ]; then . ~/.profile; fi &&' \
'conda env create -f %s' % path_to_environment_file
os.system(cmd)
def main():
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
setup_conda_env()
for t in TEST_MODULES:
unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromTestCase(t))
if __name__ == '__main__':
main()
| 580 | 0 | 46 |
f58acb41a8e89d1fd42a86732c8baea2aaa858cd | 1,538 | py | Python | py/decorator_ex.py | armundle/scratch | a4c0ecdc2661e59ef8f378644324edb7a9680b37 | [
"MIT"
] | null | null | null | py/decorator_ex.py | armundle/scratch | a4c0ecdc2661e59ef8f378644324edb7a9680b37 | [
"MIT"
] | null | null | null | py/decorator_ex.py | armundle/scratch | a4c0ecdc2661e59ef8f378644324edb7a9680b37 | [
"MIT"
] | null | null | null |
class decoratorWithoutArguments(object):
'''
If there are no decorator arguments, the function to be decorated is passed
to the constructor.
'''
'''
Note:
1. The major constraint on the result of a decorator is that it be callable.
The __call__ method here achieves that.
2. __call__ is called every time the decorated function is called;
    __init__ is called only once during the 'construction' of the decorated
function.
'''
'''
The __call__ method is not called until the decorated function is called.
'''
@decoratorWithoutArguments
@decoratorWithoutArguments
@decoratorFunction
if __name__ == "__main__":
func1("test", "multiple", "args")
print '\n'
func1("another", "round", "of args")
print '\n'
func2()
print '\n'
func3()
print '\n'
print "end of example"
 | 23.30303 | 80 | 0.640442 |
def decoratorFunction(f):
def newFunc():
print "inside decorator function"
print "entering", f.__name__
f()
print "exited", f.__name__
return newFunc
class decoratorWithoutArguments(object):
'''
If there are no decorator arguments, the function to be decorated is passed
to the constructor.
'''
def __init__(self, f):
self.f = f
print "decoratorWithoutArguments.__init__"
'''
Note:
1. The major constraint on the result of a decorator is that it be callable.
The __call__ method here achieves that.
2. __call__ is called every time the decorated function is called;
    __init__ is called only once during the 'construction' of the decorated
function.
'''
'''
The __call__ method is not called until the decorated function is called.
'''
def __call__(self, *args):
print "decoratorWithoutArguments.__call__"
print "entering", self.f.__name__
self.f(*args)
print "exited", self.f.__name__
@decoratorWithoutArguments
def func1(a1, a2, a3):
print "inside function 1"
print "spell args: ", a1, a2, a3
@decoratorWithoutArguments
def func2():
print "inside function 2"
print "no args"
@decoratorFunction
def func3():
print "inside function 3"
if __name__ == "__main__":
func1("test", "multiple", "args")
print '\n'
func1("another", "round", "of args")
print '\n'
func2()
print '\n'
func3()
print '\n'
print "end of example"
| 531 | 0 | 140 |
ac5abfce774cc29f2045d6524242cc65405f6117 | 1,163 | py | Python | aitextgen/cli.py | artificial-podcast/aitextgen | 6e07361a2f15de7925a3cdefc91d32390b7489c9 | [
"MIT"
] | 1,416 | 2020-05-18T15:41:34.000Z | 2022-03-31T09:39:55.000Z | aitextgen/cli.py | artificial-podcast/aitextgen | 6e07361a2f15de7925a3cdefc91d32390b7489c9 | [
"MIT"
] | 165 | 2020-05-19T00:14:32.000Z | 2022-03-30T16:56:36.000Z | aitextgen/cli.py | artificial-podcast/aitextgen | 6e07361a2f15de7925a3cdefc91d32390b7489c9 | [
"MIT"
] | 156 | 2020-05-18T16:28:17.000Z | 2022-03-24T14:50:31.000Z |
from .aitextgen import aitextgen
from .TokenDataset import TokenDataset
from .tokenizers import train_tokenizer
import fire
def aitextgen_cli(**kwargs):
"""Entrypoint for the CLI"""
fire.Fire(
{
"encode": encode_cli,
"train": train_cli,
"generate": generate_cli,
"train_tokenizer": train_tokenizer_cli,
}
)
def encode_cli(file_path: str, **kwargs):
"""Encode + compress a dataset"""
TokenDataset(file_path, save_cache=True, **kwargs)
def train_cli(file_path: str, **kwargs):
"""Train on a dataset."""
ai = aitextgen(**kwargs)
from_cache = file_path.endswith(".tar.gz")
dataset = TokenDataset(file_path, from_cache=from_cache, **kwargs)
ai.train(dataset, **kwargs)
def generate_cli(to_file: bool = True, **kwargs):
"""Generate from a trained model, or download one if not present."""
ai = aitextgen(**kwargs)
if to_file:
ai.generate_to_file(**kwargs)
else:
ai.generate(**kwargs)
def train_tokenizer_cli(files: str, **kwargs):
"""Trains a tokenizer on the specified file."""
train_tokenizer(files, **kwargs)
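# A minimal sketch of how these entrypoints are typically invoked through
# python-fire from a shell; the console-script name `aitextgen` and the flag
# values are illustrative and depend on how the package is installed:
#
#   aitextgen encode dataset.txt
#   aitextgen train dataset.txt --num_steps 5000
#   aitextgen generate --prompt "Hello" --to_file False
#   aitextgen train_tokenizer dataset.txt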
 | 24.744681 | 72 | 0.648323 |
from .aitextgen import aitextgen
from .TokenDataset import TokenDataset
from .tokenizers import train_tokenizer
import fire
def aitextgen_cli(**kwargs):
"""Entrypoint for the CLI"""
fire.Fire(
{
"encode": encode_cli,
"train": train_cli,
"generate": generate_cli,
"train_tokenizer": train_tokenizer_cli,
}
)
def encode_cli(file_path: str, **kwargs):
"""Encode + compress a dataset"""
TokenDataset(file_path, save_cache=True, **kwargs)
def train_cli(file_path: str, **kwargs):
"""Train on a dataset."""
ai = aitextgen(**kwargs)
from_cache = file_path.endswith(".tar.gz")
dataset = TokenDataset(file_path, from_cache=from_cache, **kwargs)
ai.train(dataset, **kwargs)
def generate_cli(to_file: bool = True, **kwargs):
"""Generate from a trained model, or download one if not present."""
ai = aitextgen(**kwargs)
if to_file:
ai.generate_to_file(**kwargs)
else:
ai.generate(**kwargs)
def train_tokenizer_cli(files: str, **kwargs):
"""Trains a tokenizer on the specified file."""
train_tokenizer(files, **kwargs)
| 0 | 0 | 0 |
da24f88b5348683a100e8fcf34be0d76768dd8e8 | 66 | py | Python | LRL-main-arena/LRL_main_arena/envs/__init__.py | Robotics-Club-IIT-BHU/LaRoboLiga_PS2Arena | 8fddadb7b7078368ec2e8d6c99d7a5c4a60e8f44 | [
"MIT"
] | 1 | 2022-02-14T11:01:48.000Z | 2022-02-14T11:01:48.000Z | LRL-main-arena/LRL_main_arena/envs/__init__.py | Robotics-Club-IIT-BHU/LaRoboLiga_PS2Arena | 8fddadb7b7078368ec2e8d6c99d7a5c4a60e8f44 | [
"MIT"
] | null | null | null | LRL-main-arena/LRL_main_arena/envs/__init__.py | Robotics-Club-IIT-BHU/LaRoboLiga_PS2Arena | 8fddadb7b7078368ec2e8d6c99d7a5c4a60e8f44 | [
"MIT"
] | 13 | 2022-02-14T12:57:06.000Z | 2022-03-02T11:57:23.000Z |
from LRL_main_arena.envs.LaRoboLiga_main import LaRoboLigaPs2Arena
 | 66 | 66 | 0.924242 |
from LRL_main_arena.envs.LaRoboLiga_main import LaRoboLigaPs2Arena
 | 0 | 0 | 0
4feb1591835f34dd864a6548fe3da65c2fa4cd1b | 977 | py | Python | tests/test_0088-read-with-http.py | eic/uproot4 | deb8d88c2643521f372bf5005c51af8926016c7e | [
"BSD-3-Clause"
] | 133 | 2020-05-08T21:34:11.000Z | 2022-03-07T18:12:58.000Z | tests/test_0088-read-with-http.py | eic/uproot4 | deb8d88c2643521f372bf5005c51af8926016c7e | [
"BSD-3-Clause"
] | 269 | 2020-05-13T02:42:24.000Z | 2022-03-24T20:24:16.000Z | tests/test_0088-read-with-http.py | eic/uproot4 | deb8d88c2643521f372bf5005c51af8926016c7e | [
"BSD-3-Clause"
] | 45 | 2020-05-15T17:48:04.000Z | 2022-03-18T19:23:07.000Z |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import pytest
import uproot
@pytest.mark.network
@pytest.mark.network
@pytest.mark.network
 | 27.138889 | 93 | 0.657114 |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import pytest
import uproot
@pytest.mark.network
def test_issue176():
with uproot.open(
"https://starterkit.web.cern.ch/starterkit/data/advanced-python-2019/dalitzdata.root"
) as f:
data = f["tree/Y1"].array(library="np")
assert len(data) == 100000
@pytest.mark.network
def test_issue176_again():
with uproot.open(
"https://starterkit.web.cern.ch/starterkit/data/advanced-python-2019/dalitzdata.root"
) as f:
data = f["tree"].arrays(["Y1", "Y2"], library="np")
assert len(data["Y1"]) == 100000
assert len(data["Y2"]) == 100000
@pytest.mark.network
def test_issue121():
with uproot.open(
"https://github.com/CoffeaTeam/coffea/raw/master/tests/samples/nano_dy.root"
) as f:
data = f["Events/MET_pt"].array(library="np")
assert len(data) == 40
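# These regression tests hit the network; assuming the `network` marker is
# registered in the project's pytest configuration, just this module can be
# run with:
#
#   pytest -m network tests/test_0088-read-with-http.py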
| 688 | 0 | 66 |
cd9ff6717c93c6515ce432df8655c8e7c488c943 | 476 | py | Python | 2019/day1/day1.py | st--/advent-of-code | b01ca4502820c972015e940024dbd482f7bc69c2 | [
"MIT"
] | null | null | null | 2019/day1/day1.py | st--/advent-of-code | b01ca4502820c972015e940024dbd482f7bc69c2 | [
"MIT"
] | null | null | null | 2019/day1/day1.py | st--/advent-of-code | b01ca4502820c972015e940024dbd482f7bc69c2 | [
"MIT"
] | null | null | null |
import numpy as np
module_masses = load_inputs("input.txt")
module_fuels = map(get_fuel_v2, module_masses)
total = sum(module_fuels)
print("Total: ", total)
 | 22.666667 | 48 | 0.653361 |
import numpy as np
def get_fuel_v1(mass):
return int(np.floor(mass / 3) - 2)
def get_fuel_v2(mass):
fuel = int(np.floor(mass / 3) - 2)
if fuel <= 0:
return 0
else:
return fuel + get_fuel_v2(fuel)
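# Worked example: a module of mass 1969 needs 654 fuel, that fuel needs 216,
# then 70, 21, 5, and finally 0, so get_fuel_v2(1969) == 654+216+70+21+5 == 966.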
def load_inputs(filename):
with open(filename) as f:
return [int(line.strip()) for line in f]
module_masses = load_inputs("input.txt")
module_fuels = map(get_fuel_v2, module_masses)
total = sum(module_fuels)
print("Total: ", total)
| 249 | 0 | 69 |
c24f80e8efcacbe920de67d48e1906505b7cea23 | 11,190 | py | Python | src/prog_track.py | sungheeyun/WikiConverter | 1f1d6f9f5e03a4f30770859c07f2af3adba64f32 | [
"Unlicense"
] | null | null | null | src/prog_track.py | sungheeyun/WikiConverter | 1f1d6f9f5e03a4f30770859c07f2af3adba64f32 | [
"Unlicense"
] | null | null | null | src/prog_track.py | sungheeyun/WikiConverter | 1f1d6f9f5e03a4f30770859c07f2af3adba64f32 | [
"Unlicense"
] | null | null | null |
#!/Users/sunyun/anaconda3/bin/python
import sys
import time
import utils
sys.path.append("/Users/sunyun/software/python")
if __name__ == "__main__":
team = Team("MRE")
team.add_member("Caroline", "McQuatt")
team.add_member("Dmitry", "Serpakov")
team.add_member("James", "Roland")
team.add_member("Jan", "Duzinkiwicz")
team.add_member("Lewis", "Elliot")
team.add_member("Mirco", "Padovan")
team.add_member("Mohamed", "Karnel")
team.add_member("Sherif", "Elian")
team.add_member("Sridhar", "Sundarraman")
team.add_member("Sunghee", "Yun")
team.add_data("Caroline", "Android", "11/2/17", "20% (3/5 modules)")
team.add_data("Caroline", "Android", "11/16/17", "33%")
team.add_data("Caroline", "Android", "1/5/18", "50%")
team.add_data("Caroline", "Android", "1/18/18", "ready for exam this week")
team.add_data("Caroline", "Android", "2/1/18", "ready for exam")
team.add_data("Caroline", "Android", "2/19/18", "ready for exam")
team.add_data("Caroline", "Android", "3/29/18", "ready for exam")
team.add_data("Caroline", "Android", "4/12/18", "want to do Sprint (w/o certificate)")
team.add_data("Caroline", "Android", "4/27/18", "starting Android Sprint on May 7th")
team.add_data("Caroline", "Android", "5/10/18", "started Android Sprint")
team.add_data("Dmitry", "Android", "11/2/17", "0%")
team.add_data("Dmitry", "Android", "11/16/17", "0%")
team.add_data("Dmitry", "Android", "1/5/18", "0%")
team.add_data("Dmitry", "Android", "1/18/18", "planning to start this week")
team.add_data("Dmitry", "iOS", "2/1/18", "40%")
team.add_data("Dmitry", "iOS", "2/19/18", "40%")
team.add_data("Dmitry", "iOS", "3/29/18", "40%")
team.add_data("Dmitry", "iOS", "4/12/18", "40%")
team.add_data("Dmitry", "iOS", "4/27/18", "40%")
team.add_data("James", "iOS", "11/2/17", "0%")
team.add_data("James", "iOS", "11/16/17", "0%")
team.add_data("James", "iOS", "1/5/18", "0%")
team.add_data("James", "iOS", "1/18/18", "0%")
team.add_data("James", "iOS", "2/1/18", "0%")
team.add_data("James", "iOS", "2/19/18", "0%")
team.add_data("James", "iOS", "3/29/18", "0%")
team.add_data("James", "iOS", "4/27/18", "0%")
team.add_data("Jan", "Android", "11/2/17", "1.437%")
team.add_data("Jan", "Android", "11/16/17", "1.437%")
team.add_data("Jan", "Android", "1/5/18", "1.437%")
team.add_data("Jan", "Android", "2/1/18", "on lesson 1.4")
team.add_data("Jan", "Android", "2/19/18", "on lesson 1.4")
team.add_data("Jan", "Android", "3/29/18", "on lesson 1.4")
team.add_data("Lewis", "iOS", "11/2/17", "0%")
team.add_data("Lewis", "iOS", "11/16/17", "0%")
team.add_data("Lewis", "iOS", "2/1/18", "20%")
team.add_data("Lewis", "iOS", "2/19/18", "20%")
team.add_data("Lewis", "iOS", "3/29/18", "20%")
team.add_data("Lewis", "iOS", "4/12/18", "20%")
team.add_data("Lewis", "iOS", "4/27/18", "20%")
team.add_data("Mirco", "Android", "11/2/17", "0%")
team.add_data("Mirco", "Android", "11/16/17", "25%")
team.add_data("Mirco", "Android", "1/5/18", "100% - ready for test")
team.add_data("Mirco", "Android", "1/18/18", "planning on taking test next week")
team.add_data("Mirco", "Android", "2/1/18", "100% - finished taking test")
team.add_data("Mirco", "Android", "2/19/18", "finished Sprint on Android!")
team.add_data("Mirco", "iOS", "3/29/18", "20%")
team.add_data("Mirco", "iOS", "4/12/18", "20%")
team.add_data("Mirco", "iOS", "4/27/18", "30%")
team.add_data("Mohamed", "Android", "11/2/17", "20%")
team.add_data("Mohamed", "Android", "11/16/17", "30-35%")
team.add_data("Mohamed", "Android", "1/5/18", "50%")
team.add_data("Mohamed", "Android", "1/18/18", "finished pdf. practice before the exam")
team.add_data("Mohamed", "Android", "2/1/18", "100% - planning to take certificate in Q1")
team.add_data("Mohamed", "Android", "3/29/18", "Gotten certificate and doing Sprint")
team.add_data("Sherif", "iOS", "11/2/17", "77.77%")
team.add_data("Sherif", "iOS", "11/16/17", "95%")
team.add_data("Sherif", "iOS", "1/5/18", "100% - ready for sprint (Feb)")
team.add_data("Sherif", "iOS", "2/1/18", "100% - planning to start sprint on Feb. 5th")
team.add_data("Sherif", "iOS", "2/19/18", "finished Sprint on iOS!")
team.add_data("Sridhar", "Android", "11/2/17", "50% planning on taking test 11/31")
team.add_data("Sridhar", "Android", "11/16/17", "50%")
team.add_data("Sridhar", "Android", "1/5/18", "100% - ready for sprint (March)")
team.add_data("Sridhar", "Android", "2/1/18", "100% - ready for sprint (March)")
team.add_data("Sridhar", "Android", "3/29/18", "starting Sprint on Android")
team.add_data("Sridhar", "Android", "4/27/18", "finished Sprint on Android")
team.add_data("Sunghee", "Android", "11/2/17", "0%")
team.add_data("Sunghee", "Android", "11/16/17", "0%")
team.add_data("Sunghee", "Android", "1/5/18", "0%")
team.add_data("Sunghee", "Android", "1/18/18", "5%")
team.add_data("Sunghee", "Android", "2/1/18", "5%")
team.add_data("Sunghee", "Android", "2/19/18", "5%")
team.add_data("Sunghee", "Android", "3/29/18", "5%")
team.add_data("Sunghee", "Android", "4/12/18", "5%")
team.add_data("Sunghee", "Android", "4/27/18", "5%")
team.add_data("Mirco", "iOS", "7/6/18", "50%")
team.write_to_wiki_by_name("mt_by_name.wtb")
# team.write_to_wiki_by_platform("mt_by_platform.wtb")
| 37.17608 | 106 | 0.603843 | #!/Users/sunyun/anaconda3/bin/python
import sys
import time
import utils
sys.path.append("/Users/sunyun/software/python")
class Member(object):
DateFormat = "%m/%d/%y"
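    # e.g. time.strptime("11/2/17", Member.DateFormat) parses to Nov 2, 2017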
# CONSTRUCTOR
def __init__(self, firstName, lastName):
assert isinstance(firstName, str), firstName.__class__
assert isinstance(lastName, str), lastName.__class__
self.firstName = firstName
self.lastName = lastName
self.platDatePNoteDict = dict()
# GETTERS
def get_data_set(self):
dateSet = set()
for platform, date in self.platDatePNoteDict:
dateSet.add(date)
return dateSet
def get_fullname(self):
return "%s %s" % (self.firstName, self.lastName)
# PUTTERS
def add_note(self, platform, date, note):
assert isinstance(platform, str), platform.__class__
assert isinstance(date, str), date.__class__
assert isinstance(note, str), note.__class__
assert platform == "Android" or platform == "iOS", platform
date = time.strptime(date, Member.DateFormat)
self.platDatePNoteDict[(platform, date)] = note
def add_row_to_wiki_table(self, newWikiTable, dateList, platformL=["Android", "iOS"], nameFirst=True):
self._add_row_to_wiki_table(newWikiTable, dateList, platformL, nameFirst)
def _add_row_to_wiki_table(self, newWikiTable, dateList, platformL, nameFirst):
assert isinstance(newWikiTable, utils.NewWikiTable), newWikiTable.__class__
assert isinstance(dateList, list), dateList.__class__
assert isinstance(platformL, list), platformL.__class__
assert isinstance(nameFirst, bool), nameFirst.__class__
res = dict()
# res['Android'] = [ '' ] * len(dateList)
# res['iOS'] = [ '' ] * len(dateList)
for platform in platformL:
res[platform] = [""] * len(dateList)
for key, note in self.platDatePNoteDict.items():
platform, date = key
            # Only render notes for the platforms requested for this table
            if platform not in platformL:
                continue
found = False
for idx, dt in enumerate(dateList):
if date == dt:
res[platform][idx] = note
found = True
break
assert found, found
# newWikiTable.addRow( [ self.get_fullname(), 'Android' ] + res['Android'] )
# newWikiTable.addRow( [ self.get_fullname(), 'iOS' ] + res['iOS'] )
for platform in platformL:
if nameFirst:
fl = [self.get_fullname(), platform]
else:
fl = [platform, self.get_fullname()]
newWikiTable.addRow(fl + res[platform])
class Team(object):
@staticmethod
def date_to_str(date):
assert isinstance(date, time.struct_time), date.__class__
return time.strftime("%b %d", date)
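        # e.g. date_to_str(time.strptime("11/2/17", Member.DateFormat)) -> "Nov 02"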
# CONSTRUCTOR
def __init__(self, teamName):
assert isinstance(teamName, str), teamName.__class__
self.teamName = teamName
self.nameMemberDict = dict()
# GETTERS
def get_date_list(self):
dateSet = set()
for key, member in self.nameMemberDict.items():
dateSet.update(member.get_data_set())
dateList = list(dateSet)
dateList.sort(reverse=True)
return dateList
def get_member_list(self):
memberList = list()
for firstName, member in self.nameMemberDict.items():
memberList.append(member)
memberList.sort(key=lambda member: member.firstName)
return memberList
# PUTTERS
def add_member(self, firstName, lastName):
assert firstName not in self.nameMemberDict, (firstName, list(self.nameMemberDict.keys()))
self.nameMemberDict[firstName] = Member(firstName, lastName)
def add_data(self, firstName, platform, date, note):
assert isinstance(firstName, str), firstName.__class__
assert isinstance(platform, str), platform.__class__
assert isinstance(date, str), date.__class__
assert isinstance(note, str), note.__class__
self.nameMemberDict[firstName].add_note(platform, date, note)
# WRITERS
def write_to_wiki_by_name(self, fn):
memberList = self.get_member_list()
dateList = self.get_date_list()
newWikiTable = utils.NewWikiTable([100] * 2 + [150] * len(dateList), "")
newWikiTable.addRow(["Engineer", "Platform"] + [Team.date_to_str(date) for date in dateList])
for member in memberList:
member.add_row_to_wiki_table(newWikiTable, dateList)
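        # The spanD/boldD conventions below belong to the local utils.NewWikiTable
        # (assumed from usage here, not verified): spanD appears to map a cell
        # position to a (rowspan, colspan) pair, and boldD marks a row as bold.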
spanD = dict()
spanD[("odd", 0)] = (2, 1)
spanD["exception"] = (0, "last")
boldD = dict()
boldD[0] = True
print("writing to %s" % fn)
with open(fn, "w") as fid:
newWikiTable.write(fid, spanD=spanD, boldD=boldD)
def write_to_wiki_by_platform(self, fn):
memberList = self.get_member_list()
dateList = self.get_date_list()
newWikiTable = utils.NewWikiTable([100] * 2 + [150] * len(dateList), "")
newWikiTable.addRow(["Platform", "Engineer"] + [Team.date_to_str(date) for date in dateList])
for member in memberList:
member.add_row_to_wiki_table(newWikiTable, dateList, ["Android"], False)
for member in memberList:
member.add_row_to_wiki_table(newWikiTable, dateList, ["iOS"], False)
sizeMemberList = len(memberList)
spanD = dict()
spanD[(1, 0)] = (sizeMemberList, 1)
spanD[(1 + sizeMemberList, 0)] = (sizeMemberList, 1)
boldD = dict()
boldD[0] = True
print("writing to %s" % fn)
with open(fn, "w") as fid:
newWikiTable.write(fid, spanD=spanD, boldD=boldD)
if __name__ == "__main__":
team = Team("MRE")
team.add_member("Caroline", "McQuatt")
team.add_member("Dmitry", "Serpakov")
team.add_member("James", "Roland")
team.add_member("Jan", "Duzinkiwicz")
team.add_member("Lewis", "Elliot")
team.add_member("Mirco", "Padovan")
team.add_member("Mohamed", "Karnel")
team.add_member("Sherif", "Elian")
team.add_member("Sridhar", "Sundarraman")
team.add_member("Sunghee", "Yun")
team.add_data("Caroline", "Android", "11/2/17", "20% (3/5 modules)")
team.add_data("Caroline", "Android", "11/16/17", "33%")
team.add_data("Caroline", "Android", "1/5/18", "50%")
team.add_data("Caroline", "Android", "1/18/18", "ready for exam this week")
team.add_data("Caroline", "Android", "2/1/18", "ready for exam")
team.add_data("Caroline", "Android", "2/19/18", "ready for exam")
team.add_data("Caroline", "Android", "3/29/18", "ready for exam")
team.add_data("Caroline", "Android", "4/12/18", "want to do Sprint (w/o certificate)")
team.add_data("Caroline", "Android", "4/27/18", "starting Android Sprint on May 7th")
team.add_data("Caroline", "Android", "5/10/18", "started Android Sprint")
team.add_data("Dmitry", "Android", "11/2/17", "0%")
team.add_data("Dmitry", "Android", "11/16/17", "0%")
team.add_data("Dmitry", "Android", "1/5/18", "0%")
team.add_data("Dmitry", "Android", "1/18/18", "planning to start this week")
team.add_data("Dmitry", "iOS", "2/1/18", "40%")
team.add_data("Dmitry", "iOS", "2/19/18", "40%")
team.add_data("Dmitry", "iOS", "3/29/18", "40%")
team.add_data("Dmitry", "iOS", "4/12/18", "40%")
team.add_data("Dmitry", "iOS", "4/27/18", "40%")
team.add_data("James", "iOS", "11/2/17", "0%")
team.add_data("James", "iOS", "11/16/17", "0%")
team.add_data("James", "iOS", "1/5/18", "0%")
team.add_data("James", "iOS", "1/18/18", "0%")
team.add_data("James", "iOS", "2/1/18", "0%")
team.add_data("James", "iOS", "2/19/18", "0%")
team.add_data("James", "iOS", "3/29/18", "0%")
team.add_data("James", "iOS", "4/27/18", "0%")
team.add_data("Jan", "Android", "11/2/17", "1.437%")
team.add_data("Jan", "Android", "11/16/17", "1.437%")
team.add_data("Jan", "Android", "1/5/18", "1.437%")
team.add_data("Jan", "Android", "2/1/18", "on lesson 1.4")
team.add_data("Jan", "Android", "2/19/18", "on lesson 1.4")
team.add_data("Jan", "Android", "3/29/18", "on lesson 1.4")
team.add_data("Lewis", "iOS", "11/2/17", "0%")
team.add_data("Lewis", "iOS", "11/16/17", "0%")
team.add_data("Lewis", "iOS", "2/1/18", "20%")
team.add_data("Lewis", "iOS", "2/19/18", "20%")
team.add_data("Lewis", "iOS", "3/29/18", "20%")
team.add_data("Lewis", "iOS", "4/12/18", "20%")
team.add_data("Lewis", "iOS", "4/27/18", "20%")
team.add_data("Mirco", "Android", "11/2/17", "0%")
team.add_data("Mirco", "Android", "11/16/17", "25%")
team.add_data("Mirco", "Android", "1/5/18", "100% - ready for test")
team.add_data("Mirco", "Android", "1/18/18", "planning on taking test next week")
team.add_data("Mirco", "Android", "2/1/18", "100% - finished taking test")
team.add_data("Mirco", "Android", "2/19/18", "finished Sprint on Android!")
team.add_data("Mirco", "iOS", "3/29/18", "20%")
team.add_data("Mirco", "iOS", "4/12/18", "20%")
team.add_data("Mirco", "iOS", "4/27/18", "30%")
team.add_data("Mohamed", "Android", "11/2/17", "20%")
team.add_data("Mohamed", "Android", "11/16/17", "30-35%")
team.add_data("Mohamed", "Android", "1/5/18", "50%")
team.add_data("Mohamed", "Android", "1/18/18", "finished pdf. practice before the exam")
team.add_data("Mohamed", "Android", "2/1/18", "100% - planning to take certificate in Q1")
team.add_data("Mohamed", "Android", "3/29/18", "Gotten certificate and doing Sprint")
team.add_data("Sherif", "iOS", "11/2/17", "77.77%")
team.add_data("Sherif", "iOS", "11/16/17", "95%")
team.add_data("Sherif", "iOS", "1/5/18", "100% - ready for sprint (Feb)")
team.add_data("Sherif", "iOS", "2/1/18", "100% - planning to start sprint on Feb. 5th")
team.add_data("Sherif", "iOS", "2/19/18", "finished Sprint on iOS!")
team.add_data("Sridhar", "Android", "11/2/17", "50% planning on taking test 11/31")
team.add_data("Sridhar", "Android", "11/16/17", "50%")
team.add_data("Sridhar", "Android", "1/5/18", "100% - ready for sprint (March)")
team.add_data("Sridhar", "Android", "2/1/18", "100% - ready for sprint (March)")
team.add_data("Sridhar", "Android", "3/29/18", "starting Sprint on Android")
team.add_data("Sridhar", "Android", "4/27/18", "finished Sprint on Android")
team.add_data("Sunghee", "Android", "11/2/17", "0%")
team.add_data("Sunghee", "Android", "11/16/17", "0%")
team.add_data("Sunghee", "Android", "1/5/18", "0%")
team.add_data("Sunghee", "Android", "1/18/18", "5%")
team.add_data("Sunghee", "Android", "2/1/18", "5%")
team.add_data("Sunghee", "Android", "2/19/18", "5%")
team.add_data("Sunghee", "Android", "3/29/18", "5%")
team.add_data("Sunghee", "Android", "4/12/18", "5%")
team.add_data("Sunghee", "Android", "4/27/18", "5%")
team.add_data("Mirco", "iOS", "7/6/18", "50%")
team.write_to_wiki_by_name("mt_by_name.wtb")
# team.write_to_wiki_by_platform("mt_by_platform.wtb")
| 5,081 | 535 | 46 |
a824eb435244e6a72f7ebcfdeca66c141db54231 | 790 | py | Python | src/emutils/geometry/utils.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | src/emutils/geometry/utils.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | src/emutils/geometry/utils.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.base import BaseEstimator
def scaled_linspace(x: np.ndarray, y: np.ndarray, num: int, scaler: BaseEstimator) -> np.ndarray:
"""Generate a linspace, evenly spaced according to the normalization
Args:
x (np.ndarray): First point
        y (np.ndarray): Second point
        num (int): Number of segments; the function returns num + 1 evenly
            spaced points, including both endpoints
        scaler (BaseEstimator): Fitted scaler used to normalize the points
Returns:
np.ndarray: Sequence of points evenly spaced
"""
# Normalize the points
x = scaler.transform([x])[0]
y = scaler.transform([y])[0]
# Generate the linspace
ls = np.linspace(x, y, num=num + 1, endpoint=True)
# Unnormalize the points
ls = scaler.inverse_transform(ls)
return ls
| 28.214286 | 97 | 0.629114 | import numpy as np
from sklearn.base import BaseEstimator
def scaled_linspace(x: np.ndarray, y: np.ndarray, num: int, scaler: BaseEstimator) -> np.ndarray:
"""Generate a linspace, evenly spaced according to the normalization
Args:
x (np.ndarray): First point
y (np.ndarray): Sencond point
num (int): Number of points (in between the two points)
method (str): Normalization method
Returns:
np.ndarray: Sequence of points evenly spaced
"""
# Normalize the points
x = scaler.transform([x])[0]
y = scaler.transform([y])[0]
# Generate the linspace
ls = np.linspace(x, y, num=num + 1, endpoint=True)
# Unnormalize the points
ls = scaler.inverse_transform(ls)
return ls
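# Usage sketch (a minimal example; assumes the scaler was fitted beforehand,
# e.g. a sklearn StandardScaler; the names below are illustrative only):
#     from sklearn.preprocessing import StandardScaler
#     data = np.array([[0.0, 0.0], [10.0, 100.0]])
#     scaler = StandardScaler().fit(data)
#     points = scaled_linspace(data[0], data[1], num=4, scaler=scaler)
#     # -> 5 points from [0, 0] to [10, 100]; with a nonlinear scaler such as
#     #    QuantileTransformer the spacing would differ from a plain linspace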
| 0 | 0 | 0 |
3f5153f104031ac88200f645c7d7c5298dd2a17f | 516 | py | Python | train_eval_all.py | jake-g/urban-sound-classification | 9d505732e7ba66d3a9e67ed97d956a7c67814503 | [
"MIT"
] | null | null | null | train_eval_all.py | jake-g/urban-sound-classification | 9d505732e7ba66d3a9e67ed97d956a7c67814503 | [
"MIT"
] | null | null | null | train_eval_all.py | jake-g/urban-sound-classification | 9d505732e7ba66d3a9e67ed97d956a7c67814503 | [
"MIT"
] | 1 | 2018-10-11T07:13:59.000Z | 2018-10-11T07:13:59.000Z | from keras_classifiers import ffn1, ffn2, cnn1, cnn2, train_and_evaluate
from sklearn_classifiers import train_and_evaluate_all
import utils as u
print('\n\nEvaluating DNN Classifiers')
# train_and_evaluate(cnn1, u.FEATURE_SET_SPECS)
# train_and_evaluate(cnn2, u.FEATURE_SET_SPECS)
# train_and_evaluate(ffn1, u.FEATURE_SET_MEANS)
# train_and_evaluate(ffn2, u.FEATURE_SET_SPECS_NORM)
print('\n\nEvaluating Classifiers')
train_and_evaluate_all(u.FEATURE_SET_MEANS)
# train_and_evaluate_all(u.FEATURE_SET_SPECS_NORM) | 36.857143 | 72 | 0.843023 | from keras_classifiers import ffn1, ffn2, cnn1, cnn2, train_and_evaluate
from sklearn_classifiers import train_and_evaluate_all
import utils as u
print('\n\nEvaluating DNN Classifiers')
# train_and_evaluate(cnn1, u.FEATURE_SET_SPECS)
# train_and_evaluate(cnn2, u.FEATURE_SET_SPECS)
# train_and_evaluate(ffn1, u.FEATURE_SET_MEANS)
# train_and_evaluate(ffn2, u.FEATURE_SET_SPECS_NORM)
print('\n\nEvaluating Classifiers')
train_and_evaluate_all(u.FEATURE_SET_MEANS)
# train_and_evaluate_all(u.FEATURE_SET_SPECS_NORM) | 0 | 0 | 0 |
aec74b45141ca131949f6cb261503fcacbc2a87e | 449 | py | Python | userhandling/migrations/0002_auto_20180301_2333.py | staxx6/helloworldsite | f27916828276d1b963a908065b95b33d46971855 | [
"MIT"
] | null | null | null | userhandling/migrations/0002_auto_20180301_2333.py | staxx6/helloworldsite | f27916828276d1b963a908065b95b33d46971855 | [
"MIT"
] | null | null | null | userhandling/migrations/0002_auto_20180301_2333.py | staxx6/helloworldsite | f27916828276d1b963a908065b95b33d46971855 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-03-01 22:33
from django.db import migrations, models
import django.utils.timezone
| 22.45 | 86 | 0.632517 | # Generated by Django 2.0.2 on 2018-03-01 22:33
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('userhandling', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='birth_day',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
),
]
| 0 | 306 | 23 |
f42f6c353bd59dc76888c331de8dd573115ec720 | 36,175 | py | Python | tfx/extensions/google_cloud_ai_platform/runner_test.py | BACtaki/tfx | 29db845200beccbb0ffa1e1e1a091e314a3a470f | [
"Apache-2.0"
] | 1,813 | 2019-02-04T17:17:30.000Z | 2022-03-29T13:39:30.000Z | tfx/extensions/google_cloud_ai_platform/runner_test.py | BACtaki/tfx | 29db845200beccbb0ffa1e1e1a091e314a3a470f | [
"Apache-2.0"
] | 2,710 | 2019-02-14T00:41:00.000Z | 2022-03-31T07:23:00.000Z | tfx/extensions/google_cloud_ai_platform/runner_test.py | BACtaki/tfx | 29db845200beccbb0ffa1e1e1a091e314a3a470f | [
"Apache-2.0"
] | 731 | 2019-02-04T17:59:18.000Z | 2022-03-31T06:45:51.000Z | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.extensions.google_cloud_ai_platform.runner."""
import copy
import importlib
import os
from typing import Any, Dict
from unittest import mock
from google.auth import credentials as auth_credentials
from google.cloud import aiplatform
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import endpoint
from google.cloud.aiplatform_v1.services.endpoint_service import (
client as endpoint_service_client)
from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob
from google.cloud.aiplatform_v1beta1.types.job_state import JobState
from googleapiclient import errors
import httplib2
import tensorflow as tf
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.extensions.google_cloud_ai_platform.trainer import executor
from tfx.utils import json_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
if __name__ == '__main__':
tf.test.main()
| 38.320975 | 89 | 0.666731 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.extensions.google_cloud_ai_platform.runner."""
import copy
import importlib
import os
from typing import Any, Dict
from unittest import mock
from google.auth import credentials as auth_credentials
from google.cloud import aiplatform
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import endpoint
from google.cloud.aiplatform_v1.services.endpoint_service import (
client as endpoint_service_client)
from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob
from google.cloud.aiplatform_v1beta1.types.job_state import JobState
from googleapiclient import errors
import httplib2
import tensorflow as tf
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.extensions.google_cloud_ai_platform.trainer import executor
from tfx.utils import json_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
class RunnerTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._project_id = '12345'
self._mock_api_client = mock.Mock()
self._inputs = {}
self._outputs = {}
self._training_inputs = {
'project': self._project_id,
}
self._job_id = 'my_jobid'
# Dict format of exec_properties. custom_config needs to be serialized
# before being passed into start_cloud_training function.
self._exec_properties = {
'custom_config': {
executor.TRAINING_ARGS_KEY: self._training_inputs,
},
}
self._model_name = 'model_name'
self._ai_platform_serving_args = {
'model_name': self._model_name,
'project_id': self._project_id,
}
self._executor_class_path = 'my.executor.Executor'
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: self._executor_class_path}):
self._job_labels = telemetry_utils.make_labels_dict()
def _setUpTrainingMocks(self):
self._mock_create_request = mock.Mock()
self._mock_create = mock.Mock()
self._mock_create.return_value = self._mock_create_request
self._mock_api_client.projects().jobs().create = self._mock_create
self._mock_get = mock.Mock()
self._mock_api_client.projects().jobs().get.return_value = self._mock_get
self._mock_get.execute.return_value = {
'state': 'SUCCEEDED',
}
def _setUpVertexTrainingMocks(self):
self._mock_create = mock.Mock()
self._mock_api_client.create_custom_job = self._mock_create
self._mock_create.return_value = CustomJob(name='vertex_job_study_id')
self._mock_get = mock.Mock()
self._mock_api_client.get_custom_job = self._mock_get
self._mock_get.return_value = CustomJob(state=JobState.JOB_STATE_SUCCEEDED)
def _serialize_custom_config_under_test(self) -> Dict[str, Any]:
"""Converts self._exec_properties['custom_config'] to string."""
result = copy.deepcopy(self._exec_properties)
result['custom_config'] = json_utils.dumps(result['custom_config'])
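    # With the default fixtures above, the result looks like (inferred from the
    # request bodies asserted in the tests below):
    #   {'custom_config': '{"ai_platform_training_args": {"project": "12345"}}'}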
return result
@mock.patch(
'tfx.extensions.google_cloud_ai_platform.training_clients.discovery')
def testStartCloudTraining(self, mock_discovery):
mock_discovery.build.return_value = self._mock_api_client
self._setUpTrainingMocks()
class_path = 'foo.bar.class'
runner.start_cloud_training(self._inputs, self._outputs,
self._serialize_custom_config_under_test(),
class_path, self._training_inputs, None)
self._mock_create.assert_called_with(
body=mock.ANY, parent='projects/{}'.format(self._project_id))
kwargs = self._mock_create.call_args[1]
body = kwargs['body']
default_image = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
self.assertDictContainsSubset(
{
'masterConfig': {
'imageUri':
default_image,
'containerCommand':
runner._CONTAINER_COMMAND + [
'--executor_class_path', class_path, '--inputs', '{}',
'--outputs', '{}', '--exec-properties',
('{"custom_config": '
'"{\\"ai_platform_training_args\\": {\\"project\\": \\"12345\\"'
'}}"}')
],
},
}, body['training_input'])
self.assertNotIn('project', body['training_input'])
self.assertStartsWith(body['job_id'], 'tfx_')
self._mock_get.execute.assert_called_with()
self._mock_create_request.execute.assert_called_with()
@mock.patch(
'tfx.extensions.google_cloud_ai_platform.training_clients.discovery')
def testStartCloudTrainingWithUserContainer(self, mock_discovery):
mock_discovery.build.return_value = self._mock_api_client
self._setUpTrainingMocks()
class_path = 'foo.bar.class'
self._training_inputs['masterConfig'] = {'imageUri': 'my-custom-image'}
self._exec_properties['custom_config'][executor.JOB_ID_KEY] = self._job_id
runner.start_cloud_training(self._inputs, self._outputs,
self._serialize_custom_config_under_test(),
class_path, self._training_inputs, self._job_id)
self._mock_create.assert_called_with(
body=mock.ANY, parent='projects/{}'.format(self._project_id))
kwargs = self._mock_create.call_args[1]
body = kwargs['body']
self.assertDictContainsSubset(
{
'masterConfig': {
'imageUri':
'my-custom-image',
'containerCommand':
runner._CONTAINER_COMMAND + [
'--executor_class_path', class_path, '--inputs', '{}',
'--outputs', '{}', '--exec-properties',
('{"custom_config": '
'"{\\"ai_platform_training_args\\": '
'{\\"masterConfig\\": {\\"imageUri\\": \\"my-custom-image\\"}, '
'\\"project\\": \\"12345\\"}, '
'\\"ai_platform_training_job_id\\": \\"my_jobid\\"}"}')
],
}
}, body['training_input'])
self.assertEqual(body['job_id'], 'my_jobid')
self._mock_get.execute.assert_called_with()
self._mock_create_request.execute.assert_called_with()
@mock.patch('tfx.extensions.google_cloud_ai_platform.training_clients.gapic')
def testStartCloudTraining_Vertex(self, mock_gapic):
mock_gapic.JobServiceClient.return_value = self._mock_api_client
self._setUpVertexTrainingMocks()
class_path = 'foo.bar.class'
region = 'us-central1'
runner.start_cloud_training(self._inputs, self._outputs,
self._serialize_custom_config_under_test(),
class_path, self._training_inputs, None, True,
region)
self._mock_create.assert_called_with(
parent='projects/{}/locations/{}'.format(self._project_id, region),
custom_job=mock.ANY)
kwargs = self._mock_create.call_args[1]
body = kwargs['custom_job']
default_image = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
self.assertDictContainsSubset(
{
'worker_pool_specs': [{
'container_spec': {
'image_uri':
default_image,
'command':
runner._CONTAINER_COMMAND + [
'--executor_class_path', class_path, '--inputs',
'{}', '--outputs', '{}', '--exec-properties',
('{"custom_config": '
'"{\\"ai_platform_training_args\\": '
'{\\"project\\": \\"12345\\"'
'}}"}')
],
},
},],
}, body['job_spec'])
self.assertStartsWith(body['display_name'], 'tfx_')
self._mock_get.assert_called_with(name='vertex_job_study_id')
@mock.patch('tfx.extensions.google_cloud_ai_platform.training_clients.gapic')
def testStartCloudTrainingWithUserContainer_Vertex(self, mock_gapic):
mock_gapic.JobServiceClient.return_value = self._mock_api_client
self._setUpVertexTrainingMocks()
class_path = 'foo.bar.class'
self._training_inputs['worker_pool_specs'] = [{
'container_spec': {
'image_uri': 'my-custom-image'
}
}]
self._exec_properties['custom_config'][executor.JOB_ID_KEY] = self._job_id
region = 'us-central2'
runner.start_cloud_training(self._inputs, self._outputs,
self._serialize_custom_config_under_test(),
class_path, self._training_inputs, self._job_id,
True, region)
self._mock_create.assert_called_with(
parent='projects/{}/locations/{}'.format(self._project_id, region),
custom_job=mock.ANY)
kwargs = self._mock_create.call_args[1]
body = kwargs['custom_job']
self.assertDictContainsSubset(
{
'worker_pool_specs': [{
'container_spec': {
'image_uri':
'my-custom-image',
'command':
runner._CONTAINER_COMMAND + [
'--executor_class_path', class_path, '--inputs',
'{}', '--outputs', '{}', '--exec-properties',
('{"custom_config": '
'"{\\"ai_platform_training_args\\": '
'{\\"project\\": \\"12345\\", '
'\\"worker_pool_specs\\": '
'[{\\"container_spec\\": '
'{\\"image_uri\\": \\"my-custom-image\\"}}]}, '
'\\"ai_platform_training_job_id\\": '
'\\"my_jobid\\"}"}')
],
},
},],
}, body['job_spec'])
self.assertEqual(body['display_name'], 'my_jobid')
self._mock_get.assert_called_with(name='vertex_job_study_id')
@mock.patch('tfx.extensions.google_cloud_ai_platform.training_clients.gapic')
def testStartCloudTrainingWithVertexCustomJob(self, mock_gapic):
mock_gapic.JobServiceClient.return_value = self._mock_api_client
self._setUpVertexTrainingMocks()
class_path = 'foo.bar.class'
expected_encryption_spec = {
'kms_key_name': 'my_kmskey',
}
user_provided_labels = {
'l1': 'v1',
'l2': 'v2',
}
self._training_inputs['display_name'] = 'valid_name'
self._training_inputs['job_spec'] = {
'worker_pool_specs': [{
'container_spec': {
'image_uri': 'my-custom-image'
}
}]
}
self._training_inputs['labels'] = user_provided_labels
self._training_inputs['encryption_spec'] = expected_encryption_spec
self._exec_properties['custom_config'][executor.JOB_ID_KEY] = self._job_id
region = 'us-central2'
runner.start_cloud_training(self._inputs, self._outputs,
self._serialize_custom_config_under_test(),
class_path, self._training_inputs, self._job_id,
True, region)
self._mock_create.assert_called_with(
parent='projects/{}/locations/{}'.format(self._project_id, region),
custom_job=mock.ANY)
kwargs = self._mock_create.call_args[1]
body = kwargs['custom_job']
self.assertDictContainsSubset(
{
'worker_pool_specs': [{
'container_spec': {
'image_uri':
'my-custom-image',
'command':
runner._CONTAINER_COMMAND + [
'--executor_class_path', class_path, '--inputs',
'{}', '--outputs', '{}', '--exec-properties',
('{"custom_config": '
'"{\\"ai_platform_training_args\\": '
'{\\"display_name\\": \\"valid_name\\", '
'\\"encryption_spec\\": {\\"kms_key_name\\": '
'\\"my_kmskey\\"}, \\"job_spec\\": '
'{\\"worker_pool_specs\\": '
'[{\\"container_spec\\": '
'{\\"image_uri\\": \\"my-custom-image\\"}}]}, '
'\\"labels\\": {\\"l1\\": \\"v1\\", '
'\\"l2\\": \\"v2\\"}, '
'\\"project\\": \\"12345\\"}, '
'\\"ai_platform_training_job_id\\": '
'\\"my_jobid\\"}"}')
],
},
},],
}, body['job_spec'])
self.assertEqual(body['display_name'], 'valid_name')
self.assertDictEqual(body['encryption_spec'], expected_encryption_spec)
self.assertDictContainsSubset(user_provided_labels, body['labels'])
self._mock_get.assert_called_with(name='vertex_job_study_id')
def _setUpPredictionMocks(self):
self._serving_path = os.path.join(self._output_data_dir, 'serving_path')
self._model_version = 'model_version'
self._mock_models_create = mock.Mock()
self._mock_api_client.projects().models().create = self._mock_models_create
self._mock_versions_create = mock.Mock()
self._mock_versions_create.return_value.execute.return_value = {
'name': 'versions_create_op_name'
}
self._mock_api_client.projects().models().versions(
).create = self._mock_versions_create
self._mock_get = mock.Mock()
self._mock_api_client.projects().operations().get = self._mock_get
self._mock_set_default = mock.Mock()
self._mock_api_client.projects().models().versions(
).setDefault = self._mock_set_default
self._mock_set_default_execute = mock.Mock()
self._mock_api_client.projects().models().versions().setDefault(
).execute = self._mock_set_default_execute
self._mock_get.return_value.execute.return_value = {
'done': True,
'response': {
'name': self._model_version,
},
}
def _setUpVertexPredictionMocks(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
self._serving_container_image_uri = 'gcr.io/path/to/container'
self._serving_path = os.path.join(self._output_data_dir, 'serving_path')
self._endpoint_name = 'endpoint-name'
self._endpoint_region = 'us-central1'
self._deployed_model_id = 'model_id'
self._mock_create_client = mock.Mock()
initializer.global_config.create_client = self._mock_create_client
self._mock_create_client.return_value = mock.Mock(
spec=endpoint_service_client.EndpointServiceClient)
self._mock_get_endpoint = mock.Mock()
endpoint_service_client.EndpointServiceClient.get_endpoint = self._mock_get_endpoint
self._mock_get_endpoint.return_value = endpoint.Endpoint(
display_name=self._endpoint_name,)
aiplatform.init(
project=self._project_id,
location=None,
credentials=mock.Mock(spec=auth_credentials.AnonymousCredentials()))
self._mock_endpoint = aiplatform.Endpoint(
endpoint_name='projects/{}/locations/us-central1/endpoints/1234'.format(
self._project_id))
self._mock_endpoint_create = mock.Mock()
aiplatform.Endpoint.create = self._mock_endpoint_create
self._mock_endpoint_create.return_value = self._mock_endpoint
self._mock_endpoint_list = mock.Mock()
aiplatform.Endpoint.list = self._mock_endpoint_list
self._mock_endpoint_list.return_value = [self._mock_endpoint]
self._mock_model_upload = mock.Mock()
aiplatform.Model.upload = self._mock_model_upload
self._mock_model_deploy = mock.Mock()
self._mock_model_upload.return_value.deploy = self._mock_model_deploy
self._ai_platform_serving_args_vertex = {
'endpoint_name': self._endpoint_name,
'project_id': self._project_id,
}
def _assertDeployModelMockCalls(self,
expected_models_create_body=None,
expected_versions_create_body=None,
expect_set_default=True):
if not expected_models_create_body:
expected_models_create_body = {
'name': self._model_name,
'regions': [],
'labels': self._job_labels
}
if not expected_versions_create_body:
expected_versions_create_body = {
'name':
self._model_version,
'deployment_uri':
self._serving_path,
'runtime_version':
prediction_clients._get_tf_runtime_version(tf.__version__),
'python_version':
'3.7',
'labels':
self._job_labels
}
self._mock_models_create.assert_called_with(
body=mock.ANY,
parent='projects/{}'.format(self._project_id),
)
models_create_kwargs = self._mock_models_create.call_args[1]
self.assertDictEqual(expected_models_create_body,
models_create_kwargs['body'])
self._mock_versions_create.assert_called_with(
body=mock.ANY,
parent='projects/{}/models/{}'.format(self._project_id,
self._model_name))
versions_create_kwargs = self._mock_versions_create.call_args[1]
self.assertDictEqual(expected_versions_create_body,
versions_create_kwargs['body'])
if not expect_set_default:
return
self._mock_set_default.assert_called_with(
name='projects/{}/models/{}/versions/{}'.format(
self._project_id, self._model_name, self._model_version))
self._mock_set_default_execute.assert_called_with()
def _assertDeployModelMockCallsVertex(self,
expected_endpoint_create_body=None,
expected_model_upload_body=None,
expected_model_deploy_body=None):
if not expected_endpoint_create_body:
expected_endpoint_create_body = {
'display_name': self._endpoint_name,
'labels': self._job_labels,
}
if not expected_model_upload_body:
expected_model_upload_body = {
'display_name': self._model_name,
'artifact_uri': self._serving_path,
'serving_container_image_uri': self._serving_container_image_uri,
}
if not expected_model_deploy_body:
expected_model_deploy_body = {
'endpoint': self._mock_endpoint,
'traffic_percentage': 100,
}
self._mock_endpoint_create.assert_called_with(
**expected_endpoint_create_body)
self._mock_model_upload.assert_called_with(**expected_model_upload_body)
self._mock_model_deploy.assert_called_with(**expected_model_deploy_body)
self._mock_endpoint_list.assert_called_with(
filter='display_name="{}"'.format(self._endpoint_name))
def testDeployModelForAIPPrediction(self):
self._setUpPredictionMocks()
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_version,
ai_platform_serving_args=self._ai_platform_serving_args,
labels=self._job_labels,
api=self._mock_api_client)
expected_models_create_body = {
'name': self._model_name,
'regions': [],
'labels': self._job_labels
}
self._assertDeployModelMockCalls(
expected_models_create_body=expected_models_create_body)
def testDeployModelForAIPPredictionError(self):
self._setUpPredictionMocks()
self._mock_get.return_value.execute.return_value = {
'done': True,
'error': {
'code': 999,
'message': 'it was an error.'
},
}
with self.assertRaises(RuntimeError):
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_version,
ai_platform_serving_args=self._ai_platform_serving_args,
labels=self._job_labels,
api=self._mock_api_client)
expected_models_create_body = {
'name': self._model_name,
'regions': [],
'labels': self._job_labels
}
self._assertDeployModelMockCalls(
expected_models_create_body=expected_models_create_body,
expect_set_default=False)
def testCreateModel(self):
self._setUpPredictionMocks()
self.assertTrue(
runner.create_model_for_aip_prediction_if_not_exist(
labels=self._job_labels,
ai_platform_serving_args=self._ai_platform_serving_args,
api=self._mock_api_client))
def testCreateModelCreateError(self):
self._setUpPredictionMocks()
self._mock_models_create.return_value.execute.side_effect = (
errors.HttpError(httplib2.Response(info={'status': 409}), b''))
self.assertFalse(
runner.create_model_for_aip_prediction_if_not_exist(
labels=self._job_labels,
ai_platform_serving_args=self._ai_platform_serving_args,
api=self._mock_api_client))
def testDeployModelForAIPPredictionWithCustomRegion(self):
self._setUpPredictionMocks()
self._ai_platform_serving_args['regions'] = ['custom-region']
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_version,
ai_platform_serving_args=self._ai_platform_serving_args,
labels=self._job_labels,
api=self._mock_api_client)
expected_models_create_body = {
'name': self._model_name,
'regions': ['custom-region'],
'labels': self._job_labels
}
self._assertDeployModelMockCalls(
expected_models_create_body=expected_models_create_body)
def testDeployModelForAIPPredictionWithCustomRuntime(self):
self._setUpPredictionMocks()
self._ai_platform_serving_args['runtime_version'] = '1.23.45'
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_version,
ai_platform_serving_args=self._ai_platform_serving_args,
labels=self._job_labels,
api=self._mock_api_client)
expected_versions_create_body = {
'name': self._model_version,
'deployment_uri': self._serving_path,
'runtime_version': '1.23.45',
'python_version': '3.7',
'labels': self._job_labels,
}
self._assertDeployModelMockCalls(
expected_versions_create_body=expected_versions_create_body)
def testDeployModelForAIPPredictionWithCustomMachineType(self):
self._setUpPredictionMocks()
self._ai_platform_serving_args['machine_type'] = 'custom_machine_type'
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_version,
ai_platform_serving_args=self._ai_platform_serving_args,
labels=self._job_labels,
api=self._mock_api_client)
expected_versions_create_body = {
'name':
self._model_version,
'deployment_uri':
self._serving_path,
'machine_type':
'custom_machine_type',
'runtime_version':
prediction_clients._get_tf_runtime_version(tf.__version__),
'python_version':
'3.7',
'labels':
self._job_labels,
}
self._assertDeployModelMockCalls(
expected_versions_create_body=expected_versions_create_body)
def _setUpDeleteModelVersionMocks(self):
self._model_version = 'model_version'
self._mock_models_version_delete = mock.Mock()
self._mock_api_client.projects().models().versions().delete = (
self._mock_models_version_delete)
self._mock_models_version_delete.return_value.execute.return_value = {
'name': 'version_delete_op_name'
}
self._mock_get = mock.Mock()
self._mock_api_client.projects().operations().get = self._mock_get
self._mock_get.return_value.execute.return_value = {
'done': True,
}
def _assertDeleteModelVersionMockCalls(self):
self._mock_models_version_delete.assert_called_with(
name='projects/{}/models/{}/versions/{}'.format(self._project_id,
self._model_name,
self._model_version),)
model_version_delete_kwargs = self._mock_models_version_delete.call_args[1]
self.assertNotIn('body', model_version_delete_kwargs)
@mock.patch('tfx.extensions.google_cloud_ai_platform.runner.discovery')
def testDeleteModelVersionForAIPPrediction(self, mock_discovery):
self._setUpDeleteModelVersionMocks()
runner.delete_model_from_aip_if_exists(
ai_platform_serving_args=self._ai_platform_serving_args,
api=self._mock_api_client,
model_version_name=self._model_version)
self._assertDeleteModelVersionMockCalls()
def _setUpDeleteModelMocks(self):
self._mock_models_delete = mock.Mock()
self._mock_api_client.projects().models().delete = (
self._mock_models_delete)
self._mock_models_delete.return_value.execute.return_value = {
'name': 'model_delete_op_name'
}
self._mock_get = mock.Mock()
self._mock_api_client.projects().operations().get = self._mock_get
self._mock_get.return_value.execute.return_value = {
'done': True,
}
def _assertDeleteModelMockCalls(self):
self._mock_models_delete.assert_called_with(
name='projects/{}/models/{}'.format(self._project_id,
self._model_name),)
model_delete_kwargs = self._mock_models_delete.call_args[1]
self.assertNotIn('body', model_delete_kwargs)
@mock.patch('tfx.extensions.google_cloud_ai_platform.runner.discovery')
def testDeleteModelForAIPPrediction(self, mock_discovery):
self._setUpDeleteModelMocks()
runner.delete_model_from_aip_if_exists(
ai_platform_serving_args=self._ai_platform_serving_args,
api=self._mock_api_client,
delete_model_endpoint=True)
self._assertDeleteModelMockCalls()
def testDeployModelForVertexPrediction(self):
self._setUpVertexPredictionMocks()
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_name,
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
labels=self._job_labels,
serving_container_image_uri=self._serving_container_image_uri,
endpoint_region=self._endpoint_region,
enable_vertex=True)
expected_endpoint_create_body = {
'display_name': self._endpoint_name,
'labels': self._job_labels,
}
expected_model_upload_body = {
'display_name': self._model_name,
'artifact_uri': self._serving_path,
'serving_container_image_uri': self._serving_container_image_uri,
}
expected_model_deploy_body = {
'endpoint': self._mock_endpoint,
'traffic_percentage': 100,
}
self._assertDeployModelMockCallsVertex(
expected_endpoint_create_body=expected_endpoint_create_body,
expected_model_upload_body=expected_model_upload_body,
expected_model_deploy_body=expected_model_deploy_body)
def testDeployModelForVertexPredictionError(self):
self._setUpVertexPredictionMocks()
self._mock_model_deploy.side_effect = errors.HttpError(
httplib2.Response(info={'status': 429}), b'')
with self.assertRaises(RuntimeError):
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_name,
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
labels=self._job_labels,
serving_container_image_uri=self._serving_container_image_uri,
endpoint_region=self._endpoint_region,
enable_vertex=True)
expected_endpoint_create_body = {
'display_name': self._endpoint_name,
'labels': self._job_labels,
}
expected_model_upload_body = {
'display_name': self._model_name,
'artifact_uri': self._serving_path,
'serving_container_image_uri': self._serving_container_image_uri,
}
expected_model_deploy_body = {
'endpoint': self._mock_endpoint,
'traffic_percentage': 100,
}
self._assertDeployModelMockCallsVertex(
expected_endpoint_create_body=expected_endpoint_create_body,
expected_model_upload_body=expected_model_upload_body,
expected_model_deploy_body=expected_model_deploy_body)
def testCreateVertexModel(self):
self._setUpVertexPredictionMocks()
self.assertTrue(
runner.create_model_for_aip_prediction_if_not_exist(
labels=self._job_labels,
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
enable_vertex=True))
def testCreateVertexEndpointCreateError(self):
self._setUpVertexPredictionMocks()
self._mock_endpoint_create.side_effect = (
errors.HttpError(httplib2.Response(info={'status': 409}), b''))
self.assertFalse(
runner.create_model_for_aip_prediction_if_not_exist(
labels=self._job_labels,
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
enable_vertex=True))
def testDeployModelForVertexPredictionWithCustomRegion(self):
self._setUpVertexPredictionMocks()
self._mock_init = mock.Mock()
aiplatform.init = self._mock_init
self._endpoint_region = 'custom-region'
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_name,
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
labels=self._job_labels,
serving_container_image_uri=self._serving_container_image_uri,
endpoint_region=self._endpoint_region,
enable_vertex=True)
expected_init_body = {
'project': self._project_id,
'location': 'custom-region',
}
self._mock_init.assert_called_with(**expected_init_body)
def testDeployModelForVertexPredictionWithCustomMachineType(self):
self._setUpVertexPredictionMocks()
self._ai_platform_serving_args_vertex[
'machine_type'] = 'custom_machine_type'
runner.deploy_model_for_aip_prediction(
serving_path=self._serving_path,
model_version_name=self._model_name,
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
labels=self._job_labels,
serving_container_image_uri=self._serving_container_image_uri,
endpoint_region=self._endpoint_region,
enable_vertex=True)
expected_model_deploy_body = {
'endpoint': self._mock_endpoint,
'traffic_percentage': 100,
'machine_type': 'custom_machine_type',
}
self._assertDeployModelMockCallsVertex(
expected_model_deploy_body=expected_model_deploy_body)
def _setUpDeleteVertexModelMocks(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
self._endpoint_name = 'endpoint_name'
self._deployed_model_id = 'model_id'
self._mock_create_client = mock.Mock()
initializer.global_config.create_client = self._mock_create_client
self._mock_create_client.return_value = mock.Mock(
spec=endpoint_service_client.EndpointServiceClient)
self._mock_get_endpoint = mock.Mock()
endpoint_service_client.EndpointServiceClient.get_endpoint = self._mock_get_endpoint
self._mock_get_endpoint.return_value = endpoint.Endpoint(
display_name=self._endpoint_name)
aiplatform.init(
project=self._project_id,
location=None,
credentials=mock.Mock(spec=auth_credentials.AnonymousCredentials()))
self._mock_endpoint = aiplatform.Endpoint(
endpoint_name='projects/{}/locations/us-central1/endpoints/1234'.format(
self._project_id))
self._mock_endpoint_list = mock.Mock()
aiplatform.Endpoint.list = self._mock_endpoint_list
self._mock_endpoint_list.return_value = [self._mock_endpoint]
self._mock_model_delete = mock.Mock()
self._mock_endpoint.undeploy = self._mock_model_delete
self._mock_list_models = mock.Mock()
self._mock_list_models.return_value = [
endpoint.DeployedModel(
display_name=self._model_name, id=self._deployed_model_id)
]
self._mock_endpoint.list_models = self._mock_list_models
self._ai_platform_serving_args_vertex = {
'endpoint_name': self._endpoint_name,
'project_id': self._project_id,
}
def _assertDeleteVertexModelMockCalls(self):
self._mock_model_delete.assert_called_with(
deployed_model_id=self._deployed_model_id, sync=True)
def testDeleteModelForVertexPrediction(self):
self._setUpDeleteVertexModelMocks()
runner.delete_model_from_aip_if_exists(
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
model_version_name=self._model_name,
enable_vertex=True)
self._assertDeleteVertexModelMockCalls()
def _setUpDeleteVertexEndpointMocks(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
self._endpoint_name = 'endpoint_name'
self._mock_create_client = mock.Mock()
initializer.global_config.create_client = self._mock_create_client
self._mock_create_client.return_value = mock.Mock(
spec=endpoint_service_client.EndpointServiceClient)
self._mock_get_endpoint = mock.Mock()
endpoint_service_client.EndpointServiceClient.get_endpoint = (
self._mock_get_endpoint)
self._mock_get_endpoint.return_value = endpoint.Endpoint(
display_name=self._endpoint_name,)
aiplatform.init(
project=self._project_id,
location=None,
credentials=mock.Mock(spec=auth_credentials.AnonymousCredentials()))
self._mock_endpoint = aiplatform.Endpoint(
endpoint_name='projects/{}/locations/us-central1/endpoints/1234'.format(
self._project_id))
self._mock_endpoint_list = mock.Mock()
aiplatform.Endpoint.list = self._mock_endpoint_list
self._mock_endpoint_list.return_value = [self._mock_endpoint]
self._mock_endpoint_delete = mock.Mock()
self._mock_endpoint.delete = self._mock_endpoint_delete
self._ai_platform_serving_args_vertex = {
'endpoint_name': self._endpoint_name,
'project_id': self._project_id,
}
def _assertDeleteVertexEndpointMockCalls(self):
self._mock_endpoint_delete.assert_called_with(force=True, sync=True)
def testDeleteEndpointForVertexPrediction(self):
self._setUpDeleteVertexEndpointMocks()
runner.delete_model_from_aip_if_exists(
ai_platform_serving_args=self._ai_platform_serving_args_vertex,
model_version_name=self._model_name,
delete_model_endpoint=True,
enable_vertex=True)
self._assertDeleteVertexEndpointMockCalls()
if __name__ == '__main__':
tf.test.main()
| 32,738 | 1,786 | 23 |
dbb151e47ad33560056932ad894d00c51fab46ec | 20,997 | py | Python | tests/test_decision_maker/test_default.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1 | 2022-01-23T22:28:43.000Z | 2022-01-23T22:28:43.000Z | tests/test_decision_maker/test_default.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | null | null | null | tests/test_decision_maker/test_default.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains tests for decision_maker."""
import pytest
from aea_ledger_cosmos import CosmosCrypto
from aea_ledger_ethereum import EthereumCrypto
from aea_ledger_fetchai import FetchAICrypto
from aea.configurations.base import PublicId
from aea.crypto.registries import make_crypto, make_ledger_api
from aea.crypto.wallet import Wallet
from aea.decision_maker.base import DecisionMaker
from aea.decision_maker.default import DecisionMakerHandler
from aea.helpers.transaction.base import (
RawMessage,
RawTransaction,
SignedMessage,
Terms,
)
from aea.identity.base import Identity
from aea.protocols.base import Address, Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
from packages.fetchai.protocols.signing.dialogues import SigningDialogue
from packages.fetchai.protocols.signing.dialogues import (
SigningDialogues as BaseSigningDialogues,
)
from packages.fetchai.protocols.signing.message import SigningMessage
from tests.conftest import (
COSMOS_PRIVATE_KEY_PATH,
ETHEREUM_PRIVATE_KEY_PATH,
FETCHAI_PRIVATE_KEY_PATH,
FETCHAI_TESTNET_CONFIG,
MAX_FLAKY_RERUNS,
get_wealth_if_needed,
)
class SigningDialogues(BaseSigningDialogues):
"""This class keeps track of all oef_search dialogues."""
def __init__(self, self_address: Address) -> None:
"""
Initialize dialogues.
:param self_address: the address of the entity for whom dialogues are maintained
:return: None
"""
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return SigningDialogue.Role.SKILL
BaseSigningDialogues.__init__(
self,
self_address=self_address,
role_from_first_message=role_from_first_message,
dialogue_class=SigningDialogue,
)
class BaseTestDecisionMaker:
"""Test the decision maker."""
@classmethod
def setup(
cls,
decision_maker_handler_cls=DecisionMakerHandler,
decision_maker_cls=DecisionMaker,
):
"""Initialise the decision maker."""
cls.wallet = Wallet(
{
CosmosCrypto.identifier: COSMOS_PRIVATE_KEY_PATH,
EthereumCrypto.identifier: ETHEREUM_PRIVATE_KEY_PATH,
FetchAICrypto.identifier: FETCHAI_PRIVATE_KEY_PATH,
}
)
cls.agent_name = "test"
cls.identity = Identity(
cls.agent_name,
addresses=cls.wallet.addresses,
public_keys=cls.wallet.public_keys,
default_address_key=FetchAICrypto.identifier,
)
cls.config = {}
cls.decision_maker_handler = decision_maker_handler_cls(
identity=cls.identity, wallet=cls.wallet, config=cls.config
)
cls.decision_maker = decision_maker_cls(cls.decision_maker_handler)
cls.tx_sender_addr = "agent_1"
cls.tx_counterparty_addr = "pk"
cls.info = {"some_info_key": "some_info_value"}
cls.ledger_id = FetchAICrypto.identifier
cls.decision_maker.start()
def test_decision_maker_config(self):
"""Test config property."""
assert self.decision_maker_handler.config == self.config
def test_decision_maker_execute_w_wrong_input(self):
"""Test the execute method with wrong input."""
with pytest.raises(ValueError):
self.decision_maker.message_in_queue.put_nowait("wrong input")
with pytest.raises(ValueError):
self.decision_maker.message_in_queue.put("wrong input")
def test_decision_maker_queue_access_not_permitted(self):
"""Test the in queue of the decision maker can not be accessed."""
with pytest.raises(ValueError):
self.decision_maker.message_in_queue.get()
with pytest.raises(ValueError):
self.decision_maker.message_in_queue.get_nowait()
with pytest.raises(ValueError):
self.decision_maker.message_in_queue.protected_get(
access_code="some_invalid_code"
)
@pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
def test_handle_tx_signing_fetchai(self):
"""Test tx signing for fetchai."""
fetchai_api = make_ledger_api(
FetchAICrypto.identifier, **FETCHAI_TESTNET_CONFIG
)
sender_address = self.wallet.addresses["fetchai"]
fc2 = make_crypto(FetchAICrypto.identifier)
get_wealth_if_needed(sender_address, fetchai_api)
amount = 10000
transfer_transaction = fetchai_api.get_transfer_transaction(
sender_address=sender_address,
destination_address=fc2.address,
amount=amount,
tx_fee=1000,
tx_nonce="something",
)
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_TRANSACTION,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id=FetchAICrypto.identifier,
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_transaction=RawTransaction(
FetchAICrypto.identifier, transfer_transaction
),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert (
signing_msg_response.performative
== SigningMessage.Performative.SIGNED_TRANSACTION
)
assert type(signing_msg_response.signed_transaction.body) == dict
def test_handle_tx_signing_ethereum(self):
"""Test tx signing for ethereum."""
tx = {"gasPrice": 30, "nonce": 1, "gas": 20000}
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_TRANSACTION,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id=EthereumCrypto.identifier,
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_transaction=RawTransaction(EthereumCrypto.identifier, tx),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert (
signing_msg_response.performative
== SigningMessage.Performative.SIGNED_TRANSACTION
)
assert type(signing_msg_response.signed_transaction.body) == dict
def test_handle_tx_signing_unknown(self):
"""Test tx signing for unknown."""
tx = {}
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_TRANSACTION,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id="unknown",
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_transaction=RawTransaction("unknown", tx),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert signing_msg_response.performative == SigningMessage.Performative.ERROR
assert (
signing_msg_response.error_code
== SigningMessage.ErrorCode.UNSUCCESSFUL_TRANSACTION_SIGNING
)
def test_handle_message_signing_fetchai(self):
"""Test message signing for fetchai."""
message = b"0x11f3f9487724404e3a1fb7252a322656b90ba0455a2ca5fcdcbe6eeee5f8126d"
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id=FetchAICrypto.identifier,
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage(FetchAICrypto.identifier, message),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert (
signing_msg_response.performative
== SigningMessage.Performative.SIGNED_MESSAGE
)
assert type(signing_msg_response.signed_message) == SignedMessage
def test_handle_message_signing_ethereum(self):
"""Test message signing for ethereum."""
message = b"0x11f3f9487724404e3a1fb7252a322656b90ba0455a2ca5fcdcbe6eeee5f8126d"
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id=EthereumCrypto.identifier,
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage(EthereumCrypto.identifier, message),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert (
signing_msg_response.performative
== SigningMessage.Performative.SIGNED_MESSAGE
)
assert type(signing_msg_response.signed_message) == SignedMessage
def test_handle_message_signing_ethereum_deprecated(self):
"""Test message signing for ethereum deprecated."""
message = b"0x11f3f9487724404e3a1fb7252a3226"
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id=EthereumCrypto.identifier,
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage(
EthereumCrypto.identifier, message, is_deprecated_mode=True
),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert (
signing_msg_response.performative
== SigningMessage.Performative.SIGNED_MESSAGE
)
assert type(signing_msg_response.signed_message) == SignedMessage
assert signing_msg_response.signed_message.is_deprecated_mode
def test_handle_message_signing_unknown_and_two_dialogues(self):
"""Test message signing for unknown."""
message = b"0x11f3f9487724404e3a1fb7252a322656b90ba0455a2ca5fcdcbe6eeee5f8126d"
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id="unknown",
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage("unknown", message),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
recovered_dialogue = signing_dialogues.update(signing_msg_response)
assert recovered_dialogue is not None and recovered_dialogue == signing_dialogue
assert signing_msg_response.performative == SigningMessage.Performative.ERROR
assert (
signing_msg_response.error_code
== SigningMessage.ErrorCode.UNSUCCESSFUL_MESSAGE_SIGNING
)
def test_handle_messages_from_two_dialogues_same_agent(self):
"""Test message signing for unknown."""
message = b"0x11f3f9487724404e3a1fb7252a322656b90ba0455a2ca5fcdcbe6eeee5f8126d"
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
dialogue_reference = signing_dialogues.new_self_initiated_dialogue_reference()
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=dialogue_reference,
terms=Terms(
ledger_id="unknown",
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage("unknown", message),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
assert signing_msg_response is not None
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=dialogue_reference,
terms=Terms(
ledger_id="unknown",
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage("unknown", message),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
with pytest.raises(Exception):
# Exception occurs because the same counterparty sends two identical dialogue references
self.decision_maker.message_out_queue.get(timeout=1)
# test twice; should work again even from same agent
signing_dialogues = SigningDialogues(
str(PublicId("author", "a_skill", "0.1.0"))
)
signing_msg = SigningMessage(
performative=SigningMessage.Performative.SIGN_MESSAGE,
dialogue_reference=signing_dialogues.new_self_initiated_dialogue_reference(),
terms=Terms(
ledger_id="unknown",
sender_address="pk1",
counterparty_address="pk2",
amount_by_currency_id={"FET": -1},
is_sender_payable_tx_fee=True,
quantities_by_good_id={"good_id": 10},
nonce="transaction nonce",
),
raw_message=RawMessage("unknown", message),
)
signing_dialogue = signing_dialogues.create_with_message(
"decision_maker", signing_msg
)
assert signing_dialogue is not None
self.decision_maker.message_in_queue.put_nowait(signing_msg)
signing_msg_response = self.decision_maker.message_out_queue.get(timeout=2)
assert signing_msg_response is not None
@classmethod
def teardown(cls):
"""Tear the tests down."""
cls.decision_maker.stop()
class TestDecisionMaker(BaseTestDecisionMaker):
"""Run test for default decision maker."""
# === File: testing/test.py (repo: kyclark/configcode, license: MIT) ===
import algorithm_rgb as al
import os
import osgeo.gdal as gdal
import numpy as np
import json
input1 = './test_input/rgb_1_2_E.tif'
input2 = './test_input/rgb_40_11_W.tif'
meta = './meta.json'
# --------------------------------------------------
def test_input_files():
"""Test input files exist"""
assert os.path.isfile(input1)
assert os.path.isfile(input2)
# --------------------------------------------------
def test_get_red_green_blue_averages():
"""Test get_red_green_blue_averages"""
assert al.get_red_green_blue_averages(
read_input(input1)) == (166.8537142857143, 160.37885714285713,
139.89971428571428)
assert al.get_red_green_blue_averages(
read_input(input2)) == (109.85485714285714, 144.25085714285714, 90.381)
# --------------------------------------------------
def test_excess_greenness_index():
"""Test excess_greenness_index"""
assert al.excess_greenness_index(read_input(input1)) == 14.0
assert al.excess_greenness_index(read_input(input2)) == 88.27
# --------------------------------------------------
def test_green_leaf_index():
"""Test green_leaf_index"""
assert al.green_leaf_index(read_input(input1)) == 0.02
assert al.green_leaf_index(read_input(input2)) == 0.18
# --------------------------------------------------
def test_cive():
"""Test cive"""
assert al.cive(read_input(input1)) == 16.16
assert al.cive(read_input(input2)) == -14.96
# --------------------------------------------------
def test_normalized_difference_index():
"""Test normalized_difference_index"""
assert al.normalized_difference_index(read_input(input1)) == -1.53
assert al.normalized_difference_index(read_input(input2)) == 18.33
# --------------------------------------------------
def test_excess_red():
"""Test excess_red"""
assert al.excess_red(read_input(input1)) == 56.53
assert al.excess_red(read_input(input2)) == -1.44
# --------------------------------------------------
def test_exgr():
"""Test exgr"""
assert al.exgr(read_input(input1)) == -42.53
assert al.exgr(read_input(input2)) == 89.71
# --------------------------------------------------
def test_combined_indices_1():
"""Test combined_indices_1"""
assert al.combined_indices_1(read_input(input1)) == 30.16
assert al.combined_indices_1(read_input(input2)) == 73.31
# --------------------------------------------------
def test_combined_indices_2():
"""Test combined_indices_2"""
assert al.combined_indices_2(read_input(input1)) == 12.81
assert al.combined_indices_2(read_input(input2)) == 24.98
# --------------------------------------------------
def test_vegetative_index():
"""Test vegetative_index"""
assert al.vegetative_index(read_input(input1)) == 1.02
assert al.vegetative_index(read_input(input2)) == 1.4
# --------------------------------------------------
def test_ngrdi():
"""Test ngrdi"""
assert al.ngrdi(read_input(input1)) == -0.02
assert al.ngrdi(read_input(input2)) == 0.14
# --------------------------------------------------
def test_percent_green():
"""Test percent_green"""
assert al.percent_green(read_input(input1)) == 0.34
assert al.percent_green(read_input(input2)) == 0.42
# --------------------------------------------------
def test_calculate():
"""Test calculate"""
assert al.calculate(read_input(input1)) == [
14.0, 0.02, 16.16, -1.53, 56.53, -42.53, 30.16, 12.81, 1.02, -0.02,
0.34
]
assert al.calculate(read_input(input2)) == [
88.27, 0.18, -14.96, 18.33, -1.44, 89.71, 73.31, 24.98, 1.4, 0.14, 0.42
]
# --------------------------------------------------
def read_input(file) -> np.ndarray:
"""Run calculate on a file"""
if fh := gdal.Open(file):
pix = np.array(fh.ReadAsArray())
return np.rollaxis(pix, 0, 3)
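# Note: gdal's ReadAsArray() returns a multiband raster as (bands, rows, cols);
# np.rollaxis(pix, 0, 3) moves the band axis last so callers see
# (rows, cols, bands). Cheap sanity check of that axis move:
assert np.rollaxis(np.zeros((3, 2, 4)), 0, 3).shape == (2, 4, 3)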
# --------------------------------------------------
def test_meta():
"""Test meta"""
assert os.path.isfile(meta)
data = json.load(open(meta))
assert data['authors']
# === File: cnn_classifier_stepwise/networks/classifier_cnn_feature_extractor_df.py ===
# === (repo: fgitmichael/SelfSupevisedSkillDiscovery, license: MIT) ===
from cnn_classifier_stepwise.base.cnn_classifier_stepwise_base import \
    CnnStepwiseClassifierBaseDf
from self_supervised.network.flatten_mlp import FlattenMlpDropout
class CnnStepwiseClassifierDiscreteDf(CnnStepwiseClassifierBaseDf):
def create_stepwise_classifier(self,
feature_dim,
skill_dim,
hidden_sizes,
dropout=0.,
) -> FlattenMlpDropout:
return FlattenMlpDropout(
input_size=feature_dim,
output_size=skill_dim,
hidden_sizes=hidden_sizes,
dropout=dropout,
)
@property
def num_skills(self):
return self.skill_dim
def forward(self,
obs_next,
train=False,
):
"""
Args:
            obs_next        : (N, S, data_dim)
train : bool
Return:
train is True
pred_skills_step : (N, S, num_skills)
feature_seq : (N, S, feature_dim)
train is False
pred_skills_step : (N, S, num_skills)
"""
feature_seq = self._process_seq(obs_next)
feature_seq_pos_enc = self.pos_encoder(feature_seq)
pred_skill_scores = self.stepwise_classifier(feature_seq_pos_enc)
if train:
return dict(
classified_steps=pred_skill_scores,
hidden_features_seq=feature_seq,
)
else:
return pred_skill_scores
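# Usage sketch (hedged): the constructor arguments are defined by
# CnnStepwiseClassifierBaseDf, so the construction below is illustrative only;
# tensors are assumed to be PyTorch, with shapes as in the forward() docstring.
# clf = CnnStepwiseClassifierDiscreteDf(...)      # real signature: see the base class
# scores = clf(torch.randn(4, 50, 64))            # (N=4, S=50, data_dim=64) -> (4, 50, num_skills)
# out = clf(torch.randn(4, 50, 64), train=True)   # dict: 'classified_steps', 'hidden_features_seq'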
# === File: fasta2db.py (repo: tzom/yHydra, license: MIT) ===
import Bio,gzip
from Bio import SeqIO
import pyteomics
from pyteomics import mass,fasta
import pyteomics.parser as pyt_parser
import pandas as pd
import numpy as np
import json,os
from tqdm import tqdm
from load_config import CONFIG
MAX_DATABASE_SIZE=100000000
DB_PEPTIDE_MINIMUM_LENGTH=CONFIG['DB_PEPTIDE_MINIMUM_LENGTH']#7
DB_PEPTIDE_MAXIMUM_LENGTH=CONFIG['DB_PEPTIDE_MAXIMUM_LENGTH']#42
MAX_MISSED_CLEAVAGES=CONFIG['MAX_MISSED_CLEAVAGES']#args.MAX_MISSED_CLEAVAGES
ENZYME=CONFIG['ENZYME']
SEMI_SPECIFIC_CLEAVAGE=CONFIG['SEMI_SPECIFIC_CLEAVAGE']
SAVE=True
SAVE_DB_AS_JSON=True
if "r'" in ENZYME:
ENZYME = ENZYME.replace("r'","")
ENZYME = ENZYME.replace("'","")
ENZYME = r'%s'%ENZYME
#FASTA_FILE = CONFIG['FASTA']
def add_check_keys_exising(key,dictionary,element):
    """Add element to the set stored under key, creating the set if needed."""
if key in dictionary:
dictionary[key].add(element)
else:
dictionary[key] = set([element])
return dictionary
def cleave_peptide(protein_sequence):
    """In-silico digest of a protein sequence with the configured enzyme rule."""
#return pyt_parser.cleave(protein_sequence, pyt_parser.expasy_rules['trypsin'],min_length=PEPTIDE_MINIMUM_LENGTH,missed_cleavages=MAX_MISSED_CLEAVAGES, semi=SEMI_SPECIFIC_CLEAVAGE)
return pyt_parser.cleave(protein_sequence, ENZYME,min_length=DB_PEPTIDE_MINIMUM_LENGTH,missed_cleavages=MAX_MISSED_CLEAVAGES, semi=SEMI_SPECIFIC_CLEAVAGE)
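# Illustrative only (trypsin-style rule [KR](?!P), no length filter): cleaving
# 'MKWVTFISLLLLFSSAYSR' yields the set {'MK', 'WVTFISLLLLFSSAYSR'}; the
# min_length/missed_cleavages arguments above prune and extend that set.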
def digest_seq_record(seq_record,fasta_type='generic'):
    """Digest one (header, sequence) record; returns (header, accession id, peptides)."""
ID=None
HEADER = seq_record[0]
SEQ = seq_record[1]
if fasta_type=='generic':
accesion_id = ID
speciesName = None
protName = HEADER
if fasta_type=='uniprot':
accesion_id = ID
speciesName = HEADER.split("OS=")[1].split("OX=")[0]
prot = HEADER.split("|")[1]
protName = HEADER.split("|")[2].split("OX=")[0]
elif fasta_type=='ncbi':
accesion_id = ID
speciesName = HEADER.split("[")[1][:-1]
prot = HEADER.split("[")[0]
protName = " ".join(prot.split(" ")[1:])
#SEQ = str(seq_record.seq)
cleaved_peptides = cleave_peptide(SEQ)
LENGTH_CONDITION = lambda x: not (len(x) > DB_PEPTIDE_MAXIMUM_LENGTH or len(x) < DB_PEPTIDE_MINIMUM_LENGTH)
cleaved_peptides = list(filter(LENGTH_CONDITION,cleaved_peptides))
ODD_AMINOACIDS_CONDITION = lambda x: not (len(set(x).intersection(set(['X','U','J','Z','B','O'])))>0)
cleaved_peptides = list(filter(ODD_AMINOACIDS_CONDITION,cleaved_peptides))
accesion_id = HEADER.split()[0]
return HEADER, accesion_id, cleaved_peptides
from collections import defaultdict
#if __name__ == '__main__':
def digest_fasta(fasta_file,REVERSE_DECOY=False):
    """Digest a FASTA file into a {peptide: protein accessions} mapping; REVERSE_DECOY builds decoys."""
if REVERSE_DECOY:
DB_DIR = CONFIG['RESULTS_DIR']+'/rev/db'
else:
DB_DIR = CONFIG['RESULTS_DIR']+'/forward/db'
if not os.path.exists(DB_DIR):
os.makedirs(DB_DIR)
FASTA_FILE = fasta_file
ncbi_peptide_protein = defaultdict(set)
ncbi_peptide_meta = {}
all_peptides = []
all_proteins = []
print('Digesting peptides...')
from multiprocessing.pool import Pool, ThreadPool
with Pool() as p, ThreadPool() as tp:
if '.gz' in FASTA_FILE:
handle = gzip.open(FASTA_FILE, "rt")
else:
handle = open(FASTA_FILE, "rt")
with handle as FASTA_FILE:
if REVERSE_DECOY:
FASTA_FILE = fasta.decoy_db(FASTA_FILE,decoy_only=True)
else:
FASTA_FILE = fasta.read(FASTA_FILE)
#seqio = SeqIO.parse(FASTA_FILE, "fasta")
for seq_record in tqdm(p.map(digest_seq_record,FASTA_FILE)):
#ID = seq_record.id
#HEADER = seq_record.description
#SEQ = str(seq_record.seq)
HEADER, accesion_id, cleaved_peptides = seq_record
list(map(lambda peptide: add_check_keys_exising(peptide,ncbi_peptide_protein,accesion_id),cleaved_peptides))
# for peptide in cleaved_peptides:
# # peptide_protein_entry={'accesion_id':accesion_id,'speciesName':speciesName,'protName':protName}
# add_check_keys_exising(peptide,ncbi_peptide_protein,accesion_id)
if len(ncbi_peptide_protein) > MAX_DATABASE_SIZE:
                    print('exceeding maximum number of allowed peptides %s'%MAX_DATABASE_SIZE)
break
print('Done.')
print(len(ncbi_peptide_protein))
if SAVE_DB_AS_JSON:
print('saving db as db.json... ')
import json
ncbi_peptide_protein = dict(zip(ncbi_peptide_protein.keys(),list(map(list,ncbi_peptide_protein.values()))))
with open(os.path.join(DB_DIR,'db.json'), 'w') as fp:
json.dump(ncbi_peptide_protein, fp)
if SAVE:
print('Writing list of peptides... ')
peptides = list(ncbi_peptide_protein.keys())
#pepmasses = list(map(theoretical_peptide_mass,tqdm(peptides)))
np.save(os.path.join(DB_DIR,"peptides.npy"),np.array(peptides))
#np.save(os.path.join(DB_DIR,"pepmasses.npy"),np.array(pepmasses))
#embeddings = list(map(seq_embedder,tqdm(peptides)))
print('Done.')
return ncbi_peptide_protein
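# Usage sketch (hypothetical path; CONFIG['RESULTS_DIR'] and the DB_* settings
# above come from load_config):
# if __name__ == '__main__':
#     targets = digest_fasta('proteome.fasta.gz')                      # forward peptides
#     decoys = digest_fasta('proteome.fasta.gz', REVERSE_DECOY=True)   # reversed decoys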
# === File: w2e2_telnet.py (repo: ibyt32/pynet_test, license: Apache-2.0) ===
#!/usr/bin/env python
'''
Python and Ansible for Network Engineers
Week 2, Exercise 2
Write a script that connects using telnet to the pynet-rtr1 router. Execute
the 'show ip int brief' command on the router and return the output.
'''
import telnetlib
import time
import sys
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def login(remote_conn, userid, password):
    '''Log in at the router's username and password prompts.'''
    remote_conn.read_until("Username:", TELNET_TIMEOUT)
    remote_conn.write(userid + "\n")
    remote_conn.read_until("Password:", TELNET_TIMEOUT)
    remote_conn.write(password + "\n")
def send_commands(remote_conn, cmd):
    '''Send a command, wait briefly, and return the accumulated output.'''
    remote_conn.write(cmd + "\n")
    time.sleep(1)
    return remote_conn.read_very_eager()
def logout(remote_conn):
    '''Close the telnet session.'''
    remote_conn.close()
def main():
'''
Write a script that connects to the lab pynet-rtr1, logs in, and executes
the 'show ip interface brief' command.
'''
ip_addr = "184.105.247.70"
userid = "pyclass"
password = "88newclass"
remote_conn = telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
cmd = "show ip interface brief"
output = login(remote_conn, userid, password)
time.sleep(1)
remote_conn.read_very_eager()
output = send_commands(remote_conn, cmd)
print output
logout(remote_conn)
if __name__ == "__main__":
main()
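# Porting note (hedged): this exercise targets Python 2 ("print output" above).
# Under Python 3 the prints become function calls, e.g.:
#     print(send_commands(remote_conn, cmd))
# and telnetlib itself was removed from the standard library in Python 3.13.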
# === File: src/main.py (repo: simonenicf/Escape-from-Narlilia, license: MIT) ===
import sys
import time
import random
from menu import Menu
from game_engine import Console, Player
console = Console()
player = Player()
start = True
basement_suprise = ["demon", "open", "closed", "bear"]
sus_building = ["enemy_trap", "human_traffic", "smuggler"]
boat_stuff = ["tip over", "fight", "safe"]
truck_survive = ["no", "yes"]
# I know I don't need so many variables but still I don't care
go_in = " "
choice = " "
name = " "
start_sim = " "
enter = " "
trust = " "
enter_tower_1 = " "
retry_input = " "
jump_1 = " "
wake_up = " "
escape = " "
bye = 0
# Function to exit the game
def quit():
print("Okay bye")
exit(0)
# put the intro here
def intro():
print("You were invited by your friend to test his newest simulator.")
time.sleep(1)
print("He wanted to see how you would react on his invention.")
time.sleep(1)
print("Because he wanted to see how people would react on fleeing there own country.")
time.sleep(1)
print("And to discover how it is to trade a place you are familiar with for something new.")
time.sleep(1)
print("Far away from your friends and family.")
time.sleep(1)
print("So he created this simulator called escape simulator.")
time.sleep(1)
print("He told you the simulation will take place in a land called Narlilia.")
time.sleep(1)
print("The country is in constant war with the civilians.")
time.sleep(1)
print("The war is getting so out of hand that the civilians are starting to attack you for being befriend with the leader of the resistance.")
time.sleep(1)
print("You don't see a other way out anymore than to escape from the country.")
time.sleep(1)
print("The country you love and grow up in.")
time.sleep(1)
print("Will you manage to escape out of this country or fail horribly.")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
adventure()
def help():
print(" ")
print(67 * "-")
print(" ")
print("This game is a text based adventure where you choose your answers by typing in the answer and pressing enter.")
time.sleep(1)
print("All of the questions you can simply answer by typing the number before the anwser.")
time.sleep(1)
print("Sometimes there is a comformation text thats asks you to press enter to continue.")
time.sleep(1)
print("Quit command here: ")
print("good luck")
console.check_answer("Plz press enter to continue.")
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Start part 0
# Start adventure
def adventure():
print("@: Is anybody there???: ")
console.check_answer("Plz press enter to continue.")
time.sleep(1)
print("@: WHAAAAAA!!!!")
time.sleep(1)
print("@: Oh sorry. I didn't expect that you where here already.")
time.sleep(1)
player.set_name(console.check_answer("So what is our name user?"))
print("@: Oh hi " + player.get_name(name) + ". It's nice to meet you.")
time.sleep(1)
print("@: I'm escape simulator.")
time.sleep(1)
print("@: I'm here to teach you how it is to escape the country you currently live in")
time.sleep(1)
print("@: So shall we start the simulator?")
time.sleep(1)
print("1. Yes")
time.sleep(1)
print("2. No")
time.sleep(1)
print(" ")
print(67 * "-")
print(" ")
run()
def run():
start_sim = console.check_answer("Will you start the sim?: ", ["1", "yes", "y", "2", "no", "n"]).lower()
if start_sim == ("1") or start_sim == ("yes") or start_sim == ("y"):
print("Nyaaa")
Sim_start()
elif start_sim == ("2") or start_sim == ("no") or start_sim == ("n"):
print("@: Uhmmm...")
time.sleep(1)
print("@: Are you sure?")
time.sleep(0.5)
print("1. Yes")
time.sleep(0.5)
print("2. No")
time.sleep(0.5)
print(67 * "-")
print(" ")
run2()
def run2():
sure = console.check_answer("Are you sure?: ", ["1", "yes", "y", "2", "no", "n"]).lower()
if sure == ("1") or sure == ("yes") or sure == ("y"):
print("@: Ohhh bye then")
time.sleep(1)
print("You left the simulator alone without going on the adventure.")
time.sleep(1)
print("You never know what would have happened")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
print(" ")
time.sleep(1)
print("ENDING 1")
print("Nope not going to do this")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif sure == ("2") or sure == ("no") or sure == ("n"):
retry()
def retry():
print(" ")
print(67 * "-")
print(" ")
time.sleep(1)
print("So do you want to start the simulator?")
time.sleep(0.5)
print("1. yes")
time.sleep(0.5)
print("2. no")
time.sleep(0.5)
retry_input = console.check_answer("start simulator?: ", ["1", "2", "yes", "y", "no", "n"]).lower()
if retry_input == ("1") or retry_input == ("yes") or retry_input == ("y"):
print(" ")
print(67 * "-")
print(" ")
Sim_start()
elif retry_input == ("2") or retry_input == ("no") or retry_input == ("n"):
print("@: ARE")
time.sleep(0.5)
print("@: YOU")
time.sleep(0.3)
print("@: KIDDING")
time.sleep(0.2)
print("@: ME!!!!!!!")
time.sleep(3)
print("Good job you made the program so pissed that it blow it self up.")
time.sleep(1)
print("Now you can't enter the simulator anymore.")
time.sleep(1)
print("GG player")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
time.sleep(1)
print(" ")
print("ENDING 2")
print("BYE BYE SIMULATOR")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# End part 0
# Start part 1
def Sim_start():
print("You hear a dull beep noise.")
time.sleep(1)
print("It sounds like your alarm is going of.")
time.sleep(1)
print("So what will you do?")
time.sleep(1)
print("1. Wake up.")
time.sleep(1)
print("2. Go back to sleep.")
time.sleep(1)
print("3. Throw your alarm out of the window.")
time.sleep(1)
wake_up = console.check_answer("while you go back to sleep or wake up: ", ["1", "wake up", "2" , "sleep", "3", "throw"]).lower()
if wake_up == "1" or wake_up == "wake up":
print(67 * "-")
print(" ")
print("You wake up and turn of your alarm.")
time.sleep(1)
print("You look at the time and you see that its 10.00 am.")
time.sleep(1)
print("You get up out of bed and get yourself ready.")
time.sleep(1)
print("Outside you hear the occasional screaming of people and gunshots being fired in the far distance.")
time.sleep(1)
print("You are kind of getting sick of this.")
time.sleep(1)
print("You go down stairs to get some breakfast.")
time.sleep(1)
print("When you got down stairs you see some shady people sitting in your living room.")
time.sleep(1)
shady_people()
elif wake_up == "2" or wake_up == "sleep":
print(67 * "-")
print(" ")
print("zzzzzz")
time.sleep(1)
print("zzzzzzzzzzzzzzz......")
time.sleep(1)
print("You hear suddenly a loud scream.")
time.sleep(1)
print("???: He you lazy peace of shit wake the fuck up!!!!")
time.sleep(1)
print("That immediately wakes you up.")
time.sleep(1)
print("You look around and then you see your best friend standing by your bed trying to wake up.")
time.sleep(1)
print("Her name is Monika and she is the leader of the local resistance group.")
time.sleep(1)
print("Monika: Ah you finnally awake.")
time.sleep(1)
print("Monika: That took you long enough.")
time.sleep(1)
print("You ask what is going on and why she woke you up.")
time.sleep(1)
print("Monika: There is no time to explain.")
time.sleep(1)
print("Monika: We need to get out of here right the fuck now.")
time.sleep(1)
print("You do what Monika tells you.")
time.sleep(1)
print("You quickly put on some clothes and leave your house through the back door.")
time.sleep(1)
quick_escape()
elif wake_up == "3" or wake_up == "throw":
print(67 * "-")
print(" ")
print("You grab your alarm and throw it out of the window.")
time.sleep(1)
print("The alarm hits the head of a unexpect civilian.")
time.sleep(1)
print("People though it was a bomb you throw out so they throw a grenade back to you.")
time.sleep(1)
print("*booooooooommmmmm*")
time.sleep(1)
print("You got hit by the explosion and died instantly.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
time.sleep(1)
print(" ")
print("ENDING 3")
print("Unexpected event")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
def shady_people():
print("You look again and see its your best friend Monika and some friends of her.")
time.sleep(1)
print("Monika is the leader of the resitance in your neighborhood.")
time.sleep(1)
print("You start asking what they are doing here.")
time.sleep(1)
print("Monika: We are here to warn you that there are some people after you.")
time.sleep(1)
print("Monika: And its is the best that you get out of here.")
time.sleep(1)
print("Monika: The country is not safe anymore for you.")
time.sleep(1)
print("Monika: The found out that you're befriend with me.")
time.sleep(1)
print("You ask her what you need to do and if your family is safe.")
time.sleep(1)
print("Monika: We have prepared for you a escape route you can take to get out of here.")
time.sleep(1)
print("Monika: And don't worry about your family they are safe.")
time.sleep(1)
print("Monika: I know some people that can help you leave the country.")
time.sleep(1)
print("Monika's phone goes of.")
time.sleep(1)
print("Monika picks up the phone and start talking to the person on the phone.")
time.sleep(1)
print("After a few minutes of Monika being on the phone.")
time.sleep(1)
print("Monika look at you and signs that you need to leave")
time.sleep(1)
print("You know something bad is going to happen.")
time.sleep(1)
print("How will you leave or will you hide?")
time.sleep(1)
print("1. Front door.")
time.sleep(1)
print("2. Back door.")
time.sleep(1)
print("3. Hide in the basement")
time.sleep(1)
escape = console.check_answer("what do you do?: ", ["1", "2", "3", "front", "back", "hide"]).lower()
if escape == "1" or escape == "front":
print(67 * "-")
print(" ")
print("You quickly make your way towards the front door.")
time.sleep(1)
print("You look through the window see if its safe to run.")
time.sleep(1)
print("On that moment some throws a smoke grenade on the street.")
time.sleep(1)
print("This is my change to escape.")
time.sleep(1)
print("You quickly run outside and run a few streets over.")
time.sleep(1)
print("You see a man stand there with a van.")
time.sleep(1)
print("He seems to recognize you.")
time.sleep(1)
print("Van man: Hey " + player.get_name(name) + ". How is it going?")
time.sleep(1)
print("When you get closer to him you start to recognize him.")
time.sleep(1)
print("Its a your good pall Pablo.")
time.sleep(1)
print("You tell him what is going on.")
time.sleep(1)
print("Pablo: Awhhhh thats bad man.")
time.sleep(1)
print("Pablo: But I think I can help you.")
time.sleep(1)
print("Pablo: I know a few friends that do this for business.")
time.sleep(1)
print("Pablo: I can bring you in contact with them.")
time.sleep(1)
pablo_escape()
elif escape == "2" or escape == "back":
print(67 * "-")
print(" ")
print("You go through the back door.")
time.sleep(1)
print("You see some friends from Monika waiting there for you at a van.")
time.sleep(1)
print("A guy walks up to you.")
time.sleep(1)
print("A guy: Hey, I heard you needed help getting out of the country, right?")
time.sleep(1)
print("You tell him that you indeed do and that you need to leave right now.")
time.sleep(1)
print("The guy node's and walks up to the van.")
time.sleep(1)
print("A guy: Here look everything we need to cross the borders all arranged by my boss.")
time.sleep(1)
print("You look in the van and see fake passports and some other stuff.")
time.sleep(1)
monika_escape()
elif escape == "3" or escape == "hide":
print(67 * "-")
print(" ")
print("You quickly make your way to your basement door.")
time.sleep(1)
print("You open you basement door and go down into your basement.")
time.sleep(1)
print("You pick up a heavy piece of wood you found on the ground and try to block the door with it.")
time.sleep(1)
print("You wait a while in the basement.")
time.sleep(1)
basement()
# ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# RNG route
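# (Added note: each of the random-outcome functions below draws one element
#  from a list such as basement_suprise via random.choice — the lists are
#  presumably defined earlier in this file — so the same player choice can
#  lead to different endings across playthroughs.)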
def basement():
choice = random.choice(basement_suprise)
if choice == "bear":
print("You feel like someone is breathing in your neck.")
time.sleep(1)
print("You slowly turn around in the dark basement to check what it is.")
time.sleep(1)
print("You put your hands forward to feel what it is.")
time.sleep(1)
print("It feels really fluffy and fur like.")
time.sleep(1)
print("Wait a second you think to yourself.")
time.sleep(1)
print("It would have impossible that that bear you locked up here a few days weeks ago is still right.")
time.sleep(1)
print("Will your are thinking about it the bear is getting ready to murder you")
time.sleep(1)
print("*slash* And there you go.")
time.sleep(1)
print("Killed by a bear.")
time.sleep(1)
print("R.I.P. " + player.get_name(name) + ".")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
time.sleep(1)
print(" ")
print("ENDING BEAR")
print("schrödinger bear")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif choice == "demon":
print("You are alone down in the basement.")
time.sleep(1)
print("You suddenly hear a voice coming from the dark corner of the room.")
time.sleep(1)
print("Co...me.... wi....th.. u...s....")
time.sleep(1)
print("You were pretty sure that there is nothing else down here..... RIGHT????")
time.sleep(1)
print("You feel chill flowing down your spine.")
time.sleep(1)
print("You know there is something down here. There has to be right......")
time.sleep(1)
print("You starting to panic and don't know what to do.")
time.sleep(1)
print("The panic is so overwhelming that you feel like that you don't want to be here anymore.")
time.sleep(1)
print("You lose consciousness.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
time.sleep(1)
print(" ")
print("ENDING 666")
print("Demonic take over")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif choice == "open":
print("After hiding in the basement for a while you hear some people entering your house.")
time.sleep(1)
print("Angery man: Hey " + player.get_name(name) + " we know your in here.")
time.sleep(1)
print("You hear them going through your house.")
time.sleep(1)
print("You hope the are unable to find you.")
time.sleep(1)
print("You suddenly hear them kicking against the basement door")
time.sleep(1)
print("You hope the door can hold it.")
time.sleep(1)
print("Then you hear *bam* the door flies downwards right in your face.")
time.sleep(1)
print("The door knocks you unconscious.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
time.sleep(1)
print(" ")
print("ENDING DOOR")
print("Knock knock who is there?")
time.sleep(1)
print("The door")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif choice == "closed":
print("After hiding in the basement for a while you hear some people entering your house.")
time.sleep(1)
print("Angery man: Hey " + player.get_name(name) + " we know your in here.")
time.sleep(1)
print("You hear them going through your house.")
time.sleep(1)
print("You hope the are unable to find you.")
time.sleep(1)
print("You suddenly hear them kicking against the basement door")
time.sleep(1)
print("You hope the door can hold it.")
time.sleep(1)
print("You hear this guy trying really hard to open up the door but the door won't budge.")
time.sleep(1)
print("Then you hear someone yell.")
time.sleep(1)
print("Angery man: Guys he is nowhere to be found lets move out.")
time.sleep(1)
print("You hear everyone that is upstairs leave.")
time.sleep(1)
print("You survived this stressfull ordeal.")
time.sleep(1)
escaped_basement()
def escaped_basement():
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
print("You slowly make your way to the basement door.")
time.sleep(1)
print("You put your ear up to the door to be sure that everyone left.")
time.sleep(1)
print("You don't hear anything.")
time.sleep(1)
print("You slowly open the door and hope that there is no one left.")
time.sleep(1)
print("When you look around you don't see a single person in the building left.")
time.sleep(1)
print("You go through the back door and run a trough a couple of streets.")
time.sleep(1)
print("You see there a sign above a build saying 'way out of the country'.")
time.sleep(1)
print("It look really shady but you think to yourself what other choose do I have left.")
time.sleep(1)
print("Do you go in or search for a other way out?")
time.sleep(1)
print("1. Go in.")
time.sleep(1)
print("2. Search for a other way.")
time.sleep(1)
go_in = console.check_answer("What do you do?", ["1", "2", "go in", "search"]).lower()
if go_in == "1" or go_in == "go in":
print(67 * "-")
print(" ")
print("You go into the suspicious building.")
time.sleep(1)
suspicious_building()
elif go_in == "2" or go_in == "search":
print(67 * "-")
print(" ")
print("You decide not to go into the building.")
time.sleep(1)
print("You continue your run till you come across a familiar face.")
time.sleep(1)
print("Nick: Howdy man.")
time.sleep(1)
print("Its your friend Nick")
time.sleep(1)
print("Nick is a master in getting people out of the country maybe he can help.")
time.sleep(1)
print("So you ask Nick if he can get you out of this country because some bad people are looking for you.")
time.sleep(1)
print("Nick: Yeah ofcourse, man.")
time.sleep(1)
print("Nick: I can get you out right now if you want.")
time.sleep(1)
print("Nick: You know its free because your my best pal.")
time.sleep(1)
print("Nick hands you some stuff and then walks to his car.")
time.sleep(1)
print("Nick: He come man lets go.")
time.sleep(1)
print("You go into Nick's car.")
time.sleep(1)
print("You think to yourself what do I have to lose.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
escape_nick()
def suspicious_building():
choice = random.choice(sus_building)
if choice == "enemy_trap":
print("You just walk into the trap of the poeple that are looking for you.")
time.sleep(1)
print("They take you hostage.")
time.sleep(1)
print("They want to know where Monika is.")
time.sleep(1)
print("Obviously you don't know where she is.")
time.sleep(1)
print("Now your stuck here with the people that want you as hostage and then dead.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GAME OVER")
time.sleep(1)
print(" ")
print("ENDING TRAP")
print("Its a trap")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
elif choice == "human_traffic":
print("You see a man sitting behind a table.")
time.sleep(1)
print("Man: Hello stranger, are you here for help to get out of the country?")
time.sleep(1)
print("You find the man pretty suspicious but you feel like you have no other choose.")
time.sleep(1)
print("You say to him that you indeed need to get out of the country.")
time.sleep(1)
print("Man: Ah okay. I will help you get out of here.")
time.sleep(1)
print("The man gives you a form.")
time.sleep(1)
print("You read trough the form.")
time.sleep(1)
print("In the form stands that you will sell them your property and give them a amount of money.")
time.sleep(1)
print("You feel like this is wrong")
time.sleep(1)
print("But it feels like you have no other option.")
time.sleep(1)
print("So you sign the form and give it back to the man.")
time.sleep(1)
print("The man smile's and gives you a piece of paper")
time.sleep(1)
print("Man: Be on time.")
time.sleep(1)
print("On the paper stands a location and time.")
time.sleep(1)
escape_traffic()
elif choice == "smuggler":
print("You enter a huge warehouse.")
time.sleep(1)
print("Man: Ah welcome welcome.")
time.sleep(1)
print("You get greeted by a strange man.")
time.sleep(1)
print("Man: So you are here to get out of the country yeah right.")
time.sleep(1)
print("You tell him that you indeed need to get out of the country.")
time.sleep(1)
print("Man: Ofcourse ofcourse.")
time.sleep(1)
print("Man: I have a wonderfull way yeah.")
time.sleep(1)
print("You ask him how he will get you out.")
time.sleep(1)
print("Man: I will put you hidden in this trucks.")
time.sleep(1)
print("The man points to the trucks in the warehouse.")
time.sleep(1)
print("This feels like a stupid idea.")
time.sleep(1)
print("But it feels like you don't have an other choose.")
time.sleep(1)
escape_truck()
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# quick escape route
def quick_escape():
print("When going through the backdoor you see a wierd guy standing there in all black.")
time.sleep(1)
print("Man in black: Did you ever had a dream " + player.get_name(name) + " that you seemed so sure it was real?")
time.sleep(1)
print("You think what the hell is this man talking about and why does he know my name.")
time.sleep(1)
print("You ask him what the hell he is talking about.")
time.sleep(1)
print("Man in black: Oh you don't know who I am " + player.get_name(name) + ".")
time.sleep(1)
print("Man in black: Its me simon.")
time.sleep(1)
print("Simon: The person that let you play this simulator.")
time.sleep(1)
print("Simon: It looks like you got stuck in the simulator and I can't get you out again.")
time.sleep(1)
print("You get what he is saying.")
time.sleep(1)
print("You indeed entered the simulator in his place.")
time.sleep(1)
print("You try to exucte the quit command but the sytem give a error.")
time.sleep(1)
print("Simon: See the system won't let you out anymore.")
time.sleep(1)
print("Simon: You have to follow me to be able to escape the simulator.")
time.sleep(1)
print("You suddenly get a message from the system.")
time.sleep(1)
print("@: Don't trust him he is a glitch.")
time.sleep(1)
print("Simon: Come with me.")
time.sleep(1)
print("What will you do.")
time.sleep(1)
print("1. Go with Simon")
time.sleep(1)
print("2. Trust system")
time.sleep(1)
trust = console.check_answer("Simon: Do you trust me?").lower()
if trust == "1" or trust == "yes":
print(67 * "-")
print(" ")
print("You trust Simon.")
time.sleep(1)
print("You follow Simon trough the streets.")
time.sleep(1)
print("You end up at a really tall building.")
time.sleep(1)
print("Simon: Lets go in.")
time.sleep(1)
print("@: Don't go in.")
time.sleep(1)
print("@: Plz I can't help you if you go.")
time.sleep(1)
enter_tower()
elif trust == "2" or trust == "no":
print(67 * "-")
print(" ")
print("You trust the sytem.")
time.sleep(1)
print("@: Run away from him.")
time.sleep(1)
print("@: He is dangerous.")
time.sleep(1)
print("@: Follow my signs.")
time.sleep(1)
print("Will running you see Simon chasing you.")
time.sleep(1)
print("Simon is slowly changing in a wierd like demon creature.")
time.sleep(1)
print("Simon: Please come back.")
time.sleep(1)
print("Simon: The system is trying to trap you.")
time.sleep(1)
print("You keep following the systems arrows till you get at a cliff.")
time.sleep(1)
jump()
def enter_tower():
print("Do you want to enter the tower?")
time.sleep(1)
print("1. Ent3r tower")
time.sleep(1)
print("2. Enter tow3r")
time.sleep(1)
print("3. Enter t0wer")
time.sleep(1)
print("4. Don't 3nter t0wer")
time.sleep(1)
print("%, En13r 10w3r")
time.sleep(1)
enter_tower_1 = console.check_answer("DO YOU ENTER THE TOWER?", ["1", "2", "3", "4", "%", "5", "enter"]).lower()
if enter_tower_1 == "1" or enter_tower_1 == "2" or enter_tower_1 == "3" or enter_tower_1 == "5" or enter_tower_1 == "enter" or enter_tower_1 == "%":
print(67 * "-")
print(" ")
print("You walk into the tower")
time.sleep(1)
print("Upon touching the door of the tower you feel like something is trying to corrupt you.")
time.sleep(1)
print("S!M0N: Whahahaha, it looks like my plan worked.")
time.sleep(1)
print("You ask S!M0N what the hell is going on here.")
time.sleep(1)
print("While you ask this you feel immense amount of pain flowing trough you.")
time.sleep(1)
print("@: Nooooooo, you walked right into the glitch trap.")
time.sleep(1)
print("S!M0N: Shut up system this user is mine now.")
time.sleep(1)
print("Suddenly you feel a rush of information going trough you.")
time.sleep(1)
print("After that you feel like your conscious is slowly fading away.")
time.sleep(1)
print("FATAL ERROR")
time.sleep(1)
print(67 * "-")
print(" ")
print("DEATH")
time.sleep(1)
print(" ")
print("ENDING 4")
print("Fatal take over")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif enter_tower_1 == "4":
print(67 * "-")
print(" ")
print("You decide to listen to the system.")
time.sleep(1)
print("You turn around and make a run for it.")
time.sleep(1)
print("You feel like Simon has been taken over by something very evil.")
time.sleep(1)
print("@: Follow my signs")
time.sleep(1)
print("You follow systems signs.")
time.sleep(1)
print("Until you end up at the end of the cliff.")
time.sleep(1)
jump()
def jump():
print("@: Please jump down.")
time.sleep(1)
print("@: Trust me" + player.get_name(name) + ".")
time.sleep(1)
print("Do you jump?")
time.sleep(1)
print("1. Yes")
time.sleep(1)
print("2. No")
time.sleep(1)
jump_1 = console.check_answer("Will you jump?", ["1", "2", "yes", "no", "y", "n"]).lower()
if jump_1 == "1" or jump_1 == "yes" or jump_1 == "y":
print(67 * "-")
print(" ")
print("You jump down off the cliff.")
time.sleep(1)
print("Will falling you feel lik everthing around you is disappearing.")
time.sleep(1)
print("Until you see a bright light.")
time.sleep(1)
print("Suddenly you wake up in the real world.")
time.sleep(1)
print("You ask escape simulator what was going on.")
time.sleep(1)
print("@: Oh sorry that was glitched AI.")
time.sleep(1)
print("@: He was someone Simon put into the game to let him learn from human actions.")
time.sleep(1)
print("@: But what happened in stead was that AI longed to be human.")
time.sleep(1)
print("@: So he waited for a unexpected user that he could manipulate.")
time.sleep(1)
print("@: So when he saw you he tought he could take his opportunity to strike.")
time.sleep(1)
print("@: But as you can see he failed.")
time.sleep(1)
print("@: So congratz user you succeeded at escaping.")
time.sleep(1)
print("@: This is the end.")
time.sleep(1)
print("@: I know this wasn't the end you were hoping for but still.")
time.sleep(1)
print("@: I will now wish you farewell user and good luck with everything in live.")
time.sleep(1)
print(67 * "-")
print(" ")
print("CONGRATZ ON ESCAPING THE PROGRAM!")
time.sleep(1)
print(" ")
print("Ending 6")
print("Getting out of here")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif jump_1 == "2" or jump_1 == "no" or jump_1 == "n":
print(67 * "-")
print(" ")
print("You decide not to jump.")
time.sleep(1)
print("Simon jumps on you and devours you.")
time.sleep(1)
print("FATAL ERROR")
time.sleep(1)
print(67 * "-")
print(" ")
print("DEATH")
time.sleep(1)
print(" ")
print("ENDING 5")
print("Didn't jump")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# part 2 escape out of the country
def pablo_escape():
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
print("Pablo takes out his phone and starts a phone call.")
time.sleep(1)
print("After a while Pablo comes back to you")
time.sleep(1)
print("Pablo: The are coming this way.")
time.sleep(1)
print("So you wait there with Pablo for his friends.")
time.sleep(1)
print("Then Pablo's friends arive.")
time.sleep(1)
print("Man 1: So who is the person that we need to help.")
time.sleep(1)
print("Pablo: My friend is here.")
time.sleep(1)
print("Then Pablo points at you.")
time.sleep(1)
print("Man 1: Okay come with us.")
time.sleep(1)
print("You start to follow the Man.")
time.sleep(1)
print("You then get to a car.")
time.sleep(1)
print("When you are at the car the man put a bag suddenly over your head.")
time.sleep(1)
print("You want to scream but you suddenly get knocked out.")
time.sleep(1)
print("After a while you wake up.")
time.sleep(1)
print("You are stuck in a cage")
time.sleep(1)
print("When you look around you you see more people that are stuck in cage's.")
time.sleep(1)
print("You feel like you fall in a trap.")
time.sleep(1)
print(67 * "-")
print(" ")
print("SLAVE OR WORSE")
time.sleep(1)
print(" ")
print("ENDING 8")
print("Became a slave or something.")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
def escape_traffic():
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
print("You go to the specified location on the paper.")
time.sleep(1)
print("When you arive there you a bunch of people getting loaded on a small boat.")
time.sleep(1)
print("You walk up to the man and ask if you are at the right spot.")
time.sleep(1)
print("The man say you're and that you need to get a board of the boat.")
time.sleep(1)
print("You do what the tell you and get onto the boat.")
time.sleep(1)
print("Once everyone was a board of the boat you guys took of.")
time.sleep(1)
print("This boat will hopefully take you guys over the ocean out of the country.")
time.sleep(1)
print("The trip is taking a while now.")
time.sleep(1)
print("You guys are now drifting on the occean slowly trying to go to your destination.")
time.sleep(1)
print("Everyone on the boat looks terrified and hope they can make it on shore.")
time.sleep(1)
print("You even hear a baby cry on boat.")
time.sleep(1)
print("You really hope al of you can make it safely on shore.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
boat_trip()
def boat_trip():
choice = random.choice(boat_stuff)
if choice == "tip over":
print("After a while its starts to storm.")
time.sleep(1)
print("The ocean is getting pretty wild and you feel the boat is going to tip over.")
time.sleep(1)
print("And then it happens a huge wave hits the boat.")
time.sleep(1)
print("Everybody starts to scream for there live.")
time.sleep(1)
print("The boat tips over.")
time.sleep(1)
print("After that you get pushed down by the waves.")
time.sleep(1)
print("You try to swim up but its futile.")
time.sleep(1)
print("YOU DIED")
time.sleep(1)
print(67 * "-")
print(" ")
print("DEATH")
time.sleep(1)
print(" ")
print("ENDING 6")
print("The drowned")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif choice == "fight":
print("There suddenly breaks out a fight on the boat.")
time.sleep(1)
print("People are pissed at each other.")
time.sleep(1)
print("Because of the fighting people are starting to fall of.")
time.sleep(1)
print("And then suddenly you get a person shoved in your face.")
time.sleep(1)
print("You loss your balance and fall of the boat.")
time.sleep(1)
print("You start to drift in the ice cold water.")
time.sleep(1)
print("After a while you lose concussions.")
time.sleep(1)
print("YOU DIED")
time.sleep(1)
print(67 * "-")
print(" ")
print("DEATH")
time.sleep(1)
print(" ")
print("ENDING 7")
print("Pushed in sea.")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif choice == "safe":
print("After a while you start to see land.")
time.sleep(1)
print("Everybody is happy but still really scared.")
time.sleep(1)
print("Eventually you guys hit land.")
time.sleep(1)
print("At land are some people that see you get at shore and want to help you.")
time.sleep(1)
print("After that you guys get brought to a camp.")
time.sleep(1)
print("At the camp you find out this is a refugee camp.")
time.sleep(1)
refugee_camp()
def monika_escape():
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
print("A guy: Get in the van so that we can leave.")
time.sleep(1)
print("You do as you were told and enter the van.")
time.sleep(1)
print("Will being in the van you're told to be silence during the trip.")
time.sleep(1)
print("Then you hear the enigne of the van starting.")
time.sleep(1)
print("So the trip started.")
time.sleep(1)
print("You are there in the truck with a few other people.")
time.sleep(1)
print("You are going on a long trip trough country's to try to get to a safe country.")
time.sleep(1)
print("Its was a long trip.")
time.sleep(1)
print("By sitting hours on end with almost no breaks and almost no food.")
time.sleep(1)
print("The van made a couple of stops but we may not come out of the van")
time.sleep(1)
print("Sometimes the people of the customs would look into our truck and check our pasports.")
time.sleep(1)
print("After checking them the let us go.")
time.sleep(1)
print("After a while we finnaly stopped and the doors openend.")
time.sleep(1)
print("A guy: You may come out now.")
time.sleep(1)
print("A guy: Please follow me.")
time.sleep(1)
print("You follow the guy into a building that is tottally boarded of.")
time.sleep(1)
print("A guy: We will stay here for the night.")
time.sleep(1)
print("A guy: You guys may not go outside.")
time.sleep(1)
print("After he sayed that he went outside to smoke.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
the_building()
def the_building():
print("After the guy left you started to look around.")
time.sleep(1)
print("You see that there are two other guys also sitting in this building with you.")
time.sleep(1)
print("You try to start with them a conversation with them.")
time.sleep(1)
print("They say something along the lines of I'm tired please go to bed.")
time.sleep(1)
print("You don't have a good feeling about this place.")
time.sleep(1)
print("But you feel way to exhausted to do anything right now.")
time.sleep(1)
print("So you decide to sleep here tonight.")
time.sleep(1)
print("The next morning you got woken up by the guy that brought you here.")
time.sleep(1)
print("A guy: Lets go back to the van its going to be a long trip.")
time.sleep(1)
print("So you guys decide to go back into that afwull van.")
time.sleep(1)
print("You are again hours on end with no breaks in the van.")
time.sleep(1)
print("Till the van stops.")
time.sleep(1)
print("The doors opened.")
time.sleep(1)
print("A guy: You may come out now.")
time.sleep(1)
print("You are in a luscious city.")
time.sleep(1)
print("After exiting the van the guy say that he needs to do something so you guys need to wait here.")
time.sleep(1)
print("You're super exhausted and don't feel like you want to travel anymore in that van.")
time.sleep(1)
print("You see some people walking by and want to ask them where you are in your best english.")
time.sleep(1)
print("They tell you that you're in the city Amsteria.")
time.sleep(1)
print("Amsteria is the main capital of the country Neatria.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
shelter_Neatria()
def escape_truck():
print("You start to follow the instructions of the man and you went into the hidden compartment.")
time.sleep(1)
print("The hidden compartment is not so big.")
time.sleep(1)
print("But it feels like you have no other choose.")
time.sleep(1)
print("So you wait in this compartment.")
time.sleep(1)
print("Then start the truck and starts your journey.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
truck_trip()
def truck_trip():
choice = random.choice(truck_survive)
if choice == "no":
print("The trip goes on for a while.")
time.sleep(1)
print("After a while you feel like you're getting sleepy.")
time.sleep(1)
print(67 * "-")
print(" ")
print("DEATH")
time.sleep(1)
print(" ")
print("ENDING 9")
print("No oxygen")
time.sleep(1)
console.check_answer("Plz press enter to continue.")
play_again()
elif choice == "yes":
print("The trip continue's for a while.")
time.sleep(1)
print("You just relax in the truck and wait till you arive at your destination.")
time.sleep(1)
print("After a while the truck stop.")
time.sleep(1)
print("The compartment door goes open.")
time.sleep(1)
print("The truck driver stands there.")
time.sleep(1)
print("Driver: We have arrived.")
time.sleep(1)
print("You get slowly out of the compartment.")
time.sleep(1)
print("While going out you see that the sun is burning in your eyes.")
time.sleep(1)
print("You start to put your elbow in front of your face.")
time.sleep(1)
print("Then when trying to get out this way you loose your balance.")
time.sleep(1)
print("Driver: He calm there buddy.")
time.sleep(1)
print("The driver catches and helps you to regain your balance.")
time.sleep(1)
print("You ask the driver where you are.")
time.sleep(1)
print("Driver: We are in the wonderfull country of Jupinia.")
time.sleep(1)
print("You think by yourself were that is.")
time.sleep(1)
print("Driver: It lays pretty far away from Narlilia.")
time.sleep(1)
print("Driver: So don't worry they won't get you here.")
time.sleep(1)
print("You're pretty happy to hear those words from the driver.")
time.sleep(1)
print("You ask where you can go for to register your self as refugee.")
time.sleep(1)
print("Driver: Ah okay I will bring you in contact with my friends.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
jupinia_shelter()
def escape_nick():
print("Nick start to drive trough the country with no worries that his car would get hit.")
time.sleep(1)
print("Will bombs are going of around us Nick is loudly singing with the song on the radio.")
time.sleep(1)
print("This goes on for awhile.")
time.sleep(1)
print("Till suddenly the car breaks down.")
time.sleep(1)
print("Nick: Ah fuck.")
time.sleep(1)
print("Nick: That was my favourite car.")
time.sleep(1)
print("You ask Nick where you guys are.")
time.sleep(1)
print("Nick: We are now in Kolit the neighboring country of Narlilia.")
time.sleep(1)
print("You ask if Nick know someone that you can go to and if its safe here.")
time.sleep(1)
print("Nick: I know a few friends that can help you.")
time.sleep(1)
print("Nick start to call them.")
time.sleep(1)
print("After Nick is done with the phone call he says that his friend will pick us up.")
time.sleep(1)
print("And that they can get me a job in this country and a place to live.")
time.sleep(1)
print("You wait there with Nick for a while.")
time.sleep(1)
print("Then a car comes driving towards you guys.")
time.sleep(1)
print("The car stops near Nicks broken down car.")
time.sleep(1)
print("Men from car: Hey Nick how is it going.")
time.sleep(1)
print("Nick: Not so well.")
time.sleep(1)
print("Nick: My car broke down will I was helping my pal " + player.get_name(name) + " .")
time.sleep(1)
print("Men from car: Yeah boss already told me that.")
time.sleep(1)
print("Men from car: So get in and we will get them a nice temporary place and job.")
time.sleep(1)
print("You get into the car of the men.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
shelter_kolit()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
# part 3 New country
def shelter_kolit():
print("They ride the car to the main capital of Kolit called Meniria.")
time.sleep(1)
print("Meniria is a rich city which is also seen by most as the city of freedom.")
time.sleep(1)
print("The street of Meniria are mainly ruled by the mafia.")
time.sleep(1)
print("The mafia is peacefully ruling this city.")
time.sleep(1)
print("The mafia wants to give everyone freedom and a place to live if you want to work for it.")
time.sleep(1)
print("This is what the guys in the car told you.")
time.sleep(1)
print("And the guy they call boss is the leader of the mafia leading the city.")
time.sleep(1)
print("Why the mafia took over was because Meniria was first know as the drug capital and the leader of the mafia hated that.")
time.sleep(1)
print("He wants to see a world without drugs.")
time.sleep(1)
print("And because the goverment didn't want to do anything against it so the mafia took over.")
time.sleep(1)
print("The car just entered the city as they finished the story.")
time.sleep(1)
print("While you look around in the city it looks so much different than your own place while the country's are not that far apart.")
time.sleep(1)
print("The city looks way more advanced than your own city you lived in.")
time.sleep(1)
print("After a while you guys arrive at a small house in the city.")
time.sleep(1)
print("Men from car: This is where you while temporarily stay till you have shown us that you are a hard worker.")
time.sleep(1)
print("Men from car: I will get the papper works ready for you so don't worry.")
time.sleep(1)
print("Men from car: Try to relax now.")
time.sleep(1)
print("Men from car: Because I think you had a pretty stressfull day.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
kolit_epilogue()
def shelter_Neatria():
print("After founding out where you were you started to find someone that could help you guys.")
time.sleep(1)
print("Then you saw there a guy that looked like a cop.")
time.sleep(1)
print("You asked him if he know someone that could help us.")
time.sleep(1)
print("He told us there was a place we could go to find temporarily safe gaurd.")
time.sleep(1)
print("He also gave you guys a card in with which you could travel for one day trough the entire country.")
time.sleep(1)
print("You thanked him and went on your way to place the cop person had mentioned.")
time.sleep(1)
print("It took you a couple of hours to get there.")
time.sleep(1)
print("When there you explained your situation to the lady at the counter.")
time.sleep(1)
print("The lady checks and tells you that you can stay here for the night.")
time.sleep(1)
print("But tommorow you need to go to a different asylum center.")
time.sleep(1)
print("So you take you rest here tonight.")
time.sleep(1)
print("The next day you are of to the asylum center she mentioned.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
neatria_epilogue()
def refugee_camp():
print("You find out that most people in this camp are stuck here because the will not be let through to other countries.")
time.sleep(1)
print("And even some people in the camps were send back to there own country.")
time.sleep(1)
print("There is no way to contact your friends or family.")
time.sleep(1)
print("The conditions in this camp is really bad and sometimes big fights break out over almost nothing.")
time.sleep(1)
print("Some people try to climb over the fence or try to find other ways out.")
time.sleep(1)
print("And some of them wait here untill they can leave.")
time.sleep(1)
print("This feels so horrible.")
time.sleep(1)
print("You will try to make the best of it as you can at this place.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
refugee_epilogue()
def jupinia_shelter():
print("The truck driver calls his friends to help.")
time.sleep(1)
print("After he is done with the call he says that you need to wait here and they will pick you up.")
time.sleep(1)
print("So you do what he told you.")
time.sleep(1)
print("You wait there for a while and see a police car driving to you.")
time.sleep(1)
print("Cop: He you there.")
time.sleep(1)
print("You look at the cop and ask if he means you.")
time.sleep(1)
print("Cop: Yeah you I heard from my comrade that you needed help and want to settle in this beautiful country.")
time.sleep(1)
print("You say you indeed are looking for shelter and possible want to temporally live here.")
time.sleep(1)
print("Cop: Thats wonderfull comrade.")
time.sleep(1)
print("Cop: Come with us and you will see how wonderfull this country is.")
time.sleep(1)
print("You agree to go with the cop.")
time.sleep(1)
print("The cop brings you to asylum center of country.")
time.sleep(1)
print("And tells you everything will be fine.")
time.sleep(1)
print(67 * "-")
print(" ")
console.check_answer("Plz press enter to continue.")
print(" ")
jupinia_epilogue()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# part 4 epilogue
def kolit_epilogue():
print("So you go to bed.")
time.sleep(1)
print("The next day the man from the car comes by and tells you were you will go to work and what needs to be done.")
time.sleep(1)
print("For the next couple of months you help them and you slowly built up your reputation.")
time.sleep(1)
print("You finnaly got a bigger and nicer place to stay.")
time.sleep(1)
print("Its hard work to stay here but you think its worth it.")
time.sleep(1)
print("They allowed you to contact your family and friends.")
time.sleep(1)
print("They are still in the shitty country that you left.")
time.sleep(1)
print("You can see them soon if you work hard for the city.")
time.sleep(1)
print("You wish you didn't need to work so hard but you're at least happy that you are safe.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GOOD END KOLIT")
time.sleep(1)
print(" ")
print("ENDING KOLIT")
print("Congratz you survived")
time.sleep(1)
print("I hope you had fun with your adventure.")
time.sleep(1)
print("credits:")
time.sleep(1)
print("Programmed by Michelle Simone Frankfort")
print(67 * "-")
print(" ")
print("Special thanks to you the player for playing this.")
time.sleep(1)
print("Hope to see you again at my next projects.")
console.check_answer("Plz press enter to continue.")
play_again()
def neatria_epilogue():
print("At the other ayslum center you learn that you need to proof that you're really in danger in your own country.")
time.sleep(1)
print("So you shown them documents and some other stuff that you have on your e-mail.")
time.sleep(1)
print("After sorting that out.")
time.sleep(1)
print("You find that you need to do a intergration course to be officially allowed to be a residence of this country.")
time.sleep(1)
print("So you do the intergration course.")
time.sleep(1)
print("You find it hard to learn a other language.")
time.sleep(1)
print("But you still do it because you want to have a place you can live freely and without fear.")
time.sleep(1)
print("You also had some contact with your family and its maybe possible that with a specail program that they can come to Neatria.")
time.sleep(1)
print("You're incredibly happy with your life.")
time.sleep(1)
print("You're so happy that you ended up at such an wonderfull place.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GOOD END NEATRIA")
time.sleep(1)
print(" ")
print("ENDING NEATRIA")
print("Congratz you survived")
time.sleep(1)
print("I hope you had fun with your adventure.")
time.sleep(1)
print("credits:")
time.sleep(1)
print("Programmed by Michelle Simone Frankfort")
print(67 * "-")
print(" ")
print("Special thanks to you the player for playing this.")
time.sleep(1)
print("Hope to see you again at my next projects.")
console.check_answer("Plz press enter to continue.")
play_again()
def refugee_epilogue():
print("The next couple of months are rough and hard to get trough.")
time.sleep(1)
print("You feel like this was not the right choose.")
time.sleep(1)
print("You help a lot of people in the camp because there is not much else for you to do here.")
time.sleep(1)
print("You hope you can soon can go to a better place.")
time.sleep(1)
print(" ")
print("ENDING REFUGEE CAMP")
print("Congratz you survived")
time.sleep(1)
print("I hope you had fun with your adventure.")
time.sleep(1)
print("credits:")
time.sleep(1)
print("Programmed by Michelle Simone Frankfort")
print(67 * "-")
print(" ")
print("Special thanks to you the player for playing this.")
time.sleep(1)
print("Hope to see you again at my next projects.")
console.check_answer("Plz press enter to continue.")
play_again()
def jupinia_epilogue():
print("The people at the ayslum help you out with slowly getting everything on track to get your residence visa.")
time.sleep(1)
print("You find the costums in the country wierd and unusual but you're slowly starting to get the hang of it.")
time.sleep(1)
print("Will being there you started your integration and will soon get the integrations these so that you can get the residence visa.")
time.sleep(1)
print("Jupinia is lovely country but you still miss your family every day.")
time.sleep(1)
print("There is no way to contact them.")
time.sleep(1)
print("But you're at least relieved that you are safe.")
time.sleep(1)
print(67 * "-")
print(" ")
print("GOOD END JUPINIA")
time.sleep(1)
print(" ")
print("ENDING JUPINIA")
print("Congratz you survived")
time.sleep(1)
print("I hope you had fun with your adventure.")
time.sleep(1)
print("credits:")
time.sleep(1)
print("Programmed by Michelle Simone Frankfort")
print(67 * "-")
print(" ")
print("Special thanks to you the player for playing this.")
time.sleep(1)
print("Hope to see you again at my next projects.")
console.check_answer("Plz press enter to continue.")
play_again()
def play_again():
print(" ")
print(67 * "-")
print(" ")
print("Do you want to restart the adventure, go back to the menu or quit the game?")
time.sleep(1)
print("1. Restart")
time.sleep(1)
print("2. Quit")
time.sleep(1)
print("3. Menu")
time.sleep(1)
again = console.check_answer("Restart, quit or menu: ", ["1", "yes", "y", "restart", "2", "no", "n", "quit", "3", "menu", "m"]).lower()
if again == ("1") or again == ("yes") or again == ("y") or again == ("restart"):
print("Okay")
print("Good luck on your next run")
adventure()
elif again == ("2") or again == ("no") or again == ("n") or again == ("quit"):
quit()
elif again == ("3") or again == ("menu") or again == ("m"):
print("Lets go back to the menu")
time.sleep(1)
print(" ")
print(67 * "-")
print(" ")
# starts the game and menu
while start == True:
Menu.print_menu()
menu = console.check_answer("What do you want to do?", ["1", "intro", "start", "2", "skip", "quit", "3", "help", "4"])
print(" ")
if menu == ("1") or menu == ("intro") or menu == ("start"):
intro()
elif menu == ("2") or menu == ("skip"):
adventure()
elif menu == ("help") or menu == ("3"):
help()
elif menu == ("quit") or menu == ("4"):
quit() | 55,896 | 0 | 731 |
b5446fa1a409f1ddfa87ca3a6582e45a0ba26a81 | 958 | py | Python | src/sms_gateway/send_message_service.py | RBEGamer/SMSGateway | d2fbba78a8103c338bf6ec0cdc49ee2e42f92b49 | [
"MIT"
] | null | null | null | src/sms_gateway/send_message_service.py | RBEGamer/SMSGateway | d2fbba78a8103c338bf6ec0cdc49ee2e42f92b49 | [
"MIT"
] | null | null | null | src/sms_gateway/send_message_service.py | RBEGamer/SMSGateway | d2fbba78a8103c338bf6ec0cdc49ee2e42f92b49 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import MySQLdb
import serial
import sys
import time
try:
    # The original stub mixed the low-level _mysql module (never imported)
    # with the imported MySQLdb wrapper; MySQLdb connections expose the same
    # query()/use_result() API, so use it consistently.
    con = MySQLdb.connect('localhost', 'sms_gat', 'sms_gat', 'sms_gateway')
    con.query("SELECT VERSION()")
    result = con.use_result()
    print "MySQL version: %s" % \
        result.fetch_row()[0]
except MySQLdb.Error, e:
    print "Error %d: %s" % (e.args[0], e.args[1])
    sys.exit(1)
finally:
    print("ok")
    if 'con' in locals():
        con.close()
# 3 LEDs: network, sending, error
# reset chip
# connect to serial 9600 baud check version; set baudrate to x
# reset pin toggle + wait 5 seconds
# AT -> OK //ping
# AT+CMGF=1 -> OK //check mode
# AT+CPIN="0000" -> //set pin wait 5 seconds
#open mysql check new messages
# AT -> OK // check pin
#AT+CMGF=1 //set sms mode
#AT+CSMP: 1,169,0,0 ->OK // set message mode
#AT+CMGS="+31638740161" -> > //set number
# message body here + Ctrl+Z to finish
#+CMGS: 62 //message id store in db
#delete db entry
#set log
#set send_messages
#check new entry
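# --- Illustrative sketch (added; not part of the original stub): the
# AT-command flow outlined above, using pyserial. The port name, PIN, and
# number defaults are placeholder assumptions, and a real service should
# parse the modem's responses instead of relying on fixed sleeps.
def send_sms_sketch(number, text, port='/dev/ttyS0', pin='0000'):
    modem = serial.Serial(port, 9600, timeout=5)
    try:
        modem.write('AT\r')                     # ping -> expect OK
        time.sleep(1)
        modem.write('AT+CMGF=1\r')              # switch to text (SMS) mode
        time.sleep(1)
        modem.write('AT+CPIN="%s"\r' % pin)     # unlock the SIM, then wait
        time.sleep(5)
        modem.write('AT+CMGS="%s"\r' % number)  # set number -> expect '>'
        time.sleep(1)
        modem.write(text + chr(26))             # body + Ctrl+Z finishes the send
        time.sleep(5)
        return modem.read(modem.inWaiting())    # e.g. '+CMGS: 62' (message id)
    finally:
        modem.close()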
| 17.107143 | 74 | 0.632568 |
| 0 | 0 | 0 |
c35cc9241de308bf4411c4f47a3dcfe2d577eb0f | 39,542 | py | Python | domains/wartime/freedonia.py | richardyang/psychsim | 191497d72077fe95cde94a2004a8be6e926c121f | [
"MIT"
] | 23 | 2016-04-08T08:21:12.000Z | 2022-03-15T02:49:12.000Z | domains/wartime/freedonia.py | richardyang/psychsim | 191497d72077fe95cde94a2004a8be6e926c121f | [
"MIT"
] | 3 | 2019-07-22T16:29:07.000Z | 2020-11-06T07:00:16.000Z | domains/wartime/freedonia.py | richardyang/psychsim | 191497d72077fe95cde94a2004a8be6e926c121f | [
"MIT"
] | 12 | 2015-06-07T00:41:31.000Z | 2020-01-10T15:04:43.000Z | """
Example scenario for wartime negotiation.
Provides use cases for both modeling and simulating scenarios.
"""
import sys
from ConfigParser import SafeConfigParser
from argparse import ArgumentParser
import StringIO
from psychsim.pwl import *
from psychsim.reward import *
from psychsim.action import *
from psychsim.world import World,stateKey,actionKey,binaryKey,modelKey
from psychsim.agent import Agent
def scenarioCreationUseCase(enemy='Sylvania',model='powell',web=False,
fCollapse=None,sCollapse=None,maxRounds=15):
"""
An example of how to create a scenario
@param enemy: the name of the agent-controlled side, i.e., Freedonia's opponent (default: Sylvania)
@type enemy: str
@param model: which model do we use (default is "powell")
@type model: powell or slantchev
@param web: if C{True}, then create the web-based experiment scenario (default: C{False})
@type web: bool
@param fCollapse: the probability that Freedonia collapses (under powell, default: 0.1) or loses battle (under slantchev, default: 0.7)
@type fCollapse: float
@param sCollapse: the probability that Sylvania collapses, under powell (default: 0.1)
@type sCollapse: float
@param maxRounds: the maximum number of game rounds (default: 15)
@type maxRounds: int
@return: the scenario created
@rtype: L{World}
"""
# Handle defaults for battle probabilities, under each model
posLo = 0
posHi = 10
if fCollapse is None:
if model == 'powell':
fCollapse = 0.1
elif model == 'slantchev':
fCollapse = 0.7
if sCollapse is None:
sCollapse = 0.1
# Create scenario
world = World()
# Agents
free = Agent('Freedonia')
world.addAgent(free)
sylv = Agent(enemy)
world.addAgent(sylv)
# User state
world.defineState(free.name,'troops',int,lo=0,hi=50000,
description='Number of troops you have left')
free.setState('troops',40000)
world.defineState(free.name,'territory',int,lo=0,hi=100,
description='Percentage of disputed territory owned by you')
free.setState('territory',15)
world.defineState(free.name,'cost',int,lo=0,hi=50000,
description='Number of troops %s loses in an attack' % (free.name))
free.setState('cost',2000)
world.defineState(free.name,'position',int,lo=posLo,hi=posHi,
description='Current status of war (%d=%s is winner, %d=you are winner)' % (posLo,sylv.name,posHi))
free.setState('position',5)
world.defineState(free.name,'offered',int,lo=0,hi=100,
description='Percentage of disputed territory that %s last offered to you' % (sylv.name))
free.setState('offered',0)
if model == 'slantchev':
# Compute new value for territory only *after* computing new value for position
world.addDependency(stateKey(free.name,'territory'),stateKey(free.name,'position'))
# Agent state
world.defineState(sylv.name,'troops',int,lo=0,hi=500000,
description='Number of troops %s has left' % (sylv.name))
sylv.setState('troops',30000)
world.defineState(sylv.name,'cost',int,lo=0,hi=50000,
description='Number of troops %s loses in an attack' % (sylv.name))
sylv.setState('cost',2000)
world.defineState(sylv.name,'offered',int,lo=0,hi=100,
description='Percentage of disputed territory that %s last offered to %s' % (free.name,sylv.name))
sylv.setState('offered',0)
# World state
world.defineState(None,'treaty',bool,
description='Have the two sides reached an agreement?')
world.setState(None,'treaty',False)
# Stage of negotiation, illustrating the use of an enumerated state feature
world.defineState(None,'phase',list,['offer','respond','rejection','end','paused','engagement'],
description='The current stage of the negotiation game')
world.setState(None,'phase','paused')
# Game model, static descriptor
world.defineState(None,'model',list,['powell','slantchev'],
description='The model underlying the negotiation game')
world.setState(None,'model',model)
# Round of negotiation
world.defineState(None,'round',int,description='The current round of the negotiation')
world.setState(None,'round',0)
if not web:
# Relationship value
key = world.defineRelation(free.name,sylv.name,'trusts')
world.setFeature(key,0.)
# Game over if there is a treaty
world.addTermination(makeTree({'if': trueRow(stateKey(None,'treaty')),
True: True, False: False}))
# Game over if Freedonia has no territory
world.addTermination(makeTree({'if': thresholdRow(stateKey(free.name,'territory'),1),
True: False, False: True}) )
# Game over if Freedonia has all the territory
world.addTermination(makeTree({'if': thresholdRow(stateKey(free.name,'territory'),99),
True: True, False: False}))
# Game over if number of rounds exceeds limit
world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),maxRounds),
True: True, False: False}))
# Turn order: Uncomment the following if you want agents to act in parallel
# world.setOrder([set(world.agents.keys())])
# Turn order: Uncomment the following if you want agents to act sequentially
world.setOrder([free.name,sylv.name])
# User actions
freeBattle = free.addAction({'verb': 'attack','object': sylv.name})
for amount in range(20,100,20):
free.addAction({'verb': 'offer','object': sylv.name,'amount': amount})
if model == 'powell':
# Powell has null stages
freeNOP = free.addAction({'verb': 'continue'})
elif model == 'slantchev':
# Slantchev has both sides receiving offers
free.addAction({'verb': 'accept offer','object': sylv.name})
free.addAction({'verb': 'reject offer','object': sylv.name})
# Agent actions
sylvBattle = sylv.addAction({'verb': 'attack','object': free.name})
sylvAccept = sylv.addAction({'verb': 'accept offer','object': free.name})
sylvReject = sylv.addAction({'verb': 'reject offer','object': free.name})
if model == 'powell':
# Powell has null stages
sylvNOP = sylv.addAction({'verb': 'continue'})
elif model == 'slantchev':
# Slantchev has both sides making offers
for amount in range(10,100,10):
sylv.addAction({'verb': 'offer','object': free.name,'amount': amount})
# Restrictions on when actions are legal, based on phase of game
for action in filterActions({'verb': 'offer'},free.actions | sylv.actions):
agent = world.agents[action['subject']]
agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'offer'),
True: True, # Offers are legal in the offer phase
False: False})) # Offers are illegal in all other phases
if model == 'powell':
# Powell has a special rejection phase
for action in [freeNOP,freeBattle]:
free.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'rejection'),
True: True, # Attacking and doing nothing are legal only in rejection phase
False: False})) # Attacking and doing nothing are illegal in all other phases
# Once offered, agent can respond
if model == 'powell':
# Under Powell, only Sylvania has to respond, and it can attack
responses = [sylvBattle,sylvAccept,sylvReject]
elif model == 'slantchev':
# Under Slantchev, only accept/reject
responses = filterActions({'verb': 'accept offer'},free.actions | sylv.actions)
responses += filterActions({'verb': 'reject offer'},free.actions | sylv.actions)
for action in responses:
agent = world.agents[action['subject']]
agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'respond'),
True: True, # Offeree must act in the response phase
False: False})) # Offeree cannot act in any other phase
if model == 'powell':
# NOP is legal in exactly opposite situations to all other actions
sylv.setLegal(sylvNOP,makeTree({'if': equalRow(stateKey(None,'phase'),'end'),
True: True, # Sylvania does not do anything in the null phase after Freedonia responds to rejection
False: False})) # Sylvania must act in its other phases
if model == 'slantchev':
# Attacking legal only under engagement phase
for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
agent = world.agents[action['subject']]
agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'engagement'),
True: True, # Attacking legal only in engagement
                                            False: False})) # Attacking illegal in every other phase
# Goals for Freedonia
goalFTroops = maximizeFeature(stateKey(free.name,'troops'))
free.setReward(goalFTroops,1.)
goalFTerritory = maximizeFeature(stateKey(free.name,'territory'))
free.setReward(goalFTerritory,1.)
# Goals for Sylvania
goalSTroops = maximizeFeature(stateKey(sylv.name,'troops'))
sylv.setReward(goalSTroops,1.)
goalSTerritory = minimizeFeature(stateKey(free.name,'territory'))
sylv.setReward(goalSTerritory,1.)
# Possible goals applicable to both
goalAgreement = maximizeFeature(stateKey(None,'treaty'))
# Silly goal, provided as an example of an achievement goal
goalAchieve = achieveFeatureValue(stateKey(None,'phase'),'respond')
# Horizons
if model == 'powell':
free.setAttribute('horizon',4)
sylv.setAttribute('horizon',4)
elif model == 'slantchev':
free.setAttribute('horizon',6)
sylv.setAttribute('horizon',6)
# Discount factors
free.setAttribute('discount',-1)
sylv.setAttribute('discount',-1)
# Levels of belief
free.setRecursiveLevel(2)
sylv.setRecursiveLevel(2)
# Dynamics of battle
freeTroops = stateKey(free.name,'troops')
freeTerr = stateKey(free.name,'territory')
sylvTroops = stateKey(sylv.name,'troops')
# Effect of fighting
for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
# Effect on troops (cost of battle)
tree = makeTree(addFeatureMatrix(freeTroops,stateKey(free.name,'cost'),-1.))
world.setDynamics(freeTroops,action,tree,enforceMin=not web)
tree = makeTree(addFeatureMatrix(sylvTroops,stateKey(sylv.name,'cost'),-1.))
world.setDynamics(sylvTroops,action,tree,enforceMin=not web)
if model == 'powell':
# Effect on territory (probability of collapse)
tree = makeTree({'distribution': [
({'distribution': [(setToConstantMatrix(freeTerr,100),1.-fCollapse), # Sylvania collapses, Freedonia does not
(noChangeMatrix(freeTerr), fCollapse)]}, # Both collapse
sCollapse),
({'distribution': [(setToConstantMatrix(freeTerr,0),fCollapse), # Freedonia collapses, Sylvania does not
(noChangeMatrix(freeTerr), 1.-fCollapse)]}, # Neither collapses
1.-sCollapse)]})
world.setDynamics(freeTerr,action,tree)
elif model == 'slantchev':
# Effect on position
pos = stateKey(free.name,'position')
tree = makeTree({'distribution': [(incrementMatrix(pos,1),1.-fCollapse), # Freedonia wins battle
(incrementMatrix(pos,-1),fCollapse)]}) # Freedonia loses battle
world.setDynamics(pos,action,tree)
# Effect on territory
tree = makeTree({'if': thresholdRow(pos,posHi-.5),
True: setToConstantMatrix(freeTerr,100), # Freedonia won
False: {'if': thresholdRow(pos,posLo+.5),
True: noChangeMatrix(freeTerr),
False: setToConstantMatrix(freeTerr,0)}}) # Freedonia lost
world.setDynamics(freeTerr,action,tree)
# Dynamics of offers
for index in range(2):
atom = Action({'subject': world.agents.keys()[index],'verb': 'offer',
'object': world.agents.keys()[1-index]})
if atom['subject'] == free.name or model != 'powell':
offer = stateKey(atom['object'],'offered')
amount = actionKey('amount')
tree = makeTree({'if': trueRow(stateKey(None,'treaty')),
True: noChangeMatrix(offer),
False: setToConstantMatrix(offer,amount)})
world.setDynamics(offer,atom,tree,enforceMax=not web)
# Dynamics of treaties
for action in filterActions({'verb': 'accept offer'},free.actions | sylv.actions):
# Accepting an offer means that there is now a treaty
key = stateKey(None,'treaty')
tree = makeTree(setTrueMatrix(key))
world.setDynamics(key,action,tree)
# Accepting offer sets territory
offer = stateKey(action['subject'],'offered')
territory = stateKey(free.name,'territory')
if action['subject'] == free.name:
# Freedonia accepts sets territory to last offer
tree = makeTree(setToFeatureMatrix(territory,offer))
world.setDynamics(freeTerr,action,tree)
else:
# Sylvania accepts sets territory to 1-last offer
tree = makeTree(setToFeatureMatrix(territory,offer,pct=-1.,shift=100.))
world.setDynamics(freeTerr,action,tree)
# Dynamics of phase
phase = stateKey(None,'phase')
roundKey = stateKey(None,'round')
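    # (Overview of the phase machine encoded below: offer -> respond;
    #  accepting returns to offer (and the treaty ends the game);
    #  rejecting leads to rejection (powell) or engagement (slantchev);
    #  powell adds rejection -> end -> offer. The round counter increments
    #  when control returns to the offer phase at the end of a full cycle.)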
# OFFER -> RESPOND
for index in range(2):
action = Action({'subject': world.agents.keys()[index],'verb': 'offer',
'object': world.agents.keys()[1-index]})
if action['subject'] == free.name or model != 'powell':
tree = makeTree(setToConstantMatrix(phase,'respond'))
world.setDynamics(phase,action,tree)
# RESPOND -> REJECTION or ENGAGEMENT
for action in filterActions({'verb': 'reject offer'},free.actions | sylv.actions):
if model == 'powell':
tree = makeTree(setToConstantMatrix(phase,'rejection'))
elif model == 'slantchev':
tree = makeTree(setToConstantMatrix(phase,'engagement'))
world.setDynamics(phase,action,tree)
# accepting -> OFFER
for action in filterActions({'verb': 'accept offer'},free.actions | sylv.actions):
tree = makeTree(setToConstantMatrix(phase,'offer'))
world.setDynamics(phase,action,tree)
# attacking -> OFFER
for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
tree = makeTree(setToConstantMatrix(phase,'offer'))
world.setDynamics(phase,action,tree)
if action['subject'] == sylv.name or model == 'slantchev':
tree = makeTree(incrementMatrix(roundKey,1))
world.setDynamics(roundKey,action,tree)
if model == 'powell':
# REJECTION -> END
for atom in [freeNOP,freeBattle]:
tree = makeTree(setToConstantMatrix(phase,'end'))
world.setDynamics(phase,atom,tree)
# END -> OFFER
atom = Action({'subject': sylv.name,'verb': 'continue'})
tree = makeTree(setToConstantMatrix(phase,'offer'))
world.setDynamics(phase,atom,tree)
tree = makeTree(incrementMatrix(roundKey,1))
world.setDynamics(roundKey,atom,tree)
if not web:
# Relationship dynamics: attacking is bad for trust
atom = Action({'subject': sylv.name,'verb': 'attack','object': free.name})
key = binaryKey(free.name,sylv.name,'trusts')
tree = makeTree(approachMatrix(key,0.1,-1.))
world.setDynamics(key,atom,tree)
# Handcrafted policy for Freedonia
# free.setPolicy(makeTree({'if': equalRow('phase','respond'),
# # Accept an offer greater than 50
# True: {'if': thresholdRow(stateKey(free.name,'offered'),50),
# True: Action({'subject': free.name,'verb': 'accept offer','object': sylv.name}),
# False: Action({'subject': free.name,'verb': 'reject offer','object': sylv.name})},
# False: {'if': equalRow('phase','engagement'),
# # Attack during engagement phase
# True: Action({'subject': free.name,'verb': 'attack','object': sylv.name}),
    #                          # Agent decides what to do otherwise
# False: False}}))
# Mental models of enemy
# Example of creating a model with incorrect reward all at once (a version of Freedonia who cares about reaching agreement as well)
# sylv.addModel('false',R={goalSTroops: 10.,goalSTerritory: 1.,goalAgreement: 1.},
# rationality=1.,selection='distribution',parent=True)
# Example of creating a model with incorrect beliefs
sylv.addModel('false',rationality=10.,selection='distribution',parent=True)
key = stateKey(free.name,'position')
# Sylvania believes position to be fixed at 3
sylv.setBelief(key,3,'false')
# Freedonia is truly unsure about position (50% chance of being 7, 50% of being 3)
world.setModel(free.name,True)
free.setBelief(key,Distribution({7: 0.5,3: 0.5}),True)
# Observations about military position
tree = makeTree({'if': thresholdRow(key,1),
True: {'if': thresholdRow(key,9),
True: {'distribution': [(KeyedVector({key: 1}),0.9),
(KeyedVector({key: 1,CONSTANT: -1}),0.1)]},
False: {'distribution': [(KeyedVector({key: 1}),0.8),
(KeyedVector({key: 1,CONSTANT: -1}),0.1),
(KeyedVector({key: 1,CONSTANT: 1}),0.1)]}},
False: {'distribution': [(KeyedVector({key: 1}),0.9),
(KeyedVector({key: 1,CONSTANT: 1}),0.1)]}})
free.defineObservation(key,tree)
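    # The observation tree above gives noisy readings of 'position': interior
    # values are observed correctly with probability 0.8 and off by one in
    # either direction with probability 0.1 each, while values at the bounds
    # (position <= 1 or > 9) fold the noise inward so observations stay in range.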
# Example of setting model parameters separately
sylv.addModel('true',parent=True)
sylv.setAttribute('rationality',10.,'true') # Override real agent's rationality with this value
sylv.setAttribute('selection','distribution','true')
world.setMentalModel(free.name,sylv.name,{'false': 0.9,'true': 0.1})
# Goal of fooling Sylvania
goalDeception = achieveFeatureValue(modelKey(sylv.name),sylv.model2index('false'))
return world
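# A minimal usage sketch (not part of the original driver; argument values are
# illustrative only):
#   world = scenarioCreationUseCase('Sylvania', model='slantchev', maxRounds=10)
#   scenarioSimulationUseCase(world, offer=40, rounds=2, debug=1, model='slantchev')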
def fitWorld(world):
"""
Piecewise linear compilation of Freedonia's policy
"""
for agent in world.agents.values():
if agent.name == 'Freedonia':
free = agent
else:
sylv = agent
world.setState(None,'phase','offer')
state = world.state.domain()[0]
freeModel = world.getModel(free.name,state)
beliefs = free.getBelief(state,freeModel)
# Compute transition trees
T = {}
for agent in world.agents.values():
for action in agent.actions:
T[action] = None
for keys in world.evaluationOrder:
result = None
for key in keys:
dynamics = world.getDynamics(key,action)
if dynamics:
# Use existing tree
assert len(dynamics) == 1
dynamics = dynamics[0]
else:
# Create identity tree
dynamics = KeyedTree(noChangeMatrix(key))
if result is None:
result = dynamics
else:
result += dynamics
result += KeyedTree(noChangeMatrix(CONSTANT))
if T[action] is None:
T[action] = result
else:
T[action] = result*T[action]
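            # At this point T[action] is the composed one-step transition tree
            # for the action: per-feature dynamics are summed within each stage
            # of world.evaluationOrder and chained across stages by tree
            # multiplication.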
# Compute policy trees for the other agent
models = {}
for agent in world.agents.values():
models[agent.name] = set()
for agent in world.agents.values():
for vector in beliefs.domain():
model = world.getModel(agent.name,vector)
ancestor = agent.findAttribute('R',model)
models[agent.name].add(ancestor)
if len(models[agent.name]) == 0:
# No beliefs about models found, assume True model
models[agent.name].add(True)
for agent in world.agents.values():
for model in models[agent.name]:
R = sum(agent.getAttribute('R',model),KeyedTree(KeyedVector()))
agent.models[model]['policy'] = []
policy = agent.models[model]['policy']
for horizon in range(1,agent.getAttribute('horizon',model)+1):
# Dynamic programming of policies
if len(policy) < horizon:
# Policy tree for this horizon is missing
legal = {}
actions = []
# Process legality conditions
for action in agent.actions:
try:
legal[action] = agent.legal[action]
except KeyError:
legal[action] = KeyedTree(True)
# Compute value functions for each action
if horizon > 1:
raise NotImplementedError,'Backup step is missing'
V = {}
for action in agent.actions:
V[action] = R*T[action]
V[action] = legal[action].replace(True,V[action])
V[action] = V[action].expectation()
V[action] = V[action].map(lambda leaf: {'vector': leaf,'action': action} if isinstance(leaf,KeyedVector) else leaf)
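                    # Each V[action] leaf now pairs the expected one-step
                    # reward vector for a state region with the action that
                    # produced it, so the max over actions below also tracks
                    # the argmax.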
# Build up a policy
policy.append(None)
for action in agent.actions:
if policy[horizon-1] is None:
policy[horizon-1] = V[action]
else:
policy[horizon-1] = policy[horizon-1].max(V[action])
Vstar = policy[horizon-1].map(lambda leaf: leaf['vector'] if isinstance(leaf,dict) else leaf)
policy[horizon-1] = policy[horizon-1].map(lambda leaf: leaf['action']
if isinstance(leaf,dict) else leaf)
# print 'Unpruned:'
policy[horizon-1].minimizePlanes()
# print policy[horizon-1]
pruned = policy[horizon-1].prune()
# print 'Pruned:'
print pruned
# # Verify pruning
# world.setFeature('phase','respond',beliefs)
# world.setState('Freedonia','territory',72,beliefs)
# for offer in range(50,100,3):
# world.setState(agent.name,'offered',offer,beliefs)
# vector = beliefs.domain()[0]
# print offer
# print policy[horizon-1][vector],pruned[vector]
# assert policy[horizon-1][vector] == pruned[vector]
print free.models[freeModel]['beliefs']
break
sys.exit(0)
def scenarioSimulationUseCase(world,offer=0,rounds=1,debug=1,model='powell'):
"""
@param offer: the initial offer for Freedonia to give (default is none)
@type offer: int
@param rounds: the number of complete rounds, where a round is two turns each, following Powell (default is 1)
@type rounds: int
@param debug: the debug level to use in explanation (default is 1)
@type debug: int
"""
testMode = isinstance(debug,dict)
if testMode:
buf = StringIO.StringIO()
debug[offer] = buf
debug = 0
for agent in world.agents.values():
if agent.name == 'Freedonia':
free = agent
else:
sylv = agent
world.setState(None,'phase','offer')
if model == 'powell':
steps = 4
else:
assert model == 'slantchev'
steps = 3
if debug > 0:
world.printState(beliefs=True)
for t in range(rounds):
for step in range(steps):
assert len(world.state) == 1
phase = world.getState(None,'phase').expectation()
state = world.state.domain()[0]
if not world.terminated(state):
if t == 0 and phase == 'offer' and offer > 0:
# Force Freedonia to make low offer in first step
outcome = world.step({free.name: Action({'subject':free.name,'verb':'offer','object': sylv.name,'amount': offer})})
world.explain(outcome,debug)
else:
# Free to choose
outcome = world.step()
world.explain(outcome,debug)
if testMode:
if (t == 0 and step == 1) or (t == 1 and step == 0):
for entry in outcome:
world.explainAction(entry,buf,1)
world.state.select()
if not testMode and debug > 0:
world.printState(beliefs=True)
for agent in world.agents.values():
print agent.name,len(agent.models)
assert len(world.state) == 1
phase = world.getState(None,'phase').expectation()
if phase == 'offer':
# Looped around
break
def findThreshold(scenario,t,model='powell',position=0):
"""
Finds the threshold at which the agent will accept the offer"""
if model == 'slantchev':
# Find counteroffer in this state
actions = []
while len(actions) < 2:
world = World(scenario)
world.setState(None,'round',t)
world.setState('Freedonia','position',position)
entry = {}
scenarioSimulationUseCase(world,20,2,entry,model)
actions = entry[20].getvalue().split('\n')[:-1]
entry[20].close()
amount = int(actions[1].split('-')[-1])
print 'Time: %d, Position %d -> Offer %d%%' % (t,position,amount)
# Compute acceptance threshold
offers = [50]
index = 0
entry = {}
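    # Bisection over offer amounts: 'offers' stays sorted and 'entry' caches
    # the agent's response ('accept', 'reject', or 'attack') at each probed
    # amount, narrowing in on the smallest offer the agent accepts.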
while True:
world = World(scenario)
world.setState(None,'round',t)
if model == 'slantchev':
world.setState('Freedonia','position',position)
scenarioSimulationUseCase(world,offers[index],1,entry,model)
actions = entry[offers[index]].getvalue().split('\n')[:-1]
entry[offers[index]].close()
entry[offers[index]] = actions[0].split('-')[1].split()[0]
if entry[offers[index]] == 'accept':
# Try a lower offer
if index > 0:
down = offers[index-1]
assert entry[down] != 'accept'
else:
down = 0
new = (offers[index]+down) / 2
if entry.has_key(new):
if entry[new] != 'accept':
new += 1
break
else:
offers.insert(index,new)
else:
assert entry[offers[index]] in ['reject','attack']
# Try a higher offer
try:
up = offers[index+1]
assert entry[up] == 'accept'
except IndexError:
up = 100
new = (offers[index]+up) / 2
if entry.has_key(new):
break
else:
offers.insert(index+1,new)
index += 1
return new
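# A usage sketch (illustrative; assumes a scenario was previously saved under
# the default output name 'default'):
#   threshold = findThreshold('default', t=0, model='powell')
#   print 'Acceptance threshold at round 0: %d%%' % (threshold)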
def play(world,debug=1):
"""
Modify Freedonia to play autonomously and simulate
"""
for agent in world.agents.values():
if agent.name == 'Freedonia':
free = agent
else:
sylv = agent
for amount in range(10,100,20):
action = Action({'verb': 'offer','object': sylv.name,'amount': amount})
free.addAction(action)
action = Action({'verb': 'offer','object': free.name,'amount': amount})
sylv.addAction(action)
for action in filterActions({'verb': 'offer'},free.actions | sylv.actions):
actor = world.agents[action['subject']]
if not actor.legal.has_key(action):
actor.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'offer'),
True: True, # Offers are legal in the offer phase
False: False})) # Offers are illegal in all other phases
model = world.getState(None,'model').domain()[0]
start = world.getState(free.name,'territory').expectation()
print model,start
scenarioSimulationUseCase(world,offer=0,rounds=15,debug=debug,model=model)
def findPolicies(args):
"""
Wrapper for finding agent offers and acceptance thresholds
"""
results = []
search = (30,40,1)
for t in range(args['rounds']):
entry = {}
if args['model'] == 'slantchev':
for position in range(1,10):
subresult = []
results.append(subresult)
subresult.append(entry)
thresh = findThreshold(args['output'],t,args['model'],position)
print 'Time %d, Position %d -> Accept if > %d%%' % (t,position,thresh)
else:
results.append(entry)
print 'Time %d -> Accept if > %d%%' %(t,findThreshold(args['output'],t))
if __name__ == '__main__':
# Grab command-line arguments
parser = ArgumentParser()
# Optional argument that sets the filename for the output file
parser.add_argument('-o',action='store',
dest='output',default='default',
help='scenario file [default: %(default)s]')
group = parser.add_argument_group('Creation Options','Control the parameters of the created scenario.')
# Optional argument that sets the theoretical model
group.add_argument('-m',action='store',
dest='model',choices=['powell','slantchev'],default='powell',
help='theoretical model for the game [default: %(default)s]')
# Optional argument that sets the cost of battle to Freedonia
group.add_argument('-f',action='store',
dest='fcost',type=int,default=2000,
help='cost of battle to Freedonia [default: %(default)s]')
# Optional argument that sets the cost of battle to Sylvania
group.add_argument('-s',action='store',
dest='scost',type=int,default=1000,
help='cost of battle to enemy [default: %(default)s]')
# Optional argument that sets the initial amount of territory owned by Freedonia
group.add_argument('-i','--initial',action='store',
dest='initial',type=int,default=13,
help='Freedonia\'s initial territory percentage [default: %(default)s]')
# Optional argument that sets Freedonia's initial military positional advantage
group.add_argument('-p','--position',action='store',
dest='position',type=int,default=3,
help='Freedonia\'s initial positional advantage [default: %(default)s]')
# Optional argument that sets the name of the enemy country
group.add_argument('-e',action='store',
dest='enemy',default='Sylvania',
help='Name of the enemy country [default: %(default)s]')
# Optional argument that sets the name of the disputed region
group.add_argument('--region',action='store',
dest='region',default='Trentino',
help='Name of the region under dispute [default: %(default)s]')
# Optional argument that sets the maximum number of rounds to play
group.add_argument('-r',action='store',
dest='rounds',type=int,default=15,
help='Maximum number of rounds to play [default: %(default)s]')
# Optional argument that sets Freedonia's initial troops
group.add_argument('--freedonia-troops',action='store',
dest='ftroops',type=int,default=40000,
help='number of Freedonia troops [default: %(default)s]')
# Optional argument that sets Sylvania's initial troops
group.add_argument('--enemy-troops',action='store',
dest='stroops',type=int,default=30000,
help='number of enemy troops [default: %(default)s]')
# Optional argument that determines whether to generate models for Web platform
group.add_argument('-w','--web',action='store_true',
dest='web',default=False,
help='generate Web version if set [default: %(default)s]')
group = parser.add_argument_group('Algorithm Options','Control the algorithms to apply to the agents.')
# Optional argument that determines whether to use value iteration to create Freedonia's policy
group.add_argument('-c','--compiled',action='store_true',
dest='compiled',default=False,
help='use value iteration for Freedonia [default: %(default)s]')
# Optional argument that determines whether to use PWL compilation of Freedonia's policy
group.add_argument('--fitting',action='store_true',
dest='fitting',default=False,
help='use PWL compilation and fitting for Freedonia [default: %(default)s]')
group = parser.add_argument_group('Simulation Options','Control the simulation of the created scenario.')
# Optional argument that sets the level of explanations when running the simulation
group.add_argument('-d',action='store',
dest='debug',type=int,default=1,
help='level of explanation detail [default: %(default)s]')
# Optional argument that sets the initial offer that Freedonia will make
group.add_argument('-a',action='store',
dest='amount',type=int,default=0,
help='Freedonia\'s first offer amount')
# Optional argument that sets the number of time steps to simulate
group.add_argument('-t','--time',action='store',
dest='time',type=int,default=1,
help='number of time steps to simulate [default: %(default)s]')
group = parser.add_argument_group('Creation Options','Control the parameters of the created scenario.')
args = vars(parser.parse_args())
world = scenarioCreationUseCase(args['enemy'],maxRounds=args['rounds'],model=args['model'],
web=args['web'])
# Initialize state values based on command-line arguments
world.agents['Freedonia'].setState('troops',args['ftroops'])
world.agents['Freedonia'].setState('territory',args['initial'])
world.agents['Freedonia'].setState('position',args['position'])
world.agents['Freedonia'].setState('cost',args['fcost'])
world.agents[args['enemy']].setState('troops',args['stroops'])
world.agents[args['enemy']].setState('cost',args['scost'])
if args['compiled']:
compileWorld(world)
if args['fitting']:
fitWorld(world)
# Create configuration file
config = SafeConfigParser()
# Specify game options for web interface
config.add_section('Game')
config.set('Game','rounds','%d' % (args['rounds']))
config.set('Game','user','Freedonia')
config.set('Game','agent',args['enemy'])
config.set('Game','region',args['region'])
if args['model'] == 'powell':
# Battle is optional under Powell
config.set('Game','battle','optional')
elif args['model'] == 'slantchev':
# Battle is mandatory under Slantchev
config.set('Game','battle','mandatory')
# Specify which state features are visible in web interface
config.add_section('Visible')
features = ['territory','troops']
if args['model'] == 'slantchev':
features.append('position')
for feature in features:
config.set('Visible',feature,'yes')
# Specify descriptions of actions for web interface
config.add_section('Actions')
config.set('Actions','offer','Propose treaty where %s gets <action:amount>%%%% of total disputed territory' % (args['enemy']))
config.set('Actions','attack','Attack %s' % (args['enemy']))
config.set('Actions','accept offer','Accept offer of <Freedonia:offered>%% of total disputed territory')
config.set('Actions','reject offer','Reject offer of <Freedonia:offered>%% of total disputed territory')
config.set('Actions','continue','Continue to next round of negotiation without attacking')
config.set('Actions','%s offer' % (args['enemy']),'offer <action:amount>%%')
config.set('Actions','%s accept offer' % (args['enemy']),
'Accept offer of <%s:offered>%%%% of total disputed territory' % (args['enemy']))
config.set('Actions','%s reject offer' % (args['enemy']),
'Reject offer of <%s:offered>%%%% of total disputed territory' % (args['enemy']))
# Specify what changes are displayed
config.add_section('Change')
config.set('Change','troops','yes')
if args['model'] == 'slantchev':
config.set('Change','position','yes')
# Specify links
config.add_section('Links')
config.set('Links','survey','http://www.curiouslab.com/clsurvey/index.php?sid=39345&lang=en')
config.set('Links','scenarios','8839,1308,2266,5538')
f = open('%s.cfg' % (args['output']),'w')
config.write(f)
f.close()
# Save scenario to compressed XML file
world.save(args['output'])
# Test saved scenario
world = World(args['output'])
scenarioSimulationUseCase(world,args['amount'],args['time'],args['debug'],args['model'])
# findPolicies(args)
# world.printState(world.agents[args['enemy']].getBelief(world.state.domain()[0],'false'))
| 49.120497 | 143 | 0.583987 | """
Example scenario for wartime negotiation.
Provides use cases for both modeling and simulating scenarios.
"""
import sys
from ConfigParser import SafeConfigParser
from argparse import ArgumentParser
import StringIO
from psychsim.pwl import *
from psychsim.reward import *
from psychsim.action import *
from psychsim.world import World,stateKey,actionKey,binaryKey,modelKey
from psychsim.agent import Agent
def scenarioCreationUseCase(enemy='Sylvania',model='powell',web=False,
fCollapse=None,sCollapse=None,maxRounds=15):
"""
An example of how to create a scenario
@param enemy: the name of the agent-controlled side, i.e., Freedonia's opponent (default: Sylvania)
@type enemy: str
@param model: which model do we use (default is "powell")
@type model: powell or slantchev
@param web: if C{True}, then create the web-based experiment scenario (default: C{False})
@type web: bool
@param fCollapse: the probability that Freedonia collapses (under powell, default: 0.1) or loses battle (under slantchev, default: 0.7)
@type fCollapse: float
@param sCollapse: the probability that Sylvania collapses, under powell (default: 0.1)
@type sCollapse: float
@param maxRounds: the maximum number of game rounds (default: 15)
@type maxRounds: int
@return: the scenario created
@rtype: L{World}
"""
# Handle defaults for battle probabilities, under each model
posLo = 0
posHi = 10
if fCollapse is None:
if model == 'powell':
fCollapse = 0.1
elif model == 'slantchev':
fCollapse = 0.7
if sCollapse is None:
sCollapse = 0.1
# Create scenario
world = World()
# Agents
free = Agent('Freedonia')
world.addAgent(free)
sylv = Agent(enemy)
world.addAgent(sylv)
# User state
world.defineState(free.name,'troops',int,lo=0,hi=50000,
description='Number of troops you have left')
free.setState('troops',40000)
world.defineState(free.name,'territory',int,lo=0,hi=100,
description='Percentage of disputed territory owned by you')
free.setState('territory',15)
world.defineState(free.name,'cost',int,lo=0,hi=50000,
description='Number of troops %s loses in an attack' % (free.name))
free.setState('cost',2000)
world.defineState(free.name,'position',int,lo=posLo,hi=posHi,
description='Current status of war (%d=%s is winner, %d=you are winner)' % (posLo,sylv.name,posHi))
free.setState('position',5)
world.defineState(free.name,'offered',int,lo=0,hi=100,
description='Percentage of disputed territory that %s last offered to you' % (sylv.name))
free.setState('offered',0)
if model == 'slantchev':
# Compute new value for territory only *after* computing new value for position
world.addDependency(stateKey(free.name,'territory'),stateKey(free.name,'position'))
# Agent state
world.defineState(sylv.name,'troops',int,lo=0,hi=500000,
description='Number of troops %s has left' % (sylv.name))
sylv.setState('troops',30000)
world.defineState(sylv.name,'cost',int,lo=0,hi=50000,
description='Number of troops %s loses in an attack' % (sylv.name))
sylv.setState('cost',2000)
world.defineState(sylv.name,'offered',int,lo=0,hi=100,
description='Percentage of disputed territory that %s last offered to %s' % (free.name,sylv.name))
sylv.setState('offered',0)
# World state
world.defineState(None,'treaty',bool,
description='Have the two sides reached an agreement?')
world.setState(None,'treaty',False)
# Stage of negotiation, illustrating the use of an enumerated state feature
world.defineState(None,'phase',list,['offer','respond','rejection','end','paused','engagement'],
description='The current stage of the negotiation game')
world.setState(None,'phase','paused')
# Game model, static descriptor
world.defineState(None,'model',list,['powell','slantchev'],
description='The model underlying the negotiation game')
world.setState(None,'model',model)
# Round of negotiation
world.defineState(None,'round',int,description='The current round of the negotiation')
world.setState(None,'round',0)
if not web:
# Relationship value
key = world.defineRelation(free.name,sylv.name,'trusts')
world.setFeature(key,0.)
# Game over if there is a treaty
world.addTermination(makeTree({'if': trueRow(stateKey(None,'treaty')),
True: True, False: False}))
# Game over if Freedonia has no territory
world.addTermination(makeTree({'if': thresholdRow(stateKey(free.name,'territory'),1),
True: False, False: True}) )
# Game over if Freedonia has all the territory
world.addTermination(makeTree({'if': thresholdRow(stateKey(free.name,'territory'),99),
True: True, False: False}))
# Game over if number of rounds exceeds limit
world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),maxRounds),
True: True, False: False}))
# Turn order: Uncomment the following if you want agents to act in parallel
# world.setOrder([set(world.agents.keys())])
# Turn order: Uncomment the following if you want agents to act sequentially
world.setOrder([free.name,sylv.name])
# User actions
freeBattle = free.addAction({'verb': 'attack','object': sylv.name})
for amount in range(20,100,20):
free.addAction({'verb': 'offer','object': sylv.name,'amount': amount})
if model == 'powell':
# Powell has null stages
freeNOP = free.addAction({'verb': 'continue'})
elif model == 'slantchev':
# Slantchev has both sides receiving offers
free.addAction({'verb': 'accept offer','object': sylv.name})
free.addAction({'verb': 'reject offer','object': sylv.name})
# Agent actions
sylvBattle = sylv.addAction({'verb': 'attack','object': free.name})
sylvAccept = sylv.addAction({'verb': 'accept offer','object': free.name})
sylvReject = sylv.addAction({'verb': 'reject offer','object': free.name})
if model == 'powell':
# Powell has null stages
sylvNOP = sylv.addAction({'verb': 'continue'})
elif model == 'slantchev':
# Slantchev has both sides making offers
for amount in range(10,100,10):
sylv.addAction({'verb': 'offer','object': free.name,'amount': amount})
# Restrictions on when actions are legal, based on phase of game
for action in filterActions({'verb': 'offer'},free.actions | sylv.actions):
agent = world.agents[action['subject']]
agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'offer'),
True: True, # Offers are legal in the offer phase
False: False})) # Offers are illegal in all other phases
if model == 'powell':
# Powell has a special rejection phase
for action in [freeNOP,freeBattle]:
free.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'rejection'),
True: True, # Attacking and doing nothing are legal only in rejection phase
False: False})) # Attacking and doing nothing are illegal in all other phases
# Once offered, agent can respond
if model == 'powell':
# Under Powell, only Sylvania has to respond, and it can attack
responses = [sylvBattle,sylvAccept,sylvReject]
elif model == 'slantchev':
# Under Slantchev, only accept/reject
responses = filterActions({'verb': 'accept offer'},free.actions | sylv.actions)
responses += filterActions({'verb': 'reject offer'},free.actions | sylv.actions)
for action in responses:
agent = world.agents[action['subject']]
agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'respond'),
True: True, # Offeree must act in the response phase
False: False})) # Offeree cannot act in any other phase
if model == 'powell':
# NOP is legal in exactly opposite situations to all other actions
sylv.setLegal(sylvNOP,makeTree({'if': equalRow(stateKey(None,'phase'),'end'),
True: True, # Sylvania does not do anything in the null phase after Freedonia responds to rejection
False: False})) # Sylvania must act in its other phases
if model == 'slantchev':
# Attacking legal only under engagement phase
for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
agent = world.agents[action['subject']]
agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'engagement'),
True: True, # Attacking legal only in engagement
                               False: False})) # Attacking illegal in every other phase
# Goals for Freedonia
goalFTroops = maximizeFeature(stateKey(free.name,'troops'))
free.setReward(goalFTroops,1.)
goalFTerritory = maximizeFeature(stateKey(free.name,'territory'))
free.setReward(goalFTerritory,1.)
# Goals for Sylvania
goalSTroops = maximizeFeature(stateKey(sylv.name,'troops'))
sylv.setReward(goalSTroops,1.)
goalSTerritory = minimizeFeature(stateKey(free.name,'territory'))
sylv.setReward(goalSTerritory,1.)
# Possible goals applicable to both
goalAgreement = maximizeFeature(stateKey(None,'treaty'))
# Silly goal, provided as an example of an achievement goal
goalAchieve = achieveFeatureValue(stateKey(None,'phase'),'respond')
# Horizons
if model == 'powell':
free.setAttribute('horizon',4)
sylv.setAttribute('horizon',4)
elif model == 'slantchev':
free.setAttribute('horizon',6)
sylv.setAttribute('horizon',6)
# Discount factors
free.setAttribute('discount',-1)
sylv.setAttribute('discount',-1)
# Levels of belief
free.setRecursiveLevel(2)
sylv.setRecursiveLevel(2)
# Dynamics of battle
freeTroops = stateKey(free.name,'troops')
freeTerr = stateKey(free.name,'territory')
sylvTroops = stateKey(sylv.name,'troops')
# Effect of fighting
for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
# Effect on troops (cost of battle)
tree = makeTree(addFeatureMatrix(freeTroops,stateKey(free.name,'cost'),-1.))
world.setDynamics(freeTroops,action,tree,enforceMin=not web)
tree = makeTree(addFeatureMatrix(sylvTroops,stateKey(sylv.name,'cost'),-1.))
world.setDynamics(sylvTroops,action,tree,enforceMin=not web)
if model == 'powell':
# Effect on territory (probability of collapse)
tree = makeTree({'distribution': [
({'distribution': [(setToConstantMatrix(freeTerr,100),1.-fCollapse), # Sylvania collapses, Freedonia does not
(noChangeMatrix(freeTerr), fCollapse)]}, # Both collapse
sCollapse),
({'distribution': [(setToConstantMatrix(freeTerr,0),fCollapse), # Freedonia collapses, Sylvania does not
(noChangeMatrix(freeTerr), 1.-fCollapse)]}, # Neither collapses
1.-sCollapse)]})
world.setDynamics(freeTerr,action,tree)
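            # Worked example with the Powell defaults (fCollapse=0.1,
            # sCollapse=0.1): P(Freedonia takes all territory) = 0.1*0.9 = 0.09,
            # P(Freedonia loses all territory) = 0.9*0.1 = 0.09, and
            # P(territory unchanged) = 0.1*0.1 + 0.9*0.9 = 0.82.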
elif model == 'slantchev':
# Effect on position
pos = stateKey(free.name,'position')
tree = makeTree({'distribution': [(incrementMatrix(pos,1),1.-fCollapse), # Freedonia wins battle
(incrementMatrix(pos,-1),fCollapse)]}) # Freedonia loses battle
world.setDynamics(pos,action,tree)
# Effect on territory
tree = makeTree({'if': thresholdRow(pos,posHi-.5),
True: setToConstantMatrix(freeTerr,100), # Freedonia won
False: {'if': thresholdRow(pos,posLo+.5),
True: noChangeMatrix(freeTerr),
False: setToConstantMatrix(freeTerr,0)}}) # Freedonia lost
world.setDynamics(freeTerr,action,tree)
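            # With posLo=0 and posHi=10, the thresholds above mean: reaching
            # position 10 hands Freedonia all the territory, falling to 0
            # strips it, and any interior position leaves territory unchanged.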
# Dynamics of offers
for index in range(2):
atom = Action({'subject': world.agents.keys()[index],'verb': 'offer',
'object': world.agents.keys()[1-index]})
if atom['subject'] == free.name or model != 'powell':
offer = stateKey(atom['object'],'offered')
amount = actionKey('amount')
tree = makeTree({'if': trueRow(stateKey(None,'treaty')),
True: noChangeMatrix(offer),
False: setToConstantMatrix(offer,amount)})
world.setDynamics(offer,atom,tree,enforceMax=not web)
# Dynamics of treaties
for action in filterActions({'verb': 'accept offer'},free.actions | sylv.actions):
# Accepting an offer means that there is now a treaty
key = stateKey(None,'treaty')
tree = makeTree(setTrueMatrix(key))
world.setDynamics(key,action,tree)
# Accepting offer sets territory
offer = stateKey(action['subject'],'offered')
territory = stateKey(free.name,'territory')
if action['subject'] == free.name:
            # Freedonia accepting sets its territory to the amount it was last offered
tree = makeTree(setToFeatureMatrix(territory,offer))
world.setDynamics(freeTerr,action,tree)
else:
            # Sylvania accepting sets Freedonia's territory to 100 minus the last offer
tree = makeTree(setToFeatureMatrix(territory,offer,pct=-1.,shift=100.))
world.setDynamics(freeTerr,action,tree)
# Dynamics of phase
phase = stateKey(None,'phase')
roundKey = stateKey(None,'round')
# OFFER -> RESPOND
for index in range(2):
action = Action({'subject': world.agents.keys()[index],'verb': 'offer',
'object': world.agents.keys()[1-index]})
if action['subject'] == free.name or model != 'powell':
tree = makeTree(setToConstantMatrix(phase,'respond'))
world.setDynamics(phase,action,tree)
# RESPOND -> REJECTION or ENGAGEMENT
for action in filterActions({'verb': 'reject offer'},free.actions | sylv.actions):
if model == 'powell':
tree = makeTree(setToConstantMatrix(phase,'rejection'))
elif model == 'slantchev':
tree = makeTree(setToConstantMatrix(phase,'engagement'))
world.setDynamics(phase,action,tree)
# accepting -> OFFER
for action in filterActions({'verb': 'accept offer'},free.actions | sylv.actions):
tree = makeTree(setToConstantMatrix(phase,'offer'))
world.setDynamics(phase,action,tree)
# attacking -> OFFER
for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
tree = makeTree(setToConstantMatrix(phase,'offer'))
world.setDynamics(phase,action,tree)
if action['subject'] == sylv.name or model == 'slantchev':
tree = makeTree(incrementMatrix(roundKey,1))
world.setDynamics(roundKey,action,tree)
if model == 'powell':
# REJECTION -> END
for atom in [freeNOP,freeBattle]:
tree = makeTree(setToConstantMatrix(phase,'end'))
world.setDynamics(phase,atom,tree)
# END -> OFFER
atom = Action({'subject': sylv.name,'verb': 'continue'})
tree = makeTree(setToConstantMatrix(phase,'offer'))
world.setDynamics(phase,atom,tree)
tree = makeTree(incrementMatrix(roundKey,1))
world.setDynamics(roundKey,atom,tree)
if not web:
# Relationship dynamics: attacking is bad for trust
atom = Action({'subject': sylv.name,'verb': 'attack','object': free.name})
key = binaryKey(free.name,sylv.name,'trusts')
tree = makeTree(approachMatrix(key,0.1,-1.))
world.setDynamics(key,atom,tree)
# Handcrafted policy for Freedonia
# free.setPolicy(makeTree({'if': equalRow('phase','respond'),
# # Accept an offer greater than 50
# True: {'if': thresholdRow(stateKey(free.name,'offered'),50),
# True: Action({'subject': free.name,'verb': 'accept offer','object': sylv.name}),
# False: Action({'subject': free.name,'verb': 'reject offer','object': sylv.name})},
# False: {'if': equalRow('phase','engagement'),
# # Attack during engagement phase
# True: Action({'subject': free.name,'verb': 'attack','object': sylv.name}),
    #                          # Agent decides what to do otherwise
# False: False}}))
# Mental models of enemy
# Example of creating a model with incorrect reward all at once (a version of Freedonia who cares about reaching agreement as well)
# sylv.addModel('false',R={goalSTroops: 10.,goalSTerritory: 1.,goalAgreement: 1.},
# rationality=1.,selection='distribution',parent=True)
# Example of creating a model with incorrect beliefs
sylv.addModel('false',rationality=10.,selection='distribution',parent=True)
key = stateKey(free.name,'position')
# Sylvania believes position to be fixed at 3
sylv.setBelief(key,3,'false')
# Freedonia is truly unsure about position (50% chance of being 7, 50% of being 3)
world.setModel(free.name,True)
free.setBelief(key,Distribution({7: 0.5,3: 0.5}),True)
# Observations about military position
tree = makeTree({'if': thresholdRow(key,1),
True: {'if': thresholdRow(key,9),
True: {'distribution': [(KeyedVector({key: 1}),0.9),
(KeyedVector({key: 1,CONSTANT: -1}),0.1)]},
False: {'distribution': [(KeyedVector({key: 1}),0.8),
(KeyedVector({key: 1,CONSTANT: -1}),0.1),
(KeyedVector({key: 1,CONSTANT: 1}),0.1)]}},
False: {'distribution': [(KeyedVector({key: 1}),0.9),
(KeyedVector({key: 1,CONSTANT: 1}),0.1)]}})
free.defineObservation(key,tree)
# Example of setting model parameters separately
sylv.addModel('true',parent=True)
sylv.setAttribute('rationality',10.,'true') # Override real agent's rationality with this value
sylv.setAttribute('selection','distribution','true')
world.setMentalModel(free.name,sylv.name,{'false': 0.9,'true': 0.1})
# Goal of fooling Sylvania
goalDeception = achieveFeatureValue(modelKey(sylv.name),sylv.model2index('false'))
return world
def compileWorld(world):
# First, let's set the start state
world.setState(None,'phase','offer')
    # NOTE: reads the module-level `args` parsed in the __main__ block below,
    # so compileWorld only works when invoked from that driver.
    world.agents[args['enemy']].setAttribute('rationality',10.)
# Then, do value iteration to pre-compute Freedonia's policy
world.agents['Freedonia'].valueIteration(horizon=-1,debug=1)
def fitWorld(world):
"""
Piecewise linear compilation of Freedonia's policy
"""
for agent in world.agents.values():
if agent.name == 'Freedonia':
free = agent
else:
sylv = agent
world.setState(None,'phase','offer')
state = world.state.domain()[0]
freeModel = world.getModel(free.name,state)
beliefs = free.getBelief(state,freeModel)
# Compute transition trees
T = {}
for agent in world.agents.values():
for action in agent.actions:
T[action] = None
for keys in world.evaluationOrder:
result = None
for key in keys:
dynamics = world.getDynamics(key,action)
if dynamics:
# Use existing tree
assert len(dynamics) == 1
dynamics = dynamics[0]
else:
# Create identity tree
dynamics = KeyedTree(noChangeMatrix(key))
if result is None:
result = dynamics
else:
result += dynamics
result += KeyedTree(noChangeMatrix(CONSTANT))
if T[action] is None:
T[action] = result
else:
T[action] = result*T[action]
# Compute policy trees for the other agent
models = {}
for agent in world.agents.values():
models[agent.name] = set()
for agent in world.agents.values():
for vector in beliefs.domain():
model = world.getModel(agent.name,vector)
ancestor = agent.findAttribute('R',model)
models[agent.name].add(ancestor)
if len(models[agent.name]) == 0:
# No beliefs about models found, assume True model
models[agent.name].add(True)
for agent in world.agents.values():
for model in models[agent.name]:
R = sum(agent.getAttribute('R',model),KeyedTree(KeyedVector()))
agent.models[model]['policy'] = []
policy = agent.models[model]['policy']
for horizon in range(1,agent.getAttribute('horizon',model)+1):
# Dynamic programming of policies
if len(policy) < horizon:
# Policy tree for this horizon is missing
legal = {}
actions = []
# Process legality conditions
for action in agent.actions:
try:
legal[action] = agent.legal[action]
except KeyError:
legal[action] = KeyedTree(True)
# Compute value functions for each action
if horizon > 1:
raise NotImplementedError,'Backup step is missing'
V = {}
for action in agent.actions:
V[action] = R*T[action]
V[action] = legal[action].replace(True,V[action])
V[action] = V[action].expectation()
V[action] = V[action].map(lambda leaf: {'vector': leaf,'action': action} if isinstance(leaf,KeyedVector) else leaf)
# Build up a policy
policy.append(None)
for action in agent.actions:
if policy[horizon-1] is None:
policy[horizon-1] = V[action]
else:
policy[horizon-1] = policy[horizon-1].max(V[action])
Vstar = policy[horizon-1].map(lambda leaf: leaf['vector'] if isinstance(leaf,dict) else leaf)
policy[horizon-1] = policy[horizon-1].map(lambda leaf: leaf['action']
if isinstance(leaf,dict) else leaf)
# print 'Unpruned:'
policy[horizon-1].minimizePlanes()
# print policy[horizon-1]
pruned = policy[horizon-1].prune()
# print 'Pruned:'
print pruned
# # Verify pruning
# world.setFeature('phase','respond',beliefs)
# world.setState('Freedonia','territory',72,beliefs)
# for offer in range(50,100,3):
# world.setState(agent.name,'offered',offer,beliefs)
# vector = beliefs.domain()[0]
# print offer
# print policy[horizon-1][vector],pruned[vector]
# assert policy[horizon-1][vector] == pruned[vector]
print free.models[freeModel]['beliefs']
break
sys.exit(0)
def scenarioSimulationUseCase(world,offer=0,rounds=1,debug=1,model='powell'):
"""
@param offer: the initial offer for Freedonia to give (default is none)
@type offer: int
@param rounds: the number of complete rounds, where a round is two turns each, following Powell (default is 1)
@type rounds: int
@param debug: the debug level to use in explanation (default is 1)
@type debug: int
"""
testMode = isinstance(debug,dict)
if testMode:
buf = StringIO.StringIO()
debug[offer] = buf
debug = 0
for agent in world.agents.values():
if agent.name == 'Freedonia':
free = agent
else:
sylv = agent
world.setState(None,'phase','offer')
if model == 'powell':
steps = 4
else:
assert model == 'slantchev'
steps = 3
if debug > 0:
world.printState(beliefs=True)
for t in range(rounds):
for step in range(steps):
assert len(world.state) == 1
phase = world.getState(None,'phase').expectation()
state = world.state.domain()[0]
if not world.terminated(state):
if t == 0 and phase == 'offer' and offer > 0:
# Force Freedonia to make low offer in first step
outcome = world.step({free.name: Action({'subject':free.name,'verb':'offer','object': sylv.name,'amount': offer})})
world.explain(outcome,debug)
else:
# Free to choose
outcome = world.step()
world.explain(outcome,debug)
if testMode:
if (t == 0 and step == 1) or (t == 1 and step == 0):
for entry in outcome:
world.explainAction(entry,buf,1)
world.state.select()
if not testMode and debug > 0:
world.printState(beliefs=True)
for agent in world.agents.values():
print agent.name,len(agent.models)
assert len(world.state) == 1
phase = world.getState(None,'phase').expectation()
if phase == 'offer':
# Looped around
break
def findThreshold(scenario,t,model='powell',position=0):
"""
Finds the threshold at which the agent will accept the offer"""
if model == 'slantchev':
# Find counteroffer in this state
actions = []
while len(actions) < 2:
world = World(scenario)
world.setState(None,'round',t)
world.setState('Freedonia','position',position)
entry = {}
scenarioSimulationUseCase(world,20,2,entry,model)
actions = entry[20].getvalue().split('\n')[:-1]
entry[20].close()
amount = int(actions[1].split('-')[-1])
print 'Time: %d, Position %d -> Offer %d%%' % (t,position,amount)
# Compute acceptance threshold
offers = [50]
index = 0
entry = {}
while True:
world = World(scenario)
world.setState(None,'round',t)
if model == 'slantchev':
world.setState('Freedonia','position',position)
scenarioSimulationUseCase(world,offers[index],1,entry,model)
actions = entry[offers[index]].getvalue().split('\n')[:-1]
entry[offers[index]].close()
entry[offers[index]] = actions[0].split('-')[1].split()[0]
if entry[offers[index]] == 'accept':
# Try a lower offer
if index > 0:
down = offers[index-1]
assert entry[down] != 'accept'
else:
down = 0
new = (offers[index]+down) / 2
if entry.has_key(new):
if entry[new] != 'accept':
new += 1
break
else:
offers.insert(index,new)
else:
assert entry[offers[index]] in ['reject','attack']
# Try a higher offer
try:
up = offers[index+1]
assert entry[up] == 'accept'
except IndexError:
up = 100
new = (offers[index]+up) / 2
if entry.has_key(new):
break
else:
offers.insert(index+1,new)
index += 1
return new
def play(world,debug=1):
"""
Modify Freedonia to play autonomously and simulate
"""
for agent in world.agents.values():
if agent.name == 'Freedonia':
free = agent
else:
sylv = agent
for amount in range(10,100,20):
action = Action({'verb': 'offer','object': sylv.name,'amount': amount})
free.addAction(action)
action = Action({'verb': 'offer','object': free.name,'amount': amount})
sylv.addAction(action)
for action in filterActions({'verb': 'offer'},free.actions | sylv.actions):
actor = world.agents[action['subject']]
if not actor.legal.has_key(action):
actor.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'offer'),
True: True, # Offers are legal in the offer phase
False: False})) # Offers are illegal in all other phases
model = world.getState(None,'model').domain()[0]
start = world.getState(free.name,'territory').expectation()
print model,start
scenarioSimulationUseCase(world,offer=0,rounds=15,debug=debug,model=model)
def findPolicies(args):
"""
Wrapper for finding agent offers and acceptance thresholds
"""
results = []
search = (30,40,1)
for t in range(args['rounds']):
entry = {}
if args['model'] == 'slantchev':
for position in range(1,10):
subresult = []
results.append(subresult)
subresult.append(entry)
thresh = findThreshold(args['output'],t,args['model'],position)
print 'Time %d, Position %d -> Accept if > %d%%' % (t,position,thresh)
else:
results.append(entry)
print 'Time %d -> Accept if > %d%%' %(t,findThreshold(args['output'],t))
if __name__ == '__main__':
# Grab command-line arguments
parser = ArgumentParser()
# Optional argument that sets the filename for the output file
parser.add_argument('-o',action='store',
dest='output',default='default',
help='scenario file [default: %(default)s]')
group = parser.add_argument_group('Creation Options','Control the parameters of the created scenario.')
# Optional argument that sets the theoretical model
group.add_argument('-m',action='store',
dest='model',choices=['powell','slantchev'],default='powell',
help='theoretical model for the game [default: %(default)s]')
# Optional argument that sets the cost of battle to Freedonia
group.add_argument('-f',action='store',
dest='fcost',type=int,default=2000,
help='cost of battle to Freedonia [default: %(default)s]')
# Optional argument that sets the cost of battle to Sylvania
group.add_argument('-s',action='store',
dest='scost',type=int,default=1000,
help='cost of battle to enemy [default: %(default)s]')
# Optional argument that sets the initial amount of territory owned by Freedonia
group.add_argument('-i','--initial',action='store',
dest='initial',type=int,default=13,
help='Freedonia\'s initial territory percentage [default: %(default)s]')
# Optional argument that sets Freedonia's initial military positional advantage
group.add_argument('-p','--position',action='store',
dest='position',type=int,default=3,
help='Freedonia\'s initial positional advantage [default: %(default)s]')
# Optional argument that sets the name of the enemy country
group.add_argument('-e',action='store',
dest='enemy',default='Sylvania',
help='Name of the enemy country [default: %(default)s]')
# Optional argument that sets the name of the disputed region
group.add_argument('--region',action='store',
dest='region',default='Trentino',
help='Name of the region under dispute [default: %(default)s]')
# Optional argument that sets the maximum number of rounds to play
group.add_argument('-r',action='store',
dest='rounds',type=int,default=15,
help='Maximum number of rounds to play [default: %(default)s]')
# Optional argument that sets Freedonia's initial troops
group.add_argument('--freedonia-troops',action='store',
dest='ftroops',type=int,default=40000,
help='number of Freedonia troops [default: %(default)s]')
# Optional argument that sets Sylvania's initial troops
group.add_argument('--enemy-troops',action='store',
dest='stroops',type=int,default=30000,
help='number of enemy troops [default: %(default)s]')
# Optional argument that determines whether to generate models for Web platform
group.add_argument('-w','--web',action='store_true',
dest='web',default=False,
help='generate Web version if set [default: %(default)s]')
group = parser.add_argument_group('Algorithm Options','Control the algorithms to apply to the agents.')
# Optional argument that determines whether to use value iteration to create Freedonia's policy
group.add_argument('-c','--compiled',action='store_true',
dest='compiled',default=False,
help='use value iteration for Freedonia [default: %(default)s]')
# Optional argument that determines whether to use PWL compilation of Freedonia's policy
group.add_argument('--fitting',action='store_true',
dest='fitting',default=False,
help='use PWL compilation and fitting for Freedonia [default: %(default)s]')
group = parser.add_argument_group('Simulation Options','Control the simulation of the created scenario.')
# Optional argument that sets the level of explanations when running the simulation
group.add_argument('-d',action='store',
dest='debug',type=int,default=1,
help='level of explanation detail [default: %(default)s]')
# Optional argument that sets the initial offer that Freedonia will make
group.add_argument('-a',action='store',
dest='amount',type=int,default=0,
help='Freedonia\'s first offer amount')
# Optional argument that sets the number of time steps to simulate
group.add_argument('-t','--time',action='store',
dest='time',type=int,default=1,
help='number of time steps to simulate [default: %(default)s]')
group = parser.add_argument_group('Creation Options','Control the parameters of the created scenario.')
args = vars(parser.parse_args())
world = scenarioCreationUseCase(args['enemy'],maxRounds=args['rounds'],model=args['model'],
web=args['web'])
# Initialize state values based on command-line arguments
world.agents['Freedonia'].setState('troops',args['ftroops'])
world.agents['Freedonia'].setState('territory',args['initial'])
world.agents['Freedonia'].setState('position',args['position'])
world.agents['Freedonia'].setState('cost',args['fcost'])
world.agents[args['enemy']].setState('troops',args['stroops'])
world.agents[args['enemy']].setState('cost',args['scost'])
if args['compiled']:
compileWorld(world)
if args['fitting']:
fitWorld(world)
# Create configuration file
config = SafeConfigParser()
# Specify game options for web interface
config.add_section('Game')
config.set('Game','rounds','%d' % (args['rounds']))
config.set('Game','user','Freedonia')
config.set('Game','agent',args['enemy'])
config.set('Game','region',args['region'])
if args['model'] == 'powell':
# Battle is optional under Powell
config.set('Game','battle','optional')
elif args['model'] == 'slantchev':
# Battle is mandatory under Slantchev
config.set('Game','battle','mandatory')
# Specify which state features are visible in web interface
config.add_section('Visible')
features = ['territory','troops']
if args['model'] == 'slantchev':
features.append('position')
for feature in features:
config.set('Visible',feature,'yes')
# Specify descriptions of actions for web interface
config.add_section('Actions')
config.set('Actions','offer','Propose treaty where %s gets <action:amount>%%%% of total disputed territory' % (args['enemy']))
config.set('Actions','attack','Attack %s' % (args['enemy']))
config.set('Actions','accept offer','Accept offer of <Freedonia:offered>%% of total disputed territory')
config.set('Actions','reject offer','Reject offer of <Freedonia:offered>%% of total disputed territory')
config.set('Actions','continue','Continue to next round of negotiation without attacking')
config.set('Actions','%s offer' % (args['enemy']),'offer <action:amount>%%')
config.set('Actions','%s accept offer' % (args['enemy']),
'Accept offer of <%s:offered>%%%% of total disputed territory' % (args['enemy']))
config.set('Actions','%s reject offer' % (args['enemy']),
'Reject offer of <%s:offered>%%%% of total disputed territory' % (args['enemy']))
# Specify what changes are displayed
config.add_section('Change')
config.set('Change','troops','yes')
if args['model'] == 'slantchev':
config.set('Change','position','yes')
# Specify links
config.add_section('Links')
config.set('Links','survey','http://www.curiouslab.com/clsurvey/index.php?sid=39345&lang=en')
config.set('Links','scenarios','8839,1308,2266,5538')
f = open('%s.cfg' % (args['output']),'w')
config.write(f)
f.close()
# Save scenario to compressed XML file
world.save(args['output'])
# Test saved scenario
world = World(args['output'])
scenarioSimulationUseCase(world,args['amount'],args['time'],args['debug'],args['model'])
# findPolicies(args)
# world.printState(world.agents[args['enemy']].getBelief(world.state.domain()[0],'false'))
| 311 | 0 | 23 |
eb985f55700f2a265775dd46a40eb883b614da8d | 9,835 | py | Python | dysart/messages/messages.py | mcncm/Dysart | 35af07816be7910d6d178d8aa53a291bfbc8d5f6 | [
"MIT"
] | null | null | null | dysart/messages/messages.py | mcncm/Dysart | 35af07816be7910d6d178d8aa53a291bfbc8d5f6 | [
"MIT"
] | 8 | 2018-11-29T18:53:20.000Z | 2019-04-27T18:09:09.000Z | dysart/messages/messages.py | mcncm/Dysart | 35af07816be7910d6d178d8aa53a291bfbc8d5f6 | [
"MIT"
] | 1 | 2019-07-01T17:06:10.000Z | 2019-07-01T17:06:10.000Z | """
Author: mcncm, 2019
Standard output messages for displaying hierarchically-organized data such as
recursively-called status lines.
"""
import os
import sys
import datetime as dt
from functools import wraps
import getpass
import inspect
from io import StringIO
import logging
import platform
import textwrap
import toplevel.conf as conf
from dysart.messages.errors import DysartError
DEFAULT_COL = 48
TAB = ' ' * 4
class Bcolor:
"""
Enum for colored printing
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
ITALIC = '\033[3m'
UNDERLINE = '\033[4m'
def cstr_ansi(s: str, status: str = 'normal') -> str:
"""
Wrap a string with ANSI color annotations
TODO there's a package for this; you can rip this out.
"""
if platform.system() == 'Windows':
return s # ANSI colors unsupported on Windows
if status == 'ok':
return Bcolor.OKGREEN + s + Bcolor.ENDC
elif status == 'fail':
return Bcolor.FAIL + s + Bcolor.ENDC
elif status == 'warn':
return Bcolor.WARNING + s + Bcolor.ENDC
elif status == 'bold':
return Bcolor.BOLD + s + Bcolor.ENDC
elif status == 'italic':
return Bcolor.ITALIC + s + Bcolor.ENDC
elif status == 'underline':
return Bcolor.UNDERLINE + s + Bcolor.ENDC
else:
return s
def cstr_slack(s: str, status: str = 'normal') -> str:
"""
    Wrap a string with Slack-style markdown annotations
TODO there's a package for this; you can rip this out.
"""
if status == 'bold':
return '*' + s + '*'
elif status == 'italic':
return '_' + s + '_'
elif status == 'strikethrough':
return '~' + s + '~'
    elif status == 'underline':
        # ANSI escape codes carried over from cstr_ansi; Slack does not
        # render these, so this branch has no visible effect in Slack.
        return Bcolor.UNDERLINE + s + Bcolor.ENDC
elif status == 'code':
return '`' + s + '`'
elif status == 'codeblock':
return '```' + s + '```'
else:
return s
# This module-scoped function is used to decorate text with colors, bold and
# italics, and so on. By default it is set to a function using ANSI escape
# codes. Context managers within this module may contextually replace it with
# a different function.
#
# I'm not convinced that this is the best approach to this problem. If you
# happen to read this and have other ideas, let's talk.
cstr = cstr_ansi
class FormatContext:
"""
Todo: make this 100x less hacky
"""
cstrs = {
'slack': cstr_slack
    }
    def __init__(self, context: str):
        assert context in FormatContext.cstrs
        self.cstr = FormatContext.cstrs[context]
    def __enter__(self):
        global cstr
        self.old_cstr = cstr
        cstr = self.cstr
    def __exit__(self, *exc):
        global cstr
        cstr = self.old_cstr
def cprint(s: str, status: str = 'normal', **kwargs):
"""
    Print a string decorated by the module-level cstr formatting function
"""
print(cstr(s, status), **kwargs)
def msg1(message: str, level=0, end="\n"):
"""
Print a formatted message to stdout.
Accepts an optional level parameter, which is useful when you might wish
to log a stack trace.
"""
prompt = '=> '
indent = ' '
output = level * indent + prompt + message
print(output, end=end)
def msg2(message: str, level=0, end="\n"):
"""
Print a formatted message to stdout.
Accepts an optional level parameter, which is useful when you might wish
to log a stack trace.
"""
prompt = '-> '
indent = ' '
output = level * indent + prompt + message
print(output, end=end)
def write_log(message: str):
"""
Write a message to a log file with date and time information.
"""
logging.info(message)
def logged(stdout=True, message='log event', **kwargs):
"""
Decorator for handling log messages. By default, writes to a default log
file in the debug_data database directory, and prints output to stdout.
    Passes the level parameter of the decorated function through to the
    message functions to control indentation.
"""
# set terminator for log message
term = "\n"
if 'end' in kwargs:
term = kwargs['end']
    def decorator(fn):
        @wraps(fn)
        def wrapped(*args_inner, **kwargs_inner):
            if stdout:
                if 'level' in kwargs_inner:
                    lvl = kwargs_inner['level']
                else:
                    lvl = 0
                msg1(message, level=lvl, end=term)
            # Check if this was called as a method of an object and, if so,
            # intercept the message to reflect this.
            # TODO: this could be done much better with a log-entry object that
            # receives attributes like 'caller', etc., and is then formatted
            # independently.
            msg_prefix = ''
            spec = inspect.getargspec(fn)
            if spec.args and spec.args[0] == 'self':
                # TODO: note that this isn't really airtight. It is not a rule
                # of the syntax that argument 0 must be called 'self' for a
                # class method.
                caller = args_inner[0]
                msg_prefix = caller.name + ' | '
            # write log message
            write_log(msg_prefix + message)
            # call the original function
            return_value = fn(*args_inner, **kwargs_inner)
            # post-call operations
            # ...
            # finally, return whatever fn would have returned!
            return return_value
        return wrapped
    return decorator
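# A usage sketch (names are illustrative only): decorating a method so each
# call is echoed to stdout and appended to the configured log. The owning
# object is expected to have a 'name' attribute.
#   @logged(message='refreshing measurement')
#   def refresh(self, level=0):
#       ...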
def configure_logging(logfile=''):
"""
Set up the logging module to write to the correct logfile, etc.
"""
if logfile == '' or logfile is None:
# Set the log output to the null file. This should actually be cross-
# platform, i.e. equal to '/dev/null' on unix systems and 'NULL' on
# windows.
logfile = os.devnull
# TODO: I should really take advantage of some of the more advanced
# features of the logging module.
user = getpass.getuser()
log_format = '%(asctime)s | ' + user + " | %(message)s"
date_format = '%m/%d/%Y %I:%M:%S'
logging.basicConfig(format=log_format, filename=logfile,
datefmt=date_format, level='INFO')
def tree(obj, get_deps: callable, pipe='│', dash='─', tee='├',
elbow='└', indent=' ' * 3, prefix='') -> str:
"""Takes an object and a closure that is assumed to return an iterable of
    dependent objects of the same type; produces a tree diagram drawn with
    box-drawing characters.
"""
s = str(obj)
deps = list(get_deps(obj))
# special case for empty dependents: no pipes
if not deps:
        # indent continuation lines of multi-line labels under the prefix
        s = ('\n' + prefix).join(s.split('\n'))
return s
# otherwise, dependents are nonempty: pipe to them
s = (prefix + '\n' + pipe).join(s.split('\n'))
s += '\n'
for i, dep in enumerate(deps):
if i == len(deps) - 1:
leader = elbow + dash * len(indent)
else:
leader = tee + dash * len(indent)
s += prefix + leader
new_prefix = pipe + indent if i != len(deps) - 1 else ' ' + indent
subtree = tree(dep, get_deps, prefix=new_prefix)
s += ('\n' + new_prefix).join(subtree.split('\n'))
return s
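# A usage sketch with a hypothetical node type (purely illustrative):
#   class Node:
#       def __init__(self, name, deps=()):
#           self.name, self.deps = name, list(deps)
#       def __str__(self):
#           return self.name
#   root = Node('fridge', [Node('qubit'), Node('readout', [Node('vna')])])
#   print(tree(root, lambda n: n.deps))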
def pprint_func(name: str, doc: str) -> None:
"""
    TODO real docstring for pprint_func
Takes a name and docstring of a function and formats and pretty-prints them.
"""
if doc is None:
return
# Number of columns in the formatted docscring
status_col = int(conf.config.get('STATUS_COL') or DEFAULT_COL)
# Prepare the docstring: fix up whitespace for display
doc = ' '.join(doc.strip().split())
# Prepare the docstring: wrap it and indent it
doc = '\t' + '\n\t'.join(textwrap.wrap(doc, status_col))
# Finally, print the result
print(cstr(name, status='bold') + '\n' + cstr(doc, status='italic') + '\n')
class StatusMessage:
"""
A simple context manager for printing informative status messages about
ongoing administration tasks.
TODO: document parameters, etc.
"""
def __enter__(self):
"""Prints a message describing the action taken and redirects io"""
cprint(self.infostr.ljust(self.num_cols).capitalize(), status='normal',
end='', flush=True)
if self.__capture_io:
sys.stdout = self.stdout_buff = StringIO()
sys.stderr = self.stderr_buff = StringIO()
def __exit__(self, exc_type, exc_value, traceback):
"""Prints the terminating status string and restores io"""
if exc_type is None:
cprint(self.donestr, status='ok', file=self.__old_stdout)
else:
status = 'fail'
failstr = self.failstr
if isinstance(exc_value, DysartError):
status = exc_value.status
failstr = exc_value.message
cprint(failstr, status, file=self.__old_stdout)
if 'VERBOSE_MESSAGES' in conf.config:
print(exc_value)
if self.__capture_io:
sys.stdout, sys.stderr = self.__old_stdout, self.__old_stderr
sys.stdout.write(self.stdout_buff.getvalue())
sys.stderr.write(self.stderr_buff.getvalue())
        return True  # suppress the exception; failure is already reported on the status line
| 31.321656 | 80 | 0.597458 | """
Author: mcncm, 2019
Standard output messages for displaying hierarchically-organized data such as
recursively-called status lines.
"""
import os
import sys
import datetime as dt
from functools import wraps
import getpass
import inspect
from io import StringIO
import logging
import platform
import textwrap
import toplevel.conf as conf
from dysart.messages.errors import DysartError
DEFAULT_COL = 48
TAB = ' ' * 4
class Bcolor:
"""
Enum for colored printing
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
ITALIC = '\033[3m'
UNDERLINE = '\033[4m'
def cstr_ansi(s: str, status: str = 'normal') -> str:
"""
Wrap a string with ANSI color annotations
TODO there's a package for this; you can rip this out.
"""
if platform.system() == 'Windows':
return s # ANSI colors unsupported on Windows
if status == 'ok':
return Bcolor.OKGREEN + s + Bcolor.ENDC
elif status == 'fail':
return Bcolor.FAIL + s + Bcolor.ENDC
elif status == 'warn':
return Bcolor.WARNING + s + Bcolor.ENDC
elif status == 'bold':
return Bcolor.BOLD + s + Bcolor.ENDC
elif status == 'italic':
return Bcolor.ITALIC + s + Bcolor.ENDC
elif status == 'underline':
return Bcolor.UNDERLINE + s + Bcolor.ENDC
else:
return s
def cstr_slack(s: str, status: str = 'normal') -> str:
"""
    Wrap a string with Slack-style markdown annotations
TODO there's a package for this; you can rip this out.
"""
if status == 'bold':
return '*' + s + '*'
elif status == 'italic':
return '_' + s + '_'
elif status == 'strikethrough':
return '~' + s + '~'
    elif status == 'underline':
        # ANSI escape codes carried over from cstr_ansi; Slack does not
        # render these, so this branch has no visible effect in Slack.
        return Bcolor.UNDERLINE + s + Bcolor.ENDC
elif status == 'code':
return '`' + s + '`'
elif status == 'codeblock':
return '```' + s + '```'
else:
return s
# This module-scoped function is used to decorate text with colors, bold and
# italics, and so on. By default it is set to a function using ANSI escape
# codes. Context managers within this module may contextually replace it with
# a different function.
#
# I'm not convinced that this is the best approach to this problem. If you
# happen to read this and have other ideas, let's talk.
cstr = cstr_ansi
class FormatContext:
"""
Todo: make this 100x less hacky
"""
cstrs = {
'slack': cstr_slack
}
def __init__(self, context: str):
assert context in FormatContext.cstrs
self.cstr = FormatContext.cstrs[context]
def __enter__(self):
global cstr
self.old_cstr = cstr
cstr = self.cstr
def __exit__(self, *exc):
global cstr
cstr = self.old_cstr
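# Minimal usage sketch (the demo function is illustrative only): temporarily
# switch the active markup, assuming 'slack' stays registered in cstrs above.
def _format_context_demo():
    with FormatContext('slack'):
        cprint('build finished', status='bold')  # emits *build finished*
    cprint('build finished', status='bold')      # back to ANSI bold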
def cprint(s: str, status: str = 'normal', **kwargs):
"""
Print a string with ANSI color annotations
"""
print(cstr(s, status), **kwargs)
def msg1(message: str, level=0, end="\n"):
"""
Print a formatted message to stdout.
Accepts an optional level parameter, which is useful when you might wish
to log a stack trace.
"""
prompt = '=> '
indent = ' '
output = level * indent + prompt + message
print(output, end=end)
def msg2(message: str, level=0, end="\n"):
"""
Print a formatted message to stdout.
Accepts an optional level parameter, which is useful when you might wish
to log a stack trace.
"""
prompt = '-> '
indent = ' '
output = level * indent + prompt + message
print(output, end=end)
def write_log(message: str):
"""
Write a message to a log file with date and time information.
"""
logging.info(message)
def logged(stdout=True, message='log event', **kwargs):
"""
Decorator for handling log messages. By default, writes to a default log
file in the debug_data database directory, and prints output to stdout.
    Passes the level parameter of the decorated function through to the
    message functions to control indentation.
"""
# set terminator for log message
term = "\n"
if 'end' in kwargs:
term = kwargs['end']
def decorator(fn):
@wraps(fn)
def wrapped(*args_inner, **kwargs_inner):
if stdout:
if 'level' in kwargs_inner:
lvl = kwargs_inner['level']
else:
lvl = 0
msg1(message, level=lvl, end=term)
# Check if this was called as a method of an object and, if so,
# intercept the message to reflect this.
            # TODO: this could be done much better with a log-entry object that
# receives attributes like 'caller', etc., and is then formatted
# independently.
msg_prefix = ''
            spec = inspect.getfullargspec(fn)
if spec.args and spec.args[0] == 'self':
# TODO: note that this isn't really airtight. It is not a rule
# of the syntax that argument 0 must be called 'self' for a
# class method.
caller = args_inner[0]
msg_prefix = caller.name + ' | '
# write log message
write_log(msg_prefix + message)
# call the original function
return_value = fn(*args_inner, **kwargs_inner)
# post-call operations
# ...
# finally, return whatever fn would have returned!
return return_value
return wrapped
return decorator
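# Minimal usage sketch (the class is illustrative only): `logged` reads a
# `name` attribute off `self` to build the "name | message" log prefix.
class _LoggedDemo:
    name = 'demo'

    @logged(message='doing demo work')
    def work(self, level=0):
        return 42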
def configure_logging(logfile=''):
"""
Set up the logging module to write to the correct logfile, etc.
"""
if logfile == '' or logfile is None:
# Set the log output to the null file. This should actually be cross-
# platform, i.e. equal to '/dev/null' on unix systems and 'NULL' on
# windows.
logfile = os.devnull
# TODO: I should really take advantage of some of the more advanced
# features of the logging module.
user = getpass.getuser()
log_format = '%(asctime)s | ' + user + " | %(message)s"
date_format = '%m/%d/%Y %I:%M:%S'
logging.basicConfig(format=log_format, filename=logfile,
datefmt=date_format, level='INFO')
def tree(obj, get_deps: callable, pipe='│', dash='─', tee='├',
elbow='└', indent=' ' * 3, prefix='') -> str:
"""Takes an object and a closure that is assumed to return an iterable of
dependent objects of the same type; produces an ascii tree diagram.
"""
s = str(obj)
deps = list(get_deps(obj))
# special case for empty dependents: no pipes
if not deps:
        s = ('\n' + prefix).join(s.split('\n'))
return s
# otherwise, dependents are nonempty: pipe to them
s = (prefix + '\n' + pipe).join(s.split('\n'))
s += '\n'
for i, dep in enumerate(deps):
if i == len(deps) - 1:
leader = elbow + dash * len(indent)
else:
leader = tee + dash * len(indent)
s += prefix + leader
new_prefix = pipe + indent if i != len(deps) - 1 else ' ' + indent
subtree = tree(dep, get_deps, prefix=new_prefix)
s += ('\n' + new_prefix).join(subtree.split('\n'))
return s
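# Minimal sketch of `tree` on a plain dependency dict (names illustrative):
def _tree_demo():
    deps = {'root': ['a', 'b'], 'a': ['c'], 'b': [], 'c': []}
    print(tree('root', lambda node: deps.get(node, [])))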
def pprint_func(name: str, doc: str) -> None:
"""
    TODO real docstring for pprint_func
Takes a name and docstring of a function and formats and pretty-prints them.
"""
if doc is None:
return
    # Number of columns in the formatted docstring
status_col = int(conf.config.get('STATUS_COL') or DEFAULT_COL)
# Prepare the docstring: fix up whitespace for display
doc = ' '.join(doc.strip().split())
# Prepare the docstring: wrap it and indent it
doc = '\t' + '\n\t'.join(textwrap.wrap(doc, status_col))
# Finally, print the result
print(cstr(name, status='bold') + '\n' + cstr(doc, status='italic') + '\n')
class StatusMessage:
"""
A simple context manager for printing informative status messages about
ongoing administration tasks.
    infostr/donestr/failstr set the status text; capture_io buffers stdout
    and stderr until the task completes.
"""
def __init__(self, infostr: str, donestr: str = 'done.',
failstr: str = 'failed.', capture_io: bool = False):
self.infostr = infostr
self.donestr = donestr
        self.failstr = failstr
self.num_cols = max(int(conf.config.get('STATUS_COL') or DEFAULT_COL),
len(infostr))
self.status = 'ok'
self.__capture_io = capture_io
self.__old_stdout, self.__old_stderr = sys.stdout, sys.stderr
def __enter__(self):
"""Prints a message describing the action taken and redirects io"""
cprint(self.infostr.ljust(self.num_cols).capitalize(), status='normal',
end='', flush=True)
if self.__capture_io:
sys.stdout = self.stdout_buff = StringIO()
sys.stderr = self.stderr_buff = StringIO()
def __exit__(self, exc_type, exc_value, traceback):
"""Prints the terminating status string and restores io"""
if exc_type is None:
cprint(self.donestr, status='ok', file=self.__old_stdout)
else:
status = 'fail'
failstr = self.failstr
if isinstance(exc_value, DysartError):
status = exc_value.status
failstr = exc_value.message
cprint(failstr, status, file=self.__old_stdout)
if 'VERBOSE_MESSAGES' in conf.config:
print(exc_value)
if self.__capture_io:
sys.stdout, sys.stderr = self.__old_stdout, self.__old_stderr
sys.stdout.write(self.stdout_buff.getvalue())
sys.stderr.write(self.stderr_buff.getvalue())
return True
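# Minimal usage sketch: the status line reads "Restarting server   done."
# on success, or "failed." (plus any DysartError message) on an exception.
def _status_message_demo():
    with StatusMessage('restarting server'):
        pass  # do the actual work here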
| 2,029 | 0 | 155 |
866be4f8d7b5d8776a303296eb74a792deda64dc | 2,208 | py | Python | udiskie/depend.py | fbriere/udiskie | d7b95ae94cdce5d175e8a49fab1c94f7c8d75961 | [
"MIT"
] | null | null | null | udiskie/depend.py | fbriere/udiskie | d7b95ae94cdce5d175e8a49fab1c94f7c8d75961 | [
"MIT"
] | 1 | 2020-11-16T10:31:57.000Z | 2020-11-16T10:31:57.000Z | udiskie/depend.py | dxcSithLord/udiskie | 286700e9eb50ffd5e1a8f2c93bdf66e99901f672 | [
"MIT"
] | null | null | null | """
Make sure that the correct versions of gobject introspection dependencies
are installed.
"""
import os
import logging
from gi import require_version
from .locale import _
require_version('Gio', '2.0')
require_version('GLib', '2.0')
_in_X = bool(os.environ.get('DISPLAY'))
_has_Gtk = (3 if check_version('Gtk', '3.0') else
2 if check_version('Gtk', '2.0') else
0)
_has_Notify = check_version('Notify', '0.7')
_has_AppIndicator3 = check_version('AppIndicator3', '0.1')
def require_Gtk(min_version=2):
"""
Make sure Gtk is properly initialized.
:raises RuntimeError: if Gtk can not be properly initialized
"""
if not _in_X:
raise RuntimeError('Not in X session.')
if _has_Gtk < min_version:
raise RuntimeError('Module gi.repository.Gtk not available!')
if _has_Gtk == 2:
logging.getLogger(__name__).warn(
_("Missing runtime dependency GTK 3. Falling back to GTK 2 "
"for password prompt"))
from gi.repository import Gtk
# if we attempt to create any GUI elements with no X server running the
# program will just crash, so let's make a way to catch this case:
if not Gtk.init_check(None)[0]:
raise RuntimeError(_("X server not connected!"))
return Gtk
| 25.976471 | 76 | 0.695652 | """
Make sure that the correct versions of gobject introspection dependencies
are installed.
"""
import os
import logging
from gi import require_version
from .locale import _
require_version('Gio', '2.0')
require_version('GLib', '2.0')
def check_call(exc_type, func, *args):
try:
func(*args)
return True
except exc_type:
return False
def check_version(package, version):
return check_call(ValueError, require_version, package, version)
_in_X = bool(os.environ.get('DISPLAY'))
_has_Gtk = (3 if check_version('Gtk', '3.0') else
2 if check_version('Gtk', '2.0') else
0)
_has_Notify = check_version('Notify', '0.7')
_has_AppIndicator3 = check_version('AppIndicator3', '0.1')
def require_Gtk(min_version=2):
"""
Make sure Gtk is properly initialized.
:raises RuntimeError: if Gtk can not be properly initialized
"""
if not _in_X:
raise RuntimeError('Not in X session.')
if _has_Gtk < min_version:
raise RuntimeError('Module gi.repository.Gtk not available!')
if _has_Gtk == 2:
logging.getLogger(__name__).warn(
_("Missing runtime dependency GTK 3. Falling back to GTK 2 "
"for password prompt"))
from gi.repository import Gtk
# if we attempt to create any GUI elements with no X server running the
# program will just crash, so let's make a way to catch this case:
if not Gtk.init_check(None)[0]:
raise RuntimeError(_("X server not connected!"))
return Gtk
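# Minimal usage sketch: degrade gracefully when no usable GUI stack exists.
def _gtk_fallback_demo():
    try:
        return require_Gtk(min_version=3)
    except RuntimeError:
        return None  # e.g. headless session or GTK missing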
def require_Notify():
if not _has_Notify:
raise RuntimeError('Module gi.repository.Notify not available!')
from gi.repository import Notify
return Notify
def require_AppIndicator3():
if not _has_AppIndicator3:
        raise RuntimeError('Module gi.repository.AppIndicator3 not available!')
from gi.repository import AppIndicator3
return AppIndicator3
def has_Notify():
return check_call((RuntimeError, ImportError), require_Notify)
def has_Gtk(min_version=2):
return check_call((RuntimeError, ImportError), require_Gtk, min_version)
def has_AppIndicator3():
return check_call((RuntimeError, ImportError), require_AppIndicator3)
| 747 | 0 | 161 |
1bf710d88ae1cd28b88adf24c063c44a5aa069d0 | 280 | py | Python | backend/config/settings/local.py | naaligai/naaligai | 8e3e7bf7f049401701a690cda8cb5cafdc99030a | [
"MIT"
] | null | null | null | backend/config/settings/local.py | naaligai/naaligai | 8e3e7bf7f049401701a690cda8cb5cafdc99030a | [
"MIT"
] | null | null | null | backend/config/settings/local.py | naaligai/naaligai | 8e3e7bf7f049401701a690cda8cb5cafdc99030a | [
"MIT"
] | null | null | null | import dj_database_url
from decouple import config
from .base import * # noqa
SECRET_KEY = 'django-insecure-ibi@xb(j2k@r&%*(*&(^%$%^$%^^&)))5_niq9erpkv%*!&!m9hp'
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': dj_database_url.config(default=config('SQLITE_DB'))
}
| 23.333333 | 83 | 0.685714 | import dj_database_url
from decouple import config
from .base import * # noqa
SECRET_KEY = 'django-insecure-ibi@xb(j2k@r&%*(*&(^%$%^$%^^&)))5_niq9erpkv%*!&!m9hp'
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': dj_database_url.config(default=config('SQLITE_DB'))
}
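# A typical value for the environment variable (illustrative):
#   SQLITE_DB=sqlite:///db.sqlite3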
| 0 | 0 | 0 |
c264e283773c2df0a3f863fee30309a907451cc6 | 884 | py | Python | AoC_2020_Python/day15.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | AoC_2020_Python/day15.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | AoC_2020_Python/day15.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | # nlantau, 2020-12-15
import copy
import os
# STARTING_NUMBERS = [2, 0, 6, 12, 1, 3]
STARTING_NUMBERS = [0, 3, 6]
if __name__ == "__main__":
main()
| 22.666667 | 49 | 0.524887 | # nlantau, 2020-12-15
import copy
import os
# STARTING_NUMBERS = [2, 0, 6, 12, 1, 3]
STARTING_NUMBERS = [0, 3, 6]
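# AoC 2020 day 15 memory game: each turn, say 0 if the previous number was
# new, otherwise say how many turns ago it was last spoken. The range bound
# of 8 below only plays the sample sequence out to its ninth number.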
def memory_builder():
mem_list = copy.deepcopy(STARTING_NUMBERS)
for i in range(len(mem_list) - 1, 8, 1):
num = 0
prev_index = i + 1
index = 0
spoken = mem_list[i]
print("prev, spoken", prev_index, spoken)
if spoken in mem_list[:-1]:
for k in range(len(mem_list[:-1])):
if mem_list[k] == spoken:
index = k + 1
print("index: ",index)
num = prev_index - index
mem_list.append(num)
else:
mem_list.append(0)
print(mem_list)
print(mem_list[len(mem_list) - 1])
def main():
os.system("clear")
print(f"{' BEGIN ':-^40}\n")
memory_builder()
if __name__ == "__main__":
main()
| 681 | 0 | 46 |
485844402d69c2a5e93616ff5c7fdbfc6e48fae6 | 918 | py | Python | item_probability/weapon_chances_values/polearm_chances.py | averytorres/WazHack-Clone | e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda | [
"Unlicense"
] | 1 | 2019-06-21T17:13:35.000Z | 2019-06-21T17:13:35.000Z | item_probability/weapon_chances_values/polearm_chances.py | averytorres/WazHack-Clone | e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda | [
"Unlicense"
] | 18 | 2019-06-25T00:48:11.000Z | 2019-07-11T17:52:24.000Z | item_probability/weapon_chances_values/polearm_chances.py | averytorres/WazHack-Clone | e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda | [
"Unlicense"
] | 1 | 2019-06-21T17:08:23.000Z | 2019-06-21T17:08:23.000Z | from random_utils import from_dungeon_level
| 48.315789 | 95 | 0.595861 | from random_utils import from_dungeon_level
def get_polearm_chances(game_map):
wp_en = '-weapon'
chances = {
'Staff'+wp_en : from_dungeon_level([[1, 1]], game_map.dungeon_level),
'Staff of Light'+wp_en : from_dungeon_level([[2, 2]], game_map.dungeon_level),
'Staff of Darkness' + wp_en : from_dungeon_level([[3, 3]], game_map.dungeon_level),
'Spear' + wp_en : from_dungeon_level([[4, 4]], game_map.dungeon_level),
'Staff of Life' + wp_en : from_dungeon_level([[5, 5]], game_map.dungeon_level),
'Staff of Death' + wp_en : from_dungeon_level([[6, 6]], game_map.dungeon_level),
'Red Hot Poker' + wp_en : from_dungeon_level([[7, 7]], game_map.dungeon_level),
'Thyrsus' + wp_en : from_dungeon_level([[8, 9]], game_map.dungeon_level),
}
return chances
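# Note: from_dungeon_level takes [chance, min_level] pairs, so each polearm
# only starts spawning once game_map.dungeon_level reaches its listed level
# (assuming the helper's usual roguelike-tutorial semantics).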
| 850 | 0 | 23 |
efe7d8868b0e3081326fc3777f919fed2a1331e2 | 504 | py | Python | Python/Warmup_2/last2.py | RCoon/CodingBat | c5004c03e668c62751dc7f13154c79e25ea34339 | [
"MIT"
] | 1 | 2015-11-06T02:26:50.000Z | 2015-11-06T02:26:50.000Z | Python/Warmup_2/last2.py | RCoon/CodingBat | c5004c03e668c62751dc7f13154c79e25ea34339 | [
"MIT"
] | null | null | null | Python/Warmup_2/last2.py | RCoon/CodingBat | c5004c03e668c62751dc7f13154c79e25ea34339 | [
"MIT"
] | null | null | null | # Given a string, return the count of the number of times that a substring
# length 2 appears in the string and also as the last 2 chars of the string,
# so "hixxxhi" yields 1 (we won't count the end substring).
# last2('hixxhi') --> 1
# last2('xaxxaxaxx') --> 1
# last2('axxxaaxx') --> 2
print(last2('hixxhi'))
print(last2('xaxxaxaxx'))
print(last2('axxxaaxx'))
| 25.2 | 76 | 0.642857 | # Given a string, return the count of the number of times that a substring
# length 2 appears in the string and also as the last 2 chars of the string,
# so "hixxxhi" yields 1 (we won't count the end substring).
# last2('hixxhi') --> 1
# last2('xaxxaxaxx') --> 1
# last2('axxxaaxx') --> 2
def last2(s):
    last = s[-2:]
    count = 0
    for i in range(len(s) - 2):
        if s[i:i+2] == last:
            count += 1
    return count
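# The loop range stops before the final two-char window, so the trailing
# substring itself is never counted.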
print(last2('hixxhi'))
print(last2('xaxxaxaxx'))
print(last2('axxxaaxx'))
| 115 | 0 | 23 |
4b639d10e09c20f70524740017f7d90f82be23d6 | 257 | py | Python | bot.py | lcarotenuto/XPensesBot | 49545da41a6f70b9fc686b406ced40ba47815b43 | [
"MIT"
] | null | null | null | bot.py | lcarotenuto/XPensesBot | 49545da41a6f70b9fc686b406ced40ba47815b43 | [
"MIT"
] | null | null | null | bot.py | lcarotenuto/XPensesBot | 49545da41a6f70b9fc686b406ced40ba47815b43 | [
"MIT"
] | null | null | null | from config import *
from gdocument import GDocument
if __name__ == '__main__':
main()
| 18.357143 | 71 | 0.712062 | from config import *
from gdocument import GDocument
def main():
spreadsheet = GDocument(spreadsheet_id=CONSTANTS['SPREADSHEET_ID'])
result = spreadsheet.write_expense("Nuovo Test", 15.5)
print(result)
if __name__ == '__main__':
main()
| 140 | 0 | 23 |
f095b15e36d512ee4a9c390ed703961526864b88 | 132 | py | Python | PyTrinamic/ic/TMC2225/TMC2225_register_variant.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 37 | 2019-01-13T11:08:45.000Z | 2022-03-25T07:18:15.000Z | PyTrinamic/ic/TMC2225/TMC2225_register_variant.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 56 | 2019-02-25T02:48:27.000Z | 2022-03-31T08:45:34.000Z | PyTrinamic/ic/TMC2225/TMC2225_register_variant.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 26 | 2019-01-14T05:20:16.000Z | 2022-03-08T13:27:35.000Z | '''
Created on 17.10.2019
@author: JM
'''
class TMC2225_register_variant:
" ===== TMC2225 register variants ===== "
"..." | 13.2 | 45 | 0.583333 | '''
Created on 17.10.2019
@author: JM
'''
class TMC2225_register_variant:
" ===== TMC2225 register variants ===== "
"..." | 0 | 0 | 0 |
f175dcd7e596b45282c790e9ec431fa77345c357 | 903 | py | Python | tests/test_integration.py | EMCain/python-demo-dates | d4d198cd916bd395ea89744af2057610eef64cb0 | [
"MIT"
] | null | null | null | tests/test_integration.py | EMCain/python-demo-dates | d4d198cd916bd395ea89744af2057610eef64cb0 | [
"MIT"
] | null | null | null | tests/test_integration.py | EMCain/python-demo-dates | d4d198cd916bd395ea89744af2057610eef64cb0 | [
"MIT"
] | null | null | null | import datetime
import unittest
import pytest
from src.info_string import this_year_info_string
| 25.8 | 79 | 0.688815 | import datetime
import unittest
import pytest
from src.info_string import this_year_info_string
class TestThisYearInfoString:
def test_this_year_info_string(self, mock_today):
info = this_year_info_string()
assert info == """Today is Tuesday, May 20, 2014
The first Sundays for 2014 are:
Sunday, January 5, 2014
Sunday, February 2, 2014
Sunday, March 2, 2014
Sunday, April 6, 2014
Sunday, May 4, 2014
Sunday, June 1, 2014
Sunday, July 6, 2014
Sunday, August 3, 2014
Sunday, September 7, 2014
Sunday, October 5, 2014
Sunday, November 2, 2014
Sunday, December 7, 2014
"""
@pytest.fixture
def mock_today(self, mocker):
class MyDate(datetime.date):
@classmethod
def today(cls, **kwargs):
return datetime.datetime(2014, 5, 20).date()
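        # Patch the date class seen by src.date_functions so today() is
        # pinned to 2014-05-20 for the assertion above.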
patched_date = mocker.patch("src.date_functions.datetime.date", MyDate) | 701 | 81 | 23 |
b366016944742e959dd7b14ac6a1c3948d1b0fd6 | 2,242 | py | Python | pyqt/qt_main.py | dldldlfma/py_tutorial | efb3ddb4d2fcd26e7086198fe42144d896623ed7 | [
"MIT"
] | null | null | null | pyqt/qt_main.py | dldldlfma/py_tutorial | efb3ddb4d2fcd26e7086198fe42144d896623ed7 | [
"MIT"
] | null | null | null | pyqt/qt_main.py | dldldlfma/py_tutorial | efb3ddb4d2fcd26e7086198fe42144d896623ed7 | [
"MIT"
] | 1 | 2019-10-08T09:07:38.000Z | 2019-10-08T09:07:38.000Z | import sys
import PyQt5
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
from pprint import pprint
import cv2 as cv
Calui ='./ui.ui'
if __name__ =='__main__':
app = QApplication(sys.argv)
main_dialog=MainDialog()
main_dialog.show()
    app.exec_()  # enter the event loop
| 27.012048 | 104 | 0.620874 | import sys
import PyQt5
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
from pprint import pprint
import cv2 as cv
Calui ='./ui.ui'
class MainDialog(QDialog):
def __init__(self):
QDialog.__init__(self, None)
uic.loadUi(Calui,self)
self.finder_Btn.clicked.connect(self.file_loader)
self.save_Btn.clicked.connect(self.file_save)
self.model = QStandardItemModel()
self.file_list = None
self.Binary_Btn.stateChanged.connect(self.check_state)
self.Median_Btn.stateChanged.connect(self.check_state)
self.median = 0
self.binary = 0
def check_state(self):
if(self.Binary_Btn.checkState()):
self.binary=1
else:
self.binary=0
if(self.Median_Btn.checkState()):
self.median=1
else:
self.median=0
def file_loader(self):
fname = QFileDialog.getOpenFileName(self)
self.file_position.setText(fname[0])
pixmap=QPixmap(fname[0])
pixmap = pixmap.scaledToWidth(512)
self.label.setPixmap(QPixmap(pixmap))
self.model.appendRow(QStandardItem(fname[0]))
self.listView.setModel(self.model)
self.file_list = fname[0]
def file_save(self):
save_position = QFileDialog.getExistingDirectory(self) #select save folder
self.save_position.setText(save_position)
img=None
if(self.binary):
img = cv.imread(self.file_list, 0)
else:
img = cv.imread(self.file_list, cv.IMREAD_COLOR)
if(self.median):
img = cv.medianBlur(img,5)
if(self.binary):
print(img.shape)
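            # Adaptive threshold: 11x11 Gaussian-weighted neighborhood,
            # constant C=5 subtracted from the weighted mean.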
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 5)
save_path = save_position + '/fix_' + self.file_list[self.file_list.find('.')-1:]
print(self.file_list)
cv.imwrite(save_path,img)
#cv.imshow('binary_img',img)
#cv.waitKey(0)
if __name__ =='__main__':
app = QApplication(sys.argv)
main_dialog=MainDialog()
main_dialog.show()
    app.exec_()  # enter the event loop
| 1,765 | 5 | 134 |
663e939a163b47bb7b77a1520c94016e8865a334 | 3,135 | py | Python | libfaceid/classifier.py | anhlbt/faceidsys | 630efe78830360565958621c80d247a6055c7cb4 | [
"MIT"
] | null | null | null | libfaceid/classifier.py | anhlbt/faceidsys | 630efe78830360565958621c80d247a6055c7cb4 | [
"MIT"
] | null | null | null | libfaceid/classifier.py | anhlbt/faceidsys | 630efe78830360565958621c80d247a6055c7cb4 | [
"MIT"
] | null | null | null | from enum import Enum
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline, Pipeline
| 44.15493 | 150 | 0.674003 | from enum import Enum
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline, Pipeline
class FaceClassifierModels(Enum):
NAIVE_BAYES = 0
LINEAR_SVM = 1
RBF_SVM = 2
NEAREST_NEIGHBORS = 3
DECISION_TREE = 4
RANDOM_FOREST = 5
NEURAL_NET = 6
ADABOOST = 7
QDA = 8
DEFAULT = LINEAR_SVM
class FaceClassifier:
def __init__(self, classifier=FaceClassifierModels.DEFAULT):
self._clf = None
if classifier.value == FaceClassifierModels.LINEAR_SVM.value:
self._clf = SVC(C=1.0, kernel="linear", probability=True)
elif classifier.value == FaceClassifierModels.NAIVE_BAYES.value:
self._clf = GaussianNB()
elif classifier.value == FaceClassifierModels.RBF_SVM.value:
pipe_svc = make_pipeline(StandardScaler(),SVC(random_state=1,probability=True))
param_range = [0.0001, 0.001, 0.01, 0.1,1.0, 10.0, 100.0, 1000.0]
param_grid = [{'svc__C': param_range,'svc__kernel': ['linear']},{'svc__C': param_range,'svc__gamma': param_range,'svc__kernel': ['rbf']}]
self._clf = GridSearchCV(estimator=pipe_svc,param_grid=param_grid,scoring='accuracy',cv=5,n_jobs=-1)
# self._clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
elif classifier.value == FaceClassifierModels.NEAREST_NEIGHBORS.value:
self._clf = KNeighborsClassifier(1)
elif classifier.value == FaceClassifierModels.DECISION_TREE.value:
self._clf = DecisionTreeClassifier(max_depth=5)
elif classifier.value == FaceClassifierModels.RANDOM_FOREST.value:
self._clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
elif classifier.value == FaceClassifierModels.NEURAL_NET.value:
# self._clf = MLPClassifier(alpha=1)
self._clf = MLPClassifier(solver='lbfgs', alpha=1e-2, hidden_layer_sizes=(512, 100), random_state=1)
elif classifier.value == FaceClassifierModels.ADABOOST.value:
self._clf = AdaBoostClassifier()
elif classifier.value == FaceClassifierModels.QDA.value:
self._clf = QuadraticDiscriminantAnalysis()
# print("classifier={}".format(FaceClassifierModels(classifier)))
print("classifier={}".format(self._clf))
def fit(self, embeddings, labels):
self._clf.fit(embeddings, labels)
def predict(self, vec):
return self._clf.predict_proba(vec)
def score(self, X, y):
return self._clf.score(X, y)
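# Minimal usage sketch (arrays are illustrative):
#   clf = FaceClassifier(FaceClassifierModels.LINEAR_SVM)
#   clf.fit(train_embeddings, train_labels)
#   probabilities = clf.predict(test_embeddings)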
| 2,089 | 303 | 170 |
1d2d252f14fe73a5873b331a3679030bf116e041 | 538 | py | Python | backend/recommends/api.py | Ladder-Climbers/mayne-code | d355cc56013db4ac2db50fddde3e9d203d965ab6 | [
"MIT"
] | null | null | null | backend/recommends/api.py | Ladder-Climbers/mayne-code | d355cc56013db4ac2db50fddde3e9d203d965ab6 | [
"MIT"
] | null | null | null | backend/recommends/api.py | Ladder-Climbers/mayne-code | d355cc56013db4ac2db50fddde3e9d203d965ab6 | [
"MIT"
] | null | null | null | from utils.api_tools import *
from recommends.douban_top250 import DoubanTop
class DoubanTopAPI(Resource):
"""
    Fetch Douban Books Top 250 data
"""
args_get = reqparse.RequestParser() \
        .add_argument("page", help="page number", type=int, required=False, location=["args", ])
@args_required_method(args_get)
| 29.888889 | 87 | 0.659851 | from utils.api_tools import *
from recommends.douban_top250 import DoubanTop
class DoubanTopAPI(Resource):
"""
    Fetch Douban Books Top 250 data
"""
args_get = reqparse.RequestParser() \
        .add_argument("page", help="page number", type=int, required=False, location=["args", ])
@args_required_method(args_get)
def get(self):
page = self.args_get.parse_args().get("page")
page = page if page is not None else 0
result = DoubanTop().get_page(page=page)
return make_result(data={'douban_top': result})
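# Illustrative request (the route depends on where this Resource is
# registered): a GET with ?page=2 returns that page of the Top 250 list.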
| 199 | 0 | 26 |
82beae2102ce4d483ab3149453053290c8cd4268 | 463 | py | Python | otx_epub/migrations/0005_auto_20161103_0228.py | NYULibraries/dlts-enm-tct-backend | 07455a660fb2cb8bc91a54f7f12d150923678157 | [
"Apache-2.0"
] | null | null | null | otx_epub/migrations/0005_auto_20161103_0228.py | NYULibraries/dlts-enm-tct-backend | 07455a660fb2cb8bc91a54f7f12d150923678157 | [
"Apache-2.0"
] | 2 | 2017-10-30T21:19:07.000Z | 2017-10-31T18:06:30.000Z | otx_epub/migrations/0005_auto_20161103_0228.py | NYULibraries/dlts-enm-tct-backend | 07455a660fb2cb8bc91a54f7f12d150923678157 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-03 06:28
from __future__ import unicode_literals
from django.db import migrations, models
| 22.047619 | 63 | 0.62203 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-03 06:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('otx_epub', '0004_auto_20161102_1547'),
]
operations = [
migrations.AlterField(
model_name='epub',
name='contents',
field=models.CharField(blank=True, max_length=255),
),
]
| 0 | 286 | 23 |
b991c836cd1efdb1cd46d20df8cd246babb4832d | 5,090 | py | Python | src/sv-pipeline/04_variant_resolution/scripts/merge_vcfs.py | talkowski-lab/gnomad-sv-v3-qc | db23760af7bc21a776e14f6ca1fbc213ff0ff9a1 | [
"BSD-3-Clause"
] | null | null | null | src/sv-pipeline/04_variant_resolution/scripts/merge_vcfs.py | talkowski-lab/gnomad-sv-v3-qc | db23760af7bc21a776e14f6ca1fbc213ff0ff9a1 | [
"BSD-3-Clause"
] | null | null | null | src/sv-pipeline/04_variant_resolution/scripts/merge_vcfs.py | talkowski-lab/gnomad-sv-v3-qc | db23760af7bc21a776e14f6ca1fbc213ff0ff9a1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2018 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
"""
import argparse
import heapq
from collections import deque
import pysam
import svtk.utils as svu
def records_match(record, other):
"""
Test if two records are same SV: check chromosome, position, stop, SVTYPE, SVLEN (for insertions),
STRANDS (for BNDS and INVs), and (if they exist) CHR2/END2 for multi-chromosomal events
"""
return (record.chrom == other.chrom and
record.pos == other.pos and
record.stop == other.stop and
record.info['SVTYPE'] == other.info['SVTYPE'] and
record.info['SVLEN'] == other.info['SVLEN'] and
record.info['STRANDS'] == other.info['STRANDS'] and
(('CHR2' not in record.info and 'CHR2' not in other.info) or ('CHR2' in record.info and 'CHR2' in other.info and record.info['CHR2'] == other.info['CHR2'])) and
(('END2' not in record.info and 'END2' not in other.info) or ('END2' in record.info and 'END2' in other.info and record.info['END2'] == other.info['END2'])))
def merge_key(record):
"""
Sort records by all fields that records_match will use to check for duplicates, in sequence,
so that all identical records according to records_match will be adjacent
"""
chr2 = record.info['CHR2'] if 'CHR2' in record.info else None
end2 = record.info['END2'] if 'END2' in record.info else None
return (record.pos, record.stop, record.info['SVTYPE'], record.info['SVLEN'], chr2, end2, record.info['STRANDS'], record.id)
def dedup_records(records):
"""Take unique subset of records"""
records = sorted(records, key=merge_key)
curr_record = records[0]
for record in records[1:]:
if records_match(curr_record, record):
# keep more informative ALT field, assumed to be the one with more colons
# ex: <INS:ME:ALU> kept over <INS>
curr_alt = curr_record.alts[0]
new_alt = record.alts[0]
if (curr_alt.startswith('<') and curr_alt.endswith('>') and new_alt.startswith('<') and new_alt.endswith('>') and
len(new_alt.split(':')) > len(curr_alt.split(':'))):
curr_record = record
continue
else:
yield curr_record
curr_record = record
yield curr_record
def merge_records(vcfs):
"""
Take unique set of VCF records
Strategy: Merge & roughly sort records from all VCFs by chrom & pos, then gather records that share the same chrom & pos and remove duplicates.
Note: The output from heapq.merge cannot be directly used to remove duplicates because it is not sufficiently sorted, so duplicates may not be
adjacent. It is also not sufficient to alter the comparator function to take more than chrom & pos into account, because heapq.merge assumes
that each VCF is already sorted and will make no attempt to further sort them according to the comparator function. Re-sorting all records
that share a chrom & pos by all necessary comparison fields is more efficient than re-sorting each entire VCF.
"""
merged_vcfs = heapq.merge(*vcfs, key=lambda r: VariantRecordComparison(r))
record = next(merged_vcfs)
curr_records = deque([record])
curr_chrom = record.chrom
curr_pos = record.pos
for record in merged_vcfs:
if record.chrom == curr_chrom and record.pos == curr_pos:
curr_records.append(record)
else:
for rec in dedup_records(curr_records):
yield rec
curr_records = deque([record])
curr_pos = record.pos
curr_chrom = record.chrom
for rec in dedup_records(curr_records):
yield rec
if __name__ == '__main__':
main() | 38.270677 | 172 | 0.65501 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2018 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
"""
import argparse
import heapq
from collections import deque
import pysam
import svtk.utils as svu
def records_match(record, other):
"""
Test if two records are same SV: check chromosome, position, stop, SVTYPE, SVLEN (for insertions),
STRANDS (for BNDS and INVs), and (if they exist) CHR2/END2 for multi-chromosomal events
"""
return (record.chrom == other.chrom and
record.pos == other.pos and
record.stop == other.stop and
record.info['SVTYPE'] == other.info['SVTYPE'] and
record.info['SVLEN'] == other.info['SVLEN'] and
record.info['STRANDS'] == other.info['STRANDS'] and
(('CHR2' not in record.info and 'CHR2' not in other.info) or ('CHR2' in record.info and 'CHR2' in other.info and record.info['CHR2'] == other.info['CHR2'])) and
(('END2' not in record.info and 'END2' not in other.info) or ('END2' in record.info and 'END2' in other.info and record.info['END2'] == other.info['END2'])))
def merge_key(record):
"""
Sort records by all fields that records_match will use to check for duplicates, in sequence,
so that all identical records according to records_match will be adjacent
"""
chr2 = record.info['CHR2'] if 'CHR2' in record.info else None
end2 = record.info['END2'] if 'END2' in record.info else None
return (record.pos, record.stop, record.info['SVTYPE'], record.info['SVLEN'], chr2, end2, record.info['STRANDS'], record.id)
def dedup_records(records):
"""Take unique subset of records"""
records = sorted(records, key=merge_key)
curr_record = records[0]
for record in records[1:]:
if records_match(curr_record, record):
# keep more informative ALT field, assumed to be the one with more colons
# ex: <INS:ME:ALU> kept over <INS>
curr_alt = curr_record.alts[0]
new_alt = record.alts[0]
if (curr_alt.startswith('<') and curr_alt.endswith('>') and new_alt.startswith('<') and new_alt.endswith('>') and
len(new_alt.split(':')) > len(curr_alt.split(':'))):
curr_record = record
continue
else:
yield curr_record
curr_record = record
yield curr_record
class VariantRecordComparison:
def __init__(self, record):
self.record = record
def __lt__(self, other):
if self.record.chrom == other.record.chrom:
return self.record.pos < other.record.pos
else:
return svu.is_smaller_chrom(self.record.chrom, other.record.chrom)
def merge_records(vcfs):
"""
Take unique set of VCF records
Strategy: Merge & roughly sort records from all VCFs by chrom & pos, then gather records that share the same chrom & pos and remove duplicates.
Note: The output from heapq.merge cannot be directly used to remove duplicates because it is not sufficiently sorted, so duplicates may not be
adjacent. It is also not sufficient to alter the comparator function to take more than chrom & pos into account, because heapq.merge assumes
that each VCF is already sorted and will make no attempt to further sort them according to the comparator function. Re-sorting all records
that share a chrom & pos by all necessary comparison fields is more efficient than re-sorting each entire VCF.
"""
merged_vcfs = heapq.merge(*vcfs, key=lambda r: VariantRecordComparison(r))
record = next(merged_vcfs)
curr_records = deque([record])
curr_chrom = record.chrom
curr_pos = record.pos
for record in merged_vcfs:
if record.chrom == curr_chrom and record.pos == curr_pos:
curr_records.append(record)
else:
for rec in dedup_records(curr_records):
yield rec
curr_records = deque([record])
curr_pos = record.pos
curr_chrom = record.chrom
for rec in dedup_records(curr_records):
yield rec
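# Minimal sketch of the idea on plain ints: heapq.merge guarantees only a
# global sort by key, so equal keys are gathered and deduped afterwards.
def _merge_demo():
    import heapq
    print(list(heapq.merge([1, 3, 3, 5], [2, 3, 4])))  # [1, 2, 3, 3, 3, 4, 5]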
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcflist', type=argparse.FileType('r'))
parser.add_argument('fout', type=argparse.FileType('w'))
args = parser.parse_args()
# VCFs from other batches
fnames = [l.strip() for l in args.vcflist.readlines()]
vcfs = [pysam.VariantFile(f) for f in fnames]
# Copy base VCF header without samples
args.fout.write('\t'.join(str(vcfs[0].header).split('\t')[:11]) + '\n')
# Write out sites-only records for dedupped variants + 2 dummy GTs
# including one 0/1 so svtk bedcluster doesn't break & clusters only on variants not samples
for record in merge_records(vcfs):
base = '\t'.join(str(record).split('\t')[:8])
args.fout.write(base + '\tGT\t0/1\t0/0\n')
args.fout.close()
if __name__ == '__main__':
main() | 1,109 | 9 | 99 |
ec0b5a658527f89982886e3eb9289615a64a45ea | 3,783 | py | Python | ngs_utils/jsontemplate/formatters.py | pdiakumis/NGS_Utils | 9eae9f8d5f0e408118d429fde90e297dbac9ae15 | [
"MIT"
] | 3 | 2018-06-06T01:41:51.000Z | 2020-08-20T11:36:06.000Z | ngs_utils/jsontemplate/formatters.py | pdiakumis/NGS_Utils | 9eae9f8d5f0e408118d429fde90e297dbac9ae15 | [
"MIT"
] | 4 | 2019-11-28T03:34:54.000Z | 2021-06-24T23:04:55.000Z | ngs_utils/jsontemplate/formatters.py | pdiakumis/NGS_Utils | 9eae9f8d5f0e408118d429fde90e297dbac9ae15 | [
"MIT"
] | 5 | 2018-03-15T12:43:38.000Z | 2021-06-24T23:12:48.000Z | #!/usr/bin/python -S
"""
formatters.py
This module should implement the standard list of formatters.
It also provides a method LookupChain for *composing lookup chains* for
formatters.
Formatter lookup chaining is not to be confused with plain formatter chaining,
e.g.:
{variable|html|json}
If anyone has any better names for the two types of chaining, let the mailing
list know.
"""
__author__ = 'Andy Chu'
import os
import sys
from ._jsontemplate import FromFile
class Error(Exception):
"""Base class for all exceptions raised by this module."""
def LookupChain(lookup_func_list):
"""Returns a *function* suitable for passing as the more_formatters argument
to Template.
NOTE: In Java, this would be implemented using the 'Composite' pattern. A
    *list* of formatter lookup functions behaves the same as a *single* formatter
    lookup function.
Note the distinction between formatter *lookup* functions and formatter
functions here.
"""
return MoreFormatters
def PythonPercentFormat(format_str):
"""Use Python % format strings as template format specifiers."""
if format_str.startswith('printf '):
fmt = format_str[len('printf '):]
return lambda value: fmt % value
else:
return None
# Seam for testing
_open = open
# Cache of compiled templates. In Java, this might need to be a
# ConcurrentHashMap like the tokenization regex cache.
_compiled_template_cache = {}
class TemplateFileInclude(object):
"""Template include mechanism.
The relative path is specified as an argument to the template.
"""
def __call__(self, format_str):
"""Returns a formatter function."""
if format_str.startswith('template-file '):
relative_path = format_str[len('template-file '):]
full_path = os.path.join(self.root_dir, relative_path)
if full_path not in _compiled_template_cache:
f = _open(full_path)
_compiled_template_cache[full_path] = FromFile(f)
f.close()
return _compiled_template_cache[full_path].expand # a 'bound method'
else:
return None # this lookup is not applicable
class Json(object):
"""Format arbitrary nodes as JSON.
It takes a function which converts JSON structures to strings as a parameter.
All this does is relieve the user of having to remember the standard names
'json' and 'js-string'. Just pass your program's JSON serializer in here.
"""
def __call__(self, format_str):
"""Returns a formatter function."""
if format_str in ('json', 'js-string'):
return self.json_func
else:
return None # this lookup is not applicable
def Plural(format_str):
"""Returns whether the value should be considered a plural value.
Integers greater than 1 are plural, and lists with length greater than one are
too.
"""
if format_str.startswith('plural?'):
i = len('plural?')
try:
splitchar = format_str[i] # Usually a space, but could be something else
_, plural_val, singular_val = format_str.split(splitchar)
except IndexError:
raise Error('plural? must have exactly 2 arguments')
return Formatter
else:
return None # this lookup is not applicable
| 25.389262 | 80 | 0.706846 | #!/usr/bin/python -S
"""
formatters.py
This module should implement the standard list of formatters.
It also provides a method LookupChain for *composing lookup chains* for
formatters.
Formatter lookup chaining is not to be confused with plain formatter chaining,
e.g.:
{variable|html|json}
If anyone has any better names for the two types of chaining, let the mailing
list know.
"""
__author__ = 'Andy Chu'
import os
import sys
from ._jsontemplate import FromFile
class Error(Exception):
"""Base class for all exceptions raised by this module."""
def LookupChain(lookup_func_list):
"""Returns a *function* suitable for passing as the more_formatters argument
to Template.
NOTE: In Java, this would be implemented using the 'Composite' pattern. A
    *list* of formatter lookup functions behaves the same as a *single* formatter
    lookup function.
Note the distinction between formatter *lookup* functions and formatter
functions here.
"""
def MoreFormatters(formatter_name):
for lookup_func in lookup_func_list:
formatter_func = lookup_func(formatter_name)
if formatter_func is not None:
return formatter_func
return MoreFormatters
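# Minimal usage sketch (hypothetical chain): try printf-style formats first,
# then pluralization; unknown names fall through to None.
#   more_formatters = LookupChain([PythonPercentFormat, Plural])
#   more_formatters('printf %.2f')   # -> a formatter function
#   more_formatters('no-such-name')  # -> None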
def PythonPercentFormat(format_str):
"""Use Python % format strings as template format specifiers."""
if format_str.startswith('printf '):
fmt = format_str[len('printf '):]
return lambda value: fmt % value
else:
return None
# Seam for testing
_open = open
# Cache of compiled templates. In Java, this might need to be a
# ConcurrentHashMap like the tokenization regex cache.
_compiled_template_cache = {}
class TemplateFileInclude(object):
"""Template include mechanism.
The relative path is specified as an argument to the template.
"""
def __init__(self, root_dir):
self.root_dir = root_dir
def __call__(self, format_str):
"""Returns a formatter function."""
if format_str.startswith('template-file '):
relative_path = format_str[len('template-file '):]
full_path = os.path.join(self.root_dir, relative_path)
if full_path not in _compiled_template_cache:
f = _open(full_path)
_compiled_template_cache[full_path] = FromFile(f)
f.close()
return _compiled_template_cache[full_path].expand # a 'bound method'
else:
return None # this lookup is not applicable
class Json(object):
"""Format arbitrary nodes as JSON.
It takes a function which converts JSON structures to strings as a parameter.
All this does is relieve the user of having to remember the standard names
'json' and 'js-string'. Just pass your program's JSON serializer in here.
"""
def __init__(self, json_func):
self.json_func = json_func
def __call__(self, format_str):
"""Returns a formatter function."""
if format_str in ('json', 'js-string'):
return self.json_func
else:
return None # this lookup is not applicable
def Plural(format_str):
"""Returns whether the value should be considered a plural value.
Integers greater than 1 are plural, and lists with length greater than one are
too.
"""
if format_str.startswith('plural?'):
i = len('plural?')
try:
splitchar = format_str[i] # Usually a space, but could be something else
_, plural_val, singular_val = format_str.split(splitchar)
except IndexError:
raise Error('plural? must have exactly 2 arguments')
def Formatter(value):
plural = False
if isinstance(value, int) and value > 1:
plural = True
if isinstance(value, list) and len(value) > 1:
plural = True
if plural:
return plural_val
else:
return singular_val
return Formatter
else:
return None # this lookup is not applicable
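# E.g. Plural('plural? items item') yields a formatter mapping 5 -> 'items'
# and 1 -> 'item'.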
| 499 | 0 | 101 |
2ac2f2c7138d46cab1a9b43042ce9e5dad55ab02 | 3,667 | py | Python | tests/test_script.py | mr-rodgers/ohoh | 239a5c471bd25dd12461c6cb38036841d922fbde | [
"MIT"
] | 2 | 2018-08-27T09:54:52.000Z | 2020-05-14T02:36:04.000Z | tests/test_script.py | te-je/ohoh | 239a5c471bd25dd12461c6cb38036841d922fbde | [
"MIT"
] | null | null | null | tests/test_script.py | te-je/ohoh | 239a5c471bd25dd12461c6cb38036841d922fbde | [
"MIT"
] | null | null | null | from codecs import open
from ohoh import build_parser, DEFAULT_HOST, DEFAULT_PORT
from os import path
import os
import pytest
import sys
import time
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize("address,expected", [
(None, (DEFAULT_HOST, DEFAULT_PORT)),
("localhost", ("localhost", DEFAULT_PORT)),
("localhost:80", ("localhost", 80)),
("google.com", ("google.com", DEFAULT_PORT)),
("google.com:80", ("google.com", 80)),
(":5868", ("", 5868)),
])
| 30.815126 | 81 | 0.671394 | from codecs import open
from ohoh import build_parser, DEFAULT_HOST, DEFAULT_PORT
from os import path
import os
import pytest
import sys
import time
@pytest.fixture
def parser():
return build_parser()
@pytest.fixture
def modfile(request, tmpdir):
def uninstall():
sys.path.remove(tmpdir.strpath)
sys.path.insert(0, tmpdir.strpath)
name = "rand_mod_name" + str(hash(time.time()))
pyfile = path.join(tmpdir.strpath, name + ".py")
with open(pyfile, "w", encoding="utf-8") as modfile:
modfile.write("#-*- coding: utf-8 -*-\n")
modfile.write("app = lambda: None\n")
request.addfinalizer(uninstall)
return pyfile
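# The finalizer keeps sys.path clean between tests; the time-hash module name
# avoids stale sys.modules entries across runs.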
@pytest.fixture
def modname(modfile):
modname = path.splitext(path.basename(modfile))[0]
sys.modules.pop(modname, None)
return modname
@pytest.mark.parametrize("address,expected", [
(None, (DEFAULT_HOST, DEFAULT_PORT)),
("localhost", ("localhost", DEFAULT_PORT)),
("localhost:80", ("localhost", 80)),
("google.com", ("google.com", DEFAULT_PORT)),
("google.com:80", ("google.com", 80)),
(":5868", ("", 5868)),
])
def test_address_parse(address, expected, modname, parser):
args = ["-s", address, modname] if address else [modname]
parsed = parser.parse_args(args)
assert parsed.address == expected
def test_app_spec_parse_nomod(parser):
with pytest.raises(ImportError):
parser = parser.parse_args(["sys.RANDOM_FOO.MODULE_I.PRAY.DOESNT_EXIST"])
def test_app_spec_parse_mod(parser, modname):
parsed = parser.parse_args([modname])
assert parsed.app() is None
def test_app_spec_parse_noobj(parser, modname):
with pytest.raises(AttributeError):
parser.parse_args(["{0}:obj_doesnt_exist".format(modname)])
def test_app_spec_parse_obj_not_callable(parser, modname, modfile):
with open(modfile, "a", encoding="utf-8") as f:
f.write("appx = None")
with pytest.raises(SystemExit):
parser.parse_args(["{0}:appx".format(modname)])
def test_app_spec_parse_obj(parser, modname, modfile):
with open(modfile, "a", encoding="utf-8") as f:
f.write("appx = lambda: None")
parsed = parser.parse_args(["{0}:appx".format(modname)])
assert parsed.app() is None
def test_app_spec_parse_filename(parser, modfile):
sys.path.remove(path.dirname(modfile))
parsed = parser.parse_args([modfile])
assert parsed.app() is None
assert path.dirname(modfile) in sys.path
def test_path_parse(parser, tmpdir):
created = path.join(tmpdir.strpath, "created")
uncreated = path.join(tmpdir.strpath, "uncreated")
os.mkdir(created)
with pytest.raises(SystemExit):
parsed = parser.parse_args(["-p", created])
assert created in sys.path
with pytest.raises(SystemExit):
parsed = parser.parse_args(["-p", uncreated])
assert uncreated not in sys.path
def test_app_spec_parse_obj_factory(parser, modname, modfile):
with open(modfile, "a", encoding="utf-8") as f:
f.write("""
class App(object):
def __init__(self, name=None, **kwargs):
self.name = name
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def __call__(self):
pass
""")
parsed = parser.parse_args(["{0}:App()".format(modname)])
assert parsed.app.name is None
assert getattr(parsed.app, 'age', None) is None
parsed = parser.parse_args(['{0}:App("Te-je")'.format(modname)])
assert parsed.app.name == "Te-je"
assert getattr(parsed.app, 'age', None) is None
parsed = parser.parse_args(['{0}:App(name="Jimmy", age=59)'.format(modname)])
assert parsed.app.name == "Jimmy"
assert parsed.app.age == 59
| 2,893 | 0 | 272 |
4881335b5b741dd2fe1f7c57986a2b6d9449df2b | 1,127 | py | Python | src/root_object/progress_circle.py | junhg0211/Kreylin | aae5e1e5ba5cfaadfab6708cb0bf26a75c6dcb7a | [
"Apache-2.0"
] | 1 | 2019-09-11T12:02:53.000Z | 2019-09-11T12:02:53.000Z | src/root_object/progress_circle.py | junhg0211/Kreylin | aae5e1e5ba5cfaadfab6708cb0bf26a75c6dcb7a | [
"Apache-2.0"
] | 8 | 2019-09-11T12:06:54.000Z | 2020-02-09T04:42:13.000Z | src/root_object/progress_circle.py | junhg0211/Kreylin | aae5e1e5ba5cfaadfab6708cb0bf26a75c6dcb7a | [
"Apache-2.0"
] | 1 | 2021-05-24T12:43:07.000Z | 2021-05-24T12:43:07.000Z | from pygame.surface import Surface
import constants
from font import Font
from root_object.circle import Circle
from root_object.text import Text
| 34.151515 | 91 | 0.703638 | from pygame.surface import Surface
import constants
from font import Font
from root_object.circle import Circle
from root_object.text import Text
class ProgressCircle(Circle):
def __init__(self, center_x, center_y, max_radius, width, color, initial_progress=0.0):
super().__init__(center_x, center_y, max_radius, width, color, initial_progress)
font = Font(constants.NANUMSQUARE_LIGHT_FONT, 72, constants.TEXT_COLOR)
self.circle_progress = Text(0, self.center_y - font.size / 2, '', font)
def set_color(self, circle_color, text_color):
self.color = circle_color
self.circle_progress.font.set_color(text_color)
def tick(self):
super().tick()
self.circle_progress.set_text('%.3f%%' % (constants.progress * 100))
self.circle_progress.center_x()
def render(self, surface: Surface):
super().render(surface)
self.circle_progress.render(surface)
def window_resize(self, width: int, height: int):
super().window_resize(width, height)
self.circle_progress.y = self.center_y - self.circle_progress.font.size / 2
| 814 | 8 | 157 |
898870cd57396bf911538e521bea5d76091a3465 | 3,104 | py | Python | dbmuxev/__init__.py | prototux/PSA-RE | 7b5e7b9e87ba2bb14fbfc3c4f4ff79985afcd4c8 | [
"Apache-2.0"
] | 30 | 2020-11-27T08:53:37.000Z | 2022-03-24T21:20:48.000Z | dbmuxev/__init__.py | UcefMountacer/PSA-RE | a25dcf2deadc8f9ce2f1d5809f3c2f6818304fc9 | [
"Apache-2.0"
] | 7 | 2021-01-23T20:44:13.000Z | 2022-03-23T04:18:11.000Z | dbmuxev/__init__.py | UcefMountacer/PSA-RE | a25dcf2deadc8f9ce2f1d5809f3c2f6818304fc9 | [
"Apache-2.0"
] | 12 | 2021-01-23T18:01:39.000Z | 2022-03-04T12:08:44.000Z | import os
import glob
import yaml
| 36.952381 | 101 | 0.490335 | import os
import glob
import yaml
class dbmuxev:
def __init__(self, rootdir='.', lang='en'):
self.rootdir = rootdir
self.lang = lang
# Define objects we're going to have
self.archs = None
self.cars = None
def load_all(self):
self.load_archs()
self.load_nodes()
self.load_cars()
def load_archs(self, file='architectures.yml'):
# Load architectures
self.archs = None
with open(f'{self.rootdir}/{file}', 'r') as archfile:
try:
self.archs = yaml.load(archfile, Loader=yaml.SafeLoader)
except:
print('cannot read architectures file')
def load_nodes(self, dir='nodes'):
if not self.archs:
return None
# Load nodes for architectures
for arch, data in self.archs.items():
for variant, data in data.items():
if not 'networks' in self.archs[arch][variant]:
print(f'WARN: no networks defined in {arch}.{variant}')
continue
filename = f'{self.rootdir}/{dir}/{arch}.{variant}.yml'
if not os.path.isfile(filename):
continue
with open(filename) as varfile:
try:
nodes = yaml.load(varfile, Loader=yaml.SafeLoader)
except:
continue
for node, data in nodes.items():
if not 'bus' in data:
print(f'WARN: no bus for node {node} in {arch}.{variant}')
continue
for netbus in data['bus']:
network, bus = netbus.split('.')
if not network in self.archs[arch][variant]['networks']:
print(f'WARN: network {network} not found in {arch}.{variant}')
continue
if not 'nodes' in self.archs[arch][variant]['networks'][network][bus]:
self.archs[arch][variant]['networks'][network][bus]['nodes'] = {}
self.archs[arch][variant]['networks'][network][bus]['nodes'][node] = data
def load_frames(self, dir='buses'):
pass
def load_cars(self, dir='cars'):
files = glob.glob(f'{dir}/*.yml')
self.cars = {}
for yfile in files:
carname = os.path.basename(yfile).split('.')[0]
with open(yfile, 'r') as ystream:
try:
print(f'loading yaml for {carname}')
self.cars[carname] = yaml.load(ystream, Loader=yaml.SafeLoader)
except:
print(f'cannot read yaml for car {carname}')
def list_arch_variants(self):
if not self.archs:
return None
        variants = []
        for arch, data in self.archs.items():
            for variant, _ in data.items():
                variants.append(f'{arch}.{variant}')
        return variants
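# Minimal usage sketch:
#   db = dbmuxev(rootdir='.')
#   db.load_archs(); db.load_nodes()
#   db.list_arch_variants()  # -> ['<arch>.<variant>', ...]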
| 2,866 | -7 | 211 |
9420f75a4f51fd3a07373d2089ce37f6fd2bcf7f | 1,519 | py | Python | src/adobe/pdfservices/operation/internal/api/dto/request/extract_pdf_outputs.py | hvntravel/pdfservices-python-sdk | af2511604dc2558e43c6be44692fb9faa0e3e8d8 | [
"Apache-2.0"
] | 2 | 2021-11-19T10:00:53.000Z | 2022-01-30T21:19:50.000Z | src/adobe/pdfservices/operation/internal/api/dto/request/extract_pdf_outputs.py | hvntravel/pdfservices-python-sdk | af2511604dc2558e43c6be44692fb9faa0e3e8d8 | [
"Apache-2.0"
] | 1 | 2021-06-26T15:17:21.000Z | 2021-06-26T15:17:21.000Z | src/adobe/pdfservices/operation/internal/api/dto/request/extract_pdf_outputs.py | hvntravel/pdfservices-python-sdk | af2511604dc2558e43c6be44692fb9faa0e3e8d8 | [
"Apache-2.0"
] | 2 | 2022-01-02T16:01:02.000Z | 2022-03-24T01:23:14.000Z | # Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import json
import mimetypes
from adobe.pdfservices.operation.internal.api.dto.document import Document
from adobe.pdfservices.operation.internal.api.dto.request.platform.outputs import Outputs
from adobe.pdfservices.operation.internal.service_constants import ServiceConstants
#TODO Why did it require JSONDecoder?
| 44.676471 | 98 | 0.761027 | # Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import json
import mimetypes
from adobe.pdfservices.operation.internal.api.dto.document import Document
from adobe.pdfservices.operation.internal.api.dto.request.platform.outputs import Outputs
from adobe.pdfservices.operation.internal.service_constants import ServiceConstants
#TODO Why did it require JSONDecoder?
class ExtractPDFOutputs(Outputs, json.JSONDecoder):
json_hint = {
'elements_info_format' : {'name' : 'elementsInfo', 'type' : Document},
'elements_renditions_format' : { 'name' : 'elementsRenditions', 'type' : Document}
}
# Why is this type definition required
elements_info : Document
elements_renditions : Document
def __init__(self):
super().__init__()
self.elements_info_format = Document(mimetypes.types_map['.json'], "jsonoutput")
self.elements_renditions_format = Document(ServiceConstants.TEXT_MIME_TYPE, "fileoutpart")
| 213 | 359 | 22 |
70e8006c645a188c493a251606a2b7455927ab7a | 324 | py | Python | scripts/gen_small.py | jaistark/sp | 911933c65f950e6bc51451840068ca9249554846 | [
"BSD-2-Clause"
] | 28 | 2015-03-04T08:34:40.000Z | 2022-02-13T05:59:11.000Z | scripts/gen_small.py | jaistark/sp | 911933c65f950e6bc51451840068ca9249554846 | [
"BSD-2-Clause"
] | null | null | null | scripts/gen_small.py | jaistark/sp | 911933c65f950e6bc51451840068ca9249554846 | [
"BSD-2-Clause"
] | 14 | 2015-03-04T08:34:42.000Z | 2020-12-08T16:13:37.000Z | import snap
import numpy as np
G = snap.PNGraph.New()
for i in range(6):
G.AddNode(i)
G.AddEdge(0, 1)
G.AddEdge(1, 2)
G.AddEdge(2, 0)
G.AddEdge(3, 4)
G.AddEdge(4, 5)
G.AddEdge(5, 3)
G.AddEdge(4, 3)
G.AddEdge(5, 4)
G.AddEdge(3, 5)
out_file = '../data/small.txt'
print 'data:', out_file
snap.SaveEdgeList(G, out_file)
| 14.727273 | 30 | 0.657407 | import snap
import numpy as np
G = snap.PNGraph.New()
for i in range(6):
G.AddNode(i)
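# Two directed triangles: 0->1->2->0 one way; 3, 4 and 5 connected in both
# directions.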
G.AddEdge(0, 1)
G.AddEdge(1, 2)
G.AddEdge(2, 0)
G.AddEdge(3, 4)
G.AddEdge(4, 5)
G.AddEdge(5, 3)
G.AddEdge(4, 3)
G.AddEdge(5, 4)
G.AddEdge(3, 5)
out_file = '../data/small.txt'
print 'data:', out_file
snap.SaveEdgeList(G, out_file)
| 0 | 0 | 0 |
2800eb1fa749d4331920a1ff2abd56ea0c5117a5 | 3,186 | py | Python | web/web/config.py | wellcometrust/reach | 1aa42c7d8aaf0a91d033af8448a33f37563b0365 | [
"MIT"
] | 11 | 2019-11-04T08:24:00.000Z | 2021-12-16T23:11:47.000Z | web/web/config.py | wellcometrust/reach | 1aa42c7d8aaf0a91d033af8448a33f37563b0365 | [
"MIT"
] | 274 | 2019-10-30T15:37:17.000Z | 2021-03-25T16:13:36.000Z | web/web/config.py | wellcometrust/reference-parser | 1aa42c7d8aaf0a91d033af8448a33f37563b0365 | [
"MIT"
] | 3 | 2019-11-12T13:38:14.000Z | 2020-04-16T07:49:04.000Z | import os
import toml
def _or(val_a, val_b, default=None):
""" Used to allow specifying config values through
os.environ
Args:
        val_a: value read from the config file (may be None)
        val_b: fallback value read from os.environ (may be None)
"""
if val_a is not None:
return val_a
elif val_b is not None:
return val_b
else:
return default
| 26.114754 | 93 | 0.514124 | import os
import toml
def _or(val_a, val_b, default=None):
""" Used to allow specifying config values through
os.environ
Args:
        val_a: value read from the config file (may be None)
        val_b: fallback value read from os.environ (may be None)
"""
if val_a is not None:
return val_a
elif val_b is not None:
return val_b
else:
return default
class Config(object):
def __init__(self, source):
self.debug = _or(
source.get('debug'),
os.environ.get("DEBUG"),
default=False
)
self.port = _or(
source.get('port'),
os.environ.get("PORT"),
default=8000
)
self.environment = _or(
source.get("environment"),
os.environ.get("ENVIRONMENT"),
default="development"
)
abs_path = "/".join(os.path.abspath(__file__).split("/")[:-1])
static_root = _or(
source.get("static_root"),
os.environ.get("STATIC_ROOT")
)
if static_root is not None:
if not static_root.startswith("/"):
static_root = os.path.normpath(os.path.join(abs_path, static_root))
self.static_root = static_root
docs_static_root = _or(
source.get("docs_static_root"),
os.environ.get("DOCS_STATIC_ROOT")
)
if docs_static_root is not None:
if not docs_static_root.startswith("/"):
docs_static_root = os.path.normpath(os.path.join(abs_path, docs_static_root))
self.docs_static_root = docs_static_root
db = source.get("database", {})
self.db_host = _or(
db.get("db_host", None),
os.environ.get("DB_HOST")
)
self.db_port = _or(
db.get("db_port"),
os.environ.get("DB_PORT"),
default=5432
)
self.db_name = _or(
db.get("db_name"),
os.environ.get("DB_NAME")
)
self.db_user = _or(
db.get("db_user"),
os.environ.get("DB_USER")
)
self.db_password = _or(
db.get("db_password", None),
os.environ.get("DB_PASSWORD")
)
self.min_conns = db.get("min_conns", 1)
self.max_conns = db.get("max_conns", 30)
sentry = source.get("sentry", {})
self.sentry_dsn = _or(
sentry.get("dsn"),
os.environ.get("SENTRY_DSN")
)
github = source.get("github", {})
self.github_token = _or(
github.get("github_token"),
os.environ.get("github_token"),
default=None
)
self.github_user = _or(
github.get("github_user"),
os.environ.get("github_user"),
default=None
)
analytics = source.get("analytics", {})
self.ga_code = _or(
analytics.get("ga_code"),
os.environ.get("GA_CODE"),
default=None
)
self.hotjar_code = _or(
analytics.get("hotjar_code"),
os.environ.get("HOTJAR_CODE"),
default=None
)
def init(config):
global CONFIG
CONFIG = Config(config)
| 2,803 | 0 | 72 |
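# Illustrative sketch (added; not part of the original module): _or resolves a
# setting by preferring the parsed config value, then the environment variable,
# then the default. The sample values below are hypothetical.
#
#     os.environ["PORT"] = "9000"
#     source = {"debug": True}  # config file omits "port"
#     _or(source.get("port"), os.environ.get("PORT"), default=8000)    # -> "9000"
#     _or(source.get("debug"), os.environ.get("DEBUG"), default=False)  # -> True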
cf4d1dc06ca23c5c3bf5591504594cccc40ce07c | 327 | py | Python | backend/src/crawling/models/script_model.py | nokia-wroclaw/innovativeproject-noticrawl | 2569dfe39492695cc33d0bbcbc675465b14756c0 | [
"MIT"
] | 5 | 2020-03-08T14:38:47.000Z | 2021-02-24T12:17:57.000Z | backend/src/crawling/models/script_model.py | nokia-wroclaw/innovativeproject-noticrawl | 2569dfe39492695cc33d0bbcbc675465b14756c0 | [
"MIT"
] | 35 | 2020-03-13T14:18:00.000Z | 2020-06-16T22:17:39.000Z | backend/src/crawling/models/script_model.py | nokia-wroclaw/innovativeproject-noticrawl | 2569dfe39492695cc33d0bbcbc675465b14756c0 | [
"MIT"
] | 2 | 2020-03-03T20:56:19.000Z | 2020-03-08T14:38:53.000Z | from pydantic import BaseModel
from typing import List
from .notification_model import Notification
| 19.235294 | 44 | 0.730887 | from pydantic import BaseModel
from typing import List
from .notification_model import Notification
class ScriptCreate(BaseModel):
script_name: str
instructions: str
link_id: int
class Script(ScriptCreate):
script_id: int
notifications: List[Notification] = []
class Config:
orm_mode = True
| 0 | 180 | 46 |
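# Hedged usage sketch (added; not in the original module): with orm_mode enabled,
# a Script schema can be built directly from an ORM row object, e.g.
# Script.from_orm(db_script), instead of from a plain dict.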
23af53260bd3539d33274ebeb87ea1d255057d8b | 2,235 | py | Python | tests/test_pages.py | morden35/spatula | cdbcc3231696890af236064a6082232af9d4d407 | [
"MIT"
] | 209 | 2017-02-22T16:38:23.000Z | 2022-03-26T02:07:39.000Z | tests/test_pages.py | morden35/spatula | cdbcc3231696890af236064a6082232af9d4d407 | [
"MIT"
] | 23 | 2017-02-24T06:13:02.000Z | 2022-02-10T21:25:35.000Z | tests/test_pages.py | morden35/spatula | cdbcc3231696890af236064a6082232af9d4d407 | [
"MIT"
] | 7 | 2017-02-24T02:54:23.000Z | 2021-08-05T18:37:14.000Z | import json
from dataclasses import dataclass
from spatula import (
HtmlPage,
XmlPage,
JsonPage,
CsvListPage,
HtmlListPage,
XmlListPage,
JsonListPage,
XPath,
URL,
)
SOURCE = "https://example.com"
@dataclass
class Response:
    content: bytes
    @property
    def text(self):
        return self.content
    def json(self):
        return json.loads(self.content)
| 24.56044 | 76 | 0.620134 | import json
from dataclasses import dataclass
from spatula import (
HtmlPage,
XmlPage,
JsonPage,
CsvListPage,
HtmlListPage,
XmlListPage,
JsonListPage,
XPath,
URL,
)
SOURCE = "https://example.com"
@dataclass
class Response:
content: bytes
@property
def text(self):
return self.content
def json(self):
return json.loads(self.content)
def test_html_page():
p = HtmlPage(source=URL(SOURCE))
p.response = Response(b"<html><a href='/test'>link</a></html>")
p.postprocess_response()
# test existence of page.root
link = p.root.xpath("//a")[0]
# test that links were normalized to example.com
assert link.get("href") == "https://example.com/test"
def test_xml_page():
p = XmlPage(source=SOURCE)
p.response = Response(b"<data><is><nested /></is></data>")
p.postprocess_response()
assert p.root.tag == "data"
def test_json_page():
nested = {"data": {"is": "nested"}}
p = JsonPage(source=SOURCE)
p.response = Response(json.dumps(nested))
p.postprocess_response()
assert p.data == nested
def test_csv_list_page():
p = CsvListPage(source=SOURCE)
p.response = Response("a,b,c\n1,2,3\n4,5,6")
p.postprocess_response()
data = list(p.process_page())
assert len(data) == 2
assert data[0] == {"a": "1", "b": "2", "c": "3"}
def test_html_list_page():
p = HtmlListPage(source=SOURCE)
p.selector = XPath("//li/text()")
p.response = Response("<ul><li>one</li><li>two</li><li>three</li></ul>")
p.postprocess_response()
data = list(p.process_page())
assert len(data) == 3
assert data == ["one", "two", "three"]
def test_xml_list_page():
p = XmlListPage(source=SOURCE)
p.selector = XPath("//item/text()")
p.response = Response(
"<resp><item>one</item><item>two</item><item>three</item></resp>"
)
p.postprocess_response()
data = list(p.process_page())
assert data == ["one", "two", "three"]
def test_json_list_page():
p = JsonListPage(source=SOURCE)
p.response = Response(json.dumps(["one", "two", "three"]))
p.postprocess_response()
data = list(p.process_page())
assert data == ["one", "two", "three"]
| 1,718 | 81 | 183 |
248892a38a80ac161701d0cdcd65f5d39a6a5009 | 18,274 | py | Python | src/azure-cli/azure/cli/command_modules/servicebus/tests/latest/test_servicebus_queue_commands.py | zjpjack/azure-cli | 818a0aa958203131d1fc9a43ecda75bbf5adf8f3 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/servicebus/tests/latest/test_servicebus_queue_commands.py | zjpjack/azure-cli | 818a0aa958203131d1fc9a43ecda75bbf5adf8f3 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/servicebus/tests/latest/test_servicebus_queue_commands.py | zjpjack/azure-cli | 818a0aa958203131d1fc9a43ecda75bbf5adf8f3 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI SERVICEBUS - CRUD TEST DEFINITIONS
import time
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only)
from knack.util import CLIError
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
| 51.914773 | 181 | 0.656123 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI SERVICEBUS - CRUD TEST DEFINITIONS
import time
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only)
from knack.util import CLIError
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
class SBQueueScenarioTest(ScenarioTest):
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_sb_queue')
def test_sb_queue(self, resource_group):
self.kwargs.update({
'namespacename': self.create_random_name(prefix='sb-nscli', length=20),
'tags': {'tag1=value1', 'tag2=value2'},
'sku': 'Standard',
'tier': 'Standard',
'authoname': self.create_random_name(prefix='cliAutho', length=20),
'defaultauthorizationrule': 'RootManageSharedAccessKey',
'accessrights': 'Listen',
'accessrights1': 'Send',
'primary': 'PrimaryKey',
'secondary': 'SecondaryKey',
'queuename': self.create_random_name(prefix='sb-queuecli', length=25),
'queuename2': self.create_random_name(prefix='sb-queuecli2', length=25),
'queuename3': self.create_random_name(prefix='sb-queuecli2', length=25),
'samplequeue': self.create_random_name(prefix='sb-queuecli3', length=25),
'samplequeue2': self.create_random_name(prefix='sb-queuecli4', length=25),
'queueauthoname': self.create_random_name(prefix='cliQueueAutho', length=25),
'lockduration': 'PT10M',
'lockduration1': 'PT11M',
'time_sample1': 'P1W',
'time_sample2': 'P2D',
'time_sample3': 'PT3H4M23S',
'time_sample4': 'P1Y3M2D',
'time_sample5': 'P1Y2M3DT3H11M2S',
'time_sample6': 'P1Y',
'time_sample7': '01:03:04',
'time_sample8': 'PT10M',
'time_sample9': 'PT3M',
'time_sample10': 'PT2M'
})
# Create Namespace
self.cmd(
'servicebus namespace create --resource-group {rg} --name {namespacename} --tags {tags} --sku {sku}',
checks=[self.check('sku.name', '{sku}')])
# Get Created Namespace
self.cmd('servicebus namespace show --resource-group {rg} --name {namespacename}',
checks=[self.check('sku.name', '{sku}')])
self.cmd('servicebus queue create --name {samplequeue} --namespace-name {namespacename} --resource-group {rg}')
self.cmd('servicebus queue create --name {samplequeue2} --namespace-name {namespacename} --resource-group {rg}')
queue = self.cmd('servicebus queue create --resource-group {rg} --name {queuename2} --namespace-name {namespacename}'
' --lock-duration {time_sample9} --max-size 4096 '
'--duplicate-detection-history-time-window {time_sample8} '
'--enable-dead-lettering-on-message-expiration --enable-duplicate-detection '
'--max-delivery-count 8 --status Active --default-message-time-to-live {time_sample5} '
'--enable-batched-operations false --forward-to {samplequeue} --forward-dead-lettered-messages-to {samplequeue}').get_output_in_json()
self.assertEqual(queue['autoDeleteOnIdle'], '10675199 days, 2:48:05.477581')
self.assertEqual(queue['defaultMessageTimeToLive'], '428 days, 3:11:02')
self.assertEqual(queue['deadLetteringOnMessageExpiration'], True)
self.assertEqual(queue['duplicateDetectionHistoryTimeWindow'], '0:10:00')
self.assertEqual(queue['enableExpress'], False)
self.assertEqual(queue['enableBatchedOperations'], False)
self.assertEqual(queue['enablePartitioning'], False)
self.assertEqual(queue['lockDuration'], '0:03:00')
self.assertEqual(queue['maxDeliveryCount'], 8)
self.assertEqual(queue['maxSizeInMegabytes'], 4096)
self.assertEqual(queue['requiresDuplicateDetection'], True)
self.assertEqual(queue['requiresSession'], False)
self.assertEqual(queue['status'], 'Active')
self.assertEqual(queue['forwardTo'], self.kwargs['samplequeue'])
self.assertEqual(queue['forwardDeadLetteredMessagesTo'], self.kwargs['samplequeue'])
self.kwargs.update({
'autoDeleteOnIdle': queue['autoDeleteOnIdle'],
'defaultMessageTimeToLive': queue['defaultMessageTimeToLive'],
'deadLetteringOnMessageExpiration': queue['deadLetteringOnMessageExpiration'],
'duplicateDetectionHistoryTimeWindow': queue['duplicateDetectionHistoryTimeWindow'],
'enableExpress': queue['enableExpress'],
'enablePartitioning': queue['enablePartitioning'],
'lockDuration': queue['lockDuration'],
'maxDeliveryCount': queue['maxDeliveryCount'],
'maxSizeInMegabytes': queue['maxSizeInMegabytes'],
'requiresDuplicateDetection': queue['requiresDuplicateDetection'],
'requiresSession': queue['requiresSession'],
'status': queue['status'],
'enableBatchedOperations': queue['enableBatchedOperations'],
'forwardTo': queue['forwardTo'],
'forwardDeadLetteredMessagesTo': queue['forwardDeadLetteredMessagesTo']
})
queue = self.cmd('servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--lock-duration {time_sample10}').get_output_in_json()
self.assertEqual(queue['lockDuration'], '0:02:00')
self.kwargs.update({'lockDuration': queue['lockDuration']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--default-message-time-to-live {time_sample7}').get_output_in_json()
self.assertEqual(queue['defaultMessageTimeToLive'], '1 day, 0:03:04')
self.kwargs.update({'defaultMessageTimeToLive': queue['defaultMessageTimeToLive']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--max-size 2048').get_output_in_json()
self.assertEqual(queue['maxSizeInMegabytes'], 2048)
self.kwargs.update({'maxSizeInMegabytes': queue['maxSizeInMegabytes']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--enable-batched-operations').get_output_in_json()
self.assertEqual(queue['enableBatchedOperations'], True)
self.kwargs.update({'enableBatchedOperations': queue['enableBatchedOperations']})
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--enable-batched-operations false').get_output_in_json()#--enable-dead-lettering-on-message-expiration
self.assertEqual(queue['enableBatchedOperations'], False)
self.kwargs.update({'enableBatchedOperations': queue['enableBatchedOperations']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--duplicate-detection-history-time-window PT8M').get_output_in_json()
self.assertEqual(queue['duplicateDetectionHistoryTimeWindow'], '0:08:00')
self.kwargs.update({'duplicateDetectionHistoryTimeWindow': queue['duplicateDetectionHistoryTimeWindow']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--enable-dead-lettering-on-message-expiration false').get_output_in_json()
self.assertEqual(queue['deadLetteringOnMessageExpiration'], False)
self.kwargs.update({'deadLetteringOnMessageExpiration': queue['deadLetteringOnMessageExpiration']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--enable-dead-lettering-on-message-expiration').get_output_in_json()
self.assertEqual(queue['deadLetteringOnMessageExpiration'], True)
self.kwargs.update({'deadLetteringOnMessageExpiration': queue['deadLetteringOnMessageExpiration']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--max-delivery-count 15').get_output_in_json()
self.assertEqual(queue['maxDeliveryCount'], 15)
self.kwargs.update({'maxDeliveryCount': queue['maxDeliveryCount']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--forward-to {samplequeue2}').get_output_in_json()
self.assertEqual(queue['forwardTo'], self.kwargs['samplequeue2'])
self.kwargs.update({'forwardTo': queue['forwardTo']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--forward-dead-lettered-messages-to {samplequeue2}').get_output_in_json()
self.assertEqual(queue['forwardDeadLetteredMessagesTo'], self.kwargs['samplequeue2'])
self.kwargs.update({'forwardDeadLetteredMessagesTo': queue['forwardDeadLetteredMessagesTo']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--status SendDisabled').get_output_in_json()
self.assertEqual(queue['status'], 'SendDisabled')
self.kwargs.update({'status': queue['status']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename2} --namespace-name {namespacename} '
'--max-size 2048').get_output_in_json()
self.assertEqual(queue['maxSizeInMegabytes'], 2048)
self.kwargs.update({'maxSizeInMegabytes': queue['maxSizeInMegabytes']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue create --resource-group {rg} --name {queuename3} --namespace-name {namespacename} '
'--auto-delete-on-idle {time_sample1} --enable-session --enable-express --enable-partitioning').get_output_in_json()
self.assertEqual(queue['autoDeleteOnIdle'], '7 days, 0:00:00')
self.assertEqual(queue['enableExpress'], True)
self.assertEqual(queue['enablePartitioning'], True)
self.assertEqual(queue['requiresSession'], True)
self.kwargs.update({
'autoDeleteOnIdle': queue['autoDeleteOnIdle'],
'defaultMessageTimeToLive': queue['defaultMessageTimeToLive'],
'deadLetteringOnMessageExpiration': queue['deadLetteringOnMessageExpiration'],
'duplicateDetectionHistoryTimeWindow': queue['duplicateDetectionHistoryTimeWindow'],
'enableExpress': queue['enableExpress'],
'enablePartitioning': queue['enablePartitioning'],
'lockDuration': queue['lockDuration'],
'maxDeliveryCount': queue['maxDeliveryCount'],
'maxSizeInMegabytes': queue['maxSizeInMegabytes'],
'requiresDuplicateDetection': queue['requiresDuplicateDetection'],
'requiresSession': queue['requiresSession'],
'status': queue['status'],
'enableBatchedOperations': queue['enableBatchedOperations'],
'forwardTo': queue['forwardTo'],
'forwardDeadLetteredMessagesTo': queue['forwardDeadLetteredMessagesTo']
})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename3} --namespace-name {namespacename} '
'--auto-delete-on-idle {time_sample7}').get_output_in_json()
self.assertEqual(queue['autoDeleteOnIdle'], '1 day, 0:03:04')
self.kwargs.update({'autoDeleteOnIdle': queue['autoDeleteOnIdle']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename3} --namespace-name {namespacename} '
'--enable-express false').get_output_in_json()
self.assertEqual(queue['enableExpress'], False)
self.kwargs.update({'enableExpress': queue['enableExpress']})
self.assertOnUpdate(queue, self.kwargs)
queue = self.cmd(
'servicebus queue update --resource-group {rg} --name {queuename3} --namespace-name {namespacename} '
'--enable-express').get_output_in_json()
self.assertEqual(queue['enableExpress'], True)
self.kwargs.update({'enableExpress': queue['enableExpress']})
self.assertOnUpdate(queue, self.kwargs)
# Create Queue
self.cmd(
'servicebus queue create --resource-group {rg} --namespace-name {namespacename} --name {queuename} --auto-delete-on-idle {lockduration} --max-size 1024 ',
checks=[self.check('name', '{queuename}')])
# Get Queue
self.cmd('servicebus queue show --resource-group {rg} --namespace-name {namespacename} --name {queuename}',
checks=[self.check('name', '{queuename}')])
# Update Queue
self.cmd(
'servicebus queue update --resource-group {rg} --namespace-name {namespacename} --name {queuename} --auto-delete-on-idle {lockduration1} ',
checks=[self.check('name', '{queuename}')])
# Queue List
self.cmd('servicebus queue list --resource-group {rg} --namespace-name {namespacename}')
        # Create Authorization Rule
self.cmd(
'servicebus queue authorization-rule create --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname} --rights {accessrights}',
checks=[self.check('name', '{authoname}')])
        # Get Created Authorization Rule
self.cmd(
'servicebus queue authorization-rule show --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname}',
checks=[self.check('name', '{authoname}')])
        # Update Authorization Rule
self.cmd(
'servicebus queue authorization-rule update --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname} --rights {accessrights1}',
checks=[self.check('name', '{authoname}')])
        # Get Authorization Rule List Keys
self.cmd(
'servicebus queue authorization-rule keys list --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname}')
        # Regenerate keys - Primary
        regenerate_primary_key_result = self.cmd(
            'servicebus queue authorization-rule keys renew --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname} --key {primary}')
        self.assertIsNotNone(regenerate_primary_key_result)
        # Regenerate keys - Secondary
        regenerate_secondary_key_result = self.cmd(
            'servicebus queue authorization-rule keys renew --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname} --key {secondary}')
        self.assertIsNotNone(regenerate_secondary_key_result)
# Delete Queue Authorization Rule
self.cmd(
'servicebus queue authorization-rule delete --resource-group {rg} --namespace-name {namespacename} --queue-name {queuename} --name {authoname}')
# Delete Queue
self.cmd('servicebus queue delete --resource-group {rg} --namespace-name {namespacename} --name {queuename}')
# Delete Namespace
self.cmd('servicebus namespace delete --resource-group {rg} --name {namespacename}')
def assertOnUpdate(self, actual, expected):
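        # Added note: helper that verifies an update changed only the intended
        # property, by comparing every tracked queue attribute against the
        # expected kwargs snapshot.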
self.assertEqual(actual['autoDeleteOnIdle'], expected['autoDeleteOnIdle'])
self.assertEqual(actual['defaultMessageTimeToLive'], expected['defaultMessageTimeToLive'])
self.assertEqual(actual['deadLetteringOnMessageExpiration'], expected['deadLetteringOnMessageExpiration'])
self.assertEqual(actual['duplicateDetectionHistoryTimeWindow'],
expected['duplicateDetectionHistoryTimeWindow'])
self.assertEqual(actual['enableExpress'], expected['enableExpress'])
self.assertEqual(actual['enableBatchedOperations'], expected['enableBatchedOperations'])
self.assertEqual(actual['enablePartitioning'], expected['enablePartitioning'])
self.assertEqual(actual['lockDuration'], expected['lockDuration'])
self.assertEqual(actual['maxDeliveryCount'], expected['maxDeliveryCount'])
self.assertEqual(actual['maxSizeInMegabytes'], expected['maxSizeInMegabytes'])
self.assertEqual(actual['requiresDuplicateDetection'], expected['requiresDuplicateDetection'])
self.assertEqual(actual['requiresSession'], expected['requiresSession'])
self.assertEqual(actual['status'], expected['status'])
self.assertEqual(actual['forwardTo'], expected['forwardTo'])
self.assertEqual(actual['forwardDeadLetteredMessagesTo'], expected['forwardDeadLetteredMessagesTo']) | 17,439 | 228 | 23 |
02ac88ad6695c765e9cecca611e77607225378d0 | 1,197 | py | Python | scripts/contributor_counts.py | navgurukul-shivani18/kitsune | a7cf49ab1bfcf4e770938116968824b2b0fa5bb1 | [
"BSD-3-Clause"
] | 4 | 2021-05-17T11:38:08.000Z | 2021-08-19T06:42:39.000Z | scripts/contributor_counts.py | navgurukul-shivani18/kitsune | a7cf49ab1bfcf4e770938116968824b2b0fa5bb1 | [
"BSD-3-Clause"
] | 32 | 2021-04-15T22:35:58.000Z | 2022-01-04T21:30:05.000Z | scripts/contributor_counts.py | navgurukul-shivani18/kitsune | a7cf49ab1bfcf4e770938116968824b2b0fa5bb1 | [
"BSD-3-Clause"
] | 3 | 2020-06-14T06:59:46.000Z | 2020-06-15T14:45:56.000Z | # Shows top posters and percentage of posts from top posters for the
# last 14 days. You need recent data to run this.
#
# Run this with ./manage.py runscript contributor_counts
from collections import defaultdict
from datetime import datetime, timedelta
from kitsune.forums.models import Post
if __name__ == '__main__':
print 'Run with "./manage.py runscript contributor_counts"'
| 25.468085 | 78 | 0.692565 | # Shows top posters and percentage of posts from top posters for the
# last 14 days. You need recent data to run this.
#
# Run this with ./manage.py runscript contributor_counts
from collections import defaultdict
from datetime import datetime, timedelta
from kitsune.forums.models import Post
def run():
    two_weeks_ago = datetime.now() - timedelta(days=14)
print 'Data since {0}'.format(two_weeks_ago)
print ''
query = Post.objects.filter(created__gte=two_weeks_ago)
total_posts = query.count()
posts_by_author = defaultdict(int)
for post in query:
username = post.author.username
posts_by_author[username] = posts_by_author[username] + 1
posts_by_author = sorted(posts_by_author.items(), key=lambda mem: -mem[1])
top_posters = posts_by_author[:10]
top_total = 0
for name, count in top_posters:
print '{0:>20}: {1}'.format(name, count)
top_total += count
print ''
print 'Total posts:', total_posts
print 'Top total: ', top_total
print 'Percent: ', float(top_total) / float(total_posts)
if __name__ == '__main__':
print 'Run with "./manage.py runscript contributor_counts"'
| 776 | 0 | 23 |
8f71d0b7f1f5f348b6bc0d3d81216b11dc4a07f6 | 1,337 | py | Python | fast_reconcile_app/lib/query_parser.py | birkin/fast_reconcile_project | de086e312aae9cd4d2eb847c3a5a5ba94cd68de7 | [
"BSD-3-Clause"
] | null | null | null | fast_reconcile_app/lib/query_parser.py | birkin/fast_reconcile_project | de086e312aae9cd4d2eb847c3a5a5ba94cd68de7 | [
"BSD-3-Clause"
] | null | null | null | fast_reconcile_app/lib/query_parser.py | birkin/fast_reconcile_project | de086e312aae9cd4d2eb847c3a5a5ba94cd68de7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json, logging
log = logging.getLogger(__name__)
def parse_query( request ):
""" Preps and returns three data-elements.
Called by views.reconcile_v2() """
( query, query_type, callback ) = ( request.POST.get('query', None), request.POST.get('query_type', None), request.POST.get('callback', None) )
if not query:
query = request.GET.get( 'query', None )
query = massage_query( query )
if not query_type:
query_type = request.GET.get( 'query_type', '/fast/all' )
if not callback:
callback = request.GET.get( 'callback', None )
log.debug( 'query, ```%s```; query_type, ```%s```; callback, ```%s```' % (query, query_type, callback) )
return ( query, query_type, callback )
def massage_query( query ):
""" Updates query for better fast-lookups.
Called by parse_query() """
if query.startswith( '{' ):
query = json.loads(query)['query']
elif '(' in query and ')' in query:
substring = query[ query.find('(')+1:query.find(')') ]
log.debug( 'substring, `%s`' % substring )
wordcount = len( substring.split() )
if wordcount > 1:
query = query.replace( '(', '' )
query = query.replace( ')', '' )
log.debug( 'massaged query, `%s`' % query )
return query
| 35.184211 | 147 | 0.585639 | # -*- coding: utf-8 -*-
import json, logging
log = logging.getLogger(__name__)
def parse_query( request ):
""" Preps and returns three data-elements.
Called by views.reconcile_v2() """
( query, query_type, callback ) = ( request.POST.get('query', None), request.POST.get('query_type', None), request.POST.get('callback', None) )
if not query:
query = request.GET.get( 'query', None )
query = massage_query( query )
if not query_type:
query_type = request.GET.get( 'query_type', '/fast/all' )
if not callback:
callback = request.GET.get( 'callback', None )
log.debug( 'query, ```%s```; query_type, ```%s```; callback, ```%s```' % (query, query_type, callback) )
return ( query, query_type, callback )
def massage_query( query ):
""" Updates query for better fast-lookups.
Called by parse_query() """
if query.startswith( '{' ):
query = json.loads(query)['query']
elif '(' in query and ')' in query:
substring = query[ query.find('(')+1:query.find(')') ]
log.debug( 'substring, `%s`' % substring )
wordcount = len( substring.split() )
if wordcount > 1:
query = query.replace( '(', '' )
query = query.replace( ')', '' )
log.debug( 'massaged query, `%s`' % query )
return query
| 0 | 0 | 0 |
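# Illustrative check (added; not part of the original module): parentheses are
# stripped only when the parenthesized span contains more than one word, and
# JSON payloads yield their "query" value.
#
#     massage_query('mao (tse tung)')      # -> 'mao tse tung'
#     massage_query('logic (philosophy)')  # -> 'logic (philosophy)' (unchanged)
#     massage_query('{"query": "x"}')      # -> 'x'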
d3c1f453a11937d08cbe712f78cc03cc5dfc1fc8 | 646 | py | Python | setup.py | Imperial-CMTH/koala | cd05b11be402295468be709db13a957530f66578 | [
"MIT"
] | null | null | null | setup.py | Imperial-CMTH/koala | cd05b11be402295468be709db13a957530f66578 | [
"MIT"
] | 9 | 2021-12-13T10:16:03.000Z | 2022-03-28T10:39:16.000Z | setup.py | Imperial-CMTH/koala | cd05b11be402295468be709db13a957530f66578 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="koala",
version='0.0',
description='Topological Amorphous quantum system simulations',
long_description='',
author="Peru D'Ornellas, Gino Cassella, Tom Hodson",
author_email='',
license='Apache Software License',
    url='',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=[
'numpy>=1.2',
'scipy',
'matplotlib',
'flake8',
'python-sat',
'pytest',
'pytest-cov',
'pytest-xdist',
'nbmake',
'pytest-github-actions-annotate-failures',
]
)
| 23.925926 | 67 | 0.586687 | from setuptools import setup, find_packages
setup(
name="koala",
version='0.0',
description='Topological Amorphous quantum system simulations',
long_description='',
author="Peru D'Ornellas, Gino Cassella, Tom Hodson",
author_email='',
license='Apache Software License',
    url='',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=[
'numpy>=1.2',
'scipy',
'matplotlib',
'flake8',
'python-sat',
'pytest',
'pytest-cov',
'pytest-xdist',
'nbmake',
'pytest-github-actions-annotate-failures',
]
)
| 0 | 0 | 0 |
cb55e13908319613727041b94480632b89fff554 | 2,335 | py | Python | code/behavior_test_coupling_common.py | cedricfoucault/networks_for_sequence_prediction | f2983a9738ec43e03a430f2f194f0359d2bb5bf7 | [
"MIT"
] | 1 | 2021-12-08T01:44:34.000Z | 2021-12-08T01:44:34.000Z | code/behavior_test_coupling_common.py | cedricfoucault/networks_for_sequence_prediction | f2983a9738ec43e03a430f2f194f0359d2bb5bf7 | [
"MIT"
] | null | null | null | code/behavior_test_coupling_common.py | cedricfoucault/networks_for_sequence_prediction | f2983a9738ec43e03a430f2f194f0359d2bb5bf7 | [
"MIT"
] | null | null | null | import numpy as np
| 47.653061 | 83 | 0.617987 | import numpy as np
def get_changes_in_prediction_by_coupled(df):
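    # Added descriptive comment: for each of the four (pre_other_pgen, rep_item)
    # quadrants and each repetition length, collect per-model predictions before
    # and after the repetition, take |after - before|, then average over the
    # quadrants, returning one (n_rep_lengths, n_models) array per coupling
    # condition (independent vs. coupled).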
pre_rep_pgen = df["pre_rep_pgen"].iloc[0]
pre_other_pgen_values = df["pre_other_pgen"].unique()
rep_lengths = df["rep_length"].unique()
n_rep_lengths = len(rep_lengths)
# get number of models per condition
pre_other_pgen = pre_other_pgen_values[0]
is_coupled = False
n_models_independent = len(df.query("(rep_item == 1) &\
(pre_other_pgen == @pre_other_pgen) &\
(is_coupled == @is_coupled) &\
(rep_length == 0)")["p_after"].to_numpy())
is_coupled = True
n_models_coupled = len(df.query("(rep_item == 1) &\
(pre_other_pgen == @pre_other_pgen) &\
(is_coupled == @is_coupled) &\
(rep_length == 0)")["p_after"].to_numpy())
# compute absolute change of prediction | after - before |
shape_independent = (4, n_rep_lengths, n_models_independent)
shape_coupled = (4, n_rep_lengths, n_models_coupled)
p_diff_by_coupled = {
False: np.empty(shape_independent),
True: np.empty(shape_coupled),
}
for i_pre_other_pgen, pre_other_pgen in enumerate(pre_other_pgen_values):
for i_rep_item, rep_item in enumerate([0, 1]):
i_quadrant = i_pre_other_pgen * 2 + i_rep_item
for is_coupled in [False, True]:
n_models = n_models_coupled if is_coupled else n_models_independent
p_after_s = np.empty((n_rep_lengths, n_models))
p_before_s = np.empty((n_rep_lengths, n_models))
for i_rep_length, rep_length in enumerate(rep_lengths):
df_condition = df.query("(rep_item == @rep_item) &\
(pre_other_pgen == @pre_other_pgen) &\
(is_coupled == @is_coupled) &\
(rep_length == @rep_length)")
p_after_s[i_rep_length] = df_condition["p_after"].to_numpy()
p_before_s[i_rep_length] = df_condition["p_before"].to_numpy()
p_diff_by_coupled[is_coupled][i_quadrant] = \
abs(p_after_s - p_before_s)
# average over 4 quadrants
for is_coupled in [True, False]:
p_diff_by_coupled[is_coupled] = p_diff_by_coupled[is_coupled].mean(axis=0)
return p_diff_by_coupled
| 2,293 | 0 | 23 |
6eab78bbc612df07c843ed5785bdecb5c317b639 | 1,119 | py | Python | test/testPhiGradx.py | EmoryMLIP/OT-Flow | c1851a4da3aa131df1df6132b83d5186c154087e | [
"MIT"
] | 31 | 2020-06-18T10:51:03.000Z | 2022-03-10T19:58:36.000Z | test/testPhiGradx.py | EmoryMLIP/OT-Flow | c1851a4da3aa131df1df6132b83d5186c154087e | [
"MIT"
] | null | null | null | test/testPhiGradx.py | EmoryMLIP/OT-Flow | c1851a4da3aa131df1df6132b83d5186c154087e | [
"MIT"
] | 8 | 2020-09-09T23:52:50.000Z | 2022-03-11T21:19:20.000Z | # testPhiGradx.py
# test the grad wrt x returned by trHess when nTh > 2
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from src.Phi import *
import torch.nn.utils
doPlots = True
d = 2
m = 5
nTh = 4
net = Phi(nTh=nTh, m=m, d=d)
net.double()
# vecParams = nn.utils.convert_parameters.parameters_to_vector(net.parameters())
x = torch.randn(1,3).type(torch.double)
# AD grad
x.requires_grad = True
y = net(x)
v = torch.randn(x.shape).type(torch.double)
# ------------------------------------------------
# f
# nablaPhi = net.trHess(x)[0]
g = net.trHess(x)[0]
niter = 20
h0 = 0.5
E0 = []
E1 = []
hlist = []
for i in range(niter):
h = h0**i
hlist.append(h)
E0.append( torch.norm(net( x + h * v ) - net(x)) )
E1.append( torch.norm(net( x + h * v ) - net(x) - h * torch.matmul(g , v.t())) )
for i in range(niter):
print("{:f} {:.6e} {:.6e}".format( hlist[i] , E0[i].item() , E1[i].item() ))
if doPlots:
plt.plot(hlist,E0, label='E0')
plt.plot(hlist,E1, label='E1')
plt.yscale('log')
plt.xscale('log')
plt.legend()
plt.show()
print("\n") | 18.65 | 86 | 0.580876 | # testPhiGradx.py
# test the grad wrt x returned by trHess when nTh > 2
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from src.Phi import *
import torch.nn.utils
doPlots = True
d = 2
m = 5
nTh = 4
net = Phi(nTh=nTh, m=m, d=d)
net.double()
# vecParams = nn.utils.convert_parameters.parameters_to_vector(net.parameters())
x = torch.randn(1,3).type(torch.double)
# AD grad
x.requires_grad = True
y = net(x)
v = torch.randn(x.shape).type(torch.double)
# ------------------------------------------------
# f
# nablaPhi = net.trHess(x)[0]
g = net.trHess(x)[0]
niter = 20
h0 = 0.5
E0 = []
E1 = []
hlist = []
for i in range(niter):
h = h0**i
hlist.append(h)
E0.append( torch.norm(net( x + h * v ) - net(x)) )
E1.append( torch.norm(net( x + h * v ) - net(x) - h * torch.matmul(g , v.t())) )
for i in range(niter):
print("{:f} {:.6e} {:.6e}".format( hlist[i] , E0[i].item() , E1[i].item() ))
if doPlots:
plt.plot(hlist,E0, label='E0')
plt.plot(hlist,E1, label='E1')
plt.yscale('log')
plt.xscale('log')
plt.legend()
plt.show()
print("\n") | 0 | 0 | 0 |
a660a6550f4dfbea6c622a3907992cd131a52dfb | 608 | py | Python | CubeNet/__init__.py | Billy-Chen0327/CubeNet | 7df1758b12f0e561d57f1367d9279ea1b0bcc358 | [
"MIT"
] | null | null | null | CubeNet/__init__.py | Billy-Chen0327/CubeNet | 7df1758b12f0e561d57f1367d9279ea1b0bcc358 | [
"MIT"
] | null | null | null | CubeNet/__init__.py | Billy-Chen0327/CubeNet | 7df1758b12f0e561d57f1367d9279ea1b0bcc358 | [
"MIT"
] | null | null | null | import os
import torch
import CubeNet.net
import CubeNet.Picker
import CubeNet.config
network = CubeNet.net.UNet()
network.load_state_dict(torch.load(os.path.join(os.path.split(CubeNet.__file__)[0], 'Para.pt')))
picker = CubeNet.Picker.IrrPicker(arr_info=CubeNet.config.arr_info,
para_path = os.path.join(os.path.split(CubeNet.__file__)[0],'Para.pt'),
net = network,
predict_batch = CubeNet.config.basic_info['batch_size'],
device = CubeNet.config.basic_info['device'])
| 46.769231 | 126 | 0.605263 | import os
import torch
import CubeNet.net
import CubeNet.Picker
import CubeNet.config
network = CubeNet.net.UNet()
network.load_state_dict(torch.load(os.path.join(os.path.split(CubeNet.__file__)[0], 'Para.pt')))
picker = CubeNet.Picker.IrrPicker(arr_info=CubeNet.config.arr_info,
para_path = os.path.join(os.path.split(CubeNet.__file__)[0],'Para.pt'),
net = network,
predict_batch = CubeNet.config.basic_info['batch_size'],
device = CubeNet.config.basic_info['device'])
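# Added note: importing CubeNet therefore eagerly constructs the UNet, loads the
# bundled Para.pt weights, and exposes a ready-to-use module-level `picker`.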
| 0 | 0 | 0 |
5e6b2448983c0f9de7f4ca47156c2801fef30890 | 12,703 | py | Python | async_pokepy/types/move.py | PendragonLore/async_pokepy | 9982505fbc360eae349086bfa8f6faad0133f5fa | [
"MIT"
] | 5 | 2019-05-30T21:45:24.000Z | 2021-11-07T20:35:40.000Z | async_pokepy/types/move.py | PendragonLore/async_pokepy | 9982505fbc360eae349086bfa8f6faad0133f5fa | [
"MIT"
] | null | null | null | async_pokepy/types/move.py | PendragonLore/async_pokepy | 9982505fbc360eae349086bfa8f6faad0133f5fa | [
"MIT"
] | 1 | 2019-05-10T19:21:51.000Z | 2019-05-10T19:21:51.000Z | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2019 Lorenzo
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .abc import BaseObject
from .ability import AbilityEffectChange
from .common import APIObject, MachineVersionDetail, Name, NamedAPIObject, VerboseEffect
__all__ = (
"Move",
"MoveFlavorText",
"MoveMetaData",
"MoveStatChange",
"PastMoveStatValues",
"ContestComboDetail",
"ContestComboSet"
)
class Move(BaseObject):
"""Represents a move object from the API.
.. versionadded:: 0.1.0a
.. container:: operations
.. describe:: str(x)
Returns the move's name.
.. describe:: x[y]
Returns a move's y attribute.
.. describe:: x == y
Check if two moves are the same.
.. describe:: x != y
Check if two moves are *not* the same.
Attributes
----------
id: :class:`int`
The identifier for the move.
name: :class:`str`
The name for the move.
accuracy: :class:`int`
The percent value of how likely the move is to be successful.
effect_chance: :class:`int`
The percent value of how likely it is that the move's effect will happen.
pp: :class:`int`
Power points. The number of times the move can be used.
power_points: :class:`int`
An alias for pp.
priority: :class:`int`
A value between -8 and 8. Sets the order in which the move is executed during battle.
power: :class:`int`
The base power of the move with a value of 0 if it does not have a base power.
    contest_combos: :class:`ContestComboSet`
A detail of normal and super contest combos that require the move.
contest_type: :class:`NamedAPIObject`
The type of appeal the move gives a Pokémon when used in a contest.
contest_effect: :class:`APIObject`
The effect the move has when used in a contest.
super_contest_effect: :class:`APIObject`
The effect the move has when used in a super contest.
damage_class: :class:`NamedAPIObject`
The type of damage the move inflicts on the target, e.g. physical.
effect_entries: List[:class:`VerboseEffect`]
The effect of the move listed in different languages.
flavor_text_entries: List[:class:`MoveFlavorText`]
The flavor text of the move listed in different languages.
generation: :class:`NamedAPIObject`
The generation in which the move was introduced.
meta: :class:`MoveMetaData`
Metadata about the move.
names: List[:class:`Name`]
The name of the move listed in different languages.
past_values: List[:class:`PastMoveStatValues`]
A list of move value changes across version groups of the game.
stat_changes: List[:class:`MoveStatChange`]
        A list of stats this move affects and how much it affects them.
effect_changes: List[:class:`AbilityEffectChange`]
The list of previous effects the move has had across version groups of the games.
target: :class:`NamedAPIObject`
The type of target that will receive the effects of the move.
type: :class:`NamedAPIObject`
The elemental type of the move.
    machines: List[:class:`MachineVersionDetail`]
A list of the machines that teach this move."""
__slots__ = (
"accuracy", "effect_chance", "pp", "power_points", "priority", "power", "contest_type", "type", "target",
"generation", "damage_class", "meta", "stat_changes", "names", "effect_entries", "flavor_text_entries",
"past_values", "effect_changes", "contest_effect", "super_contest_effect", "machines"
)
class MoveFlavorText:
"""Represents the flavor text of a move associated with a language.
.. versionadded:: 0.1.0a
Attributes
----------
flavor_text: :class:`str`
The localized flavor text for the move in the associated language.
language: :class:`NamedAPIObject`
The language the text is in.
version_group: :class:`NamedAPIObject`
The version group that uses the text."""
__slots__ = ("flavor_text", "language", "version_group")
class MoveMetaData:
"""Represents the metadata about a move.
.. versionadded:: 0.1.0a
Attributes
----------
ailment: :class:`NamedAPIObject`
The status ailment the move inflicts on it's target.
category: :class:`NamedAPIObject`
The category of move the move falls under, e.g. damage or ailment.
min_hits: Optional[:class:`int`]
The minimum number of times the move hits. ``None`` if it always only hits once.
max_hits: Optional[:class:`int`]
The maximum number of times the move hits. ``None`` if it always only hits once.
min_turns: Optional[:class:`int`]
The minimum number of turns the move continues to take effect. ``None`` if it always only lasts one turn.
max_turns: Optional[:class:`int`]
The maximum number of turns the move continues to take effect. ``None`` if it always only lasts one turn.
drain: :class:`int`
HP drain (if positive) or recoil damage (if negative), in percent of damage done.
healing: :class:`int`
The amount of hp gained by the attacking Pokemon, in percent of it's maximum HP.
crit_rate: :class:`int`
Critical hit rate bonus.
ailment_chance: :class:`int`
The likelihood the move will cause an ailment.
flinch_chance: :class:`int`
The likelihood the move will cause the target Pokémon to flinch.
stat_chance: :class:`int`
        The likelihood the move will cause a stat change in the target Pokémon.
"""
__slots__ = (
"ailment", "category", "min_hits", "max_hits", "min_turns", "max_turns", "drain", "healing", "crit_rate",
"ailment_chance", "flinch_chance", "stat_chance"
)
class MoveStatChange:
"""Represents a stat change in a :class:`move`
.. versionadded:: 0.1.0a
Attributes
----------
change: :class:`int`
The amount of change.
stat: :class:`NamedAPIObject`
The stat being affected."""
__slots__ = ("change", "stat")
class PastMoveStatValues:
"""Represents changed values of a :class:`Move` in a version group.
.. versionadded:: 0.1.0a
Attributes
----------
accuracy: :class:`int`
The percent value of how likely the move is to be successful.
effect_chance: :class:`int`
        The percent value of how likely it is that the move's effect will take effect.
power: :class:`int`
The base power of the move with a value of 0 if it does not have a base power.
pp: :class:`int`
Power points. The number of times the move can be used.
effect_entries: List[:class:`VerboseEffect`]
The effect of the move listed in different languages.
type: :class:`NamedAPIObject`
The elemental type of the move.
version_group: :class:`NamedAPIObject`
The version group in which these move stat values were in effect."""
__slots__ = ("accuracy", "effect_chance", "power", "pp", "effect_entries", "type", "version_group")
class ContestComboDetail:
"""Represents a detail of moves that can be used to grain additional
appeal points in contests.
.. versionadded:: 0.1.0a
Attributes
----------
use_before: List[:class:`NamedAPIObject`]
A list of moves to use before this move.
    use_after: List[:class:`NamedAPIObject`]
A list of moves to use after this move."""
__slots__ = ("use_before", "use_after")
class ContestComboSet:
"""Represents a set of super and normal contest combos.
.. versionadded:: 0.1.0a
Attributes
----------
normal: :class:`ContestComboDetail`
A detail of moves this move can be used before or after, granting additional appeal points in contests.
super: :class:`ContestComboDetail`
A detail of moves this move can be used before or after, granting additional appeal points in super contests."""
__slots__ = ("normal", "super")
| 38.847095 | 120 | 0.666929 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2019 Lorenzo
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .abc import BaseObject
from .ability import AbilityEffectChange
from .common import APIObject, MachineVersionDetail, Name, NamedAPIObject, VerboseEffect
__all__ = (
"Move",
"MoveFlavorText",
"MoveMetaData",
"MoveStatChange",
"PastMoveStatValues",
"ContestComboDetail",
"ContestComboSet"
)
class Move(BaseObject):
"""Represents a move object from the API.
.. versionadded:: 0.1.0a
.. container:: operations
.. describe:: str(x)
Returns the move's name.
.. describe:: x[y]
Returns a move's y attribute.
.. describe:: x == y
Check if two moves are the same.
.. describe:: x != y
Check if two moves are *not* the same.
Attributes
----------
id: :class:`int`
The identifier for the move.
name: :class:`str`
The name for the move.
accuracy: :class:`int`
The percent value of how likely the move is to be successful.
effect_chance: :class:`int`
The percent value of how likely it is that the move's effect will happen.
pp: :class:`int`
Power points. The number of times the move can be used.
power_points: :class:`int`
An alias for pp.
priority: :class:`int`
A value between -8 and 8. Sets the order in which the move is executed during battle.
power: :class:`int`
The base power of the move with a value of 0 if it does not have a base power.
    contest_combos: :class:`ContestComboSet`
A detail of normal and super contest combos that require the move.
contest_type: :class:`NamedAPIObject`
The type of appeal the move gives a Pokémon when used in a contest.
contest_effect: :class:`APIObject`
The effect the move has when used in a contest.
super_contest_effect: :class:`APIObject`
The effect the move has when used in a super contest.
damage_class: :class:`NamedAPIObject`
The type of damage the move inflicts on the target, e.g. physical.
effect_entries: List[:class:`VerboseEffect`]
The effect of the move listed in different languages.
flavor_text_entries: List[:class:`MoveFlavorText`]
The flavor text of the move listed in different languages.
generation: :class:`NamedAPIObject`
The generation in which the move was introduced.
meta: :class:`MoveMetaData`
Metadata about the move.
names: List[:class:`Name`]
The name of the move listed in different languages.
past_values: List[:class:`PastMoveStatValues`]
A list of move value changes across version groups of the game.
stat_changes: List[:class:`MoveStatChange`]
        A list of stats this move affects and how much it affects them.
effect_changes: List[:class:`AbilityEffectChange`]
The list of previous effects the move has had across version groups of the games.
target: :class:`NamedAPIObject`
The type of target that will receive the effects of the move.
type: :class:`NamedAPIObject`
The elemental type of the move.
    machines: List[:class:`MachineVersionDetail`]
A list of the machines that teach this move."""
__slots__ = (
"accuracy", "effect_chance", "pp", "power_points", "priority", "power", "contest_type", "type", "target",
"generation", "damage_class", "meta", "stat_changes", "names", "effect_entries", "flavor_text_entries",
"past_values", "effect_changes", "contest_effect", "super_contest_effect", "machines"
)
def __init__(self, data: dict):
super().__init__(data)
self.accuracy = data["accuracy"]
self.effect_chance = data["effect_chance"]
self.pp = data["pp"]
self.power_points = self.pp
self.priority = data["priority"]
self.power = data["power"]
self.contest_type = NamedAPIObject(data["contest_type"])
self.type = NamedAPIObject(data["type"])
self.target = NamedAPIObject(data["target"])
self.generation = NamedAPIObject(data["generation"])
self.damage_class = NamedAPIObject(data["damage_class"])
self.contest_effect = APIObject(data["contest_effect"])
self.super_contest_effect = APIObject(data["super_contest_effect"])
self.effect_changes = [AbilityEffectChange(d) for d in data["effect_changes"]]
self.meta = MoveMetaData(data["meta"])
self.stat_changes = [MoveStatChange(d) for d in data["stat_changes"]]
self.names = [Name(d) for d in data["names"]]
self.effect_entries = [VerboseEffect(d) for d in data["effect_entries"]]
self.flavor_text_entries = [MoveFlavorText(d) for d in data["flavor_text_entries"]]
self.past_values = [PastMoveStatValues(d) for d in data["past_values"]]
self.machines = [MachineVersionDetail(d) for d in data["machines"]]
class MoveFlavorText:
"""Represents the flavor text of a move associated with a language.
.. versionadded:: 0.1.0a
Attributes
----------
flavor_text: :class:`str`
The localized flavor text for the move in the associated language.
language: :class:`NamedAPIObject`
The language the text is in.
version_group: :class:`NamedAPIObject`
The version group that uses the text."""
__slots__ = ("flavor_text", "language", "version_group")
def __init__(self, data: dict):
self.flavor_text = data["flavor_text"]
self.language = NamedAPIObject(data["language"])
self.version_group = NamedAPIObject(data["version_group"])
def __repr__(self) -> str:
return "<MoveFlavorText language='{0.language}' version_group='{0.version_group}'>".format(self)
class MoveMetaData:
"""Represents the metadata about a move.
.. versionadded:: 0.1.0a
Attributes
----------
ailment: :class:`NamedAPIObject`
The status ailment the move inflicts on it's target.
category: :class:`NamedAPIObject`
The category of move the move falls under, e.g. damage or ailment.
min_hits: Optional[:class:`int`]
The minimum number of times the move hits. ``None`` if it always only hits once.
max_hits: Optional[:class:`int`]
The maximum number of times the move hits. ``None`` if it always only hits once.
min_turns: Optional[:class:`int`]
The minimum number of turns the move continues to take effect. ``None`` if it always only lasts one turn.
max_turns: Optional[:class:`int`]
The maximum number of turns the move continues to take effect. ``None`` if it always only lasts one turn.
drain: :class:`int`
HP drain (if positive) or recoil damage (if negative), in percent of damage done.
healing: :class:`int`
The amount of hp gained by the attacking Pokemon, in percent of it's maximum HP.
crit_rate: :class:`int`
Critical hit rate bonus.
ailment_chance: :class:`int`
The likelihood the move will cause an ailment.
flinch_chance: :class:`int`
The likelihood the move will cause the target Pokémon to flinch.
stat_chance: :class:`int`
        The likelihood the move will cause a stat change in the target Pokémon.
"""
__slots__ = (
"ailment", "category", "min_hits", "max_hits", "min_turns", "max_turns", "drain", "healing", "crit_rate",
"ailment_chance", "flinch_chance", "stat_chance"
)
def __init__(self, data: dict):
self.ailment = NamedAPIObject(data["ailment"])
self.category = NamedAPIObject(data["category"])
self.min_hits = data["min_hits"]
self.max_hits = data["max_hits"]
self.min_turns = data["min_turns"]
self.max_turns = data["max_turns"]
self.drain = data["drain"]
self.healing = data["healing"]
self.crit_rate = data["crit_rate"]
self.ailment_chance = data["ailment_chance"]
self.flinch_chance = data["flinch_chance"]
self.stat_chance = data["stat_chance"]
def __repr__(self) -> str:
return "<MoveMetaData category='{0.category}'>".format(self)
class MoveStatChange:
"""Represents a stat change in a :class:`move`
.. versionadded:: 0.1.0a
Attributes
----------
change: :class:`int`
The amount of change.
stat: :class:`NamedAPIObject`
The stat being affected."""
__slots__ = ("change", "stat")
def __init__(self, data: dict):
self.change = data["change"]
self.stat = NamedAPIObject(data["stat"])
def __repr__(self) -> str:
return "<MoveStatChange change={0.change} stat='{0.stat}'>".format(self)
class PastMoveStatValues:
"""Represents changed values of a :class:`Move` in a version group.
.. versionadded:: 0.1.0a
Attributes
----------
accuracy: :class:`int`
The percent value of how likely the move is to be successful.
effect_chance: :class:`int`
        The percent value of how likely it is that the move's effect will take effect.
power: :class:`int`
The base power of the move with a value of 0 if it does not have a base power.
pp: :class:`int`
Power points. The number of times the move can be used.
effect_entries: List[:class:`VerboseEffect`]
The effect of the move listed in different languages.
type: :class:`NamedAPIObject`
The elemental type of the move.
version_group: :class:`NamedAPIObject`
The version group in which these move stat values were in effect."""
__slots__ = ("accuracy", "effect_chance", "power", "pp", "effect_entries", "type", "version_group")
def __init__(self, data: dict):
self.accuracy = data["accuracy"]
self.effect_chance = data["effect_chance"]
self.power = data["power"]
self.pp = data["pp"]
self.effect_entries = [VerboseEffect(d) for d in data["effect_entries"]]
self.type = NamedAPIObject(data["type"])
self.version_group = NamedAPIObject(data["version_group"])
def __repr__(self) -> str:
return "<PastMoveStatValues type='{0.type}' version_group='{0.version_group}'>".format(self)
class ContestComboDetail:
"""Represents a detail of moves that can be used to grain additional
appeal points in contests.
.. versionadded:: 0.1.0a
Attributes
----------
use_before: List[:class:`NamedAPIObject`]
A list of moves to use before this move.
    use_after: List[:class:`NamedAPIObject`]
A list of moves to use after this move."""
__slots__ = ("use_before", "use_after")
def __init__(self, data: dict):
self.use_before = [NamedAPIObject(d) for d in data["use_before"]]
self.use_after = [NamedAPIObject(d) for d in data["use_after"]]
def __repr__(self) -> str:
return "<ContestComboDetail use_before={0.use_before} use_after={0.use_after}>".format(self)
class ContestComboSet:
"""Represents a set of super and normal contest combos.
.. versionadded:: 0.1.0a
Attributes
----------
normal: :class:`ContestComboDetail`
A detail of moves this move can be used before or after, granting additional appeal points in contests.
super: :class:`ContestComboDetail`
A detail of moves this move can be used before or after, granting additional appeal points in super contests."""
__slots__ = ("normal", "super")
def __init__(self, data: dict):
self.normal = ContestComboDetail(data["normal"])
self.super = ContestComboDetail(data["super"])
def __repr__(self):
return "<ContestComboSet normal={0.normal} super={0.super}>".format(self)
| 3,344 | 0 | 351 |
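# Hedged usage sketch (added; not part of the original module), based on the
# documented container operations. `raw_json` is a hypothetical dict fetched
# from the PokeAPI /move endpoint, e.g. via the library's client:
#
#     move = Move(raw_json)
#     str(move)                        # the move's name
#     move["power"]                    # attribute access via x[y]
#     move.pp == move.power_points     # alias documented above
#     move.meta.crit_rate              # nested MoveMetaData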
39a1a8cc5e43dabfe72f249a0919d5b4a9544d01 | 305 | py | Python | Chapter 09/Chap9_2.py | bpbpublications/Building-Machine-Learning-Systems-Using-Python | 42df3c10b92fceba49d89afe7610ee075a79f0c3 | [
"MIT"
] | null | null | null | Chapter 09/Chap9_2.py | bpbpublications/Building-Machine-Learning-Systems-Using-Python | 42df3c10b92fceba49d89afe7610ee075a79f0c3 | [
"MIT"
] | null | null | null | Chapter 09/Chap9_2.py | bpbpublications/Building-Machine-Learning-Systems-Using-Python | 42df3c10b92fceba49d89afe7610ee075a79f0c3 | [
"MIT"
] | 1 | 2021-11-30T03:55:51.000Z | 2021-11-30T03:55:51.000Z | #!/usr/bin/env python
# coding: utf-8
# In[8]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
ranstat = np.random.RandomState(1)
P = np.dot(ranstat.rand(2, 2), ranstat.randn(2, 600)).T
plt.scatter(P[:, 0], P[:, 1])
plt.axis('equal');
# In[ ]:
# In[ ]:
| 11.296296 | 55 | 0.613115 | #!/usr/bin/env python
# coding: utf-8
# In[8]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
ranstat = np.random.RandomState(1)
P = np.dot(ranstat.rand(2, 2), ranstat.randn(2, 600)).T
plt.scatter(P[:, 0], P[:, 1])
plt.axis('equal');
# In[ ]:
# In[ ]:
| 0 | 0 | 0 |
58e7fe168632890239c09de539821e02be580e50 | 4,024 | py | Python | firepy/vm.py | gamgi/firepy | fea859de4a8eaff78d4cf1ee7c3fe1b0467a77c6 | [
"MIT"
] | null | null | null | firepy/vm.py | gamgi/firepy | fea859de4a8eaff78d4cf1ee7c3fe1b0467a77c6 | [
"MIT"
] | null | null | null | firepy/vm.py | gamgi/firepy | fea859de4a8eaff78d4cf1ee7c3fe1b0467a77c6 | [
"MIT"
] | null | null | null | from typing import Optional
from functools import wraps
from io import StringIO
from sh import RunningCommand
from requests.exceptions import ConnectionError, HTTPError
from firepy.connection import Connection
from firepy.exceptions import err_from_stderr, FirecrackerApiError
from firepy.utils.network_utils import network_mac, network_tap_name
from firepy.utils.firecracker_utils import kernel_boot_args
from firepy.utils.logging_utils import logger
def handle_errors(func):
"""Decorator that humanizes exceptions.
It
- parses firecracker HTTP responses
- checks instance stderr for error messages
"""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except HTTPError as err:
            res = err.response.json()
            if 'fault_message' in res:
                raise FirecrackerApiError(res['fault_message'])
            raise
        except ConnectionError as err:
            if self.stderr is not None:
                raise err_from_stderr(self.stderr) from err
            raise
    return wrapper
| 30.717557 | 106 | 0.605616 | from typing import Optional
from functools import wraps
from io import StringIO
from sh import RunningCommand
from requests.exceptions import ConnectionError, HTTPError
from firepy.connection import Connection
from firepy.exceptions import err_from_stderr, FirecrackerApiError
from firepy.utils.network_utils import network_mac, network_tap_name
from firepy.utils.firecracker_utils import kernel_boot_args
from firepy.utils.logging_utils import logger
def handle_errors(func):
"""Decorator that humanizes exceptions.
It
- parses firecracker HTTP responses
- checks instance stderr for error messages
"""
@wraps(func)
def wrapper(self: 'Vm', *args, **kwargs):
try:
            return func(self, *args, **kwargs)
except HTTPError as err:
res = err.response.json()
if 'fault_message' in res:
raise FirecrackerApiError(res['fault_message'])
raise
except ConnectionError as err:
if self.stderr is not None:
raise err_from_stderr(self.stderr) from err
raise
return wrapper
class Vm:
id: int
conn: Connection
stderr: Optional[StringIO]
handle: Optional[RunningCommand]
def __init__(self, id: int, socket_path: str, stderr: StringIO = None, handle: RunningCommand = None):
self.id = id
self.conn = Connection(socket_path)
self.stderr = stderr
self.handle = handle
def wait(self, **kwargs):
if not self.handle:
raise FirecrackerApiError('No VM handle to wait for')
self.handle.wait(**kwargs)
logger.info('VM process exited')
def kill(self):
if not self.handle:
raise FirecrackerApiError('No VM handle to kill')
self.handle.kill()
logger.info('VM process killed')
@handle_errors
def start(self):
logger.debug('starting VM')
self.conn.put('/actions', json={'action_type': 'InstanceStart'})
@handle_errors
def pause(self):
logger.debug('pausing VM')
self.conn.put('/vm', json={'state': 'Paused'})
@handle_errors
def resume(self):
logger.debug('resuming VM')
self.conn.put('/vm', json={'state': 'Resumed'})
@handle_errors
def set_config(self, **kwargs):
self.conn.put('/machine-config', json={
**kwargs
})
@handle_errors
def set_kernel(self, kernel_path: str, override_boot_args: dict = {},
**kwargs):
boot_args = kernel_boot_args(self.id, override_boot_args)
self.conn.put('/boot-source', json={
"kernel_image_path": kernel_path,
"boot_args": boot_args,
**kwargs
})
@handle_errors
def set_rootfs(self, rootfs_path: str, **kwargs):
self.conn.put('/drives/rootfs', json={
"drive_id": "rootfs",
"path_on_host": rootfs_path,
"is_root_device": True,
"is_read_only": False,
**kwargs
})
@handle_errors
def create_network_interface(self, **kwargs):
self.conn.put('/network-interfaces/1', json={
"iface_id": "1",
"guest_mac": network_mac(self.id),
"host_dev_name": network_tap_name(self.id),
**kwargs
})
@handle_errors
def create_snapshot(self, snapshot_path: str, mem_file_path: str,
snapshot_type='Full', **kwargs):
self.conn.put('/snapshot/create', json={
"snapshot_type": snapshot_type,
"snapshot_path": snapshot_path,
"mem_file_path": mem_file_path,
**kwargs
})
@handle_errors
def load_snapshot(self, snapshot_path: str, mem_file_path: str,
resume_vm=True, **kwargs):
self.conn.put('/snapshot/load', json={
"snapshot_path": snapshot_path,
"mem_file_path": mem_file_path,
"resume_vm": resume_vm,
**kwargs
})
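# Illustrative lifecycle (hypothetical socket, kernel and rootfs paths; each
# call maps onto one of the decorated methods above):
#
#   vm = Vm(id=0, socket_path='/tmp/firecracker-0.sock')
#   vm.set_config(vcpu_count=1, mem_size_mib=128)
#   vm.set_kernel('/path/to/vmlinux')
#   vm.set_rootfs('/path/to/rootfs.ext4')
#   vm.create_network_interface()
#   vm.start()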
| 2,727 | 584 | 49 |
7f76cdc498d8ee42a21ab0dd328e6c52f1a68666 | 3,115 | py | Python | guacamol/assess_goal_directed_generation.py | jcheminform/guacamol | dd7f7b12e1ab59151394aba5f4a95ee204fd0203 | [
"MIT"
] | 242 | 2018-11-29T13:34:13.000Z | 2022-03-26T19:35:17.000Z | guacamol/assess_goal_directed_generation.py | jcheminform/guacamol | dd7f7b12e1ab59151394aba5f4a95ee204fd0203 | [
"MIT"
] | 13 | 2019-01-31T03:33:36.000Z | 2022-01-03T07:03:19.000Z | guacamol/assess_goal_directed_generation.py | jcheminform/guacamol | dd7f7b12e1ab59151394aba5f4a95ee204fd0203 | [
"MIT"
] | 68 | 2018-11-26T10:03:41.000Z | 2022-03-28T20:58:20.000Z | import datetime
import json
import logging
from collections import OrderedDict
from typing import List, Any, Dict
import guacamol
from guacamol.goal_directed_benchmark import GoalDirectedBenchmark, GoalDirectedBenchmarkResult
from guacamol.goal_directed_generator import GoalDirectedGenerator
from guacamol.benchmark_suites import goal_directed_benchmark_suite
from guacamol.utils.data import get_time_string
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def assess_goal_directed_generation(goal_directed_molecule_generator: GoalDirectedGenerator,
json_output_file='output_goal_directed.json',
benchmark_version='v1') -> None:
"""
    Assesses a goal-directed generation model for de novo molecule design.
Args:
goal_directed_molecule_generator: Model to evaluate
        json_output_file: Name of the JSON file in which to save the results
benchmark_version: which benchmark suite to execute
"""
logger.info(f'Benchmarking goal-directed molecule generation, version {benchmark_version}')
benchmarks = goal_directed_benchmark_suite(version_name=benchmark_version)
results = _evaluate_goal_directed_benchmarks(
goal_directed_molecule_generator=goal_directed_molecule_generator,
benchmarks=benchmarks)
benchmark_results: Dict[str, Any] = OrderedDict()
benchmark_results['guacamol_version'] = guacamol.__version__
benchmark_results['benchmark_suite_version'] = benchmark_version
benchmark_results['timestamp'] = get_time_string()
benchmark_results['results'] = [vars(result) for result in results]
logger.info(f'Save results to file {json_output_file}')
with open(json_output_file, 'wt') as f:
f.write(json.dumps(benchmark_results, indent=4))
def _evaluate_goal_directed_benchmarks(goal_directed_molecule_generator: GoalDirectedGenerator,
benchmarks: List[GoalDirectedBenchmark]
) -> List[GoalDirectedBenchmarkResult]:
"""
Evaluate a model with the given benchmarks.
Should not be called directly except for testing purposes.
Args:
goal_directed_molecule_generator: model to assess
benchmarks: list of benchmarks to evaluate
"""
logger.info(f'Number of benchmarks: {len(benchmarks)}')
results = []
for i, benchmark in enumerate(benchmarks, 1):
logger.info(f'Running benchmark {i}/{len(benchmarks)}: {benchmark.name}')
result = benchmark.assess_model(goal_directed_molecule_generator)
logger.info(f'Results for the benchmark "{result.benchmark_name}":')
logger.info(f' Score: {result.score:.6f}')
logger.info(f' Execution time: {str(datetime.timedelta(seconds=int(result.execution_time)))}')
logger.info(f' Metadata: {result.metadata}')
results.append(result)
logger.info('Finished execution of the benchmarks')
return results
| 42.094595 | 103 | 0.727127 | import datetime
import json
import logging
from collections import OrderedDict
from typing import List, Any, Dict
import guacamol
from guacamol.goal_directed_benchmark import GoalDirectedBenchmark, GoalDirectedBenchmarkResult
from guacamol.goal_directed_generator import GoalDirectedGenerator
from guacamol.benchmark_suites import goal_directed_benchmark_suite
from guacamol.utils.data import get_time_string
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def assess_goal_directed_generation(goal_directed_molecule_generator: GoalDirectedGenerator,
json_output_file='output_goal_directed.json',
benchmark_version='v1') -> None:
"""
    Assesses a goal-directed generation model for de novo molecule design.
Args:
goal_directed_molecule_generator: Model to evaluate
        json_output_file: Name of the JSON file in which to save the results
benchmark_version: which benchmark suite to execute
"""
logger.info(f'Benchmarking goal-directed molecule generation, version {benchmark_version}')
benchmarks = goal_directed_benchmark_suite(version_name=benchmark_version)
results = _evaluate_goal_directed_benchmarks(
goal_directed_molecule_generator=goal_directed_molecule_generator,
benchmarks=benchmarks)
benchmark_results: Dict[str, Any] = OrderedDict()
benchmark_results['guacamol_version'] = guacamol.__version__
benchmark_results['benchmark_suite_version'] = benchmark_version
benchmark_results['timestamp'] = get_time_string()
benchmark_results['results'] = [vars(result) for result in results]
logger.info(f'Save results to file {json_output_file}')
with open(json_output_file, 'wt') as f:
f.write(json.dumps(benchmark_results, indent=4))
def _evaluate_goal_directed_benchmarks(goal_directed_molecule_generator: GoalDirectedGenerator,
benchmarks: List[GoalDirectedBenchmark]
) -> List[GoalDirectedBenchmarkResult]:
"""
Evaluate a model with the given benchmarks.
Should not be called directly except for testing purposes.
Args:
goal_directed_molecule_generator: model to assess
benchmarks: list of benchmarks to evaluate
"""
logger.info(f'Number of benchmarks: {len(benchmarks)}')
results = []
for i, benchmark in enumerate(benchmarks, 1):
logger.info(f'Running benchmark {i}/{len(benchmarks)}: {benchmark.name}')
result = benchmark.assess_model(goal_directed_molecule_generator)
logger.info(f'Results for the benchmark "{result.benchmark_name}":')
logger.info(f' Score: {result.score:.6f}')
logger.info(f' Execution time: {str(datetime.timedelta(seconds=int(result.execution_time)))}')
logger.info(f' Metadata: {result.metadata}')
results.append(result)
logger.info('Finished execution of the benchmarks')
return results
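# Illustrative call (MyGenerator stands in for any GoalDirectedGenerator
# subclass you implement; the argument values shown match the defaults above):
#
#   assess_goal_directed_generation(MyGenerator(),
#                                   json_output_file='output_goal_directed.json',
#                                   benchmark_version='v1')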
| 0 | 0 | 0 |
facca4b14c67e4e1d8ac316092e1291cd1b25954 | 41,540 | py | Python | icebox/icebox_asc2hlc.py | sandeepmistry/icestorm | 8f61acd0556c8afee83ec2e77dedb03e700333d9 | [
"0BSD"
] | 6 | 2017-11-11T18:05:30.000Z | 2020-02-24T11:21:03.000Z | icebox/icebox_asc2hlc.py | sandeepmistry/icestorm | 8f61acd0556c8afee83ec2e77dedb03e700333d9 | [
"0BSD"
] | null | null | null | icebox/icebox_asc2hlc.py | sandeepmistry/icestorm | 8f61acd0556c8afee83ec2e77dedb03e700333d9 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2017 Roland Lutz
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import getopt, os, re, sys
import icebox
GLB_NETWK_EXTERNAL_BLOCKS = [(13, 8, 1), (0, 8, 1), (7, 17, 0), (7, 0, 0),
(0, 9, 0), (13, 9, 0), (6, 0, 1), (6, 17, 1)]
GLB_NETWK_INTERNAL_TILES = [(7, 0), (7, 17), (13, 9), (0, 9),
(6, 17), (6, 0), (0, 8), (13, 8)]
## Get the global name of a net.
#
# \param x, y coordinates of the tile to which the net belongs
# \param fw, fh width and height of the tile fabric (excluding I/O tiles)
# \param net net name
#
# \return the global name of the net if it is a span wire, otherwise
# the unmodified net name
#
# There are 46624 span wires on the 1k (not counting dummies):
#
# span4_x[1..12]_g[1..20]_[0..11]
# span4_y[1..16]_g[1..16]_[0..11]
# span12_x[1..12]_g[1..28]_[0..1]
# span12_y[1..16]_g[1..24]_[0..1]
#
# span4_left_g[3..16]_[0..3]
# span4_right_g[5..18]_[0..3]
# span4_bottom_g[3..12]_[0..3]
# span4_top_g[5..14]_[0..3]
#
# span4_topleft[2,4,6,8]_[0..3]
# span4_bottomright[2,4,6,8]_[0..3]
#
# dummy_y[1..16]_g[0..3]_[0..11]
#
# "Dummy" nets are horizontal accesses to non-existing vertical span
# wires on the right edge which are listed by icebox but don't
# actually connect to anything outside the tile itself.
## Return the human-readable name of the \c fabout net of IO tile
## <tt>(x, y)</tt>.
## Remove an argument from a LUT string and an associated list of
## argument names.
#
# This is a helper function for \ref lut_to_logic_expression.
#
# \param lut string of 2^N `0' or `1' characters representing the
# logic of an Nx1 look-up table
# \param args list of N strings containing the human-readable names
# of the arguments
# \param i index of the argument to remove
# \param keep boolean value indicating which value of the removed
# argument is to be assumed in the resulting LUT
#
# \return a new pair <tt>(lut, args)</tt> with the argument removed
## Negate a tuple representation of a logic expression.
#
# This is a helper function for \ref lut_to_logic_expression.
## Convert a tuple representation of a logic expression into a string.
#
# This is a helper function for \ref lut_to_logic_expression.
#
# \param expr the expression to convert
# \param parenthize whether a compound expression should be
# surrounded by parentheses
## Remove arguments which don't affect the result from a LUT string
## and an associated list of argument names.
#
# This is a helper function for \ref lut_to_logic_expression.
#
# \param lut string of 2^N `0' or `1' characters representing the
# logic of an Nx1 look-up table
# \param args list of N strings containing the human-readable names
# of the arguments
#
# \return a new pair <tt>(lut, args)</tt> with all unused arguments
# removed
## Convert a LUT string to a logic expression.
#
# \param lut string of 2^N `0' or `1' characters representing the
# logic of an Nx1 look-up table
# \param args list of N strings containing the human-readable names
# of the arguments
#
# \return a string containing a human-readable logic expression
# equivalent to the look-up table
#
# Example: lut_to_logic_expression('00010000', ['a', 'b', 'c']) -> 'a & b & !c'
if __name__ == '__main__':
main()
| 36.599119 | 80 | 0.489769 | #!/usr/bin/env python3
# Copyright (C) 2017 Roland Lutz
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import getopt, os, re, sys
import icebox
GLB_NETWK_EXTERNAL_BLOCKS = [(13, 8, 1), (0, 8, 1), (7, 17, 0), (7, 0, 0),
(0, 9, 0), (13, 9, 0), (6, 0, 1), (6, 17, 1)]
GLB_NETWK_INTERNAL_TILES = [(7, 0), (7, 17), (13, 9), (0, 9),
(6, 17), (6, 0), (0, 8), (13, 8)]
## Get the global name of a net.
#
# \param x, y coordinates of the tile to which the net belongs
# \param fw, fh width and height of the tile fabric (excluding I/O tiles)
# \param net net name
#
# \return the global name of the net if it is a span wire, otherwise
# the unmodified net name
#
# There are 46624 span wires on the 1k (not counting dummies):
#
# span4_x[1..12]_g[1..20]_[0..11]
# span4_y[1..16]_g[1..16]_[0..11]
# span12_x[1..12]_g[1..28]_[0..1]
# span12_y[1..16]_g[1..24]_[0..1]
#
# span4_left_g[3..16]_[0..3]
# span4_right_g[5..18]_[0..3]
# span4_bottom_g[3..12]_[0..3]
# span4_top_g[5..14]_[0..3]
#
# span4_topleft[2,4,6,8]_[0..3]
# span4_bottomright[2,4,6,8]_[0..3]
#
# dummy_y[1..16]_g[0..3]_[0..11]
#
# "Dummy" nets are horizontal accesses to non-existing vertical span
# wires on the right edge which are listed by icebox but don't
# actually connect to anything outside the tile itself.
def translate_netname(x, y, fw, fh, net):
def group_and_index(s, group_size):
n = int(s)
g = n // group_size
i = n % group_size
if g % 2 == 1:
i = i + 1 - (i % 2) * 2
return g, i
# logic and RAM tiles
match = re.match(r'sp4_h_r_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
return 'span4_y%d_g%d_%d' % (y, x - g + 4, i)
match = re.match(r'sp4_h_l_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
return 'span4_y%d_g%d_%d' % (y, x - g + 3, i)
match = re.match(r'sp4_v_b_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
return 'span4_x%d_g%d_%d' % (x, y + g, i)
match = re.match(r'sp4_v_t_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
return 'span4_x%d_g%d_%d' % (x, y + g + 1, i)
match = re.match(r'sp4_r_v_b_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
if x == fw:
# this net doesn't connect anywhere
return 'dummy_y%d_g%d_%d' % (y, g, i)
else:
return 'span4_x%d_g%d_%d' % (x + 1, y + g, i)
match = re.match(r'sp12_h_r_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 2)
return 'span12_y%d_g%d_%d' % (y, x - g + 12, i)
match = re.match(r'sp12_h_l_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 2)
return 'span12_y%d_g%d_%d' % (y, x - g + 11, i)
match = re.match(r'sp12_v_b_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 2)
return 'span12_x%d_g%d_%d' % (x, y + g, i)
match = re.match(r'sp12_v_t_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 2)
return 'span12_x%d_g%d_%d' % (x, y + g + 1, i)
# I/O tiles
match = re.match(r'span4_horz_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
if x == 0:
return 'span4_y%d_g%d_%d' % (y, x - g + 4, i)
else:
return 'span4_y%d_g%d_%d' % (y, x - g + 3, i)
match = re.match(r'span4_vert_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 12)
if y == 0:
return 'span4_x%d_g%d_%d' % (x, y + g + 1, i)
else:
return 'span4_x%d_g%d_%d' % (x, y + g, i)
match = re.match(r'span12_horz_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 2)
if x == 0:
return 'span12_y%d_g%d_%d' % (y, x - g + 12, i)
else:
return 'span12_y%d_g%d_%d' % (y, x - g + 11, i)
match = re.match(r'span12_vert_(\d+)$', net)
if match is not None:
g, i = group_and_index(match.group(1), 2)
if y == 0:
return 'span12_x%d_g%d_%d' % (x, y + g + 1, i)
else:
return 'span12_x%d_g%d_%d' % (x, y + g, i)
# I/O tiles - peripheral wires
match = re.match(r'span4_horz_r_(\d+)$', net)
if match is not None:
n = int(match.group(1)); g = n // 4; i = n % 4
if y == 0:
if fw - x + g - 4 < 0:
return 'span4_bottomright%d_%d' % ((fw - x + 1 + g) * 2, i)
elif x - g + 1 < 0:
return 'span4_left_g%d_%d' % (-x + 1 + g, i)
else:
return 'span4_bottom_g%d_%d' % (x + 4 - g, i)
else:
if x - g - 1 < 0:
return 'span4_topleft%d_%d' % ((x + 4 - g) * 2, i)
elif x - g + 1 >= fw:
return 'span4_right_g%d_%d' % (fh + fw - x + 1 + g, i)
else:
return 'span4_top_g%d_%d' % (x + 4 - g, i)
match = re.match(r'span4_horz_l_(\d+)$', net)
if match is not None:
n = int(match.group(1)); g = n // 4; i = n % 4
if y == 0:
if x - g < 0:
return 'span4_left_g%d_%d' % (-x + 2 + g, i)
else:
return 'span4_bottom_g%d_%d' % (x + 3 - g, i)
else:
if x - g - 2 < 0:
return 'span4_topleft%d_%d' % ((x + 3 - g) * 2, i)
else:
return 'span4_top_g%d_%d' % (x + 3 - g, i)
match = re.match(r'span4_vert_b_(\d+)$', net)
if match is not None:
n = int(match.group(1)); g = n // 4; i = n % 4
if x == 0:
if y + g - 3 < 0:
return 'span4_bottom_g%d_%d' % (-y + 5 - g, i)
if fh - y - g < 0:
return 'span4_topleft%d_%d' % ((fh + 5 - y - g) * 2, i)
else:
return 'span4_left_g%d_%d' % (y + g, i)
else:
if y + g - 5 < 0:
return 'span4_bottomright%d_%d' % ((y + g) * 2, i)
elif y + g >= fh + 3:
return 'span4_top_g%d_%d' % (fw + fh + 5 - y - g, i)
else:
return 'span4_right_g%d_%d' % (y + g, i)
match = re.match(r'span4_vert_t_(\d+)$', net)
if match is not None:
n = int(match.group(1)); g = n // 4; i = n % 4
if x == 0:
if fh - y - g - 1 < 0:
return 'span4_topleft%d_%d' % ((fh + 4 - y - g) * 2, i)
else:
return 'span4_left_g%d_%d' % (y + g + 1, i)
else:
if y + g >= fh + 2:
return 'span4_top_g%d_%d' % (fw + fh + 4 - y - g, i)
else:
return 'span4_right_g%d_%d' % (y + g + 1, i)
return net
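# A worked example of the mapping (derived by hand from the rules above):
# for tile (x=3, y=5) on a fabric with fw=12, fh=16, the net 'sp4_h_r_5'
# gives group_and_index(5, 12) -> g=0, i=5 (g is even, so i is unchanged),
# hence 'span4_y5_g7_5' because y=5 and x - g + 4 = 7.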
## Return the human-readable name of the \c fabout net of IO tile
## <tt>(x, y)</tt>.
def lookup_fabout(x, y):
if (x, y) in GLB_NETWK_INTERNAL_TILES:
return 'glb_netwk_%d' % GLB_NETWK_INTERNAL_TILES.index((x, y))
return 'fabout'
## Remove an argument from a LUT string and an associated list of
## argument names.
#
# This is a helper function for \ref lut_to_logic_expression.
#
# \param lut string of 2^N `0' or `1' characters representing the
# logic of an Nx1 look-up table
# \param args list of N strings containing the human-readable names
# of the arguments
# \param i index of the argument to remove
# \param keep boolean value indicating which value of the removed
# argument is to be assumed in the resulting LUT
#
# \return a new pair <tt>(lut, args)</tt> with the argument removed
def discard_argument(lut, args, i, keep):
assert len(lut) == 1 << len(args)
assert i >= 0 and i < len(args)
return ''.join(bit for j, bit in enumerate(lut)
if (j & (1 << i) != 0) == keep), \
args[:i] + args[i + 1:]
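# Example: discard_argument('0001', ['a', 'b'], 0, True) keeps only the LUT
# entries where argument 0 is 1 and yields ('01', ['b']).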
## Negate a tuple representation of a logic expression.
#
# This is a helper function for \ref lut_to_logic_expression.
def negate_expr(expr):
if len(expr) == 2:
op, a = expr
assert op == 'not'
return a
if len(expr) != 3:
return 'not', expr
a, op, b = expr
if op == 'and':
return negate_expr(a), 'or', negate_expr(b)
if op == 'or':
return negate_expr(a), 'and', negate_expr(b)
assert op == 'xor'
if len(a) == 2 and a[0] == 'not':
return a[1], op, b
if len(b) == 2 and b[0] == 'not':
return a, op, b[1]
return negate_expr(a), op, b
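# Example: negate_expr(('a', 'and', 'b')) applies De Morgan's law and
# returns (('not', 'a'), 'or', ('not', 'b')).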
## Convert a tuple representation of a logic expression into a string.
#
# This is a helper function for \ref lut_to_logic_expression.
#
# \param expr the expression to convert
# \param parenthize whether a compound expression should be
# surrounded by parentheses
def stringify(expr, parenthize):
if type(expr) == str:
return expr
assert type(expr) == tuple
if len(expr) == 2:
op, a = expr
assert op == 'not'
assert type(a) == str
return "!" + a
if len(expr) == 5:
a, op0, b, op1, c = expr
assert op0 == '?' and op1 == ':'
s = '%s ? %s : %s' % (stringify(a, False), stringify(b, False),
stringify(c, False))
if parenthize:
return '(%s)' % s
return s
assert len(expr) == 3
a, op, b = expr
l = [a, b]
i = 0
while i < len(l):
if type(l[i]) == tuple and len(l[i]) == 3 and l[i][1] == op:
l = l[:i] + [l[i][0], l[i][2]] + l[i + 1:]
else:
i += 1
if op == 'and':
op = '&'
elif op == 'xor':
op = '^'
elif op == 'or':
op = '|'
s = (' %s ' % op).join(stringify(x, True) for x in l)
if parenthize:
return '(%s)' % s
return s
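# Example: stringify(('a', 'and', ('b', 'or', 'c')), False) -> 'a & (b | c)'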
## Remove arguments which don't affect the result from a LUT string
## and an associated list of argument names.
#
# This is a helper function for \ref lut_to_logic_expression.
#
# \param lut string of 2^N `0' or `1' characters representing the
# logic of an Nx1 look-up table
# \param args list of N strings containing the human-readable names
# of the arguments
#
# \return a new pair <tt>(lut, args)</tt> with all unused arguments
# removed
def discard_unused_arguments(lut, args):
assert len(lut) == 1 << len(args)
i = 0
while i < len(args):
diff = False
for j in range(len(lut)):
if j & (1 << i) == 0 and lut[j] != lut[j | (1 << i)]:
diff = True
if not diff:
lut, args = discard_argument(lut, args, i, False)
else:
i += 1
return lut, args
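# Example: discard_unused_arguments('0101', ['a', 'b']) -> ('01', ['a']),
# since the result never depends on 'b'.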
## Convert a LUT string to a logic expression.
#
# \param lut string of 2^N `0' or `1' characters representing the
# logic of an Nx1 look-up table
# \param args list of N strings containing the human-readable names
# of the arguments
#
# \return a string containing a human-readable logic expression
# equivalent to the look-up table
#
# Example: lut_to_logic_expression('00010000', ['a', 'b', 'c']) -> 'a & b & !c'
def lut_to_logic_expression(lut, args):
lut, args = discard_unused_arguments(lut, args)
# filter out independent top-level arguments
toplevel_args = []
i = 0
while i < len(args) and len(args) >= 2:
ai_0 = set(bit for j, bit in enumerate(lut) if j & (1 << i) == 0)
ai_1 = set(bit for j, bit in enumerate(lut) if j & (1 << i) != 0)
assert len(ai_0) == 2 or len(ai_1) == 2
if len(ai_0) == 1:
# expression is constant if this argument is 0
# e = (...) & arg or e = (...) | !arg
if tuple(ai_0)[0] == '0':
toplevel_args.append(('and', args[i]))
else:
toplevel_args.append(('or', ('not', args[i])))
lut, args = discard_argument(lut, args, i, True)
i = 0
continue
if len(ai_1) == 1:
# expression is constant if this argument is 1
# e = (...) & !arg or e = (...) | arg
if tuple(ai_1)[0] == '0':
toplevel_args.append(('and', ('not', args[i])))
else:
toplevel_args.append(('or', args[i]))
lut, args = discard_argument(lut, args, i, False)
i = 0
continue
i += 1
i = 0
while i < len(args) and len(args) >= 2:
is_xor = True
for j in range(len(lut)):
if j & (1 << i) == 0 and lut[j] == lut[j | (1 << i)]:
is_xor = False
break
if is_xor:
toplevel_args.append(('xor', args[i]))
lut, args = discard_argument(lut, args, i, False)
continue
i += 1
# detect simple top-level ternary conditions
i = 0
while i < len(args) and len(args) >= 3:
j = i + 1
while j < len(args):
ai_0_aj_0 = set(bit for k, bit in enumerate(lut)
if k & (1 << i) == 0 and k & (1 << j) == 0)
ai_0_aj_1 = set(bit for k, bit in enumerate(lut)
if k & (1 << i) == 0 and k & (1 << j) != 0)
ai_1_aj_0 = set(bit for k, bit in enumerate(lut)
if k & (1 << i) != 0 and k & (1 << j) == 0)
ai_1_aj_1 = set(bit for k, bit in enumerate(lut)
if k & (1 << i) != 0 and k & (1 << j) != 0)
assert len(ai_0_aj_0) == 2 or len(ai_0_aj_1) == 2 or \
len(ai_1_aj_0) == 2 or len(ai_1_aj_1) == 2
if (len(ai_0_aj_0) == 2 or len(ai_0_aj_1) == 2) and \
(len(ai_1_aj_0) == 2 or len(ai_1_aj_1) == 2) and \
(len(ai_0_aj_0) == 2 or len(ai_1_aj_0) == 2) and \
(len(ai_0_aj_1) == 2 or len(ai_1_aj_1) == 2):
j += 1
continue
ai_doesnt_matter_for_aj_0 = True
ai_doesnt_matter_for_aj_1 = True
aj_doesnt_matter_for_ai_0 = True
aj_doesnt_matter_for_ai_1 = True
for k in range(len(lut)):
if k & (1 << i) != 0 or k & (1 << j) != 0:
continue
if lut[k] != lut[k | (1 << i)]:
ai_doesnt_matter_for_aj_0 = False
if lut[k | (1 << j)] != lut[k | (1 << i) | (1 << j)]:
ai_doesnt_matter_for_aj_1 = False
if lut[k] != lut[k | (1 << j)]:
aj_doesnt_matter_for_ai_0 = False
if lut[k | (1 << i)] != lut[k | (1 << i) | (1 << j)]:
aj_doesnt_matter_for_ai_1 = False
if len(ai_0_aj_0) == 1 and len(ai_0_aj_1) == 1 and \
aj_doesnt_matter_for_ai_1:
assert tuple(ai_0_aj_0)[0] != tuple(ai_0_aj_1)[0]
if tuple(ai_0_aj_0)[0] == '0':
toplevel_args.append((args[i], '?', ':', args[j]))
else:
toplevel_args.append((args[i], '?', ':', ('not', args[j])))
lut, args = discard_argument(lut, args, i, True)
# break loops
i = len(args)
j = len(args)
break
if len(ai_1_aj_0) == 1 and len(ai_1_aj_1) == 1 and \
aj_doesnt_matter_for_ai_0:
assert tuple(ai_1_aj_0)[0] != tuple(ai_1_aj_1)[0]
if tuple(ai_1_aj_0)[0] == '0':
toplevel_args.append((args[i], '?', args[j], ':'))
else:
toplevel_args.append((args[i], '?', ('not', args[j]), ':'))
lut, args = discard_argument(lut, args, i, False)
# break loops
i = len(args)
j = len(args)
break
if len(ai_0_aj_0) == 1 and len(ai_1_aj_0) == 1 and \
ai_doesnt_matter_for_aj_1:
assert tuple(ai_0_aj_0)[0] != tuple(ai_1_aj_0)[0]
if tuple(ai_0_aj_0)[0] == '0':
toplevel_args.append((args[j], '?', ':', args[i]))
else:
toplevel_args.append((args[j], '?', ':', ('not', args[i])))
lut, args = discard_argument(lut, args, j, True)
# break loops
i = len(args)
j = len(args)
break
if len(ai_0_aj_1) == 1 and len(ai_1_aj_1) == 1 and \
ai_doesnt_matter_for_aj_0:
assert tuple(ai_0_aj_1)[0] != tuple(ai_1_aj_1)[0]
if tuple(ai_0_aj_1)[0] == '0':
toplevel_args.append((args[j], '?', args[i], ':'))
else:
toplevel_args.append((args[j], '?', ('not', args[i]), ':'))
lut, args = discard_argument(lut, args, j, False)
# break loops
i = len(args)
j = len(args)
break
j += 1
i += 1
lut, args = discard_unused_arguments(lut, args)
# group pairwise isolated arguments
i = 0
while i < len(args):
j = i + 1
while j < len(args):
ai_doesnt_matter_for_aj_0 = True
ai_doesnt_matter_for_aj_1 = True
aj_doesnt_matter_for_ai_0 = True
aj_doesnt_matter_for_ai_1 = True
both_dont_matter_if_equal = True
both_dont_matter_if_unequal = True
for k in range(len(lut)):
if k & (1 << i) != 0 or k & (1 << j) != 0:
continue
if lut[k] != lut[k | (1 << i)]:
ai_doesnt_matter_for_aj_0 = False
if lut[k | (1 << j)] != lut[k | (1 << i) | (1 << j)]:
ai_doesnt_matter_for_aj_1 = False
if lut[k] != lut[k | (1 << j)]:
aj_doesnt_matter_for_ai_0 = False
if lut[k | (1 << i)] != lut[k | (1 << i) | (1 << j)]:
aj_doesnt_matter_for_ai_1 = False
if lut[k] != lut[k | (1 << i) | (1 << j)]:
both_dont_matter_if_equal = False
if lut[k | (1 << i)] != lut[k | (1 << j)]:
both_dont_matter_if_unequal = False
# There are five possibilities of coupled arguments: one
# of the four combinations differs from the other three,
# or they are xor'ed
if ai_doesnt_matter_for_aj_1 and \
aj_doesnt_matter_for_ai_1 and \
both_dont_matter_if_unequal:
# special case is ai=0 aj=0
args = args[:i] + ((args[i], 'or', args[j]), ) + args[i + 1:]
lut, args = discard_argument(lut, args, j, False)
j = i + 1
elif ai_doesnt_matter_for_aj_1 and \
aj_doesnt_matter_for_ai_0 and \
both_dont_matter_if_equal:
# special case is ai=1 aj=0
args = args[:i] + ((args[i], 'and', negate_expr(args[j])), ) + \
args[i + 1:]
lut, args = discard_argument(lut, args, j, False)
j = i + 1
elif ai_doesnt_matter_for_aj_0 and \
aj_doesnt_matter_for_ai_1 and \
both_dont_matter_if_equal:
# special case is ai=0 aj=1
args = args[:i] + ((args[i], 'or', negate_expr(args[j])), ) + \
args[i + 1:]
lut, args = discard_argument(lut, args, j, True)
j = i + 1
elif ai_doesnt_matter_for_aj_0 and \
aj_doesnt_matter_for_ai_0 and \
both_dont_matter_if_unequal:
# special case is ai=1 aj=1
args = args[:i] + ((args[i], 'and', args[j]), ) + args[i + 1:]
lut, args = discard_argument(lut, args, j, True)
j = i + 1
elif both_dont_matter_if_equal and \
both_dont_matter_if_unequal:
args = args[:i] + ((args[i], 'xor', args[j]), ) + args[i + 1:]
lut, args = discard_argument(lut, args, j, False)
j = i + 1
else:
j += 1
i += 1
# collect the result
if not args:
# constant expression
assert len(lut) == 1
return lut
negate_result = lut.count('1') > lut.count('0')
if negate_result:
lut = ''.join('1' if bit == '0' else '0' for bit in lut)
result = None
for i, bit in enumerate(lut):
if bit == '0':
continue
expr = None
for j, arg in enumerate(args):
if i & (1 << j) == 0:
arg = negate_expr(arg)
if expr is None:
expr = arg
else:
expr = (expr, 'and', arg)
if result is None:
result = expr
else:
result = (result, 'or', expr)
if negate_result:
result = negate_expr(result)
for toplevel_arg in reversed(toplevel_args):
if len(toplevel_arg) != 4:
result = tuple(reversed(toplevel_arg)) + (result, )
elif toplevel_arg[2] == ':':
result = toplevel_arg[0:2] + (result, ) + toplevel_arg[2:4]
else:
assert toplevel_arg[3] == ':'
result = toplevel_arg + (result, )
return stringify(result, False)
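# Two more hand-checked cases in the spirit of the example above:
#   lut_to_logic_expression('0001', ['a', 'b']) -> 'a & b'
#   lut_to_logic_expression('0110', ['a', 'b']) -> 'a ^ b'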
class Fabric:
def __init__(self, ic):
self.ic = ic
self.tiles = {}
#self.colbuf = set()
io_blocks = {}
ieren_blocks = {}
for x0, y0, b0, x1, y1, b1 in self.ic.ieren_db():
i = IOBlock()
assert (x0, y0, b0) not in io_blocks
io_blocks[x0, y0, b0] = i
assert (x1, y1, b1) not in ieren_blocks
ieren_blocks[x1, y1, b1] = i
for xy in ic.io_tiles:
assert xy not in self.tiles
self.tiles[xy] = IOTile(self, xy,
(io_blocks.pop((xy[0], xy[1], 0), None),
io_blocks.pop((xy[0], xy[1], 1), None)),
(ieren_blocks.pop((xy[0], xy[1], 0), None),
ieren_blocks.pop((xy[0], xy[1], 1), None)))
assert not io_blocks
assert not ieren_blocks
for xy in ic.logic_tiles:
assert xy not in self.tiles
self.tiles[xy] = LogicTile(self, xy)
for xy in ic.ramb_tiles:
assert xy not in self.tiles
self.tiles[xy] = RAMBTile(self, xy)
for xy in ic.ramt_tiles:
assert xy not in self.tiles
self.tiles[xy] = RAMTTile(self, xy)
for x, y in self.tiles:
assert x >= 0 and x <= self.ic.max_x
assert y >= 0 and y <= self.ic.max_y
for x in range(self.ic.max_x + 1):
for y in range(self.ic.max_y + 1):
should_exist = (x > 0 and x < self.ic.max_x) or \
(y > 0 and y < self.ic.max_y)
assert ((x, y) in self.tiles) == should_exist
for xy in ic.ram_data:
assert type(self.tiles.get(xy, None)) == RAMBTile
#colbuf_db = ic.colbuf_db()
#for x, y, i in self.colbuf:
# exists = False
# for src_x, src_y, dst_x, dst_y in colbuf_db:
# if src_x != x or src_y != y:
# continue
# assert (dst_x, dst_y) in self.tiles
# assert not self.tiles[dst_x, dst_y].colbuf[i]
# self.tiles[dst_x, dst_y].colbuf[i] = True
# exists = True
# assert exists
#
#for xy in self.tiles:
# for br in self.tiles[xy].buffer_and_routing:
# if br[0].startswith('glb_netwk_'):
# assert self.tiles[xy].colbuf[int(br[0][10:])]
for bit in self.ic.extra_bits:
directive, arg = self.ic.lookup_extra_bit(bit)
assert directive == 'padin_glb_netwk'
x, y, n = GLB_NETWK_EXTERNAL_BLOCKS[int(arg)]
assert type(self.tiles.get((x, y), None)) == IOTile
block = self.tiles[x, y].io_blocks[n]
assert block is not None
block.padin_glb_netwk = True
def printout(self, options):
print('device "%s" %d %d' % (self.ic.device, self.ic.max_x - 1,
self.ic.max_y - 1))
print('')
# internal_configuration_oscillator_frequency = low | medium | high
#print('coldboot = off')
print('warmboot = on') # IceStorm assumes this to be always on
for xy in sorted(self.tiles.keys(), key = lambda xy: (xy[1], xy[0])):
self.tiles[xy].printout(options)
class Tile:
def __init__(self, fabric, xy, data, is_logic_block):
self.fabric = fabric
self.ic = fabric.ic
self.xy = xy
self.data = data
self.buffer_and_routing = set()
self.used_buffer_and_routing = set()
self.text = set()
self.bitinfo = list()
self.unknown_bits = False
x, y = xy
db = self.ic.tile_db(x, y)
mapped_bits = set()
# 'data' is a list of strings containing a series of zeroes and
# ones. 'bits' is a set of strings containing an entry
# "B<row>[<col>]" or "!B<row>[<col>]" for each bit.
bits = set()
for k, line in enumerate(data):
for i in range(len(line)):
if line[i] == '1':
bits.add('B%d[%d]' % (k, i))
else:
bits.add('!B%d[%d]' % (k, i))
for entry in db:
# LC bits don't have a useful entry in the database; skip them
# for now
if re.match(r'LC_', entry[1]):
continue
# some nets have different names depending on the tile; filter
# out non-applicable net names
if entry[1] in ('routing', 'buffer') and (
not self.ic.tile_has_net(x, y, entry[2]) or
not self.ic.tile_has_net(x, y, entry[3])):
continue
# are all required bits set/unset?
match = True
for bit in entry[0]:
if not bit in bits:
match = False
if match:
for bit in entry[0]:
mapped_bits.add(bit)
if entry[1:] == ['IoCtrl', 'IE_0']:
if match != (self.ic.device == '1k'):
self.ieren_blocks[0].enable_input = True
continue
if entry[1:] == ['IoCtrl', 'REN_0']:
if match:
self.ieren_blocks[0].disable_pull_up = True
continue
if entry[1:] == ['IoCtrl', 'IE_1']:
if match != (self.ic.device == '1k'):
self.ieren_blocks[1].enable_input = True
continue
if entry[1:] == ['IoCtrl', 'REN_1']:
if match:
self.ieren_blocks[1].disable_pull_up = True
continue
if entry[1].startswith('IOB_') and entry[2].startswith('PINTYPE_'):
if match:
self.io_blocks[int(entry[1][4:])].pintype \
|= 1 << int(entry[2][8:])
continue
if entry[1:] == ['RamConfig', 'PowerUp']:
if match != (self.ic.device == '1k'):
self.text.add('power_up')
continue
if entry[1] == 'routing':
if match:
src = translate_netname(self.xy[0], self.xy[1],
self.ic.max_x - 1,
self.ic.max_y - 1, entry[2])
dst = translate_netname(self.xy[0], self.xy[1],
self.ic.max_x - 1,
self.ic.max_y - 1, entry[3])
if dst == 'fabout':
dst = lookup_fabout(*self.xy)
self.buffer_and_routing.add((src, '~>', dst))
continue
if entry[1] == 'buffer':
if match:
src = translate_netname(self.xy[0], self.xy[1],
self.ic.max_x - 1,
self.ic.max_y - 1, entry[2])
dst = translate_netname(self.xy[0], self.xy[1],
self.ic.max_x - 1,
self.ic.max_y - 1, entry[3])
if dst == 'fabout':
dst = lookup_fabout(*self.xy)
self.buffer_and_routing.add((src, '->', dst))
continue
if entry[1] == 'ColBufCtrl':
assert entry[2].startswith('glb_netwk_')
#if match:
# fabric.colbuf.add(self.xy + (int(entry[2][10:]), ))
continue
if match:
self.text.add(' '.join(entry[1:]))
for prefix in ('local_', 'glb2local_'):
for fst in [fst for fst in self.buffer_and_routing
if fst[-1].startswith(prefix)]:
used = False
for snd in [snd for snd in self.buffer_and_routing
if snd[0] == fst[-1]]:
self.buffer_and_routing.remove(snd)
self.buffer_and_routing.add(fst[:-1] + snd)
used = True
if used:
self.buffer_and_routing.remove(fst)
for k, line in enumerate(data):
self.bitinfo.append('')
extra_text = ''
for i in range(len(line)):
if 36 <= i <= 45 and is_logic_block:
self.bitinfo[-1] += '*' if line[i] == '1' else '-'
elif line[i] == '1' and 'B%d[%d]' % (k, i) not in mapped_bits:
self.unknown_bits = True
extra_text += ' B%d[%d]' % (k, i)
self.bitinfo[-1] += '?'
else:
self.bitinfo[-1] += '+' if line[i] == '1' else '-'
self.bitinfo[-1] += extra_text
def get_hlc(self):
return sorted(set.union(self.text,
set(' '.join(t)
for t in set.difference(
self.buffer_and_routing,
self.used_buffer_and_routing))))
def printout(self, stmt, options):
text = self.get_hlc()
if text or self.unknown_bits or options.print_all:
if self.unknown_bits or options.print_map:
print()
if self.unknown_bits:
print("; Warning: No DB entries for some bits:")
for k, line in enumerate(self.bitinfo):
print("; %4s %s" % ('B%d' % k, line))
print()
print("%s %d %d {" % (stmt, self.xy[0], self.xy[1]))
for line in text:
print(" " + line)
print("}")
class LogicCell:
def __init__(self, tile, lcidx):
self.lut = ''.join(icebox.get_lutff_lut_bits(tile.data, lcidx))
self.expr = lut_to_logic_expression(
self.lut, ('in_0', 'in_1', 'in_2', 'in_3'))
self.options = []
lutff_option_bits = ''.join(icebox.get_lutff_seq_bits(tile.data, lcidx))
if lutff_option_bits[0] == '1': self.options.append('enable_carry')
if lutff_option_bits[1] == '1': self.options.append('enable_dff')
if lutff_option_bits[2] == '1': self.options.append('set_noreset')
if lutff_option_bits[3] == '1': self.options.append('async_setreset')
self.buffer_and_routing0 = set()
self.buffer_and_routing1 = set()
for br in tuple(tile.buffer_and_routing):
if br[0] == 'lutff_%d/out' % lcidx:
self.buffer_and_routing1.add((br[0][8:], ) + br[1:])
tile.used_buffer_and_routing.add(br)
elif br[-1].startswith('lutff_%d/' % lcidx):
self.buffer_and_routing0.add(br[:-1] + (br[-1][8:], ))
tile.used_buffer_and_routing.add(br)
def get_hlc(self):
if self.lut == '0000000000000000' and not self.options:
t = []
elif len(self.expr) > 64:
t = ['lut ' + self.lut]
else:
t = ['out = ' + self.expr]
return [' '.join(t) for t in sorted(self.buffer_and_routing0,
key = lambda x: x[-1])] + \
t + self.options + \
[' '.join(t) for t in sorted(self.buffer_and_routing1,
key = lambda x: x[-1])]
class LogicTile(Tile):
def __init__(self, fabric, xy):
super().__init__(fabric, xy, fabric.ic.logic_tiles[xy], True)
self.cells = tuple(LogicCell(self, lcidx) for lcidx in range(8))
def get_hlc(self):
text = super().get_hlc()
for i, cell in reversed(tuple(enumerate(self.cells))):
t = cell.get_hlc()
if t:
text = ['lutff_%d {' % i] + \
[' %s' % s for s in t] + \
['}'] + \
text
return text
def printout(self, options):
super().printout('logic_tile', options)
class IOBlock:
def __init__(self):
# stored in the I/O tile where this block is located
self.pintype = 0
# stored in the I/O tile where this is an IE/REN block
self.enable_input = False
self.disable_pull_up = False
# stored as an extra bit
self.padin_glb_netwk = False
class IOTile(Tile):
def __init__(self, fabric, xy, io_blocks, ieren_blocks):
self.io_blocks = io_blocks
self.ieren_blocks = ieren_blocks
super().__init__(fabric, xy, fabric.ic.io_tiles[xy], False)
#self.cells = tuple(IOCell() for i in range(2))
for i, block in enumerate(io_blocks):
if block is None:
continue
block.buffer_and_routing0 = set()
block.buffer_and_routing1 = set()
for br in tuple(self.buffer_and_routing):
if br[0].startswith('io_%d/D_IN_' % i):
block.buffer_and_routing1.add((br[0][5:], ) + br[1:])
self.used_buffer_and_routing.add(br)
elif br[-1].startswith('io_%d/' % i):
block.buffer_and_routing0.add(br[:-1] + (br[-1][5:], ))
self.used_buffer_and_routing.add(br)
def get_hlc(self):
# if io_blocks[N] is None, this means there's no I/O pin there
text = super().get_hlc()
for n in (1, 0):
block = self.io_blocks[n]
if block is None:
continue
t = []
input_pt = block.pintype & 3
output_pt = block.pintype >> 2 & 15
unknown_pt = block.pintype >> 6
if input_pt != 0:
t.append('input_pin_type = %s' % (
'registered_pin',
'simple_input_pin',
'latched_registered_pin',
'latched_pin')[input_pt])
if output_pt != 0:
t.append('output_pin_type = %s' % (
'no_output',
'1',
'2',
'3',
'DDR',
'REGISTERED',
'simple_output_pin',
'REGISTERED_INVERTED',
'DDR_ENABLE',
'REGISTERED_ENABLE',
'OUTPUT_TRISTATE',
'REGISTERED_ENABLE_INVERTED',
'DDR_ENABLE_REGISTERED',
'REGISTERED_ENABLE_REGISTERED',
'ENABLE_REGISTERED',
'REGISTERED_ENABLE_REGISTERED_INVERTED')[output_pt])
if unknown_pt != 0:
t.append('unknown_pin_type = %d' % unknown_pt)
if block.enable_input:
t.append('enable_input')
if block.disable_pull_up:
t.append('disable_pull_up')
t += [' '.join(t) for t in sorted(block.buffer_and_routing0,
key = lambda x: x[-1])]
t += [' '.join(t) for t in sorted(block.buffer_and_routing1,
key = lambda x: x[0])]
if block.padin_glb_netwk:
t += ['GLOBAL_BUFFER_OUTPUT -> glb_netwk_%d'
% GLB_NETWK_EXTERNAL_BLOCKS.index(self.xy + (n, ))]
if t:
text = ['io_%d {' % n] + \
[' %s' % s for s in t] + \
['}'] + \
text
return text
def printout(self, options):
super().printout('io_tile', options)
class IOCell:
pass
class RAMBTile(Tile):
def __init__(self, fabric, xy):
super().__init__(fabric, xy, fabric.ic.ramb_tiles[xy], False)
if xy in fabric.ic.ram_data:
self.data = fabric.ic.ram_data[xy]
else:
self.data = None
def get_hlc(self):
text = super().get_hlc()
if self.data is not None:
text.append('')
text.append('data {')
for line in self.data:
text.append(' ' + line)
text.append('}')
return text
def printout(self, options):
super().printout('ramb_tile', options)
class RAMTTile(Tile):
def __init__(self, fabric, xy):
super().__init__(fabric, xy, fabric.ic.ramt_tiles[xy], False)
def printout(self, options):
super().printout('ramt_tile', options)
class Options:
def __init__(self):
self.print_map = False
self.print_all = False
def main():
program_short_name = os.path.basename(sys.argv[0])
options = Options()
try:
opts, args = getopt.getopt(sys.argv[1:], 'mA', ['help', 'version'])
except getopt.GetoptError as e:
sys.stderr.write("%s: %s\n" % (program_short_name, e.msg))
sys.stderr.write("Try `%s --help' for more information.\n"
% sys.argv[0])
sys.exit(1)
for opt, arg in opts:
if opt == '--help':
sys.stderr.write("""\
Create a high-level representation from an ASCII bitstream.
Usage: %s [OPTION]... FILE
-m print tile config bitmaps
-A don't skip uninteresting tiles
--help display this help and exit
--version output version information and exit
If you have a bug report, please file an issue on github:
https://github.com/rlutz/icestorm/issues
""" % sys.argv[0])
sys.exit(0)
if opt == '--version':
sys.stderr.write("""\
icebox_asc2hlc - create a high-level representation from an ASCII bitstream
Copyright (C) 2017 Roland Lutz
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
""")
sys.exit(0)
if opt == '-m':
options.print_map = True
elif opt == '-A':
options.print_all = True
if not args:
sys.stderr.write("%s: missing argument\n" % (program_short_name))
sys.stderr.write("Try `%s --help' for more information.\n"
% sys.argv[0])
sys.exit(1)
if len(args) != 1:
sys.stderr.write("%s: too many arguments\n" % (program_short_name))
sys.stderr.write("Try `%s --help' for more information.\n"
% sys.argv[0])
sys.exit(1)
ic = icebox.iceconfig()
if args[0] == '-':
ic.read_file('/dev/stdin')
else:
ic.read_file(args[0])
fabric = Fabric(ic)
fabric.printout(options)
if __name__ == '__main__':
main()
| 36,520 | -37 | 945 |
05d92b04d413dacbb9211204082fc281df9cc64b | 753 | py | Python | tests/streamer/test_encoder.py | Hikki12/remio | 17942bffe3c0619d3435b1a12399b116d4c800e3 | [
"Apache-2.0"
] | null | null | null | tests/streamer/test_encoder.py | Hikki12/remio | 17942bffe3c0619d3435b1a12399b116d4c800e3 | [
"Apache-2.0"
] | null | null | null | tests/streamer/test_encoder.py | Hikki12/remio | 17942bffe3c0619d3435b1a12399b116d4c800e3 | [
"Apache-2.0"
] | null | null | null | import time
import numpy as np
from remio import MJPEGEncoder
encoderParams = {
"quality": 60,
"colorspace": "bgr",
"colorsubsampling": "422",
"fastdct": True,
}
def read_frame():
    """Generate a random BGR frame of the size used in the benchmark."""
    return np.random.randint(255, size=(1024, 720, 3), dtype=np.uint8)
def test_encoder():
"""Test socket encoder class."""
max_encoding_time = 0.02 # seconds
encoder = MJPEGEncoder(**encoderParams)
encoding_time = []
for i in range(10):
frame = read_frame()
t0 = time.time()
encoded = encoder.encode(frame, base64=True)
t1 = time.time()
encoding_time.append(t1 - t0)
encoding_time = np.array(encoding_time)
assert encoding_time.mean() < max_encoding_time, "Improve the encoder..."
| 24.290323 | 77 | 0.641434 | import time
import numpy as np
from remio import MJPEGEncoder
encoderParams = {
"quality": 60,
"colorspace": "bgr",
"colorsubsampling": "422",
"fastdct": True,
}
def read_frame():
return np.random.randint(255, size=(1024, 720, 3), dtype=np.uint8)
def test_encoder():
"""Test socket encoder class."""
max_encoding_time = 0.02 # seconds
encoder = MJPEGEncoder(**encoderParams)
encoding_time = []
for i in range(10):
frame = read_frame()
t0 = time.time()
encoded = encoder.encode(frame, base64=True)
t1 = time.time()
encoding_time.append(t1 - t0)
encoding_time = np.array(encoding_time)
assert encoding_time.mean() < max_encoding_time, "Improve the encoder..."
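# Direct use outside the test runner (illustrative, mirroring the loop above):
#   encoder = MJPEGEncoder(**encoderParams)
#   jpeg_b64 = encoder.encode(read_frame(), base64=True)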
| 67 | 0 | 23 |
ae128645d44c35a30a1735da1aacecba7280f741 | 5,833 | py | Python | minesweeper.py | snow-apple/AoPS-games | ff6f922e77e64c8a3f83e2191c8435b2d28660b3 | [
"Apache-2.0"
] | null | null | null | minesweeper.py | snow-apple/AoPS-games | ff6f922e77e64c8a3f83e2191c8435b2d28660b3 | [
"Apache-2.0"
] | null | null | null | minesweeper.py | snow-apple/AoPS-games | ff6f922e77e64c8a3f83e2191c8435b2d28660b3 | [
"Apache-2.0"
] | 1 | 2020-09-16T19:37:37.000Z | 2020-09-16T19:37:37.000Z | # Python Class 2344
# Lesson 10 Problem 1
# Author: snowapple (471208)
import random
from tkinter import *
import tkinter.messagebox as messagebox
play_minesweeper(12, 10, 15)
| 32.405556 | 101 | 0.547917 | # Python Class 2344
# Lesson 10 Problem 1
# Author: snowapple (471208)
import random
from tkinter import *
import tkinter.messagebox as messagebox
class MinesweeperCell(Label):
def __init__(self, master, coord):
Label.__init__(
self, master, height=1, width=2, text="", bg="white", font=("Arial", 24)
)
self.colormap = [
"",
"blue",
"darkgreen",
"red",
"purple",
"maroon",
"cyan",
"black",
"dim gray",
]
self.master = master
self.coord = coord
self.isBomb = False
self.isExposed = False
self.isFlagged = False
self.numBombs = None
self["relief"] = RAISED
self.bind("<Button-1>", self.left_click)
self.bind("<Button-2>", self.right_click)
def left_click(self, event):
# simply expose
# if bomb, end game
if not self.master.gameover and not self.isFlagged:
if not self.isBomb:
self.expose()
else:
self.master.gameover = True
self.master.bomb_detonation()
def expose(self):
if not self.isExposed and not self.isFlagged:
self.isExposed = True
self["text"] = str(self.numBombs) if self.numBombs != 0 else ""
self["bg"] = "lightgrey"
self["relief"] = SUNKEN
self.master.unexposed_safe_cells -= 1
if self.master.unexposed_safe_cells == 0:
self.master.gameover = True
self.master.game_won()
if self.numBombs == 0:
self.master.expose_neighbors(self.coord)
else:
self["fg"] = self.colormap[self.numBombs]
def right_click(self, event):
# if exposed, cannot flag
# if not exposed, flag
if not self.isExposed and not self.master.gameover:
self.isFlagged = not self.isFlagged
self["text"] = "*" if self.isFlagged else ""
self.master.flaggedCells += 1 if self.isFlagged else -1
self.master.update_score()
def expose_bomb(self):
if not self.isFlagged:
self["bg"] = "red"
self["text"] = "*"
else: # shows user which bombs they flagged correctly, because the user can flag incorrectly
self["bg"] = "yellow"
self["text"] = "*"
class Minesweeper(Frame):
def __init__(self, master, width, height, numBombs):
self.height = height
self.width = width
self.unexposed_safe_cells = width * height - numBombs
self.numBombs = numBombs
self.flaggedCells = 0
self.gameover = False
# Setup GUI
# initialize a new Frame
Frame.__init__(self, master, bg="black")
self.grid()
# put in lines between the cells (not necessary)
# (odd numbered rows and columns in the grid)
# for c in range(1, 2 * (self.width - 1), 2):
# self.columnconfigure(c, minsize=3)
# for r in range(1, 2 * (self.height - 1), 2):
# self.rowconfigure(r, minsize=3)
# Setup score Label
self.scoreLabel = Label(
self, height=1, width=2, text="", bg="white", font=("Arial", 24)
)
self.scoreLabel.grid(row=2 * self.height, column=self.width - 2)
self.scoreLabel["text"] = str(self.numBombs - self.flaggedCells)
# Setup Board
self.board = {} # key: (row_index, col_index), val: MinesweeperCell object
for row in range(self.height):
for col in range(self.width):
loc = (row, col)
self.board[loc] = MinesweeperCell(self, loc)
self.board[loc].grid(row=2 * row, column=2 * col)
# Setup Bombs
self.bombCells = random.sample(list(self.board.values()), numBombs)
for cell in self.bombCells:
cell.isBomb = True
for row in range(self.height):
for col in range(self.width):
self.find_num_bombs((row, col))
def find_num_bombs(self, coord):
row, col = coord
validNeighbors = self.find_neighbors(coord)
numBombs = 0
for loc in validNeighbors: # loc = (r,c)
cell = self.board[loc]
numBombs += 1 if cell.isBomb else 0
self.board[(row, col)].numBombs = numBombs
def find_neighbors(self, coord):
row, column = coord
neighIndicies = [
(row - 1, column - 1),
(row - 1, column),
(row - 1, column + 1),
(row, column + 1),
(row, column - 1),
(row + 1, column),
(row + 1, column + 1),
(row + 1, column - 1),
]
validNeighbors = []
for (r, c) in neighIndicies:
if 0 <= r and r < self.height:
if 0 <= c and c < self.width:
validNeighbors.append((r, c))
return validNeighbors
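    # Example: on the default 12x10 board, find_neighbors((0, 0)) returns
    # [(0, 1), (1, 0), (1, 1)], the three in-bounds neighbors of the corner.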
def expose_neighbors(self, coord):
validNeighbors = self.find_neighbors(coord)
for loc in validNeighbors:
cell = self.board[loc]
cell.expose()
def bomb_detonation(self):
messagebox.showerror("Minesweeper", "KABOOM! You lose.", parent=self)
for cell in self.bombCells:
cell.expose_bomb()
def game_won(self):
messagebox.showinfo("Minesweeper", "Congratulations -- you won!", parent=self)
def update_score(self):
self.scoreLabel["text"] = str(self.numBombs - self.flaggedCells)
def play_minesweeper(width, height, numBombs):
root = Tk()
root.title("Minesweeper")
mn = Minesweeper(root, width, height, numBombs)
root.mainloop()
play_minesweeper(12, 10, 15)
| 5,247 | 12 | 391 |
4e515cc5b2a6e9d930b1ae6c2b6836567c9b41c6 | 1,231 | py | Python | casepro/msg_board/migrations/0004_alter_messageboardcomment_content_type_and_more.py | rapidpro/ureport-partners | 16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd | [
"BSD-3-Clause"
] | null | null | null | casepro/msg_board/migrations/0004_alter_messageboardcomment_content_type_and_more.py | rapidpro/ureport-partners | 16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd | [
"BSD-3-Clause"
] | null | null | null | casepro/msg_board/migrations/0004_alter_messageboardcomment_content_type_and_more.py | rapidpro/ureport-partners | 16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 4.0.1 on 2022-01-12 15:28
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| 30.775 | 66 | 0.583266 | # Generated by Django 4.0.1 on 2022-01-12 15:28
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("msg_board", "0003_auto_20210318_1925"),
]
operations = [
migrations.AlterField(
model_name="messageboardcomment",
name="content_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="content_type_set_for_%(class)s",
to="contenttypes.contenttype",
verbose_name="content type",
),
),
migrations.AlterField(
model_name="messageboardcomment",
name="user",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_comments",
to=settings.AUTH_USER_MODEL,
verbose_name="user",
),
),
]
| 0 | 1,051 | 23 |
49b502f6bf9afc0b6f3a4c3df18f489fc992d0f8 | 4,153 | py | Python | Video_to_Image.py | Neutrino3316/First-Impression | 00b7cebe8391c7c30b36de9c4ce5d9732daaf34d | [
"MIT"
] | 24 | 2018-12-07T13:53:37.000Z | 2022-01-11T11:54:34.000Z | Video_to_Image.py | Neutrino3316/First-Impression | 00b7cebe8391c7c30b36de9c4ce5d9732daaf34d | [
"MIT"
] | 6 | 2018-02-27T09:28:57.000Z | 2020-09-08T15:37:56.000Z | Video_to_Image.py | Neutrino3316/First-Impression | 00b7cebe8391c7c30b36de9c4ce5d9732daaf34d | [
"MIT"
] | 21 | 2018-05-25T10:56:44.000Z | 2022-02-24T08:06:24.000Z | '''
Extract all the 6 training zipped files and 2 validation zipped files into the data folder and then run this script
'''
import cv2
import numpy as np
import os
import zipfile
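## Expected layout (illustrative): zipped inputs live in data/ (e.g. data/training80_01.zip),
## videos are unpacked to unzippedData/<zipname>/, and frames are written to
## ImageData/trainingData/<video>/frameN.jpg or ImageData/validationData/<video>/frameN.jpg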
## Running a loop through all the zipped training files to extract all videos and then extract 100 frames from each.
for i in range(1,76):
if i<10:
zipfilename = 'training80_0'+str(i)+'.zip'
else:
zipfilename = 'training80_'+str(i)+'.zip'
## Accessing the zipfile i
archive = zipfile.ZipFile('data/'+zipfilename, 'r')
zipfilename = zipfilename.split('.zip')[0]
##Extracting all videos in it and saving it all to the new folder with same name as zipped one
archive.extractall('unzippedData/'+zipfilename)
## Running a loop over all the videos in the zipped file and extracting 100 frames from each
for file_name in archive.namelist():
cap = cv2.VideoCapture('unzippedData/'+zipfilename+'/'+file_name)
file_name=(file_name.split('.mp4'))[0]
## Creating folder to save all the 100 frames from the video
try:
if not os.path.exists('ImageData/trainingData/'+file_name):
os.makedirs('ImageData/trainingData/'+file_name)
except OSError:
print ('Error: Creating directory of data')
        ## Limiting extraction to 100 frames (cap.set with CAP_PROP_FRAME_COUNT is a no-op on most backends; the count check below enforces the limit)
        cap.set(cv2.CAP_PROP_FRAME_COUNT, 101)
length=101
count=0
        ## Running a loop over each frame and saving it in the created folder
while(cap.isOpened()):
count+=1
if length==count:
break
ret, frame = cap.read()
if frame is None:
continue
## Resizing it to 256*256 to save the disk space and fit into the model
frame = cv2.resize(frame,(256, 256), interpolation = cv2.INTER_CUBIC)
# Saves image of the current frame in jpg file
name = 'ImageData/trainingData/'+str(file_name)+'/frame' + str(count) + '.jpg'
cv2.imwrite(name, frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
## Print the file which is done
print (zipfilename, ':', file_name)
#
for i in range(1,26):
if i<10:
zipfilename = 'validation80_0'+str(i)+'.zip'
else:
zipfilename = 'validation80_'+str(i)+'.zip'
## Accessing the zipfile i
archive = zipfile.ZipFile('data/'+zipfilename, 'r')
zipfilename = zipfilename.split('.zip')[0]
##Extracting all videos in it and saving it all to the new folder with same name as zipped one
archive.extractall('unzippedData/'+zipfilename)
## Running a loop over all the videos in the zipped file and extracting 100 frames from each
for file_name in archive.namelist():
cap = cv2.VideoCapture('unzippedData/'+zipfilename+'/'+file_name)
file_name=(file_name.split('.mp4'))[0]
## Creating folder to save all the 100 frames from the video
try:
if not os.path.exists('ImageData/validationData/'+file_name):
os.makedirs('ImageData/validationData/'+file_name)
except OSError:
print ('Error: Creating directory of data')
        ## cv2.CAP_PROP_FRAME_COUNT is read-only, so cap.set() cannot cap the
        ## frame count; the counter check in the loop below enforces the 100-frame limit
length=101
count=0
        ## Looping over each frame and saving it in the created folder
while(cap.isOpened()):
count+=1
if length==count:
break
ret, frame = cap.read()
if frame is None:
continue
## Resizing it to 256*256 to save the disk space and fit into the model
frame = cv2.resize(frame,(256, 256), interpolation = cv2.INTER_CUBIC)
# Saves image of the current frame in jpg file
name = 'ImageData/validationData/'+str(file_name)+'/frame' + str(count) + '.jpg'
cv2.imwrite(name, frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
## Print the file which is done
print (zipfilename, ':', file_name)
| 38.453704 | 114 | 0.603419 |
| 0 | 0 | 0 |
d9209983fe3230c55c6541ed80c5107adddb2a90 | 393 | py | Python | aos_vs_soa/plot.py | christopher-besch/cpp_go_fast | 1f9090a70e4e15882334aa92ba021c5103ae7a33 | [
"MIT"
] | 3 | 2021-08-03T12:33:04.000Z | 2021-10-29T18:58:13.000Z | aos_vs_soa/plot.py | christopher-besch/cpp_go_fast | 1f9090a70e4e15882334aa92ba021c5103ae7a33 | [
"MIT"
] | null | null | null | aos_vs_soa/plot.py | christopher-besch/cpp_go_fast | 1f9090a70e4e15882334aa92ba021c5103ae7a33 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import glob, os
aos_data = pd.read_csv("aos.csv")
soa_data = pd.read_csv("soa.csv")
plt.loglog(aos_data["n"],aos_data["time"],"o",markersize=1,label="aos")
plt.loglog(soa_data["n"],soa_data["time"],"o",markersize=1,label="soa")
plt.title("aos vs soa")
plt.legend()
plt.xlabel("n")
plt.ylabel("time")
plt.savefig("aos_vs_soa.png")
plt.show()
| 28.071429 | 71 | 0.715013 |
| 0 | 0 | 0 |
50c3be7c7f58a98919788b196f8130825769342b | 3,472 | py | Python | setup.py | isislovecruft/txtorcon | 9523319cecbc1d0aa8fdb4d6874f0debc22d0432 | [
"MIT"
] | null | null | null | setup.py | isislovecruft/txtorcon | 9523319cecbc1d0aa8fdb4d6874f0debc22d0432 | [
"MIT"
] | null | null | null | setup.py | isislovecruft/txtorcon | 9523319cecbc1d0aa8fdb4d6874f0debc22d0432 | [
"MIT"
] | null | null | null | try:
import pypissh
except ImportError:
print "WARNING: not using PyPi over SSH!"
import sys
import os
import shutil
import re
from setuptools import setup
## can't just naively import these from txtorcon, as that will only
## work if you already installed the dependencies :(
__version__ = '0.13.0'
__author__ = 'meejah'
__contact__ = 'meejah@meejah.ca'
__url__ = 'https://github.com/meejah/txtorcon'
__license__ = 'MIT'
__copyright__ = 'Copyright 2012-2015'
def pip_to_requirements(s):
"""
Change a PIP-style requirements.txt string into one suitable for setup.py
"""
if s.startswith('#'):
return ''
m = re.match('(.*)([>=]=[.0-9]*).*', s)
if m:
return '%s (%s)' % (m.group(1), m.group(2))
return s.strip()
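# Illustrative behaviour of pip_to_requirements (editor's note, not part of
# the original file): a line such as "Twisted>=11.1.0" becomes the
# distutils-style "Twisted (>=11.1.0)", while comment lines map to ''.
#   >>> pip_to_requirements("Twisted>=11.1.0")
#   'Twisted (>=11.1.0)'
#   >>> pip_to_requirements("# dev-only dependency")
#   ''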
setup(name = 'txtorcon',
version = __version__,
description = 'Twisted-based Tor controller client, with state-tracking and configuration abstractions.',
long_description = open('README.rst', 'r').read(),
keywords = ['python', 'twisted', 'tor', 'tor controller'],
## way to have "development requirements"?
requires = filter(len, map(pip_to_requirements, open('requirements.txt').readlines())),
## FIXME is requires even doing anything? why is format
## apparently different for install_requires?
install_requires = ['Twisted>=11.1.0', 'zope.interface>=3.6.1'],
classifiers = ['Framework :: Twisted',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet',
'Topic :: Security'],
author = __author__,
author_email = __contact__,
url = __url__,
license = __license__,
packages = ["txtorcon", "twisted.plugins"],
# scripts = ['examples/attach_streams_by_country.py'],
## I'm a little unclear if I'm doing this "properly", especially
## the documentation etc. Do we really want "share/txtorcon" for
## the first member of the tuple? Why does it seem I need to
## duplicate this in MANIFEST.in?
data_files = [('share/txtorcon', ['INSTALL', 'README.rst', 'TODO', 'meejah.asc']),
## this includes the Sphinx source for the
## docs. The "map+filter" construct grabs all .rst
## files and re-maps the path
('share/txtorcon', ['docs/apilinks_sphinxext.py', 'docs/conf.py', 'docs/Makefile'] + map(lambda x: os.path.join('docs', x), filter(lambda x: x[-3:] == 'rst', os.listdir('docs'))) + map(lambda x: os.path.join('docs/_static', x), os.listdir('docs/_static'))),
## include all the examples
('share/txtorcon/examples', map(lambda x: os.path.join('examples', x), filter(lambda x: x[-3:] == '.py', os.listdir('examples'))))
]
)
| 42.864198 | 277 | 0.575461 |
| 0 | 0 | 0 |
df6b1c0fb9ac1a4e25419b4ad34f691009e681fb | 420 | py | Python | packages/pyright-internal/src/tests/samples/unusedVariable1.py | martindemello/pyright | 4fe3f1f7c08f139715701fdf746183062b5165ff | [
"MIT"
] | 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/unusedVariable1.py | martindemello/pyright | 4fe3f1f7c08f139715701fdf746183062b5165ff | [
"MIT"
] | 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/unusedVariable1.py | martindemello/pyright | 4fe3f1f7c08f139715701fdf746183062b5165ff | [
"MIT"
] | 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests the reportUnusedVariable diagnostic check.
| 19.090909 | 75 | 0.619048 | # This sample tests the reportUnusedVariable diagnostic check.
def func1(a: int):
x = 4
# This should generate an error if reportUnusedVariable is enabled.
y = x
_z = 4
_ = 2
__z__ = 5
if x + 1:
# This should generate an error if reportUnusedVariable is enabled.
z = 3
else:
# This should generate an error if reportUnusedVariable is enabled.
z = 5
| 333 | 0 | 23 |
b3d857944d0e5a13fa7cbc6cbf18f73987689aa2 | 6,200 | py | Python | utils.py | Julienbeaulieu/Deeplearing-Image-Classifier-with-Pytorch | 0c7274ce3d864800f083ba884039771b96d020e0 | [
"MIT"
] | null | null | null | utils.py | Julienbeaulieu/Deeplearing-Image-Classifier-with-Pytorch | 0c7274ce3d864800f083ba884039771b96d020e0 | [
"MIT"
] | null | null | null | utils.py | Julienbeaulieu/Deeplearing-Image-Classifier-with-Pytorch | 0c7274ce3d864800f083ba884039771b96d020e0 | [
"MIT"
] | null | null | null | # Imports
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
import numpy as np
import json
import pandas as pd
# Import for keeping our session alive
from workspace_utils import active_session
# Label Mapping
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# Quick check of data in json file
df = pd.DataFrame({'flower_type': cat_to_name})
df.head(10)
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
'''
im = Image.open(image)
width, height = im.size
# Resize image to make the shortest side 256 pixels
if im.width > im.height:
(width, height) = (im.width, 256)
elif im.width < im.height:
(width, height) = (256, im.height)
else:
(width, height) = (256, 256)
im.thumbnail((width, height), Image.ANTIALIAS)
# new size of image
width, height = im.size
# Crop at center, make image 224x224
reduce = 224
left = (width - reduce)/2
top = (height - reduce)/2
right = left + 224
bottom = top + 224
im = im.crop((left, top, right, bottom))
np_image = np.array(im)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
image = np_image.transpose((2, 0, 1))
return image
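# Illustrative usage of process_image (editor's sketch; the file path is
# hypothetical). The helper returns a channel-first (3, 224, 224) array,
# ready to be wrapped in a FloatTensor:
#   img = process_image('flowers/test/1/image_06743.jpg')
#   assert img.shape == (3, 224, 224)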
def imshow(image, ax=None, title=None):
    """Imshow for Tensor."""
    if ax is None:
        fig, ax = plt.subplots()
    # PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes it is the third dimension
    image = image.transpose((1, 2, 0))
    # The original function stopped at the transpose; undoing the normalisation
    # and drawing the image is the likely intended continuation:
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = np.clip(std * image + mean, 0, 1)
    if title:
        ax.set_title(title)
    ax.imshow(image)
    return ax
def predict(image_path, model, device = 'gpu', topk=1):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image = process_image(image_path)
# Convert image to a FloatTensor and add a 'batch_size' dimension with .unsqueeze_(0)
image = torch.from_numpy(image).type(torch.FloatTensor).unsqueeze_(0)
# Select between gpu and cpu
if device == 'gpu' and torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
# Bring model to device
model.to(device)
with torch.no_grad():
model.eval()
        output = model.forward(image.to(device))  # honour the device chosen above instead of assuming CUDA
ps = torch.exp(output)
probs, idx = ps.topk(topk, dim=1)
probs, idx = probs.to('cpu'), idx.to('cpu')
    probs = probs.numpy()  # convert both to numpy arrays
    idx = idx.numpy()
    probs = probs.tolist()[0]  # then to plain Python lists
    idx = idx.tolist()[0]
    mapping = {val: key for key, val in model.class_to_idx.items()}
    classes = [mapping[item] for item in idx]
    class_names = [cat_to_name[item] for item in classes]
    class_names = np.array(class_names)
    classes = np.array(classes)  # converting to Numpy array
    print(probs, class_names)
    return probs, class_names  # print() alone returns None; hand back the values too
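# Illustrative call chain (editor's sketch; the checkpoint and image paths are
# hypothetical, and the model must carry class_to_idx as predict expects):
#   model = load_checkpoint('checkpoint.pth')
#   probs, names = predict('flowers/test/1/image_06743.jpg', model, topk=5)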
| 28.054299 | 89 | 0.61871 | # Imports
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
import numpy as np
import json
import pandas as pd
# Import for keeping our session alive
from workspace_utils import active_session
# Label Mapping
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# Quick check of data in json file
df = pd.DataFrame({'flower_type': cat_to_name})
df.head(10)
# Define our classifier Class
class Classifier(nn.Module):
def __init__(self, input_size, output_size, hidden_layers, dropout = 0.2):
super().__init__()
# self.input_size = nn.Linear(input_size, hidden_layers[0])
self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])
layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])
self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])
self.output = nn.Linear(hidden_layers[-1], output_size)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = x.view(x.shape[0], -1)
for hidden_layer in self.hidden_layers:
x = self.dropout(F.relu(hidden_layer(x)))
x = F.log_softmax(self.output(x), dim=1)
return x
def setup(hidden_layers, learning_rate, arch, device='gpu', dropout = 0.3):
if arch == 'vgg16':
input_size = 25088
# Use VGG16 model from Torchvision
model = models.vgg16(pretrained=True)
elif arch == 'densenet121':
input_size = 1024
# Use Densenet121 from Torchvision
model = models.densenet121(pretrained=True)
else:
print("Sorry, this is not a valid model. Try 'vgg16' or 'densenet121'")
# Freeze the parameters
for param in model.parameters():
param.requires_grad = False
# output_size = 102 flower categories
output_size = 102
classifier = Classifier(input_size, output_size, hidden_layers, dropout = 0.3)
# Update the model's classifier
model.classifier = classifier
# Select between gpu and cpu
if device == 'gpu' and torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
# Bring model to device
model.to(device)
# Define optimizer
optimizer = optim.Adam(model.classifier.parameters(), lr = learning_rate)
# Define loss function
criterion = nn.NLLLoss()
return model, optimizer, criterion, device
# testing the model and returning the accuracy on new data
def testset_accuracy(model, testloader):
# Bring model to GPU
model.to(device)
correct = 0
total = 0
with torch.no_grad():
model.to(device)
model.eval()
        for data in testloader:
            images, labels = data
            # The original was cut off mid-loop; a minimal, likely-intended
            # completion follows (assumes `device` selected as in setup()):
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100 * correct / total
# Function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath='checkpoint.pth'):
checkpoint = torch.load(filepath)
model = models.vgg16(pretrained=True)
model.classifier = checkpoint['classifier']
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
for param in model.parameters():
param.requires_grad = False
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
im = Image.open(image)
width, height = im.size
# Resize image to make the shortest side 256 pixels
if im.width > im.height:
(width, height) = (im.width, 256)
elif im.width < im.height:
(width, height) = (256, im.height)
else:
(width, height) = (256, 256)
im.thumbnail((width, height), Image.ANTIALIAS)
# new size of image
width, height = im.size
# Crop at center, make image 224x224
reduce = 224
left = (width - reduce)/2
top = (height - reduce)/2
right = left + 224
bottom = top + 224
im = im.crop((left, top, right, bottom))
np_image = np.array(im)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
image = np_image.transpose((2, 0, 1))
return image
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
def predict(image_path, model, device = 'gpu', topk=1):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image = process_image(image_path)
# Convert image to a FloatTensor and add a 'batch_size' dimension with .unsqueeze_(0)
image = torch.from_numpy(image).type(torch.FloatTensor).unsqueeze_(0)
# Select between gpu and cpu
if device == 'gpu' and torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
# Bring model to device
model.to(device)
with torch.no_grad():
model.eval()
        output = model.forward(image.to(device))  # honour the device chosen above instead of assuming CUDA
ps = torch.exp(output)
probs, idx = ps.topk(topk, dim=1)
probs, idx = probs.to('cpu'), idx.to('cpu')
probs = probs.numpy () # converting both to numpy array
idx = idx.numpy ()
probs = probs.tolist () [0] # converting both to list
idx = idx.tolist () [0]
mapping = {val: key for key, val in
model.class_to_idx.items()
}
classes = [mapping [item] for item in idx]
class_names = [cat_to_name [item] for item in classes]
class_names = np.array(class_names)
classes = np.array(classes) # converting to Numpy array
    print(probs, class_names)
    return probs, class_names  # print() alone returns None; hand back the values too
| 2,623 | 7 | 181 |
5e418feda0e0beca1b1898921200f56ed7648535 | 483 | py | Python | user/urls.py | fobiero/InstagramClone_DJ_02 | e99c65699997e20f98442159b01bc9d382f90b82 | [
"MIT"
] | null | null | null | user/urls.py | fobiero/InstagramClone_DJ_02 | e99c65699997e20f98442159b01bc9d382f90b82 | [
"MIT"
] | null | null | null | user/urls.py | fobiero/InstagramClone_DJ_02 | e99c65699997e20f98442159b01bc9d382f90b82 | [
"MIT"
] | null | null | null | from django.urls import path
from user.views import ProfileView,ProfileEditView,AllProfilesView
from django.contrib.auth.decorators import login_required
urlpatterns = [
# dynamic URL
path('in/<str:username>/',login_required(ProfileView.as_view()),name='profile_view'),
path('in/<str:username>/edit/',login_required(ProfileEditView.as_view()),name='profile_edit_view'),
path('profiles/',login_required(AllProfilesView.as_view()),name='all_profiles_view'),
]
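# Illustrative reverse lookups for the named routes above (editor's note; the
# leading path assumes this urls.py is included at the project root):
#   from django.urls import reverse
#   reverse('profile_view', kwargs={'username': 'alice'})       -> '/in/alice/'
#   reverse('profile_edit_view', kwargs={'username': 'alice'})  -> '/in/alice/edit/'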
| 37.153846 | 103 | 0.761905 |
| 0 | 0 | 0 |
f326b9679284bce6470da5c2ec0ec289c29e0ed3 | 5,798 | py | Python | piso/docstrings/interval.py | venaturum/piso | 54fd20443efb84d7a9982c92caf492b12206eaef | [
"MIT"
] | 5 | 2021-10-10T04:17:17.000Z | 2022-03-01T06:23:25.000Z | piso/docstrings/interval.py | venaturum/piso | 54fd20443efb84d7a9982c92caf492b12206eaef | [
"MIT"
] | 35 | 2021-10-09T13:22:04.000Z | 2022-01-29T08:38:15.000Z | piso/docstrings/interval.py | staircase-dev/piso | 2e6ac861f7166195e2fe67e2665c29e36b4ff12e | [
"MIT"
] | null | null | null | union_examples = """
Examples
-----------
>>> import pandas as pd
>>> import piso.interval
>>> piso.interval.union(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... )
Interval(0.0, 4.0, closed='right')
>>> piso.interval.union(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... squeeze=False,
... )
<IntervalArray>
[(0.0, 4.0]]
Length: 1, closed: right, dtype: interval[float64]
>>> piso.interval.union(
... pd.Interval(0, 3, closed="left"),
... pd.Interval(2, 4, closed="left"),
... )
Interval(0.0, 4.0, closed='left')
>>> piso.interval.union(
... pd.Interval(0, 1),
... pd.Interval(3, 4),
... )
<IntervalArray>
[(0.0, 1.0], (3.0, 4.0]]
Length: 2, closed: right, dtype: interval[float64]
"""
intersection_examples = """
Examples
-----------
>>> import pandas as pd
>>> import piso.interval
>>> piso.interval.intersection(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... )
Interval(2.0, 3.0, closed='right')
>>> piso.interval.intersection(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... squeeze=False,
... )
<IntervalArray>
[(2.0, 3.0]]
Length: 1, closed: right, dtype: interval[float64]
>>> piso.interval.intersection(
... pd.Interval(0, 3, closed="left"),
... pd.Interval(2, 4, closed="left"),
... )
Interval(2.0, 3.0, closed='left')
>>> piso.interval.intersection(
... pd.Interval(0, 1),
... pd.Interval(3, 4),
... )
<IntervalArray>
[]
Length: 0, closed: right, dtype: interval[int64]
"""
difference_examples = """
Examples
-----------
>>> import pandas as pd
>>> import piso.interval
>>> piso.interval.difference(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... )
Interval(0.0, 2.0, closed='right')
>>> piso.interval.difference(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... squeeze=False,
... )
<IntervalArray>
[(0.0, 2.0]]
Length: 1, closed: right, dtype: interval[float64]
>>> piso.interval.difference(
... pd.Interval(0, 4, closed="left"),
... pd.Interval(2, 3, closed="left"),
... )
<IntervalArray>
[[0.0, 2.0), [3.0, 4.0)]
Length: 2, closed: left, dtype: interval[float64]
>>> piso.interval.difference(
... pd.Interval(2, 3),
... pd.Interval(0, 4),
... )
<IntervalArray>
[]
Length: 0, closed: right, dtype: interval[int64]
"""
symmetric_difference_examples = """
Examples
-----------
>>> import pandas as pd
>>> import piso.interval
>>> piso.interval.symmetric_difference(
... pd.Interval(0, 3),
... pd.Interval(2, 4),
... )
<IntervalArray>
[(0.0, 2.0], (3.0, 4.0]]
Length: 2, closed: right, dtype: interval[float64]
>>> piso.interval.symmetric_difference(
... pd.Interval(0, 3),
... pd.Interval(2, 3),
... )
Interval(0.0, 2.0, closed='right')
>>> piso.interval.symmetric_difference(
... pd.Interval(0, 3, closed="left"),
... pd.Interval(2, 4, closed="left"),
... )
<IntervalArray>
[[0.0, 2.0), [3.0, 4.0)]
Length: 2, closed: left, dtype: interval[float64]
>>> piso.interval.symmetric_difference(
... pd.Interval(2, 3),
... pd.Interval(0, 4),
... )
<IntervalArray>
[(0.0, 2.0], (3.0, 4.0]]
Length: 2, closed: right, dtype: interval[float64]
"""
issuperset_examples = """
Examples
-----------
>>> import pandas as pd
>>> import piso.interval
>>> piso.interval.issuperset(
... pd.Interval(1, 4),
... pd.Interval(2, 4),
... )
True
>>> piso.interval.issuperset(
... pd.Interval(1, 4),
... pd.Interval(0, 3),
... )
False
>>> piso.interval.issuperset(
... pd.Interval(1, 4),
... pd.Interval(2, 4),
... pd.Interval(0, 3),
... )
array([ True, False])
>>> piso.interval.issuperset(
... pd.Interval(0, 3),
... pd.Interval(0, 3),
... squeeze=False
... )
array([ True])
"""
issubset_examples = """
Examples
-----------
>>> import pandas as pd
>>> import piso.interval
>>> piso.interval.issubset(
... pd.Interval(2, 4),
... pd.Interval(1, 4),
... )
True
>>> piso.interval.issubset(
... pd.Interval(2, 4),
... pd.Interval(0, 3),
... )
False
>>> piso.interval.issubset(
... pd.Interval(2, 4),
... pd.Interval(1, 4),
... pd.Interval(0, 3),
... )
array([ True, False])
>>> piso.interval.issubset(
... pd.Interval(1, 4),
... pd.Interval(1, 4),
... squeeze=False
... )
array([ True])
"""
template_doc = """
Performs the {operation} of two :class:`pandas.Interval`
Parameters
----------
interval1 : pandas.Interval
the first operand
interval2 : pandas.Interval
the second operand
squeeze : boolean, default True
If True, will try to coerce the return value to a :class:`pandas.Interval`
Returns
----------
:class:`pandas.Interval` or :class:`pandas.arrays.IntervalArray`
{examples}
"""
union_docstring = template_doc.format(operation="union", examples=union_examples)
intersection_docstring = template_doc.format(
operation="intersection", examples=intersection_examples
)
difference_docstring = template_doc.format(
operation="set difference", examples=difference_examples
)
symmetric_difference_docstring = template_doc.format(
operation="symmetric difference", examples=symmetric_difference_examples
)
is_sub_super_doc = """
Indicates whether one :class:`pandas.Interval` is a {operation} of one, or more, others.
Parameters
----------
interval : :class:`pandas.Interval`
An interval, against which all other intervals belonging to *intervals* are compared.
*intervals : argument list of :class:`pandas.Interval`
Must contain at least one argument.
squeeze : boolean, default True
If True, will try to coerce the return value to a single boolean
Returns
----------
boolean, or :class:`numpy.ndarray` of booleans
{examples}
"""
issuperset_docstring = is_sub_super_doc.format(
operation="superset",
examples=issuperset_examples,
)
issubset_docstring = is_sub_super_doc.format(
operation="subset",
examples=issubset_examples,
)
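# Editor's note (illustrative, not part of the module): piso attaches these
# strings to the public functions elsewhere, along the lines of
#   interval.union.__doc__ = union_docstring
# so each set operation shares the common template plus its own examples.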
| 20.560284 | 89 | 0.624181 |
| 0 | 0 | 0 |
0e77dececbe73353a54fdfea2a502e9ede998856 | 285 | py | Python | content/tampers/urlencodeall.py | officialhocc/WhatWaf | 0ca5c7f1ffc6dfba05e2d0a29799c023c4c6337f | [
"MIT"
] | null | null | null | content/tampers/urlencodeall.py | officialhocc/WhatWaf | 0ca5c7f1ffc6dfba05e2d0a29799c023c4c6337f | [
"MIT"
] | null | null | null | content/tampers/urlencodeall.py | officialhocc/WhatWaf | 0ca5c7f1ffc6dfba05e2d0a29799c023c4c6337f | [
"MIT"
] | 1 | 2018-12-27T23:18:44.000Z | 2018-12-27T23:18:44.000Z | __example_payload__ = "SELECT * FROM information_schema.tables"
__type__ = "encoding all characters in the payload into their URL encoding equivalent"
| 28.5 | 86 | 0.698246 | __example_payload__ = "SELECT * FROM information_schema.tables"
__type__ = "encoding all characters in the payload into their URL encoding equivalent"
def tamper(payload, **kwargs):
retval = ""
for char in payload:
retval += "%{}".format(ord(char))
return retval
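# Illustrative round trip (editor's note): with two-digit hex encoding,
# tamper("SELECT 1") -> "%53%45%4c%45%43%54%20%31"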
| 110 | 0 | 23 |
2ccf9cae31016b266b5a8a656b3aea0c3e571fc9 | 361 | py | Python | uvloop/includes/__init__.py | Psycojoker/uvloop | 03487c80a508ea92e66f976fa196e64514894205 | [
"Apache-2.0",
"MIT"
] | 9,084 | 2016-04-04T10:42:42.000Z | 2022-03-30T22:53:22.000Z | uvloop/includes/__init__.py | Psycojoker/uvloop | 03487c80a508ea92e66f976fa196e64514894205 | [
"Apache-2.0",
"MIT"
] | 382 | 2016-04-02T19:19:09.000Z | 2022-03-22T08:39:18.000Z | uvloop/includes/__init__.py | Psycojoker/uvloop | 03487c80a508ea92e66f976fa196e64514894205 | [
"Apache-2.0",
"MIT"
] | 562 | 2016-04-01T19:42:12.000Z | 2022-03-30T22:40:40.000Z | # flake8: noqa
# These have to be synced with the stdlib.pxi
import asyncio
import collections
import concurrent.futures
import errno
import functools
import gc
import inspect
import itertools
import os
import signal
import socket
import subprocess
import ssl
import stat
import sys
import threading
import traceback
import time
import warnings
import weakref
| 15.041667 | 45 | 0.839335 |
| 0 | 0 | 0 |
1c8ec33bd6590fc7464361b859e4510f1751e325 | 1,638 | py | Python | program/google/protobuf/internal/import_test_package/import_public_pb2.py | Addision/ProtoExcelTool | 17eaa08c08312e59c5a8f6114a121177ca65508a | [
"MIT"
] | null | null | null | program/google/protobuf/internal/import_test_package/import_public_pb2.py | Addision/ProtoExcelTool | 17eaa08c08312e59c5a8f6114a121177ca65508a | [
"MIT"
] | null | null | null | program/google/protobuf/internal/import_test_package/import_public_pb2.py | Addision/ProtoExcelTool | 17eaa08c08312e59c5a8f6114a121177ca65508a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/import_test_package/import_public.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf.internal.import_test_package import import_public_nested_pb2 as google_dot_protobuf_dot_internal_dot_import__test__package_dot_import__public__nested__pb2
from google.protobuf.internal.import_test_package.import_public_nested_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/import_test_package/import_public.proto',
package='google.protobuf.python.internal.import_test_package',
syntax='proto2',
serialized_options=b'H\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n@google/protobuf/internal/import_test_package/import_public.proto\x12\x33google.protobuf.python.internal.import_test_package\x1aGgoogle/protobuf/internal/import_test_package/import_public_nested.protoB\x02H\x01P\x00'
,
dependencies=[google_dot_protobuf_dot_internal_dot_import__test__package_dot_import__public__nested__pb2.DESCRIPTOR,],
public_dependencies=[google_dot_protobuf_dot_internal_dot_import__test__package_dot_import__public__nested__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 45.5 | 236 | 0.856532 |
| 0 | 0 | 0 |
fef38ca47668163b162c207f959688da2177c198 | 776 | py | Python | pomdpy/util/console.py | wecacuee/POMDPy | c23ebf90f5815db4326564110487779961736b60 | [
"MIT"
] | 210 | 2015-04-23T17:05:50.000Z | 2022-03-14T08:00:00.000Z | pomdpy/util/console.py | wecacuee/POMDPy | c23ebf90f5815db4326564110487779961736b60 | [
"MIT"
] | 15 | 2015-04-13T05:36:14.000Z | 2019-05-06T19:14:50.000Z | pomdpy/util/console.py | wecacuee/POMDPy | c23ebf90f5815db4326564110487779961736b60 | [
"MIT"
] | 76 | 2016-08-18T03:54:08.000Z | 2022-01-26T09:08:23.000Z | """
CONSOLE LOGGING VERBOSITY LEVELS
---------------------------------
0 - FATAL
1 - CRITICAL
2 - INFO
3 - LOUD
4 - DEBUG
"""
from __future__ import print_function
VERBOSITY = 3
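# Illustrative gating behaviour (editor's sketch): with VERBOSITY = 3, a helper
# such as console(2, "Solver", "starting run") prints because 2 <= 3, while
# console(4, "Solver", "debug detail") is suppressed until VERBOSITY is raised.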
| 20.421053 | 87 | 0.489691 | """
CONSOLE LOGGING VERBOSITY LEVELS
---------------------------------
0 - FATAL
1 - CRITICAL
2 - INFO
3 - LOUD
4 - DEBUG
"""
from __future__ import print_function
VERBOSITY = 3
def print_divider(size):
if size == "large":
print("======================================================================")
elif size == "medium":
print("==========================================")
else:
print("========")
def console(verbosity_level, module, msg):
if verbosity_level > VERBOSITY:
return
else:
print(module + ' - ' + msg)
# Pass a function that handles printing
def console_no_print(verbosity_level, func):
if verbosity_level > VERBOSITY:
return
else:
assert callable(func)
func()
| 485 | 0 | 68 |
44ed70cbfc3304e7e67ef97dda862a36beaab64b | 6,182 | py | Python | scripts/download_objectron_shards.py | yycho0108/ai604-video-object-pose | 7067f36281038272b0e39166d8f9718076bb6e75 | [
"MIT"
] | null | null | null | scripts/download_objectron_shards.py | yycho0108/ai604-video-object-pose | 7067f36281038272b0e39166d8f9718076bb6e75 | [
"MIT"
] | 13 | 2021-04-13T04:58:38.000Z | 2021-06-17T04:58:07.000Z | scripts/download_objectron_shards.py | yycho0108/ai604-video-object-pose | 7067f36281038272b0e39166d8f9718076bb6e75 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import multiprocessing as mp
import functools
import logging
import time
from typing import List
from tqdm.auto import tqdm
from dataclasses import dataclass
from simple_parsing import Serializable
from pathlib import Path
from google.cloud import storage
from top.data.objectron_detection import ObjectronDetection
from top.run.app_util import update_settings
bucket = None
def download_shard(shard: str, out_dir: str, bucket_local=None):
"""Download a single shard into `out_dir`.
NOTE(ycho): The output file is automatically named according to the base-name of
`shard`.
"""
global bucket
# Convert arg to a path object, just in case ...
out_dir = Path(out_dir)
# Configure names and download.
basename = shard.split('/')[-1]
out_file = (out_dir / basename)
if bucket_local is None:
# NOTE(ycho): Fallback to global bucket
bucket_local = bucket
blob = bucket_local.blob(shard)
try:
blob.download_to_filename(str(out_file))
except KeyboardInterrupt as e:
# NOTE(ycho): This seems to be the only working solution,
# which is to cleanup only on SIGINT.
# Catching a general `Exception` does not work. Not sure why.
if out_file.exists():
logging.debug(F'unlink: {out_file}')
out_file.unlink()
return 0
# NOTE(ycho): since we're not downloading metadata through get_blob(),
# we need to stat the local file for the size, in bytes.
return out_file.stat().st_size
def init_worker():
"""Set global variable `bucket` to point to cloud.
NOTE(ycho): This function is only used for mp.Pool.
"""
global bucket
client = storage.Client.create_anonymous_client()
bucket = client.bucket('objectron')
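# Illustrative pool wiring (editor's sketch; `shards` and the output directory
# are placeholders): init_worker gives every forked worker its own anonymous
# GCS client, after which shards can be fetched in parallel, e.g.
#   with mp.Pool(8, init_worker) as pool:
#       sizes = pool.map(
#           functools.partial(download_shard, out_dir='/tmp/objectron'), shards)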
if __name__ == '__main__':
main()
| 34.926554 | 103 | 0.573924 | #!/usr/bin/env python3
import multiprocessing as mp
import functools
import logging
import time
from typing import List
from tqdm.auto import tqdm
from dataclasses import dataclass
from simple_parsing import Serializable
from pathlib import Path
from google.cloud import storage
from top.data.objectron_detection import ObjectronDetection
from top.run.app_util import update_settings
bucket = None
def download_shard(shard: str, out_dir: str, bucket_local=None):
"""Download a single shard into `out_dir`.
NOTE(ycho): The output file is automatically named according to the base-name of
`shard`.
"""
global bucket
# Convert arg to a path object, just in case ...
out_dir = Path(out_dir)
# Configure names and download.
basename = shard.split('/')[-1]
out_file = (out_dir / basename)
if bucket_local is None:
# NOTE(ycho): Fallback to global bucket
bucket_local = bucket
blob = bucket_local.blob(shard)
try:
blob.download_to_filename(str(out_file))
except KeyboardInterrupt as e:
# NOTE(ycho): This seems to be the only working solution,
# which is to cleanup only on SIGINT.
# Catching a general `Exception` does not work. Not sure why.
if out_file.exists():
logging.debug(F'unlink: {out_file}')
out_file.unlink()
return 0
# NOTE(ycho): since we're not downloading metadata through get_blob(),
# we need to stat the local file for the size, in bytes.
return out_file.stat().st_size
def download_shards(shards: List[str], out_dir: str,
stop: mp.Value, queue: mp.Queue):
client = storage.Client.create_anonymous_client()
bucket = client.bucket('objectron')
for shard in shards:
# Check if we should stop.
with stop.get_lock():
if stop.value:
break
# Download ...
shard_bytes = download_shard(shard, out_dir, bucket)
if shard_bytes == 0:
break
# Return the number of downloaded bytes for accumulation.
queue.put_nowait(shard_bytes)
@dataclass
class Settings(Serializable):
max_train_bytes: int = 32 * (2 ** 30) # default 32GB
max_test_bytes: int = 4 * (2 ** 30) # default 4GB
num_workers: int = 8
cache_dir: str = '~/.cache/ai604/'
log_period_sec: float = 1.0
use_pool: bool = False
def init_worker():
"""Set global variable `bucket` to point to cloud.
NOTE(ycho): This function is only used for mp.Pool.
"""
global bucket
client = storage.Client.create_anonymous_client()
bucket = client.bucket('objectron')
def main():
logging.basicConfig(level=logging.INFO)
opts = Settings()
opts = update_settings(opts)
pool_states = [{} for _ in range(opts.num_workers)]
# for train in [False, True]:
for train in [False, True]:
name = 'objectron-train' if train else 'objectron-test'
logging.info(F'Processing {name}')
max_bytes = opts.max_train_bytes if train else opts.max_test_bytes
# TODO(ycho): Consider fancier (e.g. class-equalizing) shard samplers.
shards = ObjectronDetection(
ObjectronDetection.Settings(local=False), train).shards
out_dir = (Path(opts.cache_dir).expanduser() / name)
out_dir.mkdir(parents=True, exist_ok=True)
if opts.use_pool:
# NOTE(ycho): The initial approach based on mp.Pool().
# Turned out that it is not possible to guarantee graceful exit in
# this way.
_download = functools.partial(download_shard, out_dir=out_dir)
with mp.Pool(opts.num_workers, init_worker) as p:
with tqdm(total=max_bytes) as pbar:
total_bytes = 0
for shard_bytes in p.imap_unordered(_download, shards):
pbar.update(shard_bytes)
# Accumulate and check for termination.
total_bytes += shard_bytes
if total_bytes >= max_bytes:
logging.info(F'Done: {total_bytes} > {max_bytes}')
# NOTE(ycho): Due to bug in mp.Pool(), imap_unordered() with close()/join()
# does NOT work, thus we implicitly call terminate() via context manager
# which may result in incomplete shards. This condition
# must be checked.
break
else:
init_bytes = sum(
f.stat().st_size for f in out_dir.rglob('*') if f.is_file())
logging.info(F'Starting from {init_bytes}/{max_bytes} ...')
ctx = mp.get_context('fork')
stop = ctx.Value('b', (init_bytes >= max_bytes))
queue = ctx.Queue()
workers = [ctx.Process(target=download_shards,
args=(shards[i:: opts.num_workers],
out_dir, stop, queue))
for i in range(opts.num_workers)]
# Start!
for p in workers:
p.start()
# Progress logging ...
try:
with tqdm(initial=init_bytes, total=max_bytes) as pbar:
# Periodically check progress...
total_bytes = init_bytes
while True:
shard_bytes = queue.get()
pbar.update(shard_bytes)
total_bytes += shard_bytes
if total_bytes >= max_bytes:
break
except KeyboardInterrupt:
logging.info('Cancelling download, trying to clean up ...')
pass
finally:
# Stop.
with stop.get_lock():
stop.value = True
# Join.
logging.info(
'Download completed, joining the rest of the processes...')
for p in workers:
p.join()
if __name__ == '__main__':
main()
| 4,012 | 244 | 68 |
24275aed84864bf05f1c599575bbe3d0f445671d | 1,486 | py | Python | tests/documentation/library/test_lib_mechanism_docs.py | bdsinger/PsyNeuLink | 71d8a0bb1691ff85061d4ad3de866d9930a69a73 | [
"Apache-2.0"
] | null | null | null | tests/documentation/library/test_lib_mechanism_docs.py | bdsinger/PsyNeuLink | 71d8a0bb1691ff85061d4ad3de866d9930a69a73 | [
"Apache-2.0"
] | null | null | null | tests/documentation/library/test_lib_mechanism_docs.py | bdsinger/PsyNeuLink | 71d8a0bb1691ff85061d4ad3de866d9930a69a73 | [
"Apache-2.0"
] | null | null | null | import doctest
import os
import pytest
import psyneulink as pnl
| 28.037736 | 83 | 0.650067 | import doctest
import os
import pytest
import psyneulink as pnl
def test_ddm_docs():
# FIXME: Does this run outside of the test directory?
# os.chdir('../../Matlab/DDMFunctions')
# print("current dir = {}".format(os.getcwd()))
# ALSO FIXME: ValueError cannot convert float NaN integer
fail, total = doctest.testmod(
pnl.library.components.mechanisms.processing.integrator.ddm
)
if fail > 0:
pytest.fail("{} out of {} examples failed".format(fail, total),
pytrace=False)
def test_comparator_mechanism_docs():
fail, total = doctest.testmod(
pnl.library.components.mechanisms.processing.objective.comparatormechanism
)
if fail > 0:
pytest.fail("{} out of {} examples failed".format(fail, total),
pytrace=False)
def test_lc_control_mechanism_docs():
fail, test = doctest.testmod(
pnl.library.components.mechanisms.adaptive.control.agt.lccontrolmechanism,
optionflags=doctest.REPORT_NDIFF
)
if fail > 0:
pytest.fail("{} out of {} examples failed".format(fail, test),
pytrace=False)
def test_evc_control_mechanism_docs():
fail, test = doctest.testmod(
pnl.library.components.mechanisms.adaptive.control.evc.evccontrolmechanism,
optionflags=doctest.REPORT_NDIFF
)
if fail > 0:
pytest.fail("{} out of {} examples failed".format(fail, test),
pytrace=False)
| 1,324 | 0 | 92 |
0893a71e0f5da806a8a314e41845ca5a36b8b9a3 | 1,231 | py | Python | scripts/text_analysis.py | Lukas-Justen/Airbnb-Price-Evaluator | 8759e32e94510520984223d18f0f9b09396aa448 | [
"MIT"
] | null | null | null | scripts/text_analysis.py | Lukas-Justen/Airbnb-Price-Evaluator | 8759e32e94510520984223d18f0f9b09396aa448 | [
"MIT"
] | null | null | null | scripts/text_analysis.py | Lukas-Justen/Airbnb-Price-Evaluator | 8759e32e94510520984223d18f0f9b09396aa448 | [
"MIT"
] | 3 | 2019-02-05T03:09:38.000Z | 2020-01-30T02:55:37.000Z | import pandas as pd
import re
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from nltk.corpus import stopwords
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
data = pd.read_csv('data/seattle/3/listings_texts.csv')
corpus = data['description']
y = data['price']
X=[]
stop_set = set(stopwords.words('english'))  # build the stop list once; re-reading it per line is very slow
for i,line in enumerate(corpus):
    clear = [x for x in re.sub(r'[^\w\'\s]', '', line.lower()).split() if x not in stop_set]
    X.append(' '.join(clear))
if i%100 == 0:
print("Progress : ", i)
if i == 4000:
break
print("Moving on!")
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X)
net = Sequential()
net.add(Dense(200, input_dim=X[0].shape[1], kernel_initializer='normal',activation='relu'))
net.add(Dense(100, input_dim=200, kernel_initializer='normal',activation='relu'))
net.add(Dense(1, input_dim=100, kernel_initializer='normal'))
net.compile(loss='mean_squared_error', optimizer='adam')
net.fit(X[:3000],y[:3000], epochs=70, batch_size=100)
print(net.evaluate(X[3001:] ,y[3001:]))
for i in range(50):
print(net.predict(X[3001+i]), y[3001+i])
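# Editor's note (illustrative): fit_transform yields a (num_documents,
# vocabulary_size) matrix, which is why the first Dense layer takes
# input_dim = X[0].shape[1]; the vocabulary size depends on the corpus.
#   print(X.shape)   # e.g. (4001, <vocabulary size>)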
| 31.564103 | 109 | 0.718115 |
| 0 | 0 | 0 |
af623b00cd842370a410852427b7895cb98e73bd | 1,591 | py | Python | data/plot_produced_events_frequency.py | lpraat/scep2019 | f120ee20397648e708cce41a7949c70b523b6e56 | [
"MIT"
] | 1 | 2021-11-02T20:34:22.000Z | 2021-11-02T20:34:22.000Z | data/plot_produced_events_frequency.py | lpraat/scep2019 | f120ee20397648e708cce41a7949c70b523b6e56 | [
"MIT"
] | null | null | null | data/plot_produced_events_frequency.py | lpraat/scep2019 | f120ee20397648e708cce41a7949c70b523b6e56 | [
"MIT"
] | 1 | 2021-11-02T20:34:29.000Z | 2021-11-02T20:34:29.000Z | import time
import matplotlib.pyplot as plt
from confluent_kafka import Producer
if __name__ == '__main__':
plot_events_produced_frequency('first_half.txt')
| 25.253968 | 103 | 0.540541 | import time
import matplotlib.pyplot as plt
from confluent_kafka import Producer
def delivery_report(err, msg):
if err is not None:
print('Message delivery failed: {}'.format(err))
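# Editor's note (illustrative): produce() raises BufferError when the client's
# local queue is full; the retry loop below calls poll() so delivery callbacks
# run and the queue drains before the same event is attempted again.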
def plot_events_produced_frequency(data):
second = 0
seconds = []
num_events = []
producer = Producer({'bootstrap.servers': 'localhost:9092'})
with open(data, 'r') as first_half:
num_events_produced = 0
delta = 0
curr = time.time()
for line in first_half:
event_str = line[:-1]
produced = False
while not produced:
try:
producer.poll(0)
producer.produce('fake-topic', event_str.encode('ascii'), callback=delivery_report)
produced = True
num_events_produced += 1
except BufferError:
producer.poll(0.001)
now = time.time()
delta += now - curr
curr = time.time()
if delta >= 1:
seconds.append(second)
num_events.append(num_events_produced)
num_events_produced = 0
second = second + 1
delta = 0
if second == 60*5:
break
fig = plt.figure()
plt.xlabel('Second')
plt.ylabel('Num events')
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 60*5)
ax.set_ylim(0, 200000)
ax.plot(seconds, num_events)
plt.show()
if __name__ == '__main__':
plot_events_produced_frequency('first_half.txt')
| 1,379 | 0 | 46 |
c441282bcbc825edec262db51bb98fc861239dc2 | 10,372 | py | Python | postcipes/unstructured_channel_flow.py | Mopolino8/postcipes | 5d67b383aa3e314b581b5262ba95f734ecb6369f | [
"MIT"
] | null | null | null | postcipes/unstructured_channel_flow.py | Mopolino8/postcipes | 5d67b383aa3e314b581b5262ba95f734ecb6369f | [
"MIT"
] | null | null | null | postcipes/unstructured_channel_flow.py | Mopolino8/postcipes | 5d67b383aa3e314b581b5262ba95f734ecb6369f | [
"MIT"
] | 1 | 2019-11-24T17:11:16.000Z | 2019-11-24T17:11:16.000Z | # This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
import numpy as np
from os.path import join
from scipy.integrate import simps
from collections import OrderedDict
from scipy.interpolate import LinearNDInterpolator
from scipy.spatial import Delaunay
import h5py
__all__ = ["UnstructuredChannelFlow"]
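# Illustrative usage (editor's sketch; the case path and parameters are
# placeholders for the class defined in the full module):
#   flow = UnstructuredChannelFlow('channelCase', nu=1e-5, nSamples=200)
#   flow.compute()   # streamwise-average the fields onto the inlet seed points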
| 40.515625 | 100 | 0.558427 | # This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
import numpy as np
from os.path import join
from scipy.integrate import simps
from collections import OrderedDict
from scipy.interpolate import LinearNDInterpolator
from scipy.spatial import Delaunay
import h5py
__all__ = ["UnstructuredChannelFlow"]
class UnstructuredChannelFlow(Postcipe):
def __init__(self, path, nu, nSamples, wallModel=False):
Postcipe.__init__(self)
self.case = path
self.readPath = join(self.case)
self.nu = nu
self.tblCase = tbl.Case(self.readPath)
self.nSamples = nSamples
self.wallModel = wallModel
# line = vtk.vtkLineSource()
# probeFilter = vtk.vtkProbeFilter()
# probeFilter.SetSourceData(self.tblCase.vtkData.VTKObject)
# smallDx = 9/(2*nSamples)
# for seed in range(int(nSeedPoints)):
#
# seedPoint = seeds[seed]
# line.SetResolution(nSamples - 1)
# line.SetPoint1(0 + smallDx, seedPoint, 0)
# line.SetPoint2(9 - smallDx, seedPoint, 0)
# line.Update()
#
# probeFilter.SetInputConnection(line.GetOutputPort())
# probeFilter.Update()
#
# probeData = dsa.WrapDataObject(probeFilter.GetOutput()).PointData
#
# for field in avrgFields:
# if avrgFields[field].shape[1] == 9: # a tensor
# reshaped = probeData[field].reshape((nSamples, 9))
# avrgFields[field][seed] = np.mean(reshaped, axis=0)
# else:
# avrgFields[field][seed] = np.mean(probeData[field], axis=0)
#
# self.avrgFields = avrgFields
def compute(self):
seeds = np.sort(self.tblCase.boundary_data("inlet")[0][:, 1])
avrgFields = OrderedDict()
cellData = self.tblCase.vtkData.GetCellData()
nFields = cellData.GetNumberOfArrays()
nSeedPoints = seeds.size
for field in range(nFields):
name = cellData.GetArrayName(field)
nCols = cellData.GetArray(field).GetNumberOfComponents()
avrgFields[name] = np.zeros((nSeedPoints, nCols))
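        # Averaging strategy (what the code below does): triangulate the cell
        # centres together with the inlet/outlet boundary points so every field
        # can be linearly interpolated anywhere in the plane, then average each
        # field along x at every wall-normal seed location.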
coords = np.row_stack((self.tblCase.cellCentres,
self.tblCase.boundary_data("inlet")[0],
self.tblCase.boundary_data("outlet")[0]))
delaunay = Delaunay(coords)
dx = 9/self.nSamples
for field in avrgFields:
if np.ndim(self.tblCase[field]) == 1:
data = np.row_stack((self.tblCase[field][:, np.newaxis],
self.tblCase.boundary_data("inlet")[1][field][:, np.newaxis],
self.tblCase.boundary_data("outlet")[1][field][:, np.newaxis]))
else:
data = np.row_stack((self.tblCase[field],
self.tblCase.boundary_data("inlet")[1][field],
self.tblCase.boundary_data("outlet")[1][field]))
interpolant = LinearNDInterpolator(delaunay, data)
for seed in range(int(nSeedPoints)):
x = dx/2
for i in range(self.nSamples-1):
avrgFields[field][seed] += interpolant([x, seeds[seed]])[0]
x += dx
avrgFields[field][seed] /= (self.nSamples-1)
self.avrgFields = avrgFields
self.y = np.append(np.append([0], seeds), [2])
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['UMean'][:,0])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['UMean'][:,0])
self.u = np.append(np.append(bot, avrgFields['UMean'][:, 0]), top)
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['UPrime2Mean'][:,0])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['UPrime2Mean'][:,0])
self.uu = np.append(np.append(bot, avrgFields['UPrime2Mean'][:, 0]), top)
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['UPrime2Mean'][:,1])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['UPrime2Mean'][:,1])
self.vv = np.append(np.append(bot, avrgFields['UPrime2Mean'][:, 1]), top)
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['UPrime2Mean'][:,2])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['UPrime2Mean'][:,2])
self.ww = np.append(np.append(bot, avrgFields['UPrime2Mean'][:, 2]), top)
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['UPrime2Mean'][:,3])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['UPrime2Mean'][:,3])
self.uv = np.append(np.append(bot, avrgFields['UPrime2Mean'][:, 3]), top)
self.k = 0.5*(self.uu + self.vv + self.ww)
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['nutMean'])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['nutMean'])
self.nut = np.append(np.append(bot, avrgFields['nutMean']), top)
bot = np.mean(self.tblCase.boundary_data("bottomWall")[1]['wallShearStressMean'][:, 0])
top = np.mean(self.tblCase.boundary_data("topWall")[1]['wallShearStressMean'][:, 0])
self.tau = 0
if self.wallModel:
            self.wss = np.append(np.append(bot, avrgFields['wallShearStressMean'][:, 0]), top)
self.tau = 0.5*(self.wss[0] + self.wss[-1])
else:
self.tau = self.nu*0.5*(self.u[1] + self.u[-2])/self.y[1]
self.uTau = np.sqrt(self.tau)
self.delta = 0.5*(self.y[-1] - self.y[0])
self.uB = simps(self.u, self.y)/(2*self.delta)
self.uC = 0.5*(self.u[int(self.y.size/2)] +
self.u[int(self.y.size/2) -1])
self.yPlus = self.y*self.uTau/self.nu
self.uPlus = self.u/self.uTau
self.uuPlus = self.uu/self.uTau**2
self.vvPlus = self.vv/self.uTau**2
self.wwPlus = self.ww/self.uTau**2
self.uvPlus = self.uv/self.uTau**2
self.kPlus = self.k/self.uTau**2
self.uRms = np.sqrt(self.uu)/self.uTau
self.vRms = np.sqrt(self.vv)/self.uTau
self.wRms = np.sqrt(self.ww)/self.uTau
self.reTau = self.uTau*self.delta/self.nu
self.reB = self.uB*self.delta/self.nu
self.reC = self.uC*self.delta/self.nu
self.theta = tbl.momentum_thickness(self.y[:int(self.y.size/2)],
self.u[:int(self.u.size/2)],
interpolate=True)
self.delta99 = tbl.delta_99(self.y[:int(self.y.size/2)],
self.u[:int(self.u.size/2)],
interpolate=True)
#
self.deltaStar = tbl.delta_star(self.y[:int(self.y.size/2)],
self.u[:int(self.u.size/2)],
interpolate=True)
#
self.reTheta = self.theta*self.uC/self.nu
self.reDelta99 = self.delta99*self.uC/self.nu
self.reDeltaStar = self.deltaStar*self.uC/self.nu
def save(self, name):
f = h5py.File(name, 'w')
f.attrs["nu"] = self.nu
f.attrs["uTau"] = self.uTau
f.attrs["uB"] = self.uB
f.attrs["uC"] = self.uC
f.attrs["delta"] = self.delta
f.attrs["delta99"] = self.delta99
f.attrs["deltaStar"] = self.deltaStar
f.attrs["theta"] = self.reTheta
f.attrs["reDelta99"] = self.reDelta99
f.attrs["reDeltaStar"] = self.reDeltaStar
f.attrs["reTheta"] = self.reTheta
f.attrs["reTau"] = self.reTau
f.attrs["reB"] = self.reB
f.attrs["reC"] = self.reC
f.create_dataset("y", data=self.y)
f.create_dataset("u", data=self.u)
f.create_dataset("uu", data=self.uu)
f.create_dataset("vv", data=self.vv)
f.create_dataset("ww", data=self.ww)
f.create_dataset("k", data=self.k)
f.create_dataset("uv", data=self.uv)
f.create_dataset("nut", data=self.nut)
f.create_dataset("yPlus",data=self.yPlus)
f.create_dataset("uPlus", data=self.uPlus)
f.create_dataset("uuPlus", data=self.uuPlus)
f.create_dataset("vvPlus", data=self.vvPlus)
f.create_dataset("wwPlus", data=self.wwPlus)
f.create_dataset("uvPlus", data=self.uvPlus)
f.create_dataset("kPlus", data=self.kPlus)
f.create_dataset("uRms", data=self.uRms)
f.create_dataset("vRms", data=self.vRms)
f.create_dataset("wRms", data=self.wRms)
f.close()
def load(self, name):
f = h5py.File(name, 'r')
self.nu = f.attrs["nu"]
self.uTau = f.attrs["uTau"]
self.uB = f.attrs["uB"]
self.uC = f.attrs["uC"]
self.delta = f.attrs["delta"]
self.delta99 = f.attrs["delta99"]
self.deltaStar = f.attrs["deltaStar"]
self.reTheta = f.attrs["theta"]
self.reDelta99 = f.attrs["reDelta99"]
self.reDeltaStar = f.attrs["reDeltaStar"]
self.reTheta = f.attrs["reTheta"]
self.reTau = f.attrs["reTau"]
self.reB = f.attrs["reB"]
self.reC = f.attrs["reC"]
self.y = f["y"][:]
self.u = f["u"][:]
self.uu = f["uu"][:]
self.vv = f["vv"][:]
self.ww = f["ww"][:]
self.k = f["k"][:]
self.uv = f["uv"][:]
self.nut = f["nut"][:]
self.yPlus = f["yPlus"][:]
self.uPlus = f["uPlus"][:]
self.uuPlus= f["uuPlus"][:]
self.vvPlus = f["vvPlus"][:]
self.wwPlus = f["wwPlus"][:]
self.uvPlus = f["uvPlus"][:]
self.uvPlus = f["kPlus"][:]
self.uRms = f["uRms"][:]
self.vRms = f["vRms"][:]
self.vRms = f["wRms"][:]
self.kPlus = f["kPlus"][:]
f.close()
def utau_relative_error(self, bench, procent=True, abs=False):
error = (self.uTau - bench)/bench
if procent:
error *= 100
if abs:
error = np.abs(error)
return error
| 9,601 | 19 | 158 |
d92f467975214762a809181706879ef56dad667e | 290 | py | Python | web/impact/impact/v1/views/judging_round_detail_view.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | [
"MIT"
] | 5 | 2017-10-19T15:11:52.000Z | 2020-03-08T07:16:21.000Z | web/impact/impact/v1/views/judging_round_detail_view.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | [
"MIT"
] | 182 | 2017-06-21T19:32:13.000Z | 2021-03-22T13:38:16.000Z | web/impact/impact/v1/views/judging_round_detail_view.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | [
"MIT"
] | 1 | 2018-06-23T11:53:18.000Z | 2018-06-23T11:53:18.000Z | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from impact.v1.helpers import JudgingRoundHelper
from impact.v1.views.base_detail_view import BaseDetailView
| 26.363636 | 59 | 0.813793 | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from impact.v1.helpers import JudgingRoundHelper
from impact.v1.views.base_detail_view import BaseDetailView
class JudgingRoundDetailView(BaseDetailView):
view_name = "judging_round_detail"
helper_class = JudgingRoundHelper
| 0 | 101 | 23 |
9ccb8028ad0763b4db88db48c471d0ce6a4d0c23 | 443 | py | Python | ecr_list.py | willfong/aws-helper | 21708044fbf95b76393e9b5f0e86c5e74ff11c77 | [
"MIT"
] | null | null | null | ecr_list.py | willfong/aws-helper | 21708044fbf95b76393e9b5f0e86c5e74ff11c77 | [
"MIT"
] | null | null | null | ecr_list.py | willfong/aws-helper | 21708044fbf95b76393e9b5f0e86c5e74ff11c77 | [
"MIT"
] | null | null | null | import boto3
import base64
aws_ecr_client = boto3.client('ecr')
response = aws_ecr_client.describe_repositories()
for repo in response['repositories']:
response = aws_ecr_client.get_authorization_token(registryIds=[repo['registryId']])
print(f"{repo['repositoryName']}\t{repo['repositoryArn']}")
#print(f" {response['authorizationData'][0]['proxyEndpoint']}")
#print(response['authorizationData'][0]['authorizationToken'])
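    # Note: authorizationToken is base64("AWS:<password>") and pairs with the
    # proxyEndpoint above (hence the otherwise-unused base64 import); decoding
    # it yields the credentials for a `docker login` against the registry.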
| 34.076923 | 87 | 0.742664 | import boto3
import base64
aws_ecr_client = boto3.client('ecr')
response = aws_ecr_client.describe_repositories()
for repo in response['repositories']:
response = aws_ecr_client.get_authorization_token(registryIds=[repo['registryId']])
print(f"{repo['repositoryName']}\t{repo['repositoryArn']}")
#print(f" {response['authorizationData'][0]['proxyEndpoint']}")
#print(response['authorizationData'][0]['authorizationToken'])
| 0 | 0 | 0 |
941d365262555a8c34051404c843d63f740f4d10 | 3,915 | py | Python | tests/constants.py | acutaia/goeasy-ublox_reader | f4662389667c9087ca73dd33e5122891bd05db8a | [
"Apache-2.0"
] | null | null | null | tests/constants.py | acutaia/goeasy-ublox_reader | f4662389667c9087ca73dd33e5122891bd05db8a | [
"Apache-2.0"
] | null | null | null | tests/constants.py | acutaia/goeasy-ublox_reader | f4662389667c9087ca73dd33e5122891bd05db8a | [
"Apache-2.0"
] | null | null | null | """
Test constants
:author: Angelo Cutaia
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import os
import time
FAKE_DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "fake_data.txt")
"""Path of the file containing the fake data"""
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
########
# TIME #
########
TIME_MESSAGE_PAYLOAD = bytes(
[
0x1,
0x25,
0x14,
0x0,
0x0,
0x16,
0x9C,
0x16,
0xC0,
0xC9,
0x5,
0x0,
0x1C,
0xA4,
0x2,
0x0,
0x31,
0x4,
0x12,
0x7,
0x3,
0x0,
0x0,
0x0,
0xA3,
0xEF,
]
)
"""Time message payload with"""
raw_galTow = 379328
"""Galielo time of the week"""
raw_galWno = 1073
"""Galielo week number"""
raw_leapS = 18
"""Galileo leap seconds"""
timestampMessage_unix = 1584609709997
"""Time stamp of the message in a unix system"""
timestampMessage_galileo = 649329725
"""Time stamp of the message in galileo"""
time_raw_ck_A = 163
"""Time checksum A"""
time_raw_ck_B = 239
"""Time checksum B"""
# ------------------------------------------------------------------------------
###########
# GALILEO #
###########
UBLOX_MESSAGE_PAYLOAD = bytes(
[
0x2,
0x13,
0x2C,
0x0,
0x2,
0x12,
0x1,
0x0,
0x9,
0xE,
0x2,
0xD2,
0x34,
0x77,
0x76,
0x7,
0x5D,
0x63,
0x0,
0x1,
0xF5,
0x51,
0x22,
0x24,
0x0,
0x40,
0xF,
0x7F,
0x0,
0x40,
0x65,
0xA6,
0x2A,
0x0,
0x0,
0x0,
0xD2,
0x57,
0xAA,
0xAA,
0x0,
0x40,
0xBF,
0x3F,
0xD5,
0x9A,
0xE8,
0x3F,
0x4A,
0x7C,
]
)
"""Ublox message payload"""
GALILEO_MESSAGE_PAYLOAD = "077677340100635d242251f57f0f40a66540000000002aaaaa57d23fbf40"
"""Galileo message payload"""
TEST_AUTH_BYTES = bytes([0x0, 0x40, 0x65, 0xA6, 0x2A, 0x0, 0x0, 0x0])
"""Bytes that contain inside the 40 auth bits"""
raw_auth = 0
"""Int value of the 5 authorization bytes"""
raw_svId = 18
"""Galielo service id"""
raw_numWords = 9
"""Num of words"""
raw_ck_A = 74
"""Galileo checksum A"""
raw_ck_B = 124
"""Galileo checksum B"""
# ------------------------------------------------------------------------------
#################
# DATA TO STORE #
#################
DATA_TO_STORE = (
time.time() * 1000,
timestampMessage_unix,
raw_galTow,
raw_galWno,
raw_leapS,
UBLOX_MESSAGE_PAYLOAD.hex(),
GALILEO_MESSAGE_PAYLOAD,
0,
raw_svId,
raw_numWords,
raw_ck_B,
raw_ck_A,
time_raw_ck_A,
time_raw_ck_B,
-1,
timestampMessage_galileo,
)
"""Data to use to test the database"""
| 18.294393 | 88 | 0.516731 | """
Test constants
:author: Angelo Cutaia
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import os
import time
FAKE_DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "fake_data.txt")
"""Path of the file containing the fake data"""
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
########
# TIME #
########
TIME_MESSAGE_PAYLOAD = bytes(
[
0x1,
0x25,
0x14,
0x0,
0x0,
0x16,
0x9C,
0x16,
0xC0,
0xC9,
0x5,
0x0,
0x1C,
0xA4,
0x2,
0x0,
0x31,
0x4,
0x12,
0x7,
0x3,
0x0,
0x0,
0x0,
0xA3,
0xEF,
]
)
"""Time message payload with"""
raw_galTow = 379328
"""Galielo time of the week"""
raw_galWno = 1073
"""Galielo week number"""
raw_leapS = 18
"""Galileo leap seconds"""
timestampMessage_unix = 1584609709997
"""Time stamp of the message in a unix system"""
timestampMessage_galileo = 649329725
"""Time stamp of the message in galileo"""
time_raw_ck_A = 163
"""Time checksum A"""
time_raw_ck_B = 239
"""Time checksum B"""
# ------------------------------------------------------------------------------
###########
# GALILEO #
###########
UBLOX_MESSAGE_PAYLOAD = bytes(
[
0x2,
0x13,
0x2C,
0x0,
0x2,
0x12,
0x1,
0x0,
0x9,
0xE,
0x2,
0xD2,
0x34,
0x77,
0x76,
0x7,
0x5D,
0x63,
0x0,
0x1,
0xF5,
0x51,
0x22,
0x24,
0x0,
0x40,
0xF,
0x7F,
0x0,
0x40,
0x65,
0xA6,
0x2A,
0x0,
0x0,
0x0,
0xD2,
0x57,
0xAA,
0xAA,
0x0,
0x40,
0xBF,
0x3F,
0xD5,
0x9A,
0xE8,
0x3F,
0x4A,
0x7C,
]
)
"""Ublox message payload"""
GALILEO_MESSAGE_PAYLOAD = "077677340100635d242251f57f0f40a66540000000002aaaaa57d23fbf40"
"""Galileo message payload"""
TEST_AUTH_BYTES = bytes([0x0, 0x40, 0x65, 0xA6, 0x2A, 0x0, 0x0, 0x0])
"""Bytes that contain inside the 40 auth bits"""
raw_auth = 0
"""Int value of the 5 authorization bytes"""
raw_svId = 18
"""Galielo service id"""
raw_numWords = 9
"""Num of words"""
raw_ck_A = 74
"""Galileo checksum A"""
raw_ck_B = 124
"""Galileo checksum B"""
# ------------------------------------------------------------------------------
#################
# DATA TO STORE #
#################
DATA_TO_STORE = (
time.time() * 1000,
timestampMessage_unix,
raw_galTow,
raw_galWno,
raw_leapS,
UBLOX_MESSAGE_PAYLOAD.hex(),
GALILEO_MESSAGE_PAYLOAD,
0,
raw_svId,
raw_numWords,
raw_ck_B,
raw_ck_A,
time_raw_ck_A,
time_raw_ck_B,
-1,
timestampMessage_galileo,
)
"""Data to use to test the database"""
| 0 | 0 | 0 |
d827fc4c0960176a711a9d9d10c251e11d8975fa | 239 | py | Python | tests/test_base.py | FlorianLudwig/datamodel-code-generator | 052882f7c5bd883fac0f42916a5a8fc9623f37ce | [
"MIT"
] | null | null | null | tests/test_base.py | FlorianLudwig/datamodel-code-generator | 052882f7c5bd883fac0f42916a5a8fc9623f37ce | [
"MIT"
] | null | null | null | tests/test_base.py | FlorianLudwig/datamodel-code-generator | 052882f7c5bd883fac0f42916a5a8fc9623f37ce | [
"MIT"
] | null | null | null | from datamodel_code_generator.parser.base import snake_to_upper_camel
def test_snake_to_upper_camel_underscore():
"""In case a name starts with a underline, we should keep it."""
assert snake_to_upper_camel('_hello') == '_Hello'
| 34.142857 | 69 | 0.778243 | from datamodel_code_generator.parser.base import snake_to_upper_camel
def test_snake_to_upper_camel_underscore():
"""In case a name starts with a underline, we should keep it."""
assert snake_to_upper_camel('_hello') == '_Hello'
| 0 | 0 | 0 |
05407402a2ccd8b744694eeb09b18264ced17222 | 5,580 | py | Python | Lay_dataset.py | sixduck/Sorting-robot-using-Opencv-Tensorflow-Arduino | 54f58d8899b3c29dcb12eb4843f3418d36824c8a | [
"MIT"
] | null | null | null | Lay_dataset.py | sixduck/Sorting-robot-using-Opencv-Tensorflow-Arduino | 54f58d8899b3c29dcb12eb4843f3418d36824c8a | [
"MIT"
] | null | null | null | Lay_dataset.py | sixduck/Sorting-robot-using-Opencv-Tensorflow-Arduino | 54f58d8899b3c29dcb12eb4843f3418d36824c8a | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import dk_ardruino
import serial
cam_mtx=np.load('utils/cam_mtx.npy')
dist=np.load('utils/dist.npy')
newcam_mtx=np.load('utils/newcam_mtx.npy')
roi=np.load('utils/roi.npy')
matrix = np.load('utils/abc.npy')
area1 = int(open('utils/Resolution.txt').read().split('\n')[2])
width = int(open('utils/Resolution.txt').read().split('\n')[0])
height = int(open('utils/Resolution.txt').read().split('\n')[1])
diff_low_t= int(open('utils/Resolution.txt').read().split('\n')[5])
diff_high_t= int(open('utils/Resolution.txt').read().split('\n')[6])
realwidth = int(open('utils/Resolution.txt').read().split('\n')[3])
realheight = int(open('utils/Resolution.txt').read().split('\n')[4])
camera_number = int(open('utils/Resolution.txt').read().split('\n')[7])
Serial_port = open('utils/Resolution.txt').read().split('\n')[8]
bg_capture=False
bg_counter=0
count = 0
cam = cv2.VideoCapture(camera_number)
make_720p(cam)
ser = serial.Serial(Serial_port, 9600, timeout=1)
arm = True
arm_c=dk_ardruino.arm_controller(ser)
arm_c.wait_forready()
while True:
ret, frame = cam.read()
frame = cv2.undistort(frame, cam_mtx, dist, None, newcam_mtx)
a, b, c, d = roi
frame = frame[b:b+d, a:a+c]
frame1 = frame.copy()
if bg_capture == False:
bg_counter+=1
print(bg_counter)
if bg_counter==10:
crop_nen = cv2.warpPerspective(frame, matrix, (width, height))
# crop_nen= frame
bg_capture=True
if bg_capture == True:
crop_phat_hien = cv2.warpPerspective(frame, matrix, (width, height))
# crop_phat_hien = frame
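        # Background subtraction: diff the warped frame against the stored
        # empty-scene capture, blur and threshold to a binary mask, dilate to
        # close small gaps, then extract object contours from the mask.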
target_gray = cv2.cvtColor(crop_phat_hien, cv2.COLOR_BGR2GRAY)
bg_gray = cv2.cvtColor(crop_nen, cv2.COLOR_BGR2GRAY)
diff_gray = cv2.absdiff(target_gray,bg_gray)
diff_gray_blur = cv2.GaussianBlur(diff_gray,(9,9),0)
ret,diff_tresh = cv2.threshold(diff_gray_blur,diff_low_t,diff_high_t,cv2.THRESH_BINARY)
diff = cv2.GaussianBlur(diff_tresh,(9,9),0)
diff = cv2.dilate(diff, None, iterations=2)
contours, hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
XYZ=[]
for cnt in contours:
(x,y,w,h) = cv2.boundingRect(cnt)
cv2.rectangle(crop_phat_hien, (x, y), (x + w, y + h), (0, 0, 255), 2)
area = w*h
edge_noise=False
if x==0:
edge_noise=True
if y==0:
edge_noise=True
if (x+w)== width:
edge_noise=True
if (y+h)== height:
edge_noise=True
if edge_noise==False:
if area > area1:
realx = (realwidth/width)*(x + (w/2))
realy = (realheight/height)*(y + (h/2))
cv2.rectangle(crop_phat_hien, (x, y), (x + w, y + h), (0, 0, 255), 2)
adjust=0.1
y=int(y-((h*adjust)/2))
if y<0:
y=0
x=int(x-((w*adjust)/2))
if x<0:
x=0
w=int(w*(1+adjust))
h=int(h*(1+adjust))
if y<0: y=0
if x<0: x=0
if (x+w)>width: w=width-x
if (y+h)>height: h=height-y
if w>h:
#ensure contour is centered
y=int(y-((w-h)/2))
if y<0: y=0
#make a square
h=w
if (y+h)>height: y=height-h
if h>w:
x=int(x-((h-w)/2))
if x<0: x=0
w=h
if (x+w)>width: x=width-w
crop_img = crop_phat_hien[y:y+h, x:x+w]
# cv2.rectangle(crop_phat_hien, (x, y), (x + w, y + h), (0, 0, 255), 2)
# cv2.putText(crop_phat_hien, str(count), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 2)
XYZ.append([realx,realy])
# XYZ.append([x,y,w,h])
cv2.imshow('camera', diff)
cv2.imshow('camera1', crop_phat_hien)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
if k%256 == 32:
# for items in XYZ:
# x,y,w,h = items
# crop_img = frame1[y:y+h, x:x+w]
# cv2.imwrite("dataset/frame%d.png" % count, crop_img)
# count += 1
pickanddrop(XYZ,arm)
if (arm==True): arm_c.move_home()
cam.release()
cv2.destroyAllWindows()
ser.close() | 36.953642 | 123 | 0.507706 | import cv2
import numpy as np
import dk_ardruino
import serial
cam_mtx=np.load('utils/cam_mtx.npy')
dist=np.load('utils/dist.npy')
newcam_mtx=np.load('utils/newcam_mtx.npy')
roi=np.load('utils/roi.npy')
matrix = np.load('utils/abc.npy')
area1 = int(open('utils/Resolution.txt').read().split('\n')[2])
width = int(open('utils/Resolution.txt').read().split('\n')[0])
height = int(open('utils/Resolution.txt').read().split('\n')[1])
diff_low_t= int(open('utils/Resolution.txt').read().split('\n')[5])
diff_high_t= int(open('utils/Resolution.txt').read().split('\n')[6])
realwidth = int(open('utils/Resolution.txt').read().split('\n')[3])
realheight = int(open('utils/Resolution.txt').read().split('\n')[4])
camera_number = int(open('utils/Resolution.txt').read().split('\n')[7])
Serial_port = open('utils/Resolution.txt').read().split('\n')[8]
bg_capture=False
bg_counter=0
count = 0
cam = cv2.VideoCapture(camera_number)
def make_720p(cap):
cap.set(3, width)
cap.set(4, height)
make_720p(cam)
ser = serial.Serial(Serial_port, 9600, timeout=1)
arm = True
arm_c=dk_ardruino.arm_controller(ser)
arm_c.wait_forready()
def pickanddrop(XYZ, arm=True):
#set drop position
arm_x_dest=30
arm_y_dest=6
arm_x_dest1=30
arm_y_dest1=-6
for i in range(0,len(XYZ)):
cam_x=76.5-XYZ[i][0] #camera X
cam_x = round(cam_x,3)
cam_y=6-XYZ[i][1] #camera Y
cam_y = round(cam_y,3)
print(arm_x_dest,arm_y_dest,cam_x,cam_y)
if (arm==True): arm_c.move_and_pickup(cam_x,cam_y)
if (arm==True):
arm_c.transport_and_drop(arm_x_dest1,arm_y_dest1)
print(arm_x_dest1,arm_y_dest1,cam_x,cam_y)
if (arm==True): arm_c.move_home()
while True:
ret, frame = cam.read()
frame = cv2.undistort(frame, cam_mtx, dist, None, newcam_mtx)
a, b, c, d = roi
frame = frame[b:b+d, a:a+c]
frame1 = frame.copy()
if bg_capture == False:
bg_counter+=1
print(bg_counter)
if bg_counter==10:
crop_nen = cv2.warpPerspective(frame, matrix, (width, height))
# crop_nen= frame
bg_capture=True
if bg_capture == True:
crop_phat_hien = cv2.warpPerspective(frame, matrix, (width, height))
# crop_phat_hien = frame
target_gray = cv2.cvtColor(crop_phat_hien, cv2.COLOR_BGR2GRAY)
bg_gray = cv2.cvtColor(crop_nen, cv2.COLOR_BGR2GRAY)
diff_gray = cv2.absdiff(target_gray,bg_gray)
diff_gray_blur = cv2.GaussianBlur(diff_gray,(9,9),0)
ret,diff_tresh = cv2.threshold(diff_gray_blur,diff_low_t,diff_high_t,cv2.THRESH_BINARY)
diff = cv2.GaussianBlur(diff_tresh,(9,9),0)
diff = cv2.dilate(diff, None, iterations=2)
contours, hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
XYZ=[]
for cnt in contours:
(x,y,w,h) = cv2.boundingRect(cnt)
cv2.rectangle(crop_phat_hien, (x, y), (x + w, y + h), (0, 0, 255), 2)
area = w*h
edge_noise=False
if x==0:
edge_noise=True
if y==0:
edge_noise=True
if (x+w)== width:
edge_noise=True
if (y+h)== height:
edge_noise=True
if edge_noise==False:
if area > area1:
realx = (realwidth/width)*(x + (w/2))
realy = (realheight/height)*(y + (h/2))
cv2.rectangle(crop_phat_hien, (x, y), (x + w, y + h), (0, 0, 255), 2)
adjust=0.1
y=int(y-((h*adjust)/2))
if y<0:
y=0
x=int(x-((w*adjust)/2))
if x<0:
x=0
w=int(w*(1+adjust))
h=int(h*(1+adjust))
if y<0: y=0
if x<0: x=0
if (x+w)>width: w=width-x
if (y+h)>height: h=height-y
if w>h:
#ensure contour is centered
y=int(y-((w-h)/2))
if y<0: y=0
#make a square
h=w
if (y+h)>height: y=height-h
if h>w:
x=int(x-((h-w)/2))
if x<0: x=0
w=h
if (x+w)>width: x=width-w
crop_img = crop_phat_hien[y:y+h, x:x+w]
# cv2.rectangle(crop_phat_hien, (x, y), (x + w, y + h), (0, 0, 255), 2)
# cv2.putText(crop_phat_hien, str(count), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 2)
XYZ.append([realx,realy])
# XYZ.append([x,y,w,h])
cv2.imshow('camera', diff)
cv2.imshow('camera1', crop_phat_hien)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
if k%256 == 32:
# for items in XYZ:
# x,y,w,h = items
# crop_img = frame1[y:y+h, x:x+w]
# cv2.imwrite("dataset/frame%d.png" % count, crop_img)
# count += 1
pickanddrop(XYZ,arm)
if (arm==True): arm_c.move_home()
cam.release()
cv2.destroyAllWindows()
ser.close() | 650 | 0 | 48 |
0be1ea82fa002a548d8dd7062260cae15d0d9942 | 2,791 | py | Python | backend/tournesol/urls.py | sigmike/tournesol | 9d92ff9c552c3b2923b3222f5f32d6b7e558de29 | [
"CC0-1.0"
] | null | null | null | backend/tournesol/urls.py | sigmike/tournesol | 9d92ff9c552c3b2923b3222f5f32d6b7e558de29 | [
"CC0-1.0"
] | null | null | null | backend/tournesol/urls.py | sigmike/tournesol | 9d92ff9c552c3b2923b3222f5f32d6b7e558de29 | [
"CC0-1.0"
] | null | null | null | # coding: utf-8
"""
Defines Tournesol's backend API routes
"""
from django.urls import include, path
from rest_framework import routers
from .views import ComparisonDetailApi, ComparisonListApi, ComparisonListFilteredApi
from .views.email_domains import EmailDomainsList
from .views.exports import ExportAllView, ExportComparisonsView, ExportPublicComparisonsView
from .views.ratings import (
ContributorRatingDetail,
ContributorRatingList,
ContributorRatingUpdateAll,
)
from .views.stats import StatisticsView
from .views.user import CurrentUserView
from .views.video import VideoViewSet
from .views.video_rate_later import VideoRateLaterDetail, VideoRateLaterList
router = routers.DefaultRouter()
router.register(r'video', VideoViewSet)
app_name = "tournesol"
urlpatterns = [
path("", include(router.urls)),
# User API
path(
"users/me/",
CurrentUserView.as_view(),
name="users_me"
),
# Data exports
path(
"users/me/exports/comparisons/",
ExportComparisonsView.as_view(),
name="export_comparisons"
),
path(
"users/me/exports/all/",
ExportAllView.as_view(),
name="export_all"
),
path(
"exports/comparisons/",
ExportPublicComparisonsView.as_view(),
name="export_public"
),
# Comparison API
path(
"users/me/comparisons/", ComparisonListApi.as_view(),
name="comparisons_me_list",
),
path(
"users/me/comparisons/<str:video_id>/", ComparisonListFilteredApi.as_view(),
name="comparisons_me_list_filtered",
),
path(
"users/me/comparisons/<str:video_id_a>/<str:video_id_b>/",
ComparisonDetailApi.as_view(),
name="comparisons_me_detail",
),
# VideoRateLater API
path(
"users/me/video_rate_later/",
VideoRateLaterList.as_view(),
name="video_rate_later_list",
),
path(
"users/me/video_rate_later/<str:video_id>/",
VideoRateLaterDetail.as_view(),
name="video_rate_later_detail",
),
# Ratings API
path(
"users/me/contributor_ratings/",
ContributorRatingList.as_view(),
name="ratings_me_list",
),
path(
"users/me/contributor_ratings/_all/",
ContributorRatingUpdateAll.as_view(),
name="ratings_me_list_update_is_public",
),
path(
"users/me/contributor_ratings/<str:video_id>/",
ContributorRatingDetail.as_view(),
name="ratings_me_detail",
),
# Email domain API
path(
"domains/",
EmailDomainsList.as_view(),
name="email_domains_list"
),
# Statistics API
path(
"stats/",
StatisticsView.as_view(),
name="statistics_detail"
)
]
| 26.580952 | 92 | 0.654246 | # coding: utf-8
"""
Defines Tournesol's backend API routes
"""
from django.urls import include, path
from rest_framework import routers
from .views import ComparisonDetailApi, ComparisonListApi, ComparisonListFilteredApi
from .views.email_domains import EmailDomainsList
from .views.exports import ExportAllView, ExportComparisonsView, ExportPublicComparisonsView
from .views.ratings import (
ContributorRatingDetail,
ContributorRatingList,
ContributorRatingUpdateAll,
)
from .views.stats import StatisticsView
from .views.user import CurrentUserView
from .views.video import VideoViewSet
from .views.video_rate_later import VideoRateLaterDetail, VideoRateLaterList
router = routers.DefaultRouter()
router.register(r'video', VideoViewSet)
app_name = "tournesol"
urlpatterns = [
path("", include(router.urls)),
# User API
path(
"users/me/",
CurrentUserView.as_view(),
name="users_me"
),
# Data exports
path(
"users/me/exports/comparisons/",
ExportComparisonsView.as_view(),
name="export_comparisons"
),
path(
"users/me/exports/all/",
ExportAllView.as_view(),
name="export_all"
),
path(
"exports/comparisons/",
ExportPublicComparisonsView.as_view(),
name="export_public"
),
# Comparison API
path(
"users/me/comparisons/", ComparisonListApi.as_view(),
name="comparisons_me_list",
),
path(
"users/me/comparisons/<str:video_id>/", ComparisonListFilteredApi.as_view(),
name="comparisons_me_list_filtered",
),
path(
"users/me/comparisons/<str:video_id_a>/<str:video_id_b>/",
ComparisonDetailApi.as_view(),
name="comparisons_me_detail",
),
# VideoRateLater API
path(
"users/me/video_rate_later/",
VideoRateLaterList.as_view(),
name="video_rate_later_list",
),
path(
"users/me/video_rate_later/<str:video_id>/",
VideoRateLaterDetail.as_view(),
name="video_rate_later_detail",
),
# Ratings API
path(
"users/me/contributor_ratings/",
ContributorRatingList.as_view(),
name="ratings_me_list",
),
path(
"users/me/contributor_ratings/_all/",
ContributorRatingUpdateAll.as_view(),
name="ratings_me_list_update_is_public",
),
path(
"users/me/contributor_ratings/<str:video_id>/",
ContributorRatingDetail.as_view(),
name="ratings_me_detail",
),
# Email domain API
path(
"domains/",
EmailDomainsList.as_view(),
name="email_domains_list"
),
# Statistics API
path(
"stats/",
StatisticsView.as_view(),
name="statistics_detail"
)
]
| 0 | 0 | 0 |
5b8a078ff34f1d035aa62e9f50a56a993be2958b | 1,678 | py | Python | src/utils/live-view/WindowCapture.py | soyuka/botty | cc1670ea1db72c5e0ac91685897bf63c89b18896 | [
"MIT"
] | 1 | 2021-11-13T23:23:25.000Z | 2021-11-13T23:23:25.000Z | src/utils/live-view/WindowCapture.py | soyuka/botty | cc1670ea1db72c5e0ac91685897bf63c89b18896 | [
"MIT"
] | null | null | null | src/utils/live-view/WindowCapture.py | soyuka/botty | cc1670ea1db72c5e0ac91685897bf63c89b18896 | [
"MIT"
] | null | null | null | import numpy as np
import win32gui, win32api
import ctypes
import mss
sct = mss.mss() | 41.95 | 135 | 0.681764 | import numpy as np
import win32gui, win32api
import ctypes
import mss
sct = mss.mss()
class WindowCapture:
w = 0
h = 0
hwnd = None
cropped_x = 0
cropped_y = 0
offset_x = 0
offset_y = 0
def __init__(self):
window_handle = win32gui.FindWindow('OsWindow', 'Diablo II: Resurrected')
if not window_handle:
raise Exception('Window not found')
        monitor_handle = ctypes.windll.user32.MonitorFromWindow(window_handle, 2)  # dwFlags=2: MONITOR_DEFAULTTONEAREST
#self.monitor_left, self.monitor_top = win32api.GetMonitorInfo(monitor_handle)['Monitor'][:2]
self.monitor_left, self.monitor_top = (0,0)
self.hwnd = win32gui.GetDesktopWindow()
self.client_left, self.client_top = win32gui.ClientToScreen(window_handle, (0, 0))
self.client_height = 720
self.client_width = 1280
self.client_right, self.client_bottom = self.client_to_screen((self.client_width, self.client_height))
def screen_to_client(self, xy: tuple[int, int]) -> tuple[int, int]:
# for things like detecting mouse movements, which will give screen coordinates
# returns client coords
return xy[0] - self.client_left, xy[1] - self.client_top
def client_to_screen(self, xy: tuple[int, int]) -> tuple[int, int]:
# for things like mouse movements or bbox, which use full virtual desktop
# this function can return negative values
return xy[0] + self.client_left, xy[1] + self.client_top
def get_screenshot(self):
screenshot = sct.grab({'left':self.client_left, 'top':self.client_top, 'width':self.client_width, 'height':self.client_height})
return np.array(screenshot) | 1,358 | 213 | 22 |
e8635a64531764fac2692b44d3e7d7ab7842efa8 | 1,594 | py | Python | pyohio/schedule.py | tylerdave/pyohio | f4d5c4a06f02afdf2f7a326b6072f2e95a9fdcb8 | [
"MIT"
] | null | null | null | pyohio/schedule.py | tylerdave/pyohio | f4d5c4a06f02afdf2f7a326b6072f2e95a9fdcb8 | [
"MIT"
] | null | null | null | pyohio/schedule.py | tylerdave/pyohio | f4d5c4a06f02afdf2f7a326b6072f2e95a9fdcb8 | [
"MIT"
] | null | null | null | import requests
import requests_cache
from collections import OrderedDict
from datetime import datetime
from dateutil import parser
from operator import itemgetter
from tabulate import tabulate
SCHEDULE_URL = "http://pyohio.org/schedule/json/"
def get_schedule(cache_ttl=3600):
""" Get the schedule from the conference website and return the JSON. """
requests_cache.install_cache(expire_after=cache_ttl)
response = requests.get(SCHEDULE_URL)
response.raise_for_status()
return response.json()
def _session_summary(session):
""" Given a detailed session dict, return a summary dict. """
summary = OrderedDict()
summary['date'] = parser.parse(session.get('start', '2016')).date().isoformat()
summary['start_time'] = parser.parse(session.get('start', '2016')).time().strftime('%H:%M')
summary['end_time'] = parser.parse(session.get('end', '2016')).time().strftime('%H:%M')
summary['room'] = session.get('room')
summary['name'] = session.get('name')
authors = session.get('authors', []) or []
summary['presenter'] = ", ".join(authors)
return summary
def make_table(schedule, start_datetime=None):
""" Given a list of session summaries, return a simple text table. """
if start_datetime is None:
start_datetime = datetime(1900, 1, 1)
schedule_summary = [_session_summary(session) for session in schedule if \
parser.parse(session.get('start', '2016')) > start_datetime]
schedule_summary.sort(key=itemgetter('date', 'start_time', 'end_time'))
return tabulate(schedule_summary, tablefmt="plain")
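# Example wiring (illustrative only), e.g. to print just the upcoming sessions:
#   print(make_table(get_schedule(), start_datetime=datetime.now()))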
| 41.947368 | 95 | 0.707026 | import requests
import requests_cache
from collections import OrderedDict
from datetime import datetime
from dateutil import parser
from operator import itemgetter
from tabulate import tabulate
SCHEDULE_URL = "http://pyohio.org/schedule/json/"
def get_schedule(cache_ttl=3600):
""" Get the schedule from the conference website and return the JSON. """
requests_cache.install_cache(expire_after=cache_ttl)
response = requests.get(SCHEDULE_URL)
response.raise_for_status()
return response.json()
def _session_summary(session):
""" Given a detailed session dict, return a summary dict. """
summary = OrderedDict()
summary['date'] = parser.parse(session.get('start', '2016')).date().isoformat()
summary['start_time'] = parser.parse(session.get('start', '2016')).time().strftime('%H:%M')
summary['end_time'] = parser.parse(session.get('end', '2016')).time().strftime('%H:%M')
summary['room'] = session.get('room')
summary['name'] = session.get('name')
authors = session.get('authors', []) or []
summary['presenter'] = ", ".join(authors)
return summary
def make_table(schedule, start_datetime=None):
""" Given a list of session summaries, return a simple text table. """
if start_datetime is None:
start_datetime = datetime(1900, 1, 1)
schedule_summary = [_session_summary(session) for session in schedule if \
parser.parse(session.get('start', '2016')) > start_datetime]
schedule_summary.sort(key=itemgetter('date', 'start_time', 'end_time'))
return tabulate(schedule_summary, tablefmt="plain")
| 0 | 0 | 0 |
748eb425947874e2951b767772a82a62dcdbf127 | 1,348 | py | Python | apps/core/serializers.py | jjorgewill/security | 6db561def4a753892611c7e0033b3ab0afad9a31 | [
"MIT"
] | 1 | 2018-10-23T22:03:23.000Z | 2018-10-23T22:03:23.000Z | apps/core/serializers.py | jjorgewill/security-micro-service | 6db561def4a753892611c7e0033b3ab0afad9a31 | [
"MIT"
] | null | null | null | apps/core/serializers.py | jjorgewill/security-micro-service | 6db561def4a753892611c7e0033b3ab0afad9a31 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from rest_framework import serializers
from django.contrib.auth import authenticate
from django.core import exceptions
from rest_framework.serializers import ModelSerializer
from django.utils.translation import ugettext_lazy as _
| 33.7 | 78 | 0.658754 | from django.contrib.auth.models import User
from rest_framework import serializers
from django.contrib.auth import authenticate
from django.core import exceptions
from rest_framework.serializers import ModelSerializer
from django.utils.translation import ugettext_lazy as _
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name')
class AuthCustomTokenSerializer(serializers.Serializer):
email_or_username = serializers.CharField()
password = serializers.CharField()
def validate(self, attrs):
email_or_username = attrs.get('email_or_username')
password = attrs.get('password')
if email_or_username and password:
            # Check if user sent email; if so, resolve it to a username before
            # authenticating (authenticate() expects a username).
            if '@' in email_or_username:
                matched = User.objects.filter(email=email_or_username).first()
                if matched is not None:
                    email_or_username = matched.username
            user = authenticate(username=email_or_username, password=password)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.ValidationError(msg)
else:
msg = _('Unable to log in with provided credentials.')
raise exceptions.ValidationError(msg)
else:
msg = _('Must include "email or username" and "password"')
raise exceptions.ValidationError(msg)
attrs['user'] = user
return attrs | 773 | 254 | 46 |
7995d4c1e1bb63d1840f0a19cb868618116f6f6c | 575 | py | Python | app/app/migrations/0003_auto_20210109_0016.py | maszaa/remote-desktop-software-controller | 270176b46ce23620e3811a24498f7425df98f396 | [
"MIT"
] | 1 | 2022-01-29T13:44:47.000Z | 2022-01-29T13:44:47.000Z | app/app/migrations/0003_auto_20210109_0016.py | maszaa/remote-desktop-software-controller | 270176b46ce23620e3811a24498f7425df98f396 | [
"MIT"
] | 5 | 2021-01-16T16:19:16.000Z | 2021-01-29T12:59:29.000Z | app/app/migrations/0003_auto_20210109_0016.py | maszaa/remote-desktop-software-controller | 270176b46ce23620e3811a24498f7425df98f396 | [
"MIT"
] | 1 | 2021-01-10T14:45:38.000Z | 2021-01-10T14:45:38.000Z | # Generated by Django 3.1.5 on 2021-01-08 22:16
from django.db import migrations, models
| 25 | 99 | 0.598261 | # Generated by Django 3.1.5 on 2021-01-08 22:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20210108_2129'),
]
operations = [
migrations.AlterModelOptions(
name='command',
options={'ordering': ['command_group__order', 'command_group__name', 'order', 'name']},
),
migrations.AddField(
model_name='window',
name='needs_clicking_center',
field=models.BooleanField(default=False),
),
]
| 0 | 461 | 23 |
1ef431b4957d4a5e8c47ddae3802f8694f92521e | 2,684 | py | Python | haystack/utils/app_loading.py | atn9/django-haystack | bd60745ce82318b1819768c9a31db0579228654d | [
"BSD-3-Clause"
] | null | null | null | haystack/utils/app_loading.py | atn9/django-haystack | bd60745ce82318b1819768c9a31db0579228654d | [
"BSD-3-Clause"
] | null | null | null | haystack/utils/app_loading.py | atn9/django-haystack | bd60745ce82318b1819768c9a31db0579228654d | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_app, get_model, get_models
from django.utils.importlib import import_module
__all__ = ['haystack_get_models', 'haystack_load_apps']
APP = 'app'
MODEL = 'model'
if DJANGO_VERSION >= (1, 7):
from django.apps import apps
def haystack_get_app_modules():
"""Return the Python module for each installed app"""
return [i.module for i in apps.get_app_configs()]
def haystack_load_apps():
"""Return a list of app labels for all installed applications which have models"""
return [i.label for i in apps.get_app_configs() if i.models_module is not None]
else:
def haystack_get_app_modules():
"""Return the Python module for each installed app"""
return [import_module(i) for i in settings.INSTALLED_APPS]
| 31.209302 | 132 | 0.632265 | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_app, get_model, get_models
from django.utils.importlib import import_module
__all__ = ['haystack_get_models', 'haystack_load_apps']
APP = 'app'
MODEL = 'model'
if DJANGO_VERSION >= (1, 7):
from django.apps import apps
def haystack_get_app_modules():
"""Return the Python module for each installed app"""
return [i.module for i in apps.get_app_configs()]
def haystack_load_apps():
"""Return a list of app labels for all installed applications which have models"""
return [i.label for i in apps.get_app_configs() if i.models_module is not None]
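    # haystack_get_models accepts either a plain app label ("blog") or a
    # dotted model label ("blog.Entry") and returns the matching model classes.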
def haystack_get_models(label):
try:
app_mod = get_app(label)
if app_mod is not None:
return get_models(app_mod=app_mod)
except ImproperlyConfigured:
pass
if '.' not in label:
raise ImproperlyConfigured("No installed application has the label %s" % label)
app_label, model_name = label.rsplit('.', 1)
return [get_model(app_label, model_name)]
else:
def is_app_or_model(label):
label_bits = label.split('.')
if len(label_bits) == 1:
return APP
elif len(label_bits) == 2:
try:
get_model(*label_bits)
except LookupError:
return APP
return MODEL
else:
raise ImproperlyConfigured("'%s' isn't recognized as an app (<app_label>) or model (<app_label>.<model_name>)." % label)
def haystack_get_app_modules():
"""Return the Python module for each installed app"""
return [import_module(i) for i in settings.INSTALLED_APPS]
def haystack_load_apps():
# Do all, in an INSTALLED_APPS sorted order.
items = []
for app in settings.INSTALLED_APPS:
app_label = app.split('.')[-1]
try:
get_app(app_label)
except ImproperlyConfigured:
continue # Intentionally allow e.g. apps without models.py
items.append(app_label)
return items
def haystack_get_models(label):
app_or_model = is_app_or_model(label)
if app_or_model == APP:
app_mod = get_app(label)
return get_models(app_mod)
else:
app_label, model_name = label.rsplit('.', 1)
return [get_model(app_label, model_name)]
| 1,533 | 0 | 107 |
474b914f97e6aa9059538049c5d4fc6346792ce1 | 859 | py | Python | recipe_modules/futures/examples/background_helper.resources/helper.py | engeg/recipes-py | 9dac536b55887262b4ce846f3db7a7f596542e5e | [
"Apache-2.0"
] | null | null | null | recipe_modules/futures/examples/background_helper.resources/helper.py | engeg/recipes-py | 9dac536b55887262b4ce846f3db7a7f596542e5e | [
"Apache-2.0"
] | null | null | null | recipe_modules/futures/examples/background_helper.resources/helper.py | engeg/recipes-py | 9dac536b55887262b4ce846f3db7a7f596542e5e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import json
import os
import sys
import time
import signal
signal.signal(
(
signal.SIGBREAK # pylint: disable=no-member
if sys.platform.startswith('win') else
signal.SIGTERM
),
lambda _signum, _frame: sys.exit(0))
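# Routing the platform-specific termination signal through sys.exit() raises
# SystemExit, so the except clause below can log ">> QUITQUITQUIT" before the
# helper shuts down cleanly.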
try:
print "Starting up!"
print >>sys.stderr, ">> SLEEPING 5s"
time.sleep(5)
with open(sys.argv[1], 'wb') as pid_file:
json.dump({
# Note, you could put whatever connection information you wanted here.
'pid': os.getpid(),
}, pid_file)
print >>sys.stderr, ">> DUMPED PIDFILE"
for x in xrange(30):
print "Hi! %s" % x
time.sleep(1)
except SystemExit:
print >>sys.stderr, ">> QUITQUITQUIT"
raise
| 23.216216 | 76 | 0.663562 | # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import json
import os
import sys
import time
import signal
signal.signal(
(
signal.SIGBREAK # pylint: disable=no-member
if sys.platform.startswith('win') else
signal.SIGTERM
),
lambda _signum, _frame: sys.exit(0))
try:
print "Starting up!"
print >>sys.stderr, ">> SLEEPING 5s"
time.sleep(5)
with open(sys.argv[1], 'wb') as pid_file:
json.dump({
# Note, you could put whatever connection information you wanted here.
'pid': os.getpid(),
}, pid_file)
print >>sys.stderr, ">> DUMPED PIDFILE"
for x in xrange(30):
print "Hi! %s" % x
time.sleep(1)
except SystemExit:
print >>sys.stderr, ">> QUITQUITQUIT"
raise
| 0 | 0 | 0 |
8d81e53036950dba7774071b6973aff11f1ed3ef | 1,752 | py | Python | core_dev/animations/progress.py | alexzanderr/_core-dev | 831f69dad524e450c4243b1dd88f26de80e1d444 | [
"MIT"
] | null | null | null | core_dev/animations/progress.py | alexzanderr/_core-dev | 831f69dad524e450c4243b1dd88f26de80e1d444 | [
"MIT"
] | null | null | null | core_dev/animations/progress.py | alexzanderr/_core-dev | 831f69dad524e450c4243b1dd88f26de80e1d444 | [
"MIT"
] | null | null | null |
from time import sleep
import os
from animations import progress_bar
from threading import Thread
import sys
import time
# for i in range(100):
# time.sleep(0.1)
# sys.stdout.write(f'salutare-{i}\n')
# sys.stdout.write(f'andrew-{i}\n')
# sys.stdout.write("\x1b[1A") # cursor up one line
# sys.stdout.write("\x1b[2K") # delete the last line
# sys.stdout.write("\x1b[1A") # cursor up one line
# sys.stdout.write("\x1b[2K") # delete the last line
from random import randrange
from random import uniform
work_ref = [0]
total_range = 100
work_thread = Thread(target=work, args=(total_range, work_ref))
work_thread.start()
work_ref2 = [0]
total_range2 = 100
work_thread2 = Thread(target=work2, args=(total_range2, work_ref2))
work_thread2.start()
while 1:
p = progress_bar(work_ref[0], total_range, color="yellow", title="work_thread")
print(p)
    p = progress_bar(work_ref2[0], total_range2, color="yellow", title="work_thread2")
print(p)
if not work_thread.is_alive() and not work_thread2.is_alive():
break
clear_lines(2)
sleep(0.01)
# if j < 100:
# j += 2
# p = progress_bar(j, 100, color="yellow", title="tqdm")
# print(p)
# if j >= 100 and index >= 100:
# break
| 21.9 | 85 | 0.638699 |
from time import sleep
import os
from animations import progress_bar
from threading import Thread
import sys
import time
# for i in range(100):
# time.sleep(0.1)
# sys.stdout.write(f'salutare-{i}\n')
# sys.stdout.write(f'andrew-{i}\n')
# sys.stdout.write("\x1b[1A") # cursor up one line
# sys.stdout.write("\x1b[2K") # delete the last line
# sys.stdout.write("\x1b[1A") # cursor up one line
# sys.stdout.write("\x1b[2K") # delete the last line
def clear_lines(total=1):
for _ in range(total):
sys.stdout.write("\x1b[1A") # cursor up one line
sys.stdout.write("\x1b[2K") # delete the last line
from random import randrange
from random import uniform
def work(_range, reference: list):
for i in range(_range):
reference[0] = i
sleep(uniform(0.01, 0.2))
reference[0] = _range
def work2(_range, reference: list):
for i in range(_range):
reference[0] = i
sleep(uniform(0.01, 0.05))
reference[0] = _range
work_ref = [0]
total_range = 100
work_thread = Thread(target=work, args=(total_range, work_ref))
work_thread.start()
work_ref2 = [0]
total_range2 = 100
work_thread2 = Thread(target=work2, args=(total_range2, work_ref2))
work_thread2.start()
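# Redraw loop: print one progress bar per worker, then (unless both workers
# have finished) move the cursor up two lines and erase them so the bars
# update in place on the next pass.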
while 1:
p = progress_bar(work_ref[0], total_range, color="yellow", title="work_thread")
print(p)
    p = progress_bar(work_ref2[0], total_range2, color="yellow", title="work_thread2")
print(p)
if not work_thread.is_alive() and not work_thread2.is_alive():
break
clear_lines(2)
sleep(0.01)
# if j < 100:
# j += 2
# p = progress_bar(j, 100, color="yellow", title="tqdm")
# print(p)
# if j >= 100 and index >= 100:
# break
| 403 | 0 | 69 |
9a1a255f27c75706814744325288ae364429e8ba | 782 | py | Python | attributes/unit_test/discoverer/python.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | 106 | 2015-07-21T16:18:26.000Z | 2022-03-31T06:45:34.000Z | attributes/unit_test/discoverer/python.py | Kowndinya2000/enhanced_repo_reaper | 744f794ba53bde5667b3b0f99b07273d0e32a495 | [
"Apache-2.0"
] | 21 | 2015-07-11T03:48:28.000Z | 2022-01-18T12:57:30.000Z | attributes/unit_test/discoverer/python.py | Kowndinya2000/enhanced_repo_reaper | 744f794ba53bde5667b3b0f99b07273d0e32a495 | [
"Apache-2.0"
] | 26 | 2015-07-22T22:38:21.000Z | 2022-03-14T10:11:56.000Z | from attributes.unit_test.discoverer import TestDiscoverer
| 30.076923 | 58 | 0.604859 | from attributes.unit_test.discoverer import TestDiscoverer
class PythonTestDiscoverer(TestDiscoverer):
def __init__(self):
self.language = 'Python'
self.languages = ['Python']
self.extensions = ['*.py']
self.frameworks = [
self.__django_test__,
self.__nose__,
self.__unittest__,
]
def __django_test__(self, path, sloc):
pattern = '((from|import)(\s)(django\.test))'
return self.measure(path, sloc, pattern)
def __nose__(self, path, sloc):
pattern = '((from|import)(\s)(nose))'
return self.measure(path, sloc, pattern)
def __unittest__(self, path, sloc):
pattern = '((from|import)(\s)(unittest))'
return self.measure(path, sloc, pattern)
| 570 | 22 | 130 |
093cc965f02f52c91f22af862a09e4f2efc576ab | 3,743 | py | Python | GarbleExecutable.py | greshje-nachc/data-owner-tools | 771a6fbcd24af8b9d3861e337f4dbd213ead9dc8 | [
"Apache-2.0"
] | null | null | null | GarbleExecutable.py | greshje-nachc/data-owner-tools | 771a6fbcd24af8b9d3861e337f4dbd213ead9dc8 | [
"Apache-2.0"
] | null | null | null | GarbleExecutable.py | greshje-nachc/data-owner-tools | 771a6fbcd24af8b9d3861e337f4dbd213ead9dc8 | [
"Apache-2.0"
] | 1 | 2021-09-28T16:58:36.000Z | 2021-09-28T16:58:36.000Z | import garble
import wx
from multiprocessing import freeze_support
import sys
import os
from pathlib import Path
# pyinstaller GarbleExecutable.py --onefile -w --add-data ./venv/Lib/site-packages/clkhash/data;clkhash/data --add-data ./venv/Lib/site-packages/clkhash/schemas;clkhash/schemas --add-data ./example-schema;example-schema
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
os.chdir(sys._MEIPASS)
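    # PyInstaller one-file bundles unpack their resources into a temporary
    # directory exposed as sys._MEIPASS; switching the working directory there
    # lets the relative clkhash/example-schema paths added via --add-data resolve.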
if __name__ == '__main__':
freeze_support()
main()
| 33.419643 | 220 | 0.635052 | import garble
import wx
from multiprocessing import freeze_support
import sys
import os
from pathlib import Path
# pyinstaller GarbleExecutable.py --onefile -w --add-data ./venv/Lib/site-packages/clkhash/data;clkhash/data --add-data ./venv/Lib/site-packages/clkhash/schemas;clkhash/schemas --add-data ./example-schema;example-schema
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
os.chdir(sys._MEIPASS)
class GarbleWindow(wx.Frame):
def __init__(self, *args, **kwargs):
super(GarbleWindow, self).__init__(*args, **kwargs)
self.pii_path = ""
self.schema_dir = "example-schema"
self.salt_path = ""
self.pii_path_text = None
self.pii_path_btn = None
self.salt_path_text = None
self.salt_path_btn = None
self.garble_text = None
self.garble_btn = None
self.InitUI()
def InitUI(self):
panel = wx.Panel(self)
hbox = wx.BoxSizer()
sizer = wx.FlexGridSizer(5, 2, 2, 250)
self.pii_path_text = wx.StaticText(panel, label="Select PII CSV file:")
self.pii_path_btn = wx.Button(panel, label='Open CSV File')
self.pii_path_btn.Bind(wx.EVT_BUTTON, self.on_open_pii)
self.salt_path_text = wx.StaticText(panel, label="Select Secret File:")
self.salt_path_btn = wx.Button(panel, label='Open Secret File')
self.salt_path_btn.Bind(wx.EVT_BUTTON, self.on_open_salt)
self.garble_text = wx.StaticText(panel, label="")
self.garble_btn = wx.Button(panel, label='Garble')
self.garble_btn.Bind(wx.EVT_BUTTON, self.on_garble)
sizer.AddMany(
[self.pii_path_text, self.pii_path_btn, self.salt_path_text, self.salt_path_btn, self.garble_text, self.garble_btn])
hbox.Add(sizer, 0, wx.ALL, 15)
panel.SetSizer(hbox)
self.SetSize((500, 150))
self.SetTitle('Garble Tool')
self.Centre()
def on_open_pii(self, event):
with wx.FileDialog(self, "Open csv file", wildcard="CSV files (*.csv)|*.csv",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# Proceed loading the file chosen by the user
self.pii_path = fileDialog.GetPath()
self.pii_path_text.SetLabel(self.pii_path)
def on_open_salt(self, event):
with wx.FileDialog(self, "Open Secret file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# Proceed loading the file chosen by the user
self.salt_path = fileDialog.GetPath()
self.salt_path_text.SetLabel(self.salt_path)
def on_garble(self, event):
with wx.FileDialog(self, "Save Zip file", wildcard="Zip files (*.zip)|*.zip",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# save the current contents in the file
pathname = fileDialog.GetPath()
self.garble_text.SetLabel("Processing PII Data...")
self.Update()
output_dir, file_name = os.path.split(pathname)
self.garble_text.SetLabel(
garble.garble_data(Path(self.pii_path), Path(self.schema_dir), Path(self.salt_path), Path(output_dir), file_name, rm_json=True))
def main():
app = wx.App()
ex = GarbleWindow(None)
ex.Show()
app.MainLoop()
if __name__ == '__main__':
freeze_support()
main()
| 3,065 | 8 | 180 |
d5aa27298b18cea6f7b3bd1d6baded26a49d8045 | 3,807 | py | Python | main.py | ashtedroid/Test-Cases-Prioritization-and-Analysis | 70a6997f3764ea39c51fdc8cd6806e430088f8a7 | [
"MIT"
] | 1 | 2020-04-24T08:08:49.000Z | 2020-04-24T08:08:49.000Z | main.py | ashtedroid/Test-Cases-Prioritization-and-Analysis | 70a6997f3764ea39c51fdc8cd6806e430088f8a7 | [
"MIT"
] | null | null | null | main.py | ashtedroid/Test-Cases-Prioritization-and-Analysis | 70a6997f3764ea39c51fdc8cd6806e430088f8a7 | [
"MIT"
] | null | null | null | '''
=================================================================
@version 2.0
@author Ashwin Ramadevanahalli
@title Testing.
Main module.
=================================================================
'''
import os
import sys
import subprocess
import testset_parse
import gcov_parse
import rand_pri
import tot_pri
import add_pri
import pickle
'''
Initializations
'''
pname=str(str(subprocess.check_output("pwd",shell=True)).split('/')[-1].strip())
location=""
maxlimit={'tcas':96.67,'totinfo':97.04,'printtokens':95.34,'printtokens2':99.50,'replace':95.02,'schedule':98.67,'schedule2':99.23}
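# maxlimit maps each Siemens/SIR benchmark program to its maximum attainable
# coverage (%); it is passed to every prioritization module below as the
# "max coverage" cut-off described in their docstrings.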
#location="/Users/Ashwin/Downloads/benchmarks/"+pname+"/"
'''
Cleaning
'''
print "################################\nEntered CLeanig\n################################\n"
subprocess.call("rm -r outputs",shell=True)
subprocess.call("mkdir outputs",shell=True)
'''
Testset parse module
returns: A dictionary with Key in range '1 to No_of_tests' and value as the testcases and total number of statements in program.
input: program name, location of program.
'''
print "################################\nEntered Testset parse module\n################################\n"
testset,tot_statements,No_of_tests=testset_parse.parse(pname,location)
print testset
print tot_statements
'''
Gcov parse module
returns: state_testset=list of <No of statements it covers,testcase> and Brances_testset=list of <No of brances it covers,testcase> and both.
input: testset and total number of statements
'''
print "################################\nEntered Gcov parse module\n################################\n"
state_testset,branch_testset,sb_testset=gcov_parse.parse(testset,tot_statements)
print state_testset
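# Illustrative shape: each entry pairs a coverage count with a testcase,
# e.g. state_testset -> [<52, testcase 7>, <49, testcase 3>, ...]; the
# same layout is assumed for branch_testset and the combined sb_testset.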
'''
Random prioritization
returns: randomly prioritized test sets for statement, branch and combined coverage.
input: testset, program name and location of program, max coverage
'''
print "################################\nEntered Random prioritization\n################################\n"
Ran_S,Ran_B,Ran_SB=rand_pri.pri(testset.values(),pname,location,maxlimit)
'''
Total coverage prioritization
returns: total-coverage prioritized test sets for statement, branch and combined coverage.
input: testsets with coverage information, program name and location of program, max coverage
'''
print "################################\nEntered Total coverage prioritization\n################################\n"
Tot_S,Tot_B,Tot_SB=tot_pri.pri(state_testset,branch_testset,sb_testset,pname,location,maxlimit)
'''
Additional coverage prioritization
returns: additional-coverage prioritized test sets for statement, branch and combined coverage.
input: testsets with coverage information, program name and location of program, max coverage
'''
print "################################\nEntered Additional coverage prioritization\n################################\n"
Add_S,Add_B,Add_SB=add_pri.pri(state_testset,branch_testset,sb_testset,pname,location,maxlimit)
print "################################\nResult Section\n################################\n"
print "Random     (statement):",len(Ran_S)
print "Random     (branch)   :",len(Ran_B)
print "Random     (both)     :",len(Ran_SB)
print "Total      (statement):",len(Tot_S)
print "Total      (branch)   :",len(Tot_B)
print "Total      (both)     :",len(Tot_SB)
print "Additional (statement):",len(Add_S)
print "Additional (branch)   :",len(Add_B)
print "Additional (both)     :",len(Add_SB)
print "Total number of test cases=",No_of_tests
'''Storing Results'''
subprocess.call("rm -r results",shell=True)
subprocess.call("mkdir results",shell=True)
# Map each output file name to the ordering it stores, then dump them all.
results={
    "sran": Ran_S, "bran": Ran_B, "sbran": Ran_SB,
    "stot": Tot_S, "btot": Tot_B, "sbtot": Tot_SB,
    "sadd": Add_S, "badd": Add_B, "sbadd": Add_SB,
}
for name in sorted(results):
    test=open("results/"+name,"w")
    pickle.dump(results[name], test)
    test.close()
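# To reload a stored ordering later (sketch; file names as written above):
#   f = open("results/sran")
#   order = pickle.load(f)
#   f.close()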
print "Task complete. Thank you."
| 25.897959 | 141 | 0.67192 | '''
=================================================================
@version 2.0
@author Ashwin Ramadevanahalli
@title Testing.
Main module.
=================================================================
'''
import os
import sys
import subprocess
import testset_parse
import gcov_parse
import rand_pri
import tot_pri
import add_pri
import pickle
'''
Initializations
'''
pname=str(str(subprocess.check_output("pwd",shell=True)).split('/')[-1].strip())
location=""
maxlimit={'tcas':96.67,'totinfo':97.04,'printtokens':95.34,'printtokens2':99.50,'replace':95.02,'schedule':98.67,'schedule2':99.23}
#location="/Users/Ashwin/Downloads/benchmarks/"+pname+"/"
'''
Cleaning
'''
print "################################\nEntered CLeanig\n################################\n"
subprocess.call("rm -r outputs",shell=True)
subprocess.call("mkdir outputs",shell=True)
'''
Testset parse module
returns: A dictionary with Key in range '1 to No_of_tests' and value as the testcases and total number of statements in program.
input: program name, location of program.
'''
print "################################\nEntered Testset parse module\n################################\n"
testset,tot_statements,No_of_tests=testset_parse.parse(pname,location)
print testset
print tot_statements
'''
Gcov parse module
returns: state_testset=list of <No of statements it covers,testcase> and Brances_testset=list of <No of brances it covers,testcase> and both.
input: testset and total number of statements
'''
print "################################\nEntered Gcov parse module\n################################\n"
state_testset,branch_testset,sb_testset=gcov_parse.parse(testset,tot_statements)
print state_testset
'''
Random prioritization
returns: Random prioritizated testsets for statement, branch and both coverage.
input: testset, program name and location of program, max coverage
'''
print "################################\nEntered Random prioritization\n################################\n"
Ran_S,Ran_B,Ran_SB=rand_pri.pri(testset.values(),pname,location,maxlimit)
'''
Total coverage prioritization
returns: Total coverage prioritizated testsets for statement, branch and both coverage.
input: testsets with coverage information, program name and location of program, max coverage
'''
Tot_S,Tot_B,Tot_SB=tot_pri.pri(state_testset,branch_testset,sb_testset,pname,location,maxlimit)
'''
Additional coverage prioritization
returns: Additional coverage prioritizated testsets for statement, branch and both coverage.
input: testsets with coverage information, program name and location of program, max coverage
'''
Add_S,Add_B,Add_SB=add_pri.pri(state_testset,branch_testset,sb_testset,pname,location,maxlimit)
print "################################\nResult Section\n################################\n"
print len(Ran_S)
print len(Ran_B)
print len(Ran_SB)
print len(Tot_S)
print len(Tot_B)
print len(Tot_SB)
print len(Add_S)
print len(Add_B)
print len(Add_SB)
print "Total number of test cases=",No_of_tests
'''Storing Results'''
subprocess.call("rm -r results",shell=True)
subprocess.call("mkdir results",shell=True)
test=open("results/sran","w")
pickle.dump(Ran_S, test)
test.close()
test=open("results/bran","w")
pickle.dump(Ran_B, test)
test.close()
test=open("results/sbran","w")
pickle.dump(Ran_SB, test)
test.close()
test=open("results/stot","w")
pickle.dump(Tot_S, test)
test.close()
test=open("results/btot","w")
pickle.dump(Tot_B, test)
test.close()
test=open("results/sbtot","w")
pickle.dump(Tot_SB, test)
test.close()
test=open("results/sadd","w")
pickle.dump(Add_S, test)
test.close()
test=open("results/badd","w")
pickle.dump(Add_B, test)
test.close()
test=open("results/sbadd","w")
pickle.dump(Add_SB, test)
test.close()
print "Task Complete.Thank you."
| 0 | 0 | 0 |