| max_stars_repo_path (string, length 3 to 269) | max_stars_repo_name (string, length 4 to 119) | max_stars_count (int64, 0 to 191k) | id (string, length 1 to 7) | content (string, length 6 to 1.05M) | score (float64, 0.23 to 5.13) | int_score (int64, 0 to 5) |
|---|---|---|---|---|---|---|
peek_storage/_private/service/sw_install/PeekSwInstallManager.py
|
Synerty/peek-storage
| 0
|
12779551
|
from peek_platform import PeekPlatformConfig
from peek_platform.sw_install.PeekSwInstallManagerABC import PeekSwInstallManagerABC

__author__ = 'synerty'


class PeekSwInstallManager(PeekSwInstallManagerABC):

    def _stopCode(self):
        PeekPlatformConfig.pluginLoader.stopOptionalPlugins()
        PeekPlatformConfig.pluginLoader.stopCorePlugins()
        PeekPlatformConfig.pluginLoader.unloadOptionalPlugins()
        PeekPlatformConfig.pluginLoader.unloadCorePlugins()

    def _upgradeCode(self):
        pass

    def _startCode(self):
        PeekPlatformConfig.pluginLoader.loadCorePlugins()
        PeekPlatformConfig.pluginLoader.loadOptionalPlugins()
        PeekPlatformConfig.pluginLoader.startCorePlugins()
        PeekPlatformConfig.pluginLoader.startOptionalPlugins()
| 1.726563
| 2
|
tests/test_team.py
|
aspic2/NCAABB
| 1
|
12779552
|
import unittest
from ncaabb import team


class TestTeam(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.team_one = team.Team(["Team One", "Region", 1, True, 30, 30])

    def test_calculate_rating(self):
        self.assertNotEqual(self.team_one, self.team_one.calculate_rating())

    def test_get_scores(self):
        ucla = team.Team(["UCLA", "Region", 1, True, 30, 30])
        # TODO: .get_scores() looks in wrong directory for the database
        ucla.get_scores()
        self.assertTrue(ucla.points_scored, "Error getting points scored")
        self.assertTrue(ucla.points_allowed, "Error getting points allowed")


if __name__ == '__main__':
    unittest.main()
| 3.21875
| 3
|
test_hash_to_curve.py
|
ANSIIRU/BLS12_381-small_memory_c-
| 0
|
12779553
|
<gh_stars>0
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: H2C_BLS381G1.py
# Purpose:
#
# Author: <NAME>
#
# Created: 2/3/2022
# Copyright: (c) sakamoto 2022
# Licence: <your licence>
#-------------------------------------------------------------------------------
import math
import hashlib
###############################################################
## Test Vectors
###############################################################
SUITE = "BLS12381G1_XMD:SHA-256_SSWU_RO_"
DST = "QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_RO_"
DST_LEN = len(DST)
p = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab
MSG = "q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"
Px = 0x15f68eaa693b95ccb85215dc65fa81038d69629f70aeee0d0f677cf22285e7bf58d7cb86eefe8f2e9bc3f8cb84fac488
Py = 0x1807a1d50c29f430b8cafc4f8638dfeeadf51211e1602a5f184443076715f91bb90a48ba1e370edce6ae1062f5e6dd38
u0 = 0x010476f6a060453c0b1ad0b628f3e57c23039ee16eea5e71bb87c3b5419b1255dc0e5883322e563b84a29543823c0e86
u1 = 0x0b1a912064fb0554b180e07af7e787f1f883a0470759c03c1b6509eb8ce980d1670305ae7b928226bb58fdc0a419f46e
Q0x = 0x0cbd7f84ad2c99643fea7a7ac8f52d63d66cefa06d9a56148e58b984b3dd25e1f41ff47154543343949c64f88d48a710
Q0y = 0x052c00e4ed52d000d94881a5638ae9274d3efc8bc77bc0e5c650de04a000b2c334a9e80b85282a00f3148dfdface0865
def H2C(msg):
    """
    hash_to_curve(msg)
    Input: msg, an arbitrary-length byte string.
    Output: P, a point in G.
    Steps:
    1. u = hash_to_field(msg, 2)
    2. Q0 = map_to_curve(u[0])
    3. Q1 = map_to_curve(u[1])
    4. R = Q0 + Q1              # Point addition
    5. P = clear_cofactor(R)
    6. return P
    """
    pass
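# A minimal sketch of how the steps above could be wired together, assuming
# hypothetical helpers map_to_curve_sswu(), point_add() and clear_cofactor()
# for the curve arithmetic (none of them are defined in this file):
#
#     def H2C(msg):
#         u_0, u_1 = H2F(msg, 2)
#         Q0 = map_to_curve_sswu(u_0)
#         Q1 = map_to_curve_sswu(u_1)
#         R = point_add(Q0, Q1)
#         return clear_cofactor(R)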
def strxor(str1, str2):
    # XOR two equal-length byte strings byte by byte; a hex round trip drops
    # leading zero bytes and can produce an odd-length hex string.
    return bytes(a ^ b for a, b in zip(str1, str2))
def expand_message_xmd(msg, dst, len_in_bytes):
    """
    Parameters:
    - H, a hash function (see requirements above).
    - b_in_bytes, b / 8 for b the output size of H in bits.
      For example, for b = 256, b_in_bytes = 32.
    - s_in_bytes, the input block size of H, measured in bytes (see
      discussion above). For example, for SHA-256, s_in_bytes = 64.
    Input:
    - msg, a byte string.
    - DST, a byte string of at most 255 bytes.
      See below for information on using longer DSTs.
    - len_in_bytes, the length of the requested output in bytes,
      not greater than the lesser of (255 * b_in_bytes) or 2^16-1.
    Output:
    - uniform_bytes, a byte string.
    Steps:
    1.  ell = ceil(len_in_bytes / b_in_bytes)
    2.  ABORT if ell > 255 or len_in_bytes > 65535 or len(DST) > 255
    3.  DST_prime = DST || I2OSP(len(DST), 1)
    4.  Z_pad = I2OSP(0, s_in_bytes)
    5.  l_i_b_str = I2OSP(len_in_bytes, 2)
    6.  msg_prime = Z_pad || msg || l_i_b_str || I2OSP(0, 1) || DST_prime
    7.  b_0 = H(msg_prime)
    8.  b_1 = H(b_0 || I2OSP(1, 1) || DST_prime)
    9.  for i in (2, ..., ell):
    10.   b_i = H(strxor(b_0, b_(i - 1)) || I2OSP(i, 1) || DST_prime)
    11. uniform_bytes = b_1 || ... || b_ell
    12. return substr(uniform_bytes, 0, len_in_bytes)
    """
    ell = math.ceil(len_in_bytes / 32)  # = 4
    assert ell < 256 and len_in_bytes < 65536, \
        "Raise assertion: ell = {}, len_in_bytes = {}".format(ell, len_in_bytes)
    DST_prime = dst + chr(len(dst))
    print(DST_prime.encode())
    s_in_bytes = 64
    Z_pad = (0).to_bytes(s_in_bytes, 'big')
    l_i_b_str = len_in_bytes.to_bytes(2, 'big')
    print(l_i_b_str)
    msg_prime = Z_pad + msg.encode() + l_i_b_str + (0).to_bytes(1, 'big') + DST_prime.encode()
    H = hashlib.sha256()
    H.update(msg_prime)
    b_0 = H.digest()
    H = hashlib.sha256()
    H.update(b_0 + (chr(1) + DST_prime).encode())
    b_1 = H.digest()
    uniform_bytes = [b_1]
    for i in range(2, ell + 1):
        mdXor = strxor(b_0, uniform_bytes[-1])
        H = hashlib.sha256()
        H.update(mdXor + (chr(i) + DST_prime).encode())
        b_i = H.digest()
        uniform_bytes.append(b_i)
    print("b_0: " + b_0.hex())
    print("b_1: " + uniform_bytes[0].hex())
    print("b_2: " + uniform_bytes[1].hex())
    print("b_3: " + uniform_bytes[2].hex())
    print("b_4: " + uniform_bytes[3].hex())
    return uniform_bytes
def H2F(msg, cnt):
    """
    Parameters:
    - DST, a domain separation tag (see Section 3.1).
    - F, a finite field of characteristic p and order q = p^m.
    - p, the characteristic of F (see immediately above).
    - m, the extension degree of F, m >= 1 (see immediately above).
    - L = ceil((ceil(log2(p)) + k) / 8), where k is the security
      parameter of the suite (e.g., k = 128).
    - expand_message, a function that expands a byte string and
      domain separation tag into a uniformly random byte string
      (see Section 5.4).
    Inputs:
    - msg, a byte string containing the message to hash.
    - count, the number of elements of F to output.
    Outputs:
    - (u_0, ..., u_(count - 1)), a list of field elements.
    Steps:
    1. len_in_bytes = count * m * L
    2. uniform_bytes = expand_message(msg, DST, len_in_bytes)
    3. for i in (0, ..., count - 1):
    4.   for j in (0, ..., m - 1):
    5.     elm_offset = L * (j + i * m)
    6.     tv = substr(uniform_bytes, elm_offset, L)
    7.     e_j = OS2IP(tv) mod p
    8.   u_i = (e_0, ..., e_(m - 1))
    9. return (u_0, ..., u_(count - 1))
    """
    L = 64
    m = 1
    len_in_bytes = cnt * m * L
    uniform_bytes = expand_message_xmd(msg, DST, len_in_bytes)
    tv = uniform_bytes[0] + uniform_bytes[1]
    u_0 = int(tv.hex(), 16) % p
    tv = uniform_bytes[2] + uniform_bytes[3]
    u_1 = int(tv.hex(), 16) % p
    print("u_0: " + hex(u_0))
    print("u_1: " + hex(u_1))
    return u_0, u_1


MSG = ""
val = H2F(MSG, 2)
# suite = BLS12381G1_XMD: SHA-256_SSWU_RO_
# dst =
# msg =
# P.x = 052926add2207b76ca4fa57a8734416c8dc95e24501772c8142787
# 00eed6d1e4e8cf62d9c09db0fac349612b759e79a1
# P.y = 08ba738453bfed09cb546dbb0783dbb3a5f1f566ed67bb6be0e8c6
# 7e2e81a4cc68ee29813bb7994998f3eae0c9c6a265
# u[0] = 0ba14bd907ad64a016293ee7c2d276b8eae71f25a4b941eece7b0d
# 89f17f75cb3ae5438a614fb61d6835ad59f29c564f
# u[1] = 019b9bd7979f12657976de2884c7cce192b82c177c80e0ec604436
# a7f538d231552f0d96d9f7babe5fa3b19b3ff25ac9
| 1.945313
| 2
|
assn5/palindrome.py
|
vardhan2000/1st-sem-python-assignments
| 0
|
12779554
|
def recurse(n, i):
    ## Your code - begin
    n1 = [j.lower() for j in n]  # convert each character to lowercase
    if i == int((len(n1) - 1) / 2) and (n1[i] == n1[len(n1) - i - 1]):
        return True
    elif i <= int((len(n1) - 1) / 2) and (n1[i] != n1[len(n1) - i - 1]):
        return False
    else:
        return recurse(n, i + 1)
    ## Your code - end


def isPalindrome(n):
    return recurse(n, 0)


if __name__ == '__main__':
    n = input("Enter string: ")
    output = isPalindrome(n)
    print(output)
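# Example: isPalindrome("Racecar") returns True; isPalindrome("Python") returns False.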
| 3.875
| 4
|
bot.py
|
newkozlukov/cian_bot
| 0
|
12779555
|
<reponame>newkozlukov/cian_bot<filename>bot.py
import argparse
import collections
import copy
import datetime
import hashlib
import io
import itertools
import json
import logging
import os
import os.path as osp
from contextlib import ExitStack
import attr
import requests
import cian_parser
from cian_parser import get_flats
from telegram import InputFile, InputMediaPhoto
from telegram.ext import CommandHandler, Updater
logger = logging.getLogger('cian_bot')
logger.setLevel(logging.DEBUG)
log_to_file = logging.FileHandler('cian_bot.log')
log_to_file.setLevel(logging.DEBUG)
logger.addHandler(log_to_file)
log_to_stdout = logging.StreamHandler()
log_to_stdout.setLevel(logging.INFO)
logger.addHandler(log_to_stdout)
SAVE_FILE = 'save.json'
N_PHOTOS_MAX = 4
METRO = [
'Достоевская',
'<NAME>',
'Сухаревская',
'Цветной бульвар',
'Трубная',
'Чеховская',
'Пушкинская',
'Кузнецкий мост',
'Лубянка',
'Чистые пруды',
'Красные Ворота',
'Тургеневская',
'Сретенский бульвар',
'Китай-город',
'Китай Город',
'Чкаловская',
'Маяковская',
'Белорусская',
'Менделеевская',
'Новослободская',
]
METRO = [m.lower() for m in METRO]
METRO_BLACKLIST = [
'Электрозаводская', 'Солнцево', 'Косино', 'Новогиреево', 'Выхино'
]
METRO_BLACKLIST = [m.lower() for m in METRO_BLACKLIST]
def filter_price_per_person(flat):
ppp = flat.price / flat.rooms
if ppp > 35000:
logger.debug(
f'Flat {flat.id} failed price test: price={flat.price}, rooms={flat.rooms}'
)
return ppp <= 35000
def filter_metro(flat):
ok = any(m.lower() in METRO
for m in flat.metros) and not any(m.lower() in METRO_BLACKLIST
for m in flat.metros)
if not ok:
logger.debug(
f'Flat {flat.id} failed metro test. Metros: {flat.metros}')
return ok
def filter_monthly(flat):
try:
return flat.json['bargainTerms']['paymentPeriod'] == 'monthly'
except Exception:
return True
def fetch_file(url):
# aye, it doesnt depend on basedir, i know
for i in range(5):
try:
if not osp.exists('photos'):
os.mkdir('photos')
filename = hashlib.sha256(url.encode('utf8')).hexdigest()
filename = osp.join('photos', filename)
if not osp.exists(filename):
res = requests.get(url)
with open(filename, 'wb') as out:
for chunk in res.iter_content(chunk_size=1024):
out.write(chunk)
return filename
except Exception as e:
logger.error(f'fetch_file: url={url} e={e}')
@attr.s
class CianStateSerializable:
"""CianStateSerializable:
TODO: just add in fucking mongo you lazy hog, it'll be more usable and reusable"""
flatlist = attr.ib(type=dict)
flat_details = attr.ib(type=dict)
viewed = attr.ib(type=dict)
observed_urls = attr.ib(type=list)
scheduled_messages = attr.ib(type=list)
class CianBot:
def __init__(self):
self.flatlist = dict()
self.flat_details = dict()
self.viewed = collections.defaultdict(set) # chat_id -> set[int]
self.scheduled_messages = collections.deque()
self.observed_urls = list()
@property
def filters(self):
return [filter_price_per_person, filter_metro, filter_monthly]
def save(self, basepath):
if not osp.exists(basepath):
os.makedirs(basepath)
with open(osp.join(basepath, 'state.json'), 'w') as f:
json.dump(
attr.asdict(
CianStateSerializable(
flatlist=self.flatlist,
flat_details=self.flat_details,
viewed=self.viewed,
scheduled_messages=list(self.scheduled_messages),
observed_urls=self.observed_urls)), f)
@staticmethod
def from_directory(basepath):
self = CianBot()
with open(osp.join(basepath, 'state.json'), 'r') as f:
state = json.load(f)
self.flatlist.update(state['flatlist'])
self.flat_details.update(state['flat_details'])
self.viewed.update({a: set(b) for a, b in state['viewed'].items()})
self.scheduled_messages.extend(state['scheduled_messages'])
self.observed_urls.extend(state['observed_urls'])
logger.info(f'from_directory: loaded {len(state["flatlist"])} flatlistitems, {len(state["scheduled_messages"])} scheduled messages, {len(state["observed_urls"])} observed urls')
return self
def start(self, update, context):
self.viewed[update.message.chat_id] = set()
logger.info(f'{update.message.chat_id} connected')
def flat_to_message(self, flat):
try:
text = '.\n'.join([
f'{flat.href}',
', '.join([
f'{k} {getattr(flat, k.lower())}'
for k in ['Price', 'deposit', 'fee', 'bonus']
if getattr(flat, k.lower())
]),
f'{flat.bedrooms} rooms',
f'{flat.metros}',
f'{flat.address}',
' '.join(cian_parser.js_offer_to_phones(flat.json)),
])
msg = dict(text=text)
if len(flat.photos) > 0:
msg['photos'] = flat.photos
msg['photo'] = flat.photos[0]
msg['document'] = flat.pdf_link
return msg
except Exception as e:
logger.error(f'flat_to_msg: {e}')
raise e
def flat_ok(self, flat):
for f in self.filters:
if not f(flat):
logger.debug(f'Flat {flat.id} couldn\'t pass {f.__name__}.')
return False
return True
def handle_new_flat(self, flat: cian_parser.FlatListItem):
if flat.id in self.flatlist:
return
self.flatlist[flat.id] = flat
if not self.flat_ok(flat):
return
msg = self.flat_to_message(flat)
for u in self.viewed:
if flat.id in self.viewed[u]:
continue
msg = copy.deepcopy(msg)
msg['chat_id'] = u
self.scheduled_messages.append(msg)
self.viewed[u].add(flat.id)
def send_messages(self, context):
logger.info(
f'send_messages: about to send {len(self.scheduled_messages)} messages'
)
if len(self.scheduled_messages) == 0:
logger.info('send_messages: no messages scheduled')
return
try:
msg = self.scheduled_messages.popleft()
logger.debug(
f'Notifying {msg["chat_id"]} about: {msg["text"]}')
sent_msg = None
# Aye, that's a ton of shitcode
if 'photo' in msg:
sent_msg = context.bot.send_photo(msg['chat_id'],
msg['photo'],
caption=msg['text'])
else:
sent_msg = context.bot.send_message(
msg['chat_id'], msg['text'])
except KeyboardInterrupt:
logger.error(f'send_messages: keyboard interrupt, putting message back to queue and pushing Exception forward')
self.scheduled_messages.append(msg)
raise
except Exception as e:
logger.error(f'send_messages: {e}')
self.scheduled_messages.append(msg)
else:
if 'document' in msg and sent_msg is not None:
sent_msg.reply_text(msg['document'])
if 'photos' in msg and len(
msg['photos']) >= 2 and sent_msg is not None:
with ExitStack() as stack:
photos = msg['photos'][:N_PHOTOS_MAX]
photos = [fetch_file(p) for p in photos]
photos = [
stack.enter_context(open(p, 'rb')) for p in photos
]
context.bot.send_media_group(
msg['chat_id'],
[InputMediaPhoto(p) for p in photos],
timeout=120 * len(photos),
reply_to_message_id=sent_msg.message_id)
if sent_msg is None:
logger.error(
f'Failed to send message to {msg["chat_id"]} with content {msg["text"]}'
)
if len(self.scheduled_messages) > 0:
context.job_queue.run_once(self.send_messages, 0.0, context)
def get_json(self, update, context):
logger.info(f'get_json {context.args}')
flatid = context.args[0]
flatid = int(flatid)
if not flatid in self.flatlist:
logger.error(f'{flatid} not in flatlist')
pass # TODO: schedule querying the flat page
flat = self.flatlist[flatid]
logger.debug(f'get_json {context.args}: flat found')
js = flat.json
logger.debug(f'get_json {context.args}: extracted json')
js = json.dumps(js, ensure_ascii=False, sort_keys=True, indent=4).encode('utf8')
logger.debug(f'get_json {context.args}: encoded into bytes')
doc = io.BytesIO(js)
logger.debug(f'get_json {context.args}: created InputFile')
update.message.reply_document(document=doc, filename=f'{flatid}.json')
logger.debug(f'get_json {context.args}: send a reply')
def fetch_messages(self, update, context):
logger.info(f'{update.message.chat_id} asks for messages')
for f in self.flatlist.values():
if f.id in self.viewed[update.message.chat_id]:
continue
if not self.flat_ok(f):
continue
msg = self.flat_to_message(f)
msg['chat_id'] = update.message.chat_id
self.scheduled_messages.append(msg)
logger.info('Sending messages as requested')
context.job_queue.run_once(self.send_messages, 0.0, context)
logger.info('Messages sent')
def fetch_cian(self, context):
if len(self.observed_urls) == 0:
logger.info('fetch_cian: no URLs to fetch')
return
with requests.Session() as s:
for url in self.observed_urls:
try:
logger.info(f'fetch_cian: fetching {url}')
res = s.get(url)
logger.info(f'fetch_cian: status {res.status_code}')
html = res.text
flats = cian_parser.get_flatlist(html)
logger.info(
f'fetch_cian: fetched {len(flats)} flats from {url}')
for f in flats:
self.handle_new_flat(f)
context.job_queue.run_once(self.send_messages, 0, context)
except Exception as e:
logger.fatal(
f'fetch_cian: failed fetching flats from {url}; error: {e}'
)
logger.info('Saving backup')
self.save('.cian-backup')
logger.info('Saved backup')
def observe_url(self, update, context):
if len(context.args) != 1:
update.message.reply_text('Synopsis: /observe https://cian.ru/...')
logger.error(
f'observe_url: invalid number of arguments; arguments are: {context.args}'
)
return
url = context.args[0]
self.observed_urls = sorted(set(self.observed_urls + [url]))
logger.info('observe_url: scheduled cian_fetch')
due = 5
context.job_queue.run_once(self.fetch_cian,
due,
context=update.message.chat_id)
update.message.reply_text(f'Observing {url}')
logger.info(f'observe_url: Observing {url}')
if __name__ == '__main__':
parser = argparse.ArgumentParser('cian_bot')
parser.add_argument('--token-file', default='.token')
parser.add_argument('--state-dir', default='cian')
args = parser.parse_args()
with open(args.token_file, 'r') as f:
token = f.readline().strip()
updater = Updater(token, use_context=True)
dp = updater.dispatcher
dp.use_context = True
if args.state_dir and osp.exists(args.state_dir):
state = CianBot.from_directory(args.state_dir)
else:
state = CianBot()
try:
job = updater.job_queue
job.run_repeating(state.fetch_cian, datetime.timedelta(minutes=180),
10)
dp.add_handler(CommandHandler('start', state.start))
dp.add_handler(
CommandHandler('observe',
state.observe_url,
pass_args=True,
pass_job_queue=True,
pass_chat_data=True))
dp.add_handler(CommandHandler('fetchMessages', state.fetch_messages))
dp.add_handler(
CommandHandler('json',
state.get_json,
pass_args=True,
pass_job_queue=True,
pass_chat_data=True))
updater.start_polling()
updater.idle()
finally:
if args.state_dir is not None:
if not osp.exists(args.state_dir):
os.makedirs(args.state_dir)
state.save(args.state_dir)
| 2.078125
| 2
|
source/utils/__init__.py
|
LukasErlenbach/active_learning_bnn
| 2
|
12779556
|
<reponame>LukasErlenbach/active_learning_bnn
from .global_variables import set_global_var, get_global_var
from .timing import secs_to_str
from .yaml_load_dump import load_complete_config_yaml, dump_complete_config_yaml
from .default_configs import (
default_al_schedule,
default_train_schedule,
default_net_config,
)
from .plotting import plot_rmse_metrics
| 1.28125
| 1
|
plugins/lookup/netbox.py
|
loganbest/ansible_modules
| 1
|
12779557
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019. <NAME> <<EMAIL>>
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
netbox.py
A lookup function designed to return data from the Netbox application
"""
from __future__ import absolute_import, division, print_function
from pprint import pformat
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.parsing.splitter import parse_kv
from ansible.utils.display import Display
import pynetbox
__metaclass__ = type
DOCUMENTATION = """
lookup: netbox
author: <NAME> (@cpmills1975)
version_added: "2.9"
short_description: Queries and returns elements from Netbox
description:
- Queries Netbox via its API to return virtually any information
capable of being held in Netbox.
- If wanting to obtain the plaintext attribute of a secret, key_file must be provided.
options:
_terms:
description:
- The Netbox object type to query
required: True
api_endpoint:
description:
- The URL to the Netbox instance to query
required: True
api_filter:
description:
- The api_filter to use.
required: False
token:
description:
- The API token created through Netbox
required: True
key_file:
description:
- The location of the private key tied to user account.
required: False
requirements:
- pynetbox
"""
EXAMPLES = """
tasks:
# query a list of devices
- name: Obtain list of devices from Netbox
debug:
msg: >
"Device {{ item.value.display_name }} (ID: {{ item.key }}) was
manufactured by {{ item.value.device_type.manufacturer.name }}"
loop: "{{ query('netbox', 'devices',
api_endpoint='http://localhost/',
token='<redacted>') }}"
This example uses an API Filter
tasks:
# query a list of devices
- name: Obtain list of devices from Netbox
debug:
msg: >
"Device {{ item.value.display_name }} (ID: {{ item.key }}) was
manufactured by {{ item.value.device_type.manufacturer.name }}"
loop: "{{ query('netbox', 'devices',
api_endpoint='http://localhost/',
api_filter='role=management tag=Dell',
token='<<PASSWORD>acted>') }}"
# Obtain a secret for R1-device
tasks:
- name: "Obtain secrets for R1-Device"
debug:
msg: "{{ query('netbox', 'secrets', api_filter='device=R1-Device', api_endpoint='http://localhost/', token='<<PASSWORD>acted>', key_file='~/.ssh/id_rsa') }}"
"""
RETURN = """
_list:
description:
- list of composed dictionaries with key and value
type: list
"""
def get_endpoint(netbox, term):
"""
get_endpoint(netbox, term)
netbox: a predefined pynetbox.api() pointing to a valid instance
of Netbox
term: the term passed to the lookup function upon which the api
call will be identified
"""
netbox_endpoint_map = {
"aggregates": {"endpoint": netbox.ipam.aggregates},
"circuit-terminations": {"endpoint": netbox.circuits.circuit_terminations},
"circuit-types": {"endpoint": netbox.circuits.circuit_types},
"circuits": {"endpoint": netbox.circuits.circuits},
"circuit-providers": {"endpoint": netbox.circuits.providers},
"cables": {"endpoint": netbox.dcim.cables},
"cluster-groups": {"endpoint": netbox.virtualization.cluster_groups},
"cluster-types": {"endpoint": netbox.virtualization.cluster_types},
"clusters": {"endpoint": netbox.virtualization.clusters},
"config-contexts": {"endpoint": netbox.extras.config_contexts},
"console-connections": {"endpoint": netbox.dcim.console_connections},
"console-ports": {"endpoint": netbox.dcim.console_ports},
"console-server-port-templates": {
"endpoint": netbox.dcim.console_server_port_templates
},
"console-server-ports": {"endpoint": netbox.dcim.console_server_ports},
"device-bay-templates": {"endpoint": netbox.dcim.device_bay_templates},
"device-bays": {"endpoint": netbox.dcim.device_bays},
"device-roles": {"endpoint": netbox.dcim.device_roles},
"device-types": {"endpoint": netbox.dcim.device_types},
"devices": {"endpoint": netbox.dcim.devices},
"export-templates": {"endpoint": netbox.dcim.export_templates},
"front-port-templates": {"endpoint": netbox.dcim.front_port_templates},
"front-ports": {"endpoint": netbox.dcim.front_ports},
"graphs": {"endpoint": netbox.extras.graphs},
"image-attachments": {"endpoint": netbox.extras.image_attachments},
"interface-connections": {"endpoint": netbox.dcim.interface_connections},
"interface-templates": {"endpoint": netbox.dcim.interface_templates},
"interfaces": {"endpoint": netbox.dcim.interfaces},
"inventory-items": {"endpoint": netbox.dcim.inventory_items},
"ip-addresses": {"endpoint": netbox.ipam.ip_addresses},
"manufacturers": {"endpoint": netbox.dcim.manufacturers},
"object-changes": {"endpoint": netbox.extras.object_changes},
"platforms": {"endpoint": netbox.dcim.platforms},
"power-connections": {"endpoint": netbox.dcim.power_connections},
"power-outlet-templates": {"endpoint": netbox.dcim.power_outlet_templates},
"power-outlets": {"endpoint": netbox.dcim.power_outlets},
"power-port-templates": {"endpoint": netbox.dcim.power_port_templates},
"power-ports": {"endpoint": netbox.dcim.power_ports},
"prefixes": {"endpoint": netbox.ipam.prefixes},
"rack-groups": {"endpoint": netbox.dcim.rack_groups},
"rack-reservations": {"endpoint": netbox.dcim.rack_reservations},
"rack-roles": {"endpoint": netbox.dcim.rack_roles},
"racks": {"endpoint": netbox.dcim.racks},
"rear-port-templates": {"endpoint": netbox.dcim.rear_port_templates},
"rear-ports": {"endpoint": netbox.dcim.rear_ports},
"regions": {"endpoint": netbox.dcim.regions},
"reports": {"endpoint": netbox.extras.reports},
"rirs": {"endpoint": netbox.ipam.rirs},
"roles": {"endpoint": netbox.ipam.roles},
"secret-roles": {"endpoint": netbox.secrets.secret_roles},
"secrets": {"endpoint": netbox.secrets.secrets},
"services": {"endpoint": netbox.ipam.services},
"sites": {"endpoint": netbox.dcim.sites},
"tags": {"endpoint": netbox.extras.tags},
"tenant-groups": {"endpoint": netbox.tenancy.tenant_groups},
"tenants": {"endpoint": netbox.tenancy.tenants},
"topology-maps": {"endpoint": netbox.extras.topology_maps},
"virtual-chassis": {"endpoint": netbox.dcim.virtual_chassis},
"virtual-machines": {"endpoint": netbox.virtualization.virtual_machines},
"virtualization-interfaces": {"endpoint": netbox.virtualization.interfaces},
"vlan-groups": {"endpoint": netbox.ipam.vlan_groups},
"vlans": {"endpoint": netbox.ipam.vlans},
"vrfs": {"endpoint": netbox.ipam.vrfs},
}
return netbox_endpoint_map[term]["endpoint"]
class LookupModule(LookupBase):
"""
LookupModule(LookupBase) is defined by Ansible
"""
def run(self, terms, variables=None, **kwargs):
netbox_api_token = kwargs.get("token")
netbox_api_endpoint = kwargs.get("api_endpoint")
netbox_private_key_file = kwargs.get("key_file")
netbox_api_filter = kwargs.get("api_filter")
if not isinstance(terms, list):
terms = [terms]
try:
netbox = pynetbox.api(
netbox_api_endpoint,
token=netbox_api_token,
private_key_file=netbox_private_key_file,
)
except FileNotFoundError:
raise AnsibleError(
"%s cannot be found. Please make sure file exists."
% netbox_private_key_file
)
results = []
for term in terms:
try:
endpoint = get_endpoint(netbox, term)
except KeyError:
raise AnsibleError("Unrecognised term %s. Check documentation" % term)
Display().vvvv(
u"Netbox lookup for %s to %s using token %s filter %s"
% (term, netbox_api_endpoint, netbox_api_token, netbox_api_filter)
)
if netbox_api_filter:
filter = parse_kv(netbox_api_filter)
Display().vvvv("filter is %s" % filter)
for res in endpoint.filter(**filter):
Display().vvvvv(pformat(dict(res)))
key = dict(res)["id"]
result = {key: dict(res)}
results.extend(self._flatten_hash_to_list(result))
else:
for res in endpoint.all():
Display().vvvvv(pformat(dict(res)))
key = dict(res)["id"]
result = {key: dict(res)}
results.extend(self._flatten_hash_to_list(result))
return results
| 2.453125
| 2
|
icekit/utils/search/views.py
|
ic-labs/django-icekit
| 52
|
12779558
|
from django.conf import settings
from haystack.backends import SQ
from haystack.generic_views import SearchView
from haystack.inputs import AutoQuery
from haystack.query import SearchQuerySet
# convert the subfacet settings to Facet objects
from facets import Facet
SEARCH_SUBFACETS = getattr(settings, "SEARCH_SUBFACETS", {})
for k, kwargs_list in SEARCH_SUBFACETS.items():
facets = [Facet(**kw) for kw in kwargs_list]
SEARCH_SUBFACETS[k] = facets
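# Illustrative shape of the SEARCH_SUBFACETS setting consumed by the loop
# above: keys are top-level facet values and each list entry is a dict of
# Facet() keyword arguments (the field names here are hypothetical examples):
#
#     SEARCH_SUBFACETS = {
#         'artwork': [
#             {'field_name': 'medium'},
#             {'field_name': 'period', 'select_many': True},
#         ],
#     }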
class ICEkitSearchView(SearchView):
"""
A search view which arranges results according to a top facet ('type'),
then any of several sets of subfacets, depending on which top facet is
selected.
Only zero or one top facet can be active at a time, but many sub-facets
can be active at a time.
Counter to Haystack convention, we're not using search logic in the form
"""
top_facet = Facet(field_name='search_types', is_top_level=True, select_many=False)
fluent_page = None
def get_top_level_facet_value(self):
value = self.request.GET.get(self.top_facet.field_name)
if value:
return value
if self.fluent_page:
return self.fluent_page.default_search_type or None
return None
def pre_facet_sqs(self):
"""
Return the queryset used for generating facets, before any facets
are applied
"""
sqs = SearchQuerySet()
if self.query:
sqs = sqs.filter(
SQ(content=AutoQuery(self.query)) | # Search `text` document
SQ(get_title=AutoQuery(self.query)) | # boosted field
SQ(boosted_search_terms=AutoQuery(self.query)) # boosted field
)
return sqs
def get(self, request, *args, **kwargs):
"""User has conducted a search, or default state"""
form_class = self.get_form_class()
form = self.get_form(form_class)
top_value = self.get_top_level_facet_value()
subfacets = SEARCH_SUBFACETS.get(top_value, [])
self.active_facets = [self.top_facet] + subfacets
if form.is_valid():
self.query = form.cleaned_data.get(self.search_field)
else:
self.query = ""
sqs = self.pre_facet_sqs()
for facet in self.active_facets:
sqs = facet.set_on_sqs(sqs)
facet_counts = sqs.facet_counts()
for facet in self.active_facets:
facet.set_values_from_sqs_facet_counts(facet_counts)
facet.apply_request_and_page_to_values(self.request, self.fluent_page)
for facet in self.active_facets:
sqs = facet.narrow_sqs(sqs)
context = self.get_context_data(**{
self.form_name: form,
'facets': self.active_facets,
'top_facet': self.top_facet,
'query': self.query,
'object_list': sqs,
'page': self.fluent_page,
'show_placeholders': self.show_placeholders()
})
return self.render_to_response(context)
def show_placeholders(self):
return not self.query and all([f.is_default() for f in self.active_facets])
| 2.359375
| 2
|
pre_proc/tests/test_file_fix/test_copy_attribute.py
|
PRIMAVERA-H2020/pre-proc
| 0
|
12779559
|
<reponame>PRIMAVERA-H2020/pre-proc<gh_stars>0
"""
test_attribute_add.py
Unit tests for all FileFix concrete classes from attribute_add.py
"""
import subprocess
import unittest
import mock
from pre_proc.exceptions import AttributeNotFoundError
from pre_proc.file_fix import (ParentSourceIdFromSourceId,
FillValueFromMissingValue)
class BaseTest(unittest.TestCase):
""" Base class to setup a typical environment used by other tests """
def setUp(self):
""" Set up code run before every test """
# mock any external calls
patch = mock.patch('pre_proc.common.subprocess.check_output')
self.mock_subprocess = patch.start()
self.addCleanup(patch.stop)
class MockedNamespace(object):
def __exit__(self, *args):
pass
def __enter__(self):
return self
patch = mock.patch('pre_proc.file_fix.abstract.Dataset')
self.mock_dataset = patch.start()
self.mock_dataset.return_value = MockedNamespace()
self.addCleanup(patch.stop)
class TestParentSourceIdFromSourceId(BaseTest):
""" Test ParentSourceIdFromSourceId """
def test_no_attribute_raises(self):
""" Test if the required attribute isn't found in the netCDF """
fix = ParentSourceIdFromSourceId('1.nc', '/a')
exception_text = ('Cannot find attribute source_id in '
'file 1.nc')
self.assertRaisesRegex(AttributeNotFoundError, exception_text,
fix.apply_fix)
def test_subprocess_called_correctly(self):
"""
Test that an external call's been made correctly for
ParentSourceIdFromSourceId
"""
self.mock_dataset.return_value.source_id = 'some-model'
self.mock_dataset.return_value.parent_source_id = 'a-model'
fix = ParentSourceIdFromSourceId('1.nc', '/a')
fix.apply_fix()
self.mock_subprocess.assert_called_once_with(
"ncatted -h -a parent_source_id,global,o,c,'some-model' /a/1.nc",
stderr=subprocess.STDOUT,
shell=True
)
class TestFillValueFromMissingValue(BaseTest):
""" Test FillValueFromMissingValue """
def test_no_attribute_raises(self):
""" Test if the required attribute isn't found in the netCDF """
fix = FillValueFromMissingValue('tos_blah.nc', '/a')
exception_text = ('Cannot find attribute tos.missing_value in '
'file tos_blah.nc')
self.assertRaisesRegex(AttributeNotFoundError, exception_text,
fix.apply_fix)
def test_subprocess_called_correctly(self):
"""
Test that an external call's been made correctly for
FillValueFromMissingValue
"""
class MissingValue(object):
missing_value = 1e-7
self.mock_dataset.return_value.variables = {'tos': MissingValue()}
fix = FillValueFromMissingValue('tos_gubbins.nc', '/a')
fix.apply_fix()
self.mock_subprocess.assert_called_once_with(
"ncatted -h -a _FillValue,tos,o,f,1e-07 /a/tos_gubbins.nc",
stderr=subprocess.STDOUT,
shell=True
)
if __name__ == '__main__':
unittest.main()
| 2.140625
| 2
|
src/stray/segmentation.py
|
StrayRobots/stray
| 1
|
12779560
|
import os
from stray.scene import Scene
from stray.renderer import Renderer
import numpy as np
import pycocotools.mask as mask_util
import pickle


def write_segmentation_masks(scene_path):
    scene = Scene(scene_path)
    renderer = Renderer(scene)
    segmentation_parent_path = os.path.join(scene_path, "segmentation")
    os.makedirs(segmentation_parent_path, exist_ok=True)
    for bbox_id, bbox in enumerate(scene.bounding_boxes):
        segmentation_path = os.path.join(segmentation_parent_path, f"instance_{bbox_id}")
        os.makedirs(segmentation_path, exist_ok=True)
        renderer.add_scene_instance(bbox)
        for i in range(0, len(scene), 1):
            print(f"Processing frame {i:06}", end='\r')
            mask = renderer.render_segmentation(i)
            segmentation = mask_util.encode(np.asarray(mask, order="F"))
            with open(os.path.join(segmentation_path, f"{i:06}.pickle"), 'wb') as handle:
                pickle.dump(segmentation, handle, protocol=pickle.HIGHEST_PROTOCOL)
        print(f"Saved segmentations to {segmentation_path} for instance {bbox_id}")
        renderer.clear_scene_instances()
| 2.421875
| 2
|
lizty-liztz.py
|
sgriffith3/2020-12-07-PyNDE
| 1
|
12779561
|
<gh_stars>1-10
#!/usr/bin/env python3
pets = [["dogs", "cats", "fish"], ["iguana", "tortoise", "llama"]]
# index 0 1
print(pets[0])
# pets[0] = ['dogs', 'cats', 'fish']
# index 0 1 2
normal_pets = pets[0]
print(normal_pets[2])
print(pets[0][1])
print(pets[1])
# pets[1] = ['iguana', 'tortoise', 'llama']
# index 0 1 2
print(pets[1][2])
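# Expected output of the prints above:
#   ['dogs', 'cats', 'fish']
#   fish
#   cats
#   ['iguana', 'tortoise', 'llama']
#   llama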
| 3.171875
| 3
|
05_python_intermedio/modulo_IV_conceptos_avanzados_de_funciones/complementos/pildorasinformaticas/funciones_lambda/practica00.py
|
EdinsonRequena/articicial-inteligence-and-data-science
| 30
|
12779562
|
<filename>05_python_intermedio/modulo_IV_conceptos_avanzados_de_funciones/complementos/pildorasinformaticas/funciones_lambda/practica00.py
"""
Tema: Funciones Lambda
Curso: Python.
Plataforma: Youtube.
Profesor: <NAME> (Pildoras informaticas)
Alumno: @edinsonrequena.
"""
# Calcular el area de un triangulo
area_triangulo = lambda base, altura: (base*altura) / 2
# Elevar un numero al cubo
cubo = lambda x: x**3
# Elevar un numero al cuadrado
cuadrado = lambda x: x**2
# Sumar dos numeros
suma = lambda x, y: x + y
# Restar dos numeros
resta = lambda x, y: x - y
# Reto
desctacar = lambda string: f'{string}$ !!!'
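# Example calls (illustrative):
#     area_triangulo(10, 5)  # -> 25.0
#     cubo(3)                # -> 27
#     suma(2, 3)             # -> 5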
| 3.296875
| 3
|
src/oauth/user_auth/views.py
|
GoelJatin/django_oauth
| 0
|
12779563
|
<reponame>GoelJatin/django_oauth
from django.contrib import messages
from django.shortcuts import render, redirect
from django.views import View
from .models import User, UserAuth
from .forms import LoginForm, SignupForm
from .encrypt import encrypt
from google_auth.auth_helper import GoogleOauth
GOOGLE_OAUTH = GoogleOauth()
GOOGLE_OAUTH.setup()
class LoginView(View):
template = 'login.html'
def get(self, request):
username = request.session.get('username')
if username:
return redirect('/blogs/')
print(GOOGLE_OAUTH.get_authorization_url())
return render(
request,
self.template,
{
'login_form': LoginForm(),
'google_auth_url': GOOGLE_OAUTH.get_authorization_url()
}
)
def post(self, request):
data = request.POST.copy()
login_form = LoginForm(data)
error = False
if login_form.is_valid():
username = login_form.cleaned_data['username']
try:
user = User.objects.get(username=username)
user_auth = UserAuth.objects.get(user=user)
except (User.DoesNotExist, UserAuth.DoesNotExist):
messages.error(request, 'No user found with given username. Check the username or signup now')
error = True
else:
if encrypt(user.salt, login_form.cleaned_data['password']) != user_auth.password.tobytes():
messages.error(request, 'Password does not match')
error = True
if error:
return render(
request,
self.template,
{
'login_form': LoginForm(data),
'google_auth_url': GOOGLE_OAUTH.get_authorization_url()
}
)
request.session['username'] = user.username
return redirect('/blogs/')
class SignupView(View):
template = 'signup.html'
def get(self, request):
username = request.session.get('username')
if username:
return redirect('/blogs/')
return render(
request,
self.template,
{
'signup_form': SignupForm()
}
)
def post(self, request):
data = request.POST.copy()
signup_form = SignupForm(data)
if signup_form.is_valid():
signup_form.save()
user = User.objects.get(username=signup_form.cleaned_data['username'])
user_auth = {
'user': user,
'password': encrypt(user.salt, signup_form.cleaned_data['password'])
}
print(user_auth)
user_auth = UserAuth.objects.create(**user_auth)
user_auth.save()
request.session['username'] = user.username
return redirect('/blogs/')
return render(
request,
self.template,
{
'signup_form': SignupForm()
}
)
def logout(request):
if 'username' in request.session:
del request.session['username']
return redirect('/login')
| 2.453125
| 2
|
app/migrations/0013_auto_20220203_1618.py
|
Maryan23/Moringa-Alumni-Backend
| 0
|
12779564
|
# Generated by Django 3.2.9 on 2022-02-03 13:18
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0012_auto_20220203_1617'),
    ]

    operations = [
        migrations.AlterField(
            model_name='fundraiser',
            name='end_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='fundraiser',
            name='start_date',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
| 1.507813
| 2
|
examinator/scratch/20190407_rdir.py
|
brl0/examinator
| 0
|
12779565
|
<gh_stars>0
#%%
import os
def rdir(path):
    if os.path.isdir(path):
        return {path: [rdir(p) if p.is_dir() else p for p in os.scandir(path)]}
    return [path]

rdir('.')
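# rdir('.') returns a nested structure: for a directory, a dict mapping the
# path to a list whose entries are either nested dicts (subdirectories) or
# os.DirEntry objects (files); for a non-directory path it returns [path].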
| 2.234375
| 2
|
setup.py
|
mortele/MP-sort
| 7
|
12779566
|
from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
import numpy
import mpi4py
import os
class build_ext_subclass(build_ext):
user_options = build_ext.user_options + \
[
('mpicc', None, 'MPICC')
]
def initialize_options(self):
try:
compiler = str(mpi4py.get_config()['mpicc'])
except:
compiler = "mpicc"
self.mpicc = os.environ.get('MPICC', compiler)
build_ext.initialize_options(self)
def finalize_options(self):
build_ext.finalize_options(self)
def build_extensions(self):
# turns out set_executables only works for linker_so, but for compiler_so
self.compiler.compiler_so[0] = self.mpicc
self.compiler.linker_so[0] = self.mpicc
build_ext.build_extensions(self)
extensions = [
Extension("mpsort.binding", [
"mpsort/binding.pyx",
"radixsort.c",
"mp-mpiu.c",
"mpsort-mpi.c"],
include_dirs = ["./", numpy.get_include()],
depends=[
"mpsort.h",
"mpsort-mpi.h",
"mp-mpiu.h",
]
)
]
def find_version(path):
import re
# path shall be a plain ascii text file.
s = open(path, 'rt').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
s, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Version not found")
setup(
name="mpsort",
version=find_version("mpsort/version.py"),
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/rainwoodman/mpsort",
description="python binding of MP-sort, a peta scale sorting routine",
zip_safe = False,
package_dir = {'mpsort': 'mpsort'},
install_requires=['cython', 'numpy', 'mpi4py'],
packages= ['mpsort', 'mpsort.tests'],
license='BSD-2-Clause',
cmdclass = {
"build_ext": build_ext_subclass
},
ext_modules = cythonize(extensions)
)
| 1.789063
| 2
|
plugs_payments/signals.py
|
solocompt/plugs-payments
| 0
|
12779567
|
<gh_stars>0
"""
Plugs Payments Signals
"""
from django.dispatch import Signal
# sent when a validated ifthen payment received
valid_ifthen_payment_received = Signal()
# sent when an invalid payment received
# could be an error with a reference, value or entity
# cannot be the anti phishing key
invalid_ifthen_payment_received = Signal()
# sent when a request to the confirmation callback
# was made with an incorrect or missing anti phisphing key
suspicious_ifthen_payment_received = Signal()
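# A minimal sketch of how downstream code might subscribe to one of these
# signals; the receiver name and handler body are illustrative only:
#
#     from django.dispatch import receiver
#     from plugs_payments.signals import valid_ifthen_payment_received
#
#     @receiver(valid_ifthen_payment_received)
#     def handle_valid_payment(sender, **kwargs):
#         ...  # e.g. mark the related order as paid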
| 1.75
| 2
|
valueIterationAgents.py
|
abhinavcreed13/ai-reinforcement-learning
| 0
|
12779568
|
<reponame>abhinavcreed13/ai-reinforcement-learning<gh_stars>0
# valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
import mdp, util, copy
from learningAgents import ValueEstimationAgent
import collections
class ValueIterationAgent(ValueEstimationAgent):
"""
* Please read learningAgents.py before reading this.*
A ValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount = 0.9, iterations = 100):
"""
Your value iteration agent should take an mdp on
construction, run the indicated number of iterations
and then act according to the resulting policy.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state, action, nextState)
mdp.isTerminal(state)
"""
self.mdp = mdp
self.discount = discount
self.iterations = iterations
self.values = util.Counter() # A Counter is a dict with default 0
self.runValueIteration()
def runValueIteration(self):
# Write value iteration code here
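# Each sweep below applies the batch Bellman backup to every non-terminal
# state, reading from a frozen copy of the current values so that all updates
# in one iteration use the same V_k:
#   V_{k+1}(s) = max_a sum_{s'} T(s, a, s') * (R(s, a, s') + discount * V_k(s'))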
for iter in range(0, self.iterations):
newQValues = self.values.copy()
for state in self.mdp.getStates():
if self.mdp.isTerminal(state):
continue
bestAction = self.computeActionFromValues(state)
QValue = self.computeQValueFromValues(state, bestAction)
newQValues[state] = QValue
self.values = newQValues
def getValue(self, state):
"""
Return the value of the state (computed in __init__).
"""
return self.values[state]
def computeQValueFromValues(self, state, action):
"""
Compute the Q-value of action in state from the
value function stored in self.values.
"""
nextTransitions = self.mdp.getTransitionStatesAndProbs(state, action)
QValue = 0
for nextState, probs in nextTransitions:
nextReward = self.mdp.getReward(state, action, nextState)
discount = self.discount
futureQVal = self.getValue(nextState)
QValue += probs * (nextReward + discount * futureQVal)
return QValue
def computeActionFromValues(self, state):
"""
The policy is the best action in the given state
according to the values currently stored in self.values.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None.
"""
if self.mdp.isTerminal(state):
return None
possibleActions = self.mdp.getPossibleActions(state)
actionQValues = util.Counter()
for possibleAction in possibleActions:
actionQValues[possibleAction] = self.computeQValueFromValues(state, possibleAction)
bestAction = actionQValues.argMax()
return bestAction
def getPolicy(self, state):
return self.computeActionFromValues(state)
def getAction(self, state):
"Returns the policy at the state (no exploration)."
return self.computeActionFromValues(state)
def getQValue(self, state, action):
return self.computeQValueFromValues(state, action)
class AsynchronousValueIterationAgent(ValueIterationAgent):
"""
* Please read learningAgents.py before reading this.*
An AsynchronousValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs cyclic value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount = 0.9, iterations = 1000):
"""
Your cyclic value iteration agent should take an mdp on
construction, run the indicated number of iterations,
and then act according to the resulting policy. Each iteration
updates the value of only one state, which cycles through
the states list. If the chosen state is terminal, nothing
happens in that iteration.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state)
mdp.isTerminal(state)
"""
ValueIterationAgent.__init__(self, mdp, discount, iterations)
def runValueIteration(self):
mdpStates = self.mdp.getStates()
indexIterator = 0
for iter in range(0, self.iterations):
if indexIterator == len(mdpStates): indexIterator = 0
targetState = mdpStates[indexIterator]
indexIterator += 1
if self.mdp.isTerminal(targetState):
continue
bestAction = self.computeActionFromValues(targetState)
QValue = self.computeQValueFromValues(targetState,
bestAction)
self.values[targetState] = QValue
class PrioritizedSweepingValueIterationAgent(AsynchronousValueIterationAgent):
"""
* Please read learningAgents.py before reading this.*
A PrioritizedSweepingValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs prioritized sweeping value iteration
for a given number of iterations using the supplied parameters.
"""
def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):
"""
Your prioritized sweeping value iteration agent should take an mdp on
construction, run the indicated number of iterations,
and then act according to the resulting policy.
"""
self.theta = theta
ValueIterationAgent.__init__(self, mdp, discount, iterations)
def runValueIteration(self):
# Initialize an empty priority queue
self.queue = util.PriorityQueue()
self.predecessors = util.Counter()
for s in self.mdp.getStates():
if not self.mdp.isTerminal(s):
self.predecessors[s] = set()
for s in self.mdp.getStates():
if self.mdp.isTerminal(s):
continue
# compute predecessors for state s
possibleActions = self.mdp.getPossibleActions(s)
for action in possibleActions:
nextTransitions = self.mdp.getTransitionStatesAndProbs(s, action)
for nextState, prob in nextTransitions:
if prob != 0 and not self.mdp.isTerminal(nextState):
self.predecessors[nextState].add(s)
# calculate priority and push into queue
currentValue = self.values[s]
bestAction = self.computeActionFromValues(s)
highestQValue = self.computeQValueFromValues(s, bestAction)
diff = abs(currentValue - highestQValue)
self.queue.push(s, -diff)
for iter in range(0, self.iterations):
if self.queue.isEmpty():
# terminate
return
s = self.queue.pop()
# calculate Q-value for updating s
bestAction = self.computeActionFromValues(s)
self.values[s] = self.computeQValueFromValues(s, bestAction)
for p in self.predecessors[s]:
currentValue = self.values[p]
bestAction = self.computeActionFromValues(p)
highestQValue = self.computeQValueFromValues(p, bestAction)
diff = abs(currentValue - highestQValue)
if diff > self.theta:
self.queue.update(p, -diff)
| 2.5625
| 3
|
get_level_data.py.py
|
wjsutton/super_mario_bros_level_details
| 0
|
12779569
|
<reponame>wjsutton/super_mario_bros_level_details
import requests
from bs4 import BeautifulSoup
import pandas as pd
from time import sleep
# read csv of all page urls
worlds = pd.read_csv('world_pages.csv')
worlds_pages = worlds['world'].tolist()
# function to turn html tables into dataframes
def tableDataText(table):
rows = []
trs = table.find_all('tr')
headerow = [td.get_text(strip=True) for td in trs[0].find_all('th')] # header row
if headerow: # if there is a header row include first
rows.append(headerow)
trs = trs[1:]
for tr in trs: # for every table row
rows.append([td.get_text(strip=True) for td in tr.find_all('td')]) # data row
return rows
# initiate list for dataframes from loop
level_df = []
enemy_df = []
item_df =[]
gif_list = []
# loop through worlds_pages and extract level map and data tables
for i in range(len(worlds_pages)):
print(i)
# add a sleep between requests to avoid getting the IP blocked
sleep(30)
url = worlds_pages[i]
try:
page = requests.get(url,timeout = 60)
except requests.ConnectionError as e:
sleep(5*60)
page = requests.get(url,timeout = 60)
except requests.Timeout as e:
sleep(15*60)
page = requests.get(url,timeout = 60)
soup = BeautifulSoup(page.content, 'html.parser')
# find elements from html
image = [x.get('src') for x in soup.find_all("img")]
tables = soup.find_all("table",{"class":"wikitable"})
infobox = soup.find_all("table",{"class":"infobox"})
# find map image from list of images
map = [s for s in image if "Map.png" in s]
if len(map) >1:
map = map[0]
gifs = [s for s in image if "gif" in s]
gif_list.append(gifs)
# build level dataframe and append map image
level = tableDataText(infobox[0])
level = pd.DataFrame(level)
# reduce data and transpose
level = level[3:7]
level = level.transpose()
# replace headers with first row of data
level = level.rename(columns=level.iloc[0]).drop(level.index[0])
level['Map'] = map
# add url for future joins
level['url'] = url
level_df.append(level)
# build enemies dataframe
if len(tables) > 0:
enemies = tableDataText(tables[0])
enemies = pd.DataFrame(enemies)
# replace headers with first row of data
enemies = enemies.rename(columns=enemies.iloc[0]).drop(enemies.index[0])
# add url for future joins
enemies['url'] = url
enemy_df.append(enemies)
# build items dataframe
if len(tables) > 1:
items = tableDataText(tables[1])
items = pd.DataFrame(items)
# replace headers with first row of data
items = items.rename(columns=items.iloc[0]).drop(items.index[0])
# add url for future joins
items['url'] = url
item_df.append(items)
# concatenate each list of dataframes into a single dataframe
level_df = pd.concat(level_df)
enemy_df = pd.concat(enemy_df)
item_df = pd.concat(item_df)
#gif_list = set(gif_list)
gif_df = pd.DataFrame()
gif_df['url'] = gif_list
# writing data to csv
level_df.to_csv('data\\super_mario_bros_levels.csv', encoding="utf-8-sig", index=False)
enemy_df.to_csv('data\\super_mario_bros_enemies.csv', encoding="utf-8-sig", index=False)
item_df.to_csv('data\\super_mario_bros_items.csv', encoding="utf-8-sig", index=False)
gif_df.to_csv('data\\super_mario_bros_gifs.csv', encoding="utf-8-sig", index=False)
| 2.90625
| 3
|
tests/rules/test_garbage_symbols.py
|
gitter-badger/arche
| 0
|
12779570
|
<gh_stars>0
from arche import SH_URL
from arche.rules.garbage_symbols import garbage_symbols
from arche.rules.result import Level
from conftest import create_result
import pytest
dirty_inputs = [
(
[
{
"_key": f"{SH_URL}/112358/13/21/item/0",
"name": " <NAME>",
"address": "here goes &",
"phone": "<h1>144</h1>.sx-prime-pricing-row { float: left; }",
"rank": 14441,
},
{
"_key": f"{SH_URL}/112358/13/21/item/1",
"name": "<!--<NAME>-->",
"address": "Some street",
"phone": "1144",
"rank": 2_039_857,
},
],
{
Level.ERROR: [
(
"100.0% (2) items affected",
None,
{
"100.0% of 'name' values contain [' ', '-->', '<!--']": [
f"{SH_URL}/112358/13/21/item/0",
f"{SH_URL}/112358/13/21/item/1",
],
"50.0% of 'address' values contain ['&']": [
f"{SH_URL}/112358/13/21/item/0"
],
(
"50.0% of 'phone' values contain "
"['.sx-prime-pricing-ro', '</h1>', '<h1>']"
): [f"{SH_URL}/112358/13/21/item/0"],
},
)
]
},
2,
2,
),
([{"_key": "0"}], {}, 1, 0),
]
@pytest.mark.parametrize(
"get_job_items, expected_messages, expected_items_count, expected_err_items_count",
dirty_inputs,
indirect=["get_job_items"],
)
def test_garbage_symbols(
get_job_items, expected_messages, expected_items_count, expected_err_items_count
):
assert garbage_symbols(get_job_items) == create_result(
"Garbage Symbols",
expected_messages,
items_count=expected_items_count,
err_items_count=expected_err_items_count,
)
| 2.0625
| 2
|
src/python/defs.py
|
sloorush/ebpf-keylogger
| 0
|
12779571
|
import os, sys
ticksleep = 0.1
project_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "../.."))
| 1.539063
| 2
|
music_random.py
|
rnsribeiro/RenomearEmMassa
| 0
|
12779572
|
import os
from random import randint

for name in os.listdir('pendrive'):
    newname = '{}.mp3'.format(randint(0, 1000000))
    os.rename("pendrive/" + name, "pendrive/" + newname)
| 2.6875
| 3
|
app/python/zbrank/src/rank_start.py
|
fengshiyou/live
| 0
|
12779573
|
# -*- coding: UTF-8 -*-
# Custom module imports - begin
# from getList import getList
import zbrank
# Third-party imports - begin
import json
import time
# Third-party imports - end
# Custom module imports - end

now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))


class status_start:
    def __init__(self):
        # main URL
        self.main_url = "http://www.zbrank.com/ranking/affect/"
        # ajax URL
        self.ajax_url = "http://www.zbrank.com/ranking/getRatingList"

    def status_start(self):
        # zbrank.zbrankSave().saveZbrankRank()
        zbrank.zbrankSave().saveZbrankRankNew()


status_start().status_start()
| 2.609375
| 3
|
krake/krake/api/app.py
|
rak-n-rok/Krake
| 1
|
12779574
|
<filename>krake/krake/api/app.py
"""This module defines the bootstrap function for creating the aiohttp server
instance serving Krake's HTTP API.
Krake serves multiple APIs for different technologies, e.g. the core
functionality like roles and role bindings are served by the
:mod:`krake.api.core` API where as the Kubernetes API is provided by
:mod:`krake.api.kubernetes`.
Example:
The API server can be run as follows:
.. code:: python
from aiohttp import web
from krake.api.app import create_app
config = ...
app = create_app(config)
web.run_app(app)
"""
import aiohttp_cors
import logging
import ssl
from aiohttp import web, ClientSession
from functools import partial
from krake.api.database import Session
from krake.data.core import RoleBinding
from . import __version__ as version
from . import middlewares
from . import auth
from .helpers import session
from .core import CoreApi
from .openstack import OpenStackApi
from .kubernetes import KubernetesApi
routes = web.RouteTableDef()
@routes.get("/")
async def index(request):
return web.json_response({"version": version})
@routes.get("/me")
async def me(request):
roles = set()
user = request["user"]
async for binding in session(request).all(RoleBinding):
if user in binding.users:
roles.update(binding.roles)
return web.json_response({"user": user, "roles": sorted(roles)})
@routes.get("/release")
async def release(request):
return web.json_response("You released the Krake.", status=202)
def create_app(config):
"""Create aiohttp application instance providing the Krake HTTP API
Args:
config (krake.data.config.ApiConfiguration): Application configuration object
Returns:
aiohttp.web.Application: Krake HTTP API
"""
logger = logging.getLogger("krake.api")
if not config.tls.enabled:
ssl_context = None
else:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.verify_mode = ssl.CERT_OPTIONAL
ssl_context.load_cert_chain(certfile=config.tls.cert, keyfile=config.tls.key)
# Load authorities for client certificates.
client_ca = config.tls.client_ca
if client_ca:
ssl_context.load_verify_locations(cafile=client_ca)
authentication = load_authentication(config)
authorizer = load_authorizer(config)
app = web.Application(
logger=logger,
middlewares=[
middlewares.error_log(),
authentication,
middlewares.retry_transaction(retry=config.etcd.retry_transactions),
],
)
app["config"] = config
app["authorizer"] = authorizer
app["ssl_context"] = ssl_context
# Cleanup contexts
app.cleanup_ctx.append(http_session)
app.cleanup_ctx.append(
partial(db_session, host=config.etcd.host, port=config.etcd.port)
)
# Routes
app.add_routes(routes)
app.add_routes(CoreApi.routes)
app.add_routes(OpenStackApi.routes)
app.add_routes(KubernetesApi.routes)
cors_setup(app)
return app
def cors_setup(app):
"""Set the default CORS (Cross-Origin Resource Sharing) rules for all routes of the
given web application.
Args:
app (web.Application): Web application
"""
cors_origin = app["config"].authentication.cors_origin
default_origin = "*"
if cors_origin == default_origin:
app.logger.warning(
f"Setting the default origin '{default_origin}' for the CORS setup may be a"
" security concern. See the 'Security principles' in the admin"
" documentation."
)
cors = aiohttp_cors.setup(
app,
defaults={
cors_origin: aiohttp_cors.ResourceOptions(
allow_credentials=True,
allow_headers="*",
allow_methods=["DELETE", "GET", "OPTIONS", "POST", "PUT"],
)
},
)
for route in app.router.routes():
cors.add(route)
async def db_session(app, host, port):
"""Async generator creating a database :class:`krake.api.database.Session` that can
be used by other components (middleware, route handlers) or by the requests
handlers. The database session is available under the ``db`` key of the application.
This function should be used as cleanup context (see
:attr:`aiohttp.web.Application.cleanup_ctx`).
Args:
app (aiohttp.web.Application): Web application
"""
async with Session(host=host, port=port) as session:
app["db"] = session
yield
async def http_session(app):
"""Async generator creating an :class:`aiohttp.ClientSession` HTTP session
that can be used by other components (middleware, route handlers). The HTTP
client session is available under the ``http`` key of the application.
This function should be used as cleanup context (see
:attr:`aiohttp.web.Application.cleanup_ctx`).
Args:
app (aiohttp.web.Application): Web application
"""
async with ClientSession() as session:
app["http"] = session
yield
def load_authentication(config):
"""Create the authentication middleware :func:`.middlewares.authentication`.
The authenticators are loaded from the "authentication" configuration key.
If the server is configured with TLS, client certificates are also added
as authentication (:func:`.auth.client_certificate_authentication`)
strategy.
Args:
config (krake.data.config.ApiConfiguration): Application configuration object
Returns:
aiohttp middleware handling request authentication
"""
authenticators = []
allow_anonymous = config.authentication.allow_anonymous
strategy = config.authentication.strategy
if strategy.static.enabled:
authenticators.append(auth.static_authentication(name=strategy.static.name))
elif strategy.keystone.enabled:
authenticators.append(
auth.keystone_authentication(endpoint=strategy.keystone.endpoint)
)
elif strategy.keycloak.enabled:
authenticators.append(
auth.keycloak_authentication(
endpoint=strategy.keycloak.endpoint, realm=strategy.keycloak.realm
)
)
# If the "client_ca" TLS configuration parameter is given, enable client
# certificate authentication.
if config.tls.enabled and config.tls.client_ca:
authenticators.append(auth.client_certificate_authentication())
return middlewares.authentication(authenticators, allow_anonymous)
def load_authorizer(config):
"""Load authorization function from configuration.
Args:
config (krake.data.config.ApiConfiguration): Application configuration object
Raises:
ValueError: If an unknown authorization strategy is configured
Returns:
Coroutine function for authorizing resource requests
"""
if config.authorization == "always-allow":
return auth.always_allow
if config.authorization == "always-deny":
return auth.always_deny
if config.authorization == "RBAC":
return auth.rbac
raise ValueError(f"Unknown authorization strategy {config.authorization!r}")
| 2.5
| 2
|
tensornetwork/backends/tensorflow/tensorflow_tensornetwork_test.py
|
khanhgithead/TensorNetwork
| 1,681
|
12779575
|
"""Tests for graphmode_tensornetwork."""
import numpy as np
import tensorflow as tf
from tensornetwork import (contract, connect, flatten_edges_between,
contract_between, Node)
import pytest
class GraphmodeTensorNetworkTest(tf.test.TestCase):
def test_basic_graphmode(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.ones(10), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
sess = tf.compat.v1.Session()
final_val = sess.run(final_tensor)
self.assertAllClose(final_val, 10.0)
  def test_gradient_descent(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.Variable(tf.ones(10)), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
opt = tf.compat.v1.train.GradientDescentOptimizer(0.001)
train_op = opt.minimize(final_tensor)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
self.assertAllClose(sess.run(final_tensor), 10.0)
sess.run(train_op)
self.assertLess(sess.run(final_tensor), 10.0)
def test_dynamic_network_sizes(self):
@tf.function
def f(x, n):
x_slice = x[:n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
e = connect(n1[0], n2[0])
return contract(e).get_tensor()
x = np.ones(10)
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 2.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)
@pytest.mark.skip(reason="Test fails due to probable bug in tensorflow 2.0.0")
def test_dynamic_network_sizes_contract_between(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract_between(n1, n2).get_tensor()
x = tf.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_standard(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract(flatten_edges_between(n1, n2)).get_tensor()
x = np.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_trace(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
connect(n1[0], n1[2])
connect(n1[1], n1[3])
return contract(flatten_edges_between(n1, n1)).get_tensor()
x = np.ones((3, 4, 3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), np.ones((2,)) * 12)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), np.ones((3,)) * 12)
def test_batch_usage(self,):
def build_tensornetwork(tensors):
a = Node(tensors[0], backend="tensorflow")
b = Node(tensors[1], backend="tensorflow")
e = connect(a[0], b[0])
return contract(e).get_tensor()
tensors = [np.ones((5, 10)), np.ones((5, 10))]
result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float64)
np.testing.assert_allclose(result, np.ones(5) * 10)
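  def test_eager_mode_sketch(self):
    # Illustrative sketch (not part of the original test suite): the same
    # contraction as test_basic_graphmode, but relying on TF 2.x eager
    # execution instead of an explicit graph and session.
    a = Node(tf.ones(10), backend="tensorflow")
    b = Node(tf.ones(10), backend="tensorflow")
    e = connect(a[0], b[0])
    self.assertAllClose(contract(e).get_tensor(), 10.0)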
if __name__ == '__main__':
tf.test.main()
| 2.4375
| 2
|
book-code/numpy-ml/numpy_ml/rl_models/agents.py
|
yangninghua/code_library
| 0
|
12779576
|
<reponame>yangninghua/code_library
"""Reinforcement learning agents that can be run on OpenAI gym environs"""
from abc import ABC, abstractmethod
from collections import defaultdict
import numpy as np
from .rl_utils import EnvModel, env_stats, tile_state_space
from ..utils.data_structures import Dict
class AgentBase(ABC):
def __init__(self, env):
super().__init__()
self.env = env
self.parameters = {}
self.hyperparameters = {}
self.derived_variables = {}
self.env_info = env_stats(env)
def _create_2num_dicts(self, obs_encoder=None, act_encoder=None):
E = self.env_info
n_states = np.prod(E["n_obs_per_dim"])
n_actions = np.prod(E["n_actions_per_dim"])
# create action -> scalar dictionaries
self._num2action = Dict()
self._action2num = Dict(act_encoder)
if n_actions != np.inf:
self._action2num = {act: i for i, act in enumerate(E["action_ids"])}
self._num2action = {i: act for act, i in self._action2num.items()}
# create obs -> scalar dictionaries
self._num2obs = Dict()
self._obs2num = Dict(obs_encoder)
if n_states != np.inf:
self._obs2num = {act: i for i, act in enumerate(E["obs_ids"])}
self._num2obs = {i: act for act, i in self._obs2num.items()}
def flush_history(self):
"""Clear the episode history"""
for k, v in self.episode_history.items():
self.episode_history[k] = []
@abstractmethod
def act(self, obs):
"""Generate an action given the current observation"""
raise NotImplementedError
@abstractmethod
def greedy_policy(self, **kwargs):
"""
Take a greedy action.
Returns
-------
total_reward : float
The total reward on the episode.
n_steps : float
The total number of steps taken on the episode.
"""
raise NotImplementedError
@abstractmethod
def run_episode(self, max_steps, render=False):
"""
Run the agent on a single episode.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode
render : bool
Whether to render the episode during training
Returns
-------
reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
"""
raise NotImplementedError
@abstractmethod
def update(self):
r"""
Update the agent parameters according to the rewards accrued on the
current episode.
Returns
-------
avg_reward : float
The average reward earned by the best `retain_prcnt` theta samples
on the current episode.
"""
raise NotImplementedError
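# Illustrative sketch (not part of the original module): the minimal surface a
# concrete ``AgentBase`` subclass has to provide. The uniform-random behaviour
# below is an assumption for demonstration only; the real agents in this
# module implement far richer act/update logic.
class _RandomAgentSketch(AgentBase):
    def __init__(self, env):
        super().__init__(env)
        self._create_2num_dicts()  # assumes a discrete action space

    def act(self, obs):
        # ignore the observation and sample a random discrete action id
        n_actions = np.prod(self.env_info["n_actions_per_dim"])
        return self._num2action[np.random.randint(n_actions)]

    def greedy_policy(self, max_steps, render=False):
        return self.run_episode(max_steps, render)

    def run_episode(self, max_steps, render=False):
        obs, total_reward, n_steps = self.env.reset(), 0.0, 0
        for _ in range(max_steps):
            if render:
                self.env.render()
            obs, reward, done, _ = self.env.step(self.act(obs))
            total_reward += reward
            n_steps += 1
            if done:
                break
        return total_reward, n_steps

    def update(self):
        pass  # a random agent has nothing to learn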
class CrossEntropyAgent(AgentBase):
def __init__(self, env, n_samples_per_episode=500, retain_prcnt=0.2):
r"""
A cross-entropy method agent.
Notes
-----
The cross-entropy method [1]_ [2]_ agent only operates on ``envs`` with
discrete action spaces.
        On each episode the agent generates `n_samples_per_episode` samples
        of the parameters
(:math:`\theta`) for its behavior policy. The `i`'th sample at
timestep `t` is:
.. math::
\theta_i &= \{\mathbf{W}_i^{(t)}, \mathbf{b}_i^{(t)} \} \\
\theta_i &\sim \mathcal{N}(\mu^{(t)}, \Sigma^{(t)})
Weights (:math:`\mathbf{W}_i`) and bias (:math:`\mathbf{b}_i`) are the
parameters of the softmax policy:
.. math::
\mathbf{z}_i &= \text{obs} \cdot \mathbf{W}_i + \mathbf{b}_i \\
p(a_i^{(t + 1)}) &= \frac{e^{\mathbf{z}_i}}{\sum_j e^{z_{ij}}} \\
a^{(t + 1)} &= \arg \max_j p(a_j^{(t+1)})
At the end of each episode, the agent takes the top `retain_prcnt`
highest scoring :math:`\theta` samples and combines them to generate
the mean and variance of the distribution of :math:`\theta` for the
next episode:
.. math::
\mu^{(t+1)} &= \text{avg}(\texttt{best_thetas}^{(t)}) \\
\Sigma^{(t+1)} &= \text{var}(\texttt{best_thetas}^{(t)})
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2003). The cross entropy
method for fast policy search. In *Proceedings of the 20th Annual
ICML, 20*.
.. [2] <NAME>. (1997). optimization of computer simulation
models with rare events, *European Journal of Operational Research,
99*, 89–112.
Parameters
----------
env : :meth:`gym.wrappers` or :meth:`gym.envs` instance
The environment to run the agent on.
n_samples_per_episode : int
The number of theta samples to evaluate on each episode. Default is 500.
retain_prcnt: float
The percentage of `n_samples_per_episode` to use when calculating
the parameter update at the end of the episode. Default is 0.2.
"""
super().__init__(env)
self.retain_prcnt = retain_prcnt
self.n_samples_per_episode = n_samples_per_episode
self._init_params()
def _init_params(self):
E = self.env_info
assert not E["continuous_actions"], "Action space must be discrete"
self._create_2num_dicts()
b_len = np.prod(E["n_actions_per_dim"])
W_len = b_len * np.prod(E["obs_dim"])
theta_dim = b_len + W_len
# init mean and variance for mv gaussian with dimensions theta_dim
theta_mean = np.random.rand(theta_dim)
theta_var = np.ones(theta_dim)
self.parameters = {"theta_mean": theta_mean, "theta_var": theta_var}
self.derived_variables = {
"b_len": b_len,
"W_len": W_len,
"W_samples": [],
"b_samples": [],
"episode_num": 0,
"cumulative_rewards": [],
}
self.hyperparameters = {
"agent": "CrossEntropyAgent",
"retain_prcnt": self.retain_prcnt,
"n_samples_per_episode": self.n_samples_per_episode,
}
self.episode_history = {"rewards": [], "state_actions": []}
def act(self, obs):
r"""
Generate actions according to a softmax policy.
Notes
-----
The softmax policy assumes that the pmf over actions in state :math:`x_t` is
given by:
.. math::
\pi(a | x^{(t)}) = \text{softmax}(
\text{obs}^{(t)} \cdot \mathbf{W}_i^{(t)} + \mathbf{b}_i^{(t)} )
where :math:`\mathbf{W}` is a learned weight matrix, `obs` is the observation
at timestep `t`, and **b** is a learned bias vector.
Parameters
----------
obs : int or :py:class:`ndarray <numpy.ndarray>`
An observation from the environment.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
An action sampled from the distribution over actions defined by the
softmax policy.
"""
E, P = self.env_info, self.parameters
W, b = P["W"], P["b"]
s = self._obs2num[obs]
s = np.array([s]) if E["obs_dim"] == 1 else s
# compute softmax
Z = s.T @ W + b
e_Z = np.exp(Z - np.max(Z, axis=-1, keepdims=True))
action_probs = e_Z / e_Z.sum(axis=-1, keepdims=True)
# sample action
a = np.random.multinomial(1, action_probs).argmax()
return self._num2action[a]
def run_episode(self, max_steps, render=False):
"""
Run the agent on a single episode.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode
render : bool
Whether to render the episode during training
Returns
-------
reward : float
The total reward on the episode, averaged over the theta samples.
steps : float
The total number of steps taken on the episode, averaged over the
theta samples.
"""
self._sample_thetas()
E, D = self.env_info, self.derived_variables
n_actions = np.prod(E["n_actions_per_dim"])
W_len, obs_dim = D["W_len"], E["obs_dim"]
steps, rewards = [], []
for theta in D["theta_samples"]:
W = theta[:W_len].reshape(obs_dim, n_actions)
b = theta[W_len:]
total_rwd, n_steps = self._episode(W, b, max_steps, render)
rewards.append(total_rwd)
steps.append(n_steps)
# return the average reward and average number of steps across all
# samples on the current episode
D["episode_num"] += 1
D["cumulative_rewards"] = rewards
return np.mean(D["cumulative_rewards"]), np.mean(steps)
def _episode(self, W, b, max_steps, render):
"""
Run the agent for an episode.
Parameters
----------
W : :py:class:`ndarray <numpy.ndarray>` of shape `(obs_dim, n_actions)`
The weights for the softmax policy.
b : :py:class:`ndarray <numpy.ndarray>` of shape `(bias_len, )`
The bias for the softmax policy.
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The total number of steps taken on the episode.
"""
rwds, sa = [], []
H = self.episode_history
        total_reward, n_steps = 0.0, 0
obs = self.env.reset()
self.parameters["W"] = W
self.parameters["b"] = b
for i in range(max_steps):
if render:
self.env.render()
n_steps += 1
action = self.act(obs)
s, a = self._obs2num[obs], self._action2num[action]
sa.append((s, a))
obs, reward, done, _ = self.env.step(action)
rwds.append(reward)
total_reward += reward
if done:
break
H["rewards"].append(rwds)
H["state_actions"].append(sa)
return total_reward, n_steps
def update(self):
r"""
Update :math:`\mu` and :math:`\Sigma` according to the rewards accrued on
the current episode.
Returns
-------
avg_reward : float
The average reward earned by the best `retain_prcnt` theta samples
on the current episode.
"""
D, P = self.derived_variables, self.parameters
n_retain = int(self.retain_prcnt * self.n_samples_per_episode)
# sort the cumulative rewards for each theta sample from greatest to least
sorted_y_val_idxs = np.argsort(D["cumulative_rewards"])[::-1]
top_idxs = sorted_y_val_idxs[:n_retain]
# update theta_mean and theta_var with the best theta value
P["theta_mean"] = np.mean(D["theta_samples"][top_idxs], axis=0)
P["theta_var"] = np.var(D["theta_samples"][top_idxs], axis=0)
def _sample_thetas(self):
"""
Sample `n_samples_per_episode` thetas from a multivariate Gaussian with
mean `theta_mean` and covariance `diag(theta_var)`
"""
P, N = self.parameters, self.n_samples_per_episode
Mu, Sigma = P["theta_mean"], np.diag(P["theta_var"])
samples = np.random.multivariate_normal(Mu, Sigma, N)
self.derived_variables["theta_samples"] = samples
def greedy_policy(self, max_steps, render=True):
"""
Execute a greedy policy using the current agent parameters.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during execution.
Returns
-------
total_reward : float
The total reward on the episode.
n_steps : float
The total number of steps taken on the episode.
"""
E, D, P = self.env_info, self.derived_variables, self.parameters
Mu, Sigma = P["theta_mean"], np.diag(P["theta_var"])
sample = np.random.multivariate_normal(Mu, Sigma, 1)
W_len, obs_dim = D["W_len"], E["obs_dim"]
n_actions = np.prod(E["n_actions_per_dim"])
W = sample[0, :W_len].reshape(obs_dim, n_actions)
b = sample[0, W_len:]
total_reward, n_steps = self._episode(W, b, max_steps, render)
return total_reward, n_steps
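# Illustrative usage sketch (not part of the original module): how a
# ``CrossEntropyAgent`` might be trained on a discrete-action environment.
# The environment name, episode count, and step limit are assumptions chosen
# for demonstration only; the sketch requires the external ``gym`` package.
def _demo_cross_entropy_agent(n_episodes=10, max_steps=200):
    import gym  # assumed dependency for this sketch only

    env = gym.make("CartPole-v0")  # assumed discrete-action environment
    agent = CrossEntropyAgent(env, n_samples_per_episode=100, retain_prcnt=0.2)
    for _ in range(n_episodes):
        agent.run_episode(max_steps=max_steps)
        agent.update()  # refit theta_mean / theta_var from the best samples
    return agent.greedy_policy(max_steps=max_steps, render=False)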
class MonteCarloAgent(AgentBase):
def __init__(self, env, off_policy=False, temporal_discount=0.9, epsilon=0.1):
"""
A Monte-Carlo learning agent trained using either first-visit Monte
Carlo updates (on-policy) or incremental weighted importance sampling
(off-policy).
Parameters
----------
env : :class:`gym.wrappers` or :class:`gym.envs` instance
The environment to run the agent on.
off_policy : bool
Whether to use a behavior policy separate from the target policy
during training. If False, use the same epsilon-soft policy for
both behavior and target policies. Default is False.
temporal_discount : float between [0, 1]
The discount factor used for downweighting future rewards. Smaller
values result in greater discounting of future rewards. Default is
0.9.
epsilon : float between [0, 1]
The epsilon value in the epsilon-soft policy. Larger values
encourage greater exploration during training. Default is 0.1.
"""
super().__init__(env)
self.epsilon = epsilon
self.off_policy = off_policy
self.temporal_discount = temporal_discount
self._init_params()
def _init_params(self):
E = self.env_info
assert not E["continuous_actions"], "Action space must be discrete"
assert not E["continuous_observations"], "Observation space must be discrete"
n_states = np.prod(E["n_obs_per_dim"])
n_actions = np.prod(E["n_actions_per_dim"])
self._create_2num_dicts()
# behavior policy is stochastic, epsilon-soft policy
self.behavior_policy = self.target_policy = self._epsilon_soft_policy
if self.off_policy:
self.parameters["C"] = np.zeros((n_states, n_actions))
# target policy is deterministic, greedy policy
self.target_policy = self._greedy
# initialize Q function
self.parameters["Q"] = np.random.rand(n_states, n_actions)
# initialize returns object for each state-action pair
self.derived_variables = {
"returns": {(s, a): [] for s in range(n_states) for a in range(n_actions)},
"episode_num": 0,
}
self.hyperparameters = {
"agent": "MonteCarloAgent",
"epsilon": self.epsilon,
"off_policy": self.off_policy,
"temporal_discount": self.temporal_discount,
}
self.episode_history = {"state_actions": [], "rewards": []}
def _epsilon_soft_policy(self, s, a=None):
r"""
Epsilon-soft exploration policy.
Notes
-----
Soft policies are necessary for first-visit Monte Carlo methods, as
they require continual exploration (i.e., each state-action pair must
have nonzero probability of occurring).
In epsilon-soft policies, :math:`\pi(a \mid s) > 0` for all :math:`s
\in S` and all :math:`a \in A(s)` at the start of training. As learning
        progresses, :math:`\pi` gradually shifts closer and closer to a
deterministic optimal policy.
In particular, we have:
.. math::
            \pi(a \mid s) &= 1 - \epsilon + \frac{\epsilon}{|A(s)|}
                && \text{if } a = a^* \\
            \pi(a \mid s) &= \frac{\epsilon}{|A(s)|} && \text{if } a \neq a^*
where :math:`|A(s)|` is the number of actions available in state `s`
and :math:`a^* \in A(s)` is the greedy action in state `s` (i.e.,
:math:`a^* = \arg \max_a Q(s, a)`).
Note that epsilon-greedy policies are instances of epsilon-soft
policies, defined as policies for which :math:`\pi(a|s) \geq \epsilon / |A(s)|`
for all states and actions.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
``_obs2num[obs]``.
a : int, float, tuple, or None
The action number in the current state, as returned by
``self._action2num[obs]``. If None, sample an action from the
action probabilities in state `s`, otherwise, return the
probability of action `a` under the epsilon-soft policy. Default is
None.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
If `a` is None, this is an action sampled from the distribution
over actions defined by the epsilon-soft policy. If `a` is not
None, this is the probability of `a` under the epsilon-soft policy.
"""
E, P = self.env_info, self.parameters
# TODO: this assumes all actions are available in every state
n_actions = np.prod(E["n_actions_per_dim"])
a_star = P["Q"][s, :].argmax()
p_a_star = 1.0 - self.epsilon + (self.epsilon / n_actions)
p_a = self.epsilon / n_actions
action_probs = np.ones(n_actions) * p_a
action_probs[a_star] = p_a_star
np.testing.assert_allclose(np.sum(action_probs), 1)
if a is not None:
return action_probs[a]
# sample action
a = np.random.multinomial(1, action_probs).argmax()
return self._num2action[a]
def _greedy(self, s, a=None):
"""
A greedy behavior policy.
Notes
-----
Only used when off-policy is True.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
``self._obs2num[obs]``.
a : int, float, or tuple
The action number in the current state, as returned by
``self._action2num[obs]``. If None, sample an action from the action
probabilities in state `s`, otherwise, return the probability of
action `a` under the greedy policy. Default is None.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
If `a` is None, this is an action sampled from the distribution
over actions defined by the greedy policy. If `a` is not
None, this is the probability of `a` under the greedy policy.
"""
a_star = self.parameters["Q"][s, :].argmax()
if a is None:
out = self._num2action[a_star]
else:
out = 1 if a == a_star else 0
return out
def _on_policy_update(self):
r"""
Update the `Q` function using an on-policy first-visit Monte Carlo
update.
Notes
-----
The on-policy first-visit Monte Carlo update is
.. math::
Q'(s, a) \leftarrow
\text{avg}(\text{reward following first visit to } (s, a)
\text{ across all episodes})
RL agents seek to learn action values conditional on subsequent optimal
behavior, but they need to behave non-optimally in order to explore all
actions (to find the optimal actions).
The on-policy approach is a compromise -- it learns action values not
for the optimal policy, but for a *near*-optimal policy that still
explores (the epsilon-soft policy).
"""
D, P, HS = self.derived_variables, self.parameters, self.episode_history
ep_rewards = HS["rewards"]
sa_tuples = set(HS["state_actions"])
locs = [HS["state_actions"].index(sa) for sa in sa_tuples]
cumulative_returns = [np.sum(ep_rewards[i:]) for i in locs]
# update Q value with the average of the first-visit return across
# episodes
for (s, a), cr in zip(sa_tuples, cumulative_returns):
D["returns"][(s, a)].append(cr)
P["Q"][s, a] = np.mean(D["returns"][(s, a)])
def _off_policy_update(self):
"""
Update `Q` using weighted importance sampling.
Notes
-----
In importance sampling updates, we account for the fact that we are
updating a different policy from the one we used to generate behavior
by weighting the accumulated rewards by the ratio of the probability of
the trajectory under the target policy versus its probability under
the behavior policies. This is known as the importance sampling weight.
In weighted importance sampling, we scale the accumulated rewards for a
trajectory by their importance sampling weight, then take the
*weighted* average using the importance sampling weight. This weighted
average then becomes the value for the trajectory.
W = importance sampling weight
G_t = total discounted reward from time t until episode end
C_n = sum of importance weights for the first n rewards
This algorithm converges to Q* in the limit.
"""
P = self.parameters
HS = self.episode_history
ep_rewards = HS["rewards"]
T = len(ep_rewards)
G, W = 0.0, 1.0
for t in reversed(range(T)):
s, a = HS["state_actions"][t]
G = self.temporal_discount * G + ep_rewards[t]
P["C"][s, a] += W
# update Q(s, a) using weighted importance sampling
P["Q"][s, a] += (W / P["C"][s, a]) * (G - P["Q"][s, a])
# multiply the importance sampling ratio by the current weight
W *= self.target_policy(s, a) / self.behavior_policy(s, a)
if W == 0.0:
break
def act(self, obs):
r"""
Execute the behavior policy--an :math:`\epsilon`-soft policy used to
generate actions during training.
Parameters
----------
obs : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``env.step(action)``
An observation from the environment.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
An action sampled from the distribution over actions defined by the
epsilon-soft policy.
""" # noqa: E501
s = self._obs2num[obs]
return self.behavior_policy(s)
def run_episode(self, max_steps, render=False):
"""
Run the agent on a single episode.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
D = self.derived_variables
total_rwd, n_steps = self._episode(max_steps, render)
D["episode_num"] += 1
return total_rwd, n_steps
def _episode(self, max_steps, render):
"""
Execute agent on an episode.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
obs = self.env.reset()
HS = self.episode_history
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
n_steps += 1
action = self.act(obs)
s = self._obs2num[obs]
a = self._action2num[action]
# store (state, action) tuple
HS["state_actions"].append((s, a))
# take action
obs, reward, done, info = self.env.step(action)
# record rewards
HS["rewards"].append(reward)
total_reward += reward
if done:
break
return total_reward, n_steps
def update(self):
"""
Update the parameters of the model following the completion of an
episode. Flush the episode history after the update is complete.
"""
H = self.hyperparameters
if H["off_policy"]:
self._off_policy_update()
else:
self._on_policy_update()
self.flush_history()
def greedy_policy(self, max_steps, render=True):
"""
Execute a greedy policy using the current agent parameters.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during execution.
Returns
-------
total_reward : float
The total reward on the episode.
n_steps : float
The total number of steps taken on the episode.
"""
H = self.episode_history
obs = self.env.reset()
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
n_steps += 1
action = self._greedy(obs)
s = self._obs2num[obs]
a = self._action2num[action]
# store (state, action) tuple
H["state_actions"].append((s, a))
# take action
obs, reward, done, info = self.env.step(action)
# record rewards
H["rewards"].append(reward)
total_reward += reward
if done:
break
return total_reward, n_steps
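# Illustrative sketch (not part of the original module): a standalone, toy
# version of the weighted importance-sampling backup used by
# ``MonteCarloAgent._off_policy_update``. The episode, policy probabilities,
# and discount factor below are assumptions for demonstration only.
def _demo_weighted_importance_sampling():
    gamma = 0.9
    Q = np.zeros((2, 2))  # toy Q table: two states, two actions
    C = np.zeros((2, 2))  # cumulative importance weights
    episode = [(0, 1, 1.0), (1, 0, 0.0), (1, 1, 2.0)]  # (s, a, r) triples
    pi_target = [1.0, 1.0, 1.0]  # assumed target-policy probabilities
    pi_behavior = [0.5, 0.5, 0.5]  # assumed behavior-policy probabilities
    G, W = 0.0, 1.0
    for t in reversed(range(len(episode))):
        s, a, r = episode[t]
        G = gamma * G + r
        C[s, a] += W
        # weighted-IS update, mirroring the agent's backward pass
        Q[s, a] += (W / C[s, a]) * (G - Q[s, a])
        W *= pi_target[t] / pi_behavior[t]
        if W == 0.0:
            break
    return Q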
class TemporalDifferenceAgent(AgentBase):
def __init__(
self,
env,
lr=0.4,
epsilon=0.1,
n_tilings=8,
obs_max=None,
obs_min=None,
grid_dims=[8, 8],
off_policy=False,
temporal_discount=0.99,
):
r"""
A temporal difference learning agent with expected SARSA (on-policy) [3]_ or
TD(0) `Q`-learning (off-policy) [4]_ updates.
Notes
-----
The expected SARSA on-policy TD(0) update is:
.. math::
Q(s, a) \leftarrow Q(s, a) + \eta \left(
r + \gamma \mathbb{E}_\pi[Q(s', a') \mid s'] - Q(s, a)
\right)
        and the TD(0) off-policy Q-learning update is:
.. math::
Q(s, a) \leftarrow Q(s, a) + \eta (
r + \gamma \max_a \left\{ Q(s', a) \right\} - Q(s, a)
)
where in each case we have taken action `a` in state `s`, received
reward `r`, and transitioned into state :math:`s'`. In the above
equations, :math:`\eta` is a learning rate parameter, :math:`\gamma` is
a temporal discount factor, and :math:`\mathbb{E}_\pi[ Q[s', a'] \mid
s']` is the expected value under the current policy :math:`\pi` of the
        Q function conditioned on being in state :math:`s'`.
Observe that the expected SARSA update can be used for both on- and
off-policy methods. In an off-policy context, if the target policy is
greedy and the expectation is taken wrt. the target policy then the
expected SARSA update is exactly Q-learning.
NB. For this implementation the agent requires a discrete action
space, but will try to discretize the observation space via tiling if
it is continuous.
References
----------
.. [3] <NAME>. & <NAME>. (1994). *On-Line Q-learning Using
Connectionist Systems*. Tech Report 166. Cambridge University
Department of Engineering.
.. [4] <NAME>. (1989). Learning from delayed rewards. *PhD thesis,
Cambridge University*.
Parameters
----------
env : gym.wrappers or gym.envs instance
The environment to run the agent on.
lr : float
            Learning rate for the Q function updates. Default is 0.4.
epsilon : float between [0, 1]
The epsilon value in the epsilon-soft policy. Larger values
encourage greater exploration during training. Default is 0.1.
n_tilings : int
The number of overlapping tilings to use if the ``env`` observation
space is continuous. Unused if observation space is discrete.
Default is 8.
obs_max : float or :py:class:`ndarray <numpy.ndarray>`
The value to treat as the max value of the observation space when
calculating the grid widths if the observation space is continuous.
If None, use ``env.observation_space.high``. Unused if observation
space is discrete. Default is None.
obs_min : float or :py:class:`ndarray <numpy.ndarray>`
The value to treat as the min value of the observation space when
calculating grid widths if the observation space is continuous. If
None, use ``env.observation_space.low``. Unused if observation
space is discrete. Default is None.
grid_dims : list
The number of rows and columns in each tiling grid if the env
observation space is continuous. Unused if observation space is
discrete. Default is [8, 8].
off_policy : bool
Whether to use a behavior policy separate from the target policy
during training. If False, use the same epsilon-soft policy for
both behavior and target policies. Default is False.
temporal_discount : float between [0, 1]
The discount factor used for downweighting future rewards. Smaller
            values result in greater discounting of future rewards. Default is
            0.99.
"""
super().__init__(env)
self.lr = lr
self.obs_max = obs_max
self.obs_min = obs_min
self.epsilon = epsilon
self.n_tilings = n_tilings
self.grid_dims = grid_dims
self.off_policy = off_policy
self.temporal_discount = temporal_discount
self._init_params()
def _init_params(self):
E = self.env_info
assert not E["continuous_actions"], "Action space must be discrete"
obs_encoder = None
if E["continuous_observations"]:
obs_encoder, _ = tile_state_space(
self.env,
self.env_info,
self.n_tilings,
state_action=False,
obs_max=self.obs_max,
obs_min=self.obs_min,
grid_size=self.grid_dims,
)
self._create_2num_dicts(obs_encoder=obs_encoder)
# behavior policy is stochastic, epsilon-soft policy
self.behavior_policy = self.target_policy = self._epsilon_soft_policy
if self.off_policy:
# target policy is deterministic, greedy policy
self.target_policy = self._greedy
# initialize Q function
self.parameters["Q"] = defaultdict(np.random.rand)
# initialize returns object for each state-action pair
self.derived_variables = {"episode_num": 0}
self.hyperparameters = {
"agent": "TemporalDifferenceAgent",
"lr": self.lr,
"obs_max": self.obs_max,
"obs_min": self.obs_min,
"epsilon": self.epsilon,
"n_tilings": self.n_tilings,
"grid_dims": self.grid_dims,
"off_policy": self.off_policy,
"temporal_discount": self.temporal_discount,
}
self.episode_history = {"state_actions": [], "rewards": []}
def run_episode(self, max_steps, render=False):
"""
Run the agent on a single episode without updating the priority queue
or performing backups.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode
render : bool
Whether to render the episode during training
Returns
-------
reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
"""
return self._episode(max_steps, render, update=False)
def train_episode(self, max_steps, render=False):
"""
Train the agent on a single episode.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
D = self.derived_variables
total_rwd, n_steps = self._episode(max_steps, render, update=True)
D["episode_num"] += 1
return total_rwd, n_steps
def _episode(self, max_steps, render, update=True):
"""
Run or train the agent on an episode.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during training.
update : bool
Whether to perform the Q function backups after each step. Default
is True.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
self.flush_history()
obs = self.env.reset()
HS = self.episode_history
action = self.act(obs)
s = self._obs2num[obs]
a = self._action2num[action]
# store initial (state, action) tuple
HS["state_actions"].append((s, a))
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
# take action
obs, reward, done, info = self.env.step(action)
n_steps += 1
# record rewards
HS["rewards"].append(reward)
total_reward += reward
# generate next state and action
action = self.act(obs)
s_ = self._obs2num[obs] if not done else None
a_ = self._action2num[action]
# store next (state, action) tuple
HS["state_actions"].append((s_, a_))
if update:
self.update()
if done:
break
return total_reward, n_steps
def _epsilon_soft_policy(self, s, a=None):
r"""
Epsilon-soft exploration policy.
In epsilon-soft policies, :math:`\pi(a|s) > 0` for all s ∈ S and all a
∈ A(s) at the start of training. As learning progresses, :math:`\pi`
gradually shifts closer and closer to a deterministic optimal policy.
In particular, we have:
pi(a|s) = 1 - epsilon + (epsilon / |A(s)|) IFF a == a*
pi(a|s) = epsilon / |A(s)| IFF a != a*
where
|A(s)| is the number of actions available in state s
a* ∈ A(s) is the greedy action in state s (i.e., a* = argmax_a Q(s, a))
Note that epsilon-greedy policies are instances of epsilon-soft
policies, defined as policies for which pi(a|s) >= epsilon / |A(s)| for
all states and actions.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
``self._obs2num[obs]``
a : int, float, or tuple
The action number in the current state, as returned by
self._action2num[obs]. If None, sample an action from the action
probabilities in state s, otherwise, return the probability of
action `a` under the epsilon-soft policy. Default is None.
Returns
-------
If `a` is None:
action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by `self._num2action`
If `a` is None, returns an action sampled from the distribution
over actions defined by the epsilon-soft policy.
If `a` is not None:
action_prob : float in range [0, 1]
If `a` is not None, returns the probability of `a` under the
epsilon-soft policy.
""" # noqa: E501
E, P = self.env_info, self.parameters
# TODO: this assumes all actions are available in every state
n_actions = np.prod(E["n_actions_per_dim"])
a_star = np.argmax([P["Q"][(s, aa)] for aa in range(n_actions)])
p_a_star = 1.0 - self.epsilon + (self.epsilon / n_actions)
p_a = self.epsilon / n_actions
action_probs = np.ones(n_actions) * p_a
action_probs[a_star] = p_a_star
np.testing.assert_allclose(np.sum(action_probs), 1)
if a is not None:
return action_probs[a]
# sample action
a = np.random.multinomial(1, action_probs).argmax()
return self._num2action[a]
def _greedy(self, s, a=None):
"""
A greedy behavior policy. Only used when off-policy is true.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
``self._obs2num[obs]``
a : int, float, or tuple
The action number in the current state, as returned by
``self._action2num[obs]``. If None, sample an action from the
action probabilities in state `s`, otherwise, return the
probability of action `a` under the greedy policy. Default is None.
Returns
-------
If `a` is None:
action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``self._num2action``
If `a` is None, returns an action sampled from the distribution
over actions defined by the greedy policy.
If `a` is not None:
action_prob : float in range [0, 1]
If `a` is not None, returns the probability of `a` under the
greedy policy.
""" # noqa: E501
P, E = self.parameters, self.env_info
n_actions = np.prod(E["n_actions_per_dim"])
a_star = np.argmax([P["Q"][(s, aa)] for aa in range(n_actions)])
if a is None:
out = self._num2action[a_star]
else:
out = 1 if a == a_star else 0
return out
def _on_policy_update(self, s, a, r, s_, a_):
"""
Update the Q function using the expected SARSA on-policy TD(0) update:
Q[s, a] <- Q[s, a] + lr * [
r + temporal_discount * E[Q[s', a'] | s'] - Q[s, a]
]
where
E[ Q[s', a'] | s'] is the expected value of the Q function over all
a_ given that we're in state s' under the current policy
NB. the expected SARSA update can be used for both on- and off-policy
methods. In an off-policy context, if the target policy is greedy and
the expectation is taken wrt. the target policy then the expected SARSA
update is exactly Q-learning.
Parameters
----------
s : int as returned by `self._obs2num`
The id for the state/observation at timestep t-1
a : int as returned by `self._action2num`
The id for the action taken at timestep t-1
r : float
The reward after taking action `a` in state `s` at timestep t-1
s_ : int as returned by `self._obs2num`
The id for the state/observation at timestep t
a_ : int as returned by `self._action2num`
The id for the action taken at timestep t
"""
Q, E, pi = self.parameters["Q"], self.env_info, self.behavior_policy
# TODO: this assumes that all actions are available in each state
n_actions = np.prod(E["n_actions_per_dim"])
# compute the expected value of Q(s', a') given that we are in state s'
        E_Q = (
            np.sum([pi(s_, aa) * Q[(s_, aa)] for aa in range(n_actions)])
            if s_ is not None
            else 0
        )
# perform the expected SARSA TD(0) update
qsa = Q[(s, a)]
Q[(s, a)] = qsa + self.lr * (r + self.temporal_discount * E_Q - qsa)
def _off_policy_update(self, s, a, r, s_):
"""
Update the `Q` function using the TD(0) Q-learning update:
Q[s, a] <- Q[s, a] + lr * (
r + temporal_discount * max_a { Q[s', a] } - Q[s, a]
)
Parameters
----------
s : int as returned by `self._obs2num`
The id for the state/observation at timestep `t-1`
a : int as returned by `self._action2num`
The id for the action taken at timestep `t-1`
r : float
The reward after taking action `a` in state `s` at timestep `t-1`
s_ : int as returned by `self._obs2num`
The id for the state/observation at timestep `t`
"""
Q, E = self.parameters["Q"], self.env_info
n_actions = np.prod(E["n_actions_per_dim"])
qsa = Q[(s, a)]
        Qs_ = (
            [Q[(s_, aa)] for aa in range(n_actions)] if s_ is not None else [0]
        )
Q[(s, a)] = qsa + self.lr * (r + self.temporal_discount * np.max(Qs_) - qsa)
def update(self):
"""Update the parameters of the model online after each new state-action."""
H, HS = self.hyperparameters, self.episode_history
(s, a), r = HS["state_actions"][-2], HS["rewards"][-1]
s_, a_ = HS["state_actions"][-1]
if H["off_policy"]:
self._off_policy_update(s, a, r, s_)
else:
self._on_policy_update(s, a, r, s_, a_)
def act(self, obs):
r"""
Execute the behavior policy--an :math:`\epsilon`-soft policy used to
generate actions during training.
Parameters
----------
obs : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``env.step(action)``
An observation from the environment.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
An action sampled from the distribution over actions defined by the
epsilon-soft policy.
""" # noqa: E501
s = self._obs2num[obs]
return self.behavior_policy(s)
def greedy_policy(self, max_steps, render=True):
"""
Execute a deterministic greedy policy using the current agent
parameters.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during execution.
Returns
-------
total_reward : float
The total reward on the episode.
n_steps : float
The total number of steps taken on the episode.
"""
self.flush_history()
H = self.episode_history
obs = self.env.reset()
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
s = self._obs2num[obs]
action = self._greedy(s)
a = self._action2num[action]
# store (state, action) tuple
H["state_actions"].append((s, a))
# take action
obs, reward, done, info = self.env.step(action)
n_steps += 1
# record rewards
H["rewards"].append(reward)
total_reward += reward
if done:
break
return total_reward, n_steps
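# Illustrative sketch (not part of the original module): a toy comparison of
# the expected-SARSA and Q-learning TD(0) targets described in the
# ``TemporalDifferenceAgent`` docstring. The Q-values, policy probabilities,
# reward, and discount below are assumptions for demonstration only.
def _demo_td_targets():
    Q_next = np.array([1.0, 3.0])  # assumed Q(s', .) values
    pi_next = np.array([0.25, 0.75])  # assumed epsilon-soft pi(. | s')
    r, gamma = 1.0, 0.99
    # expected SARSA averages over the policy; Q-learning takes the max
    expected_sarsa_target = r + gamma * float(np.dot(pi_next, Q_next))
    q_learning_target = r + gamma * float(np.max(Q_next))
    return expected_sarsa_target, q_learning_target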
class DynaAgent(AgentBase):
def __init__(
self,
env,
lr=0.4,
epsilon=0.1,
n_tilings=8,
obs_max=None,
obs_min=None,
q_plus=False,
grid_dims=[8, 8],
explore_weight=0.05,
temporal_discount=0.9,
n_simulated_actions=50,
):
r"""
A Dyna-`Q` / Dyna-`Q+` agent [5]_ with full TD(0) `Q`-learning updates via
prioritized-sweeping [6]_ .
Notes
-----
This approach consists of three components: a planning method involving
simulated actions, a direct RL method where the agent directly interacts
with the environment, and a model-learning method where the agent
learns to better represent the environment during planning.
During planning, the agent performs random-sample one-step tabular
Q-planning with prioritized sweeping. This entails using a priority
queue to retrieve the state-action pairs from the agent's history which
would stand to have the largest change to their Q-values if backed up.
Specifically, for state action pair `(s, a)` the priority value is:
.. math::
P = \sum_{s'} p(s') | r + \gamma \max_a \{Q(s', a) \} - Q(s, a) |
which corresponds to the absolute magnitude of the TD(0) Q-learning
backup for the pair.
When the first pair in the queue is backed up, the effect on each of
its predecessor pairs is computed. If the predecessor's priority is
greater than a small threshold the pair is added to the queue and the
process is repeated until either the queue is empty or we have exceeded
`n_simulated_actions` updates. These backups occur without the agent
taking any action in the environment and thus constitute simulations
based on the agent's current model of the environment (i.e., its
tabular state-action history).
During the direct RL phase, the agent takes an action based on its
current behavior policy and Q function and receives a reward from the
environment. The agent logs this state-action-reward-new state tuple in
its interaction table (i.e., environment model) and updates its Q
function using a full-backup version of the Q-learning update:
.. math::
Q(s, a) \leftarrow Q(s, a) + \eta \sum_{r, s'} p(r, s' \mid s, a)
\left(r + \gamma \max_a \{ Q(s', a) \} - Q(s, a) \right)
References
----------
.. [5] <NAME>. (1990). Integrated architectures for learning,
planning, and reacting based on approximating dynamic programming.
In *Proceedings of the 7th Annual ICML*, 216-224.
.. [6] <NAME>. & Atkeson, C. (1993). Prioritized sweeping:
Reinforcement learning with less data and less time. *Machine
Learning, 13(1)*, 103-130.
Parameters
----------
env : :class:`gym.wrappers` or :class:`gym.envs` instance
The environment to run the agent on
lr : float
            Learning rate for the `Q` function updates. Default is 0.4.
epsilon : float between [0, 1]
The epsilon value in the epsilon-soft policy. Larger values
encourage greater exploration during training. Default is 0.1.
n_tilings : int
The number of overlapping tilings to use if the env observation
space is continuous. Unused if observation space is discrete.
Default is 8.
obs_max : float or :py:class:`ndarray <numpy.ndarray>` or None
The value to treat as the max value of the observation space when
calculating the grid widths if the observation space is continuous.
If None, use :meth:`env.observation_space.high`. Unused if observation
space is discrete. Default is None.
obs_min : float or :py:class:`ndarray <numpy.ndarray>` or None
The value to treat as the min value of the observation space when
calculating grid widths if the observation space is continuous. If
None, use :meth:`env.observation_space.low`. Unused if observation
space is discrete. Default is None.
grid_dims : list
The number of rows and columns in each tiling grid if the env
observation space is continuous. Unused if observation space is
discrete. Default is `[8, 8]`.
q_plus : bool
Whether to add incentives for visiting states that the agent hasn't
encountered recently. Default is False.
explore_weight : float
Amount to incentivize exploring states that the agent hasn't
recently visited. Only used if `q_plus` is True. Default is 0.05.
temporal_discount : float between [0, 1]
The discount factor used for downweighting future rewards. Smaller
values result in greater discounting of future rewards. Default is
0.9.
n_simulated_actions : int
            The number of simulated actions to perform for each "real" action.
Default is 50.
"""
super().__init__(env)
self.lr = lr
self.q_plus = q_plus
self.obs_max = obs_max
self.obs_min = obs_min
self.epsilon = epsilon
self.n_tilings = n_tilings
self.grid_dims = grid_dims
self.explore_weight = explore_weight
self.temporal_discount = temporal_discount
self.n_simulated_actions = n_simulated_actions
self._init_params()
def _init_params(self):
E = self.env_info
assert not E["continuous_actions"], "Action space must be discrete"
obs_encoder = None
if E["continuous_observations"]:
obs_encoder, _ = tile_state_space(
self.env,
self.env_info,
self.n_tilings,
state_action=False,
obs_max=self.obs_max,
obs_min=self.obs_min,
grid_size=self.grid_dims,
)
self._create_2num_dicts(obs_encoder=obs_encoder)
self.behavior_policy = self.target_policy = self._epsilon_soft_policy
# initialize Q function and model
self.parameters["Q"] = defaultdict(np.random.rand)
self.parameters["model"] = EnvModel()
# initialize returns object for each state-action pair
self.derived_variables = {
"episode_num": 0,
"sweep_queue": {},
"visited": set(),
"steps_since_last_visit": defaultdict(lambda: 0),
}
if self.q_plus:
self.derived_variables["steps_since_last_visit"] = defaultdict(
np.random.rand,
)
self.hyperparameters = {
"agent": "DynaAgent",
"lr": self.lr,
"q_plus": self.q_plus,
"obs_max": self.obs_max,
"obs_min": self.obs_min,
"epsilon": self.epsilon,
"n_tilings": self.n_tilings,
"grid_dims": self.grid_dims,
"explore_weight": self.explore_weight,
"temporal_discount": self.temporal_discount,
"n_simulated_actions": self.n_simulated_actions,
}
self.episode_history = {"state_actions": [], "rewards": []}
def act(self, obs):
r"""
Execute the behavior policy--an :math:`\epsilon`-soft policy used to
generate actions during training.
Parameters
----------
obs : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``env.step(action)``
An observation from the environment.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
An action sampled from the distribution over actions defined by the
epsilon-soft policy.
""" # noqa: E501
s = self._obs2num[obs]
return self.behavior_policy(s)
def _epsilon_soft_policy(self, s, a=None):
"""
Epsilon-soft exploration policy.
In epsilon-soft policies, pi(a|s) > 0 for all s ∈ S and all a ∈ A(s) at
the start of training. As learning progresses, pi gradually shifts
closer and closer to a deterministic optimal policy.
In particular, we have:
pi(a|s) = 1 - epsilon + (epsilon / |A(s)|) IFF a == a*
pi(a|s) = epsilon / |A(s)| IFF a != a*
where
|A(s)| is the number of actions available in state s
a* ∈ A(s) is the greedy action in state s (i.e., a* = argmax_a Q(s, a))
Note that epsilon-greedy policies are instances of epsilon-soft
policies, defined as policies for which pi(a|s) >= epsilon / |A(s)| for
all states and actions.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
self._obs2num[obs]
a : int, float, or tuple
The action number in the current state, as returned by
self._action2num[obs]. If None, sample an action from the action
probabilities in state s, otherwise, return the probability of
action `a` under the epsilon-soft policy. Default is None.
Returns
-------
If `a` is None:
action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by :meth:`_num2action`
If `a` is None, returns an action sampled from the distribution
over actions defined by the epsilon-soft policy.
If `a` is not None:
action_prob : float in range [0, 1]
If `a` is not None, returns the probability of `a` under the
epsilon-soft policy.
""" # noqa: E501
E, P = self.env_info, self.parameters
# TODO: this assumes all actions are available in every state
n_actions = np.prod(E["n_actions_per_dim"])
a_star = np.argmax([P["Q"][(s, aa)] for aa in range(n_actions)])
p_a_star = 1.0 - self.epsilon + (self.epsilon / n_actions)
p_a = self.epsilon / n_actions
action_probs = np.ones(n_actions) * p_a
action_probs[a_star] = p_a_star
np.testing.assert_allclose(np.sum(action_probs), 1)
if a is not None:
return action_probs[a]
# sample action
a = np.random.multinomial(1, action_probs).argmax()
return self._num2action[a]
def _greedy(self, s, a=None):
"""
A greedy behavior policy.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
self._obs2num[obs]
a : int, float, or tuple
The action number in the current state, as returned by
self._action2num[obs]. If None, sample an action from the action
probabilities in state s, otherwise, return the probability of
action `a` under the greedy policy. Default is None.
Returns
-------
If `a` is None:
action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by :meth:`_num2action`
If `a` is None, returns an action sampled from the distribution
over actions defined by the greedy policy.
If `a` is not None:
action_prob : float in range [0, 1]
If `a` is not None, returns the probability of `a` under the
greedy policy.
""" # noqa: E501
E, Q = self.env_info, self.parameters["Q"]
n_actions = np.prod(E["n_actions_per_dim"])
a_star = np.argmax([Q[(s, aa)] for aa in range(n_actions)])
if a is None:
out = self._num2action[a_star]
else:
out = 1 if a == a_star else 0
return out
def update(self):
"""
Update the priority queue with the most recent (state, action) pair and
perform random-sample one-step tabular Q-planning.
Notes
-----
The planning algorithm uses a priority queue to retrieve the
state-action pairs from the agent's history which will result in the
largest change to its `Q`-value if backed up. When the first pair in
the queue is backed up, the effect on each of its predecessor pairs is
computed. If the predecessor's priority is greater than a small
threshold the pair is added to the queue and the process is repeated
until either the queue is empty or we exceed `n_simulated_actions`
updates.
"""
s, a = self.episode_history["state_actions"][-1]
self._update_queue(s, a)
self._simulate_behavior()
def _update_queue(self, s, a):
"""
Update the priority queue by calculating the priority for (s, a) and
inserting it into the queue if it exceeds a fixed (small) threshold.
Parameters
----------
s : int as returned by `self._obs2num`
The id for the state/observation
a : int as returned by `self._action2num`
The id for the action taken from state `s`
"""
sweep_queue = self.derived_variables["sweep_queue"]
# TODO: what's a good threshold here?
priority = self._calc_priority(s, a)
if priority >= 0.001:
if (s, a) in sweep_queue:
sweep_queue[(s, a)] = max(priority, sweep_queue[(s, a)])
else:
sweep_queue[(s, a)] = priority
def _calc_priority(self, s, a):
"""
Compute the "priority" for state-action pair (s, a). The priority P is
defined as:
P = sum_{s_} p(s_) * abs(r + temporal_discount * max_a {Q[s_, a]} - Q[s, a])
which corresponds to the absolute magnitude of the TD(0) Q-learning
backup for (s, a).
Parameters
----------
s : int as returned by `self._obs2num`
The id for the state/observation
a : int as returned by `self._action2num`
The id for the action taken from state `s`
Returns
-------
priority : float
The absolute magnitude of the full-backup TD(0) Q-learning update
for (s, a)
"""
priority = 0.0
E = self.env_info
Q = self.parameters["Q"]
env_model = self.parameters["model"]
n_actions = np.prod(E["n_actions_per_dim"])
outcome_probs = env_model.outcome_probs(s, a)
for (r, s_), p_rs_ in outcome_probs:
max_q = np.max([Q[(s_, aa)] for aa in range(n_actions)])
P = p_rs_ * (r + self.temporal_discount * max_q - Q[(s, a)])
priority += np.abs(P)
return priority
def _simulate_behavior(self):
"""
Perform random-sample one-step tabular Q-planning with prioritized
sweeping.
Notes
-----
This approach uses a priority queue to retrieve the state-action pairs
from the agent's history with largest change to their Q-values if
backed up. When the first pair in the queue is backed up, the effect on
each of its predecessor pairs is computed. If the predecessor's
priority is greater than a small threshold the pair is added to the
        queue and the process is repeated until either the queue is empty or we
        have exceeded `n_simulated_actions` updates.
"""
env_model = self.parameters["model"]
sweep_queue = self.derived_variables["sweep_queue"]
for _ in range(self.n_simulated_actions):
if len(sweep_queue) == 0:
break
# select (s, a) pair with the largest update (priority)
sq_items = list(sweep_queue.items())
(s_sim, a_sim), _ = sorted(sq_items, key=lambda x: x[1], reverse=True)[0]
# remove entry from queue
del sweep_queue[(s_sim, a_sim)]
# update Q function for (s_sim, a_sim) using the full-backup
# version of the TD(0) Q-learning update
self._update(s_sim, a_sim)
            # get all (_s, _a) pairs that lead to s_sim (i.e., s_sim's predecessors)
pairs = env_model.state_action_pairs_leading_to_outcome(s_sim)
# add predecessors to queue if their priority exceeds thresh
for (_s, _a) in pairs:
self._update_queue(_s, _a)
def _update(self, s, a):
"""
Update Q using a full-backup version of the TD(0) Q-learning update:
Q(s, a) = Q(s, a) + lr *
sum_{r, s'} [
p(r, s' | s, a) * (r + gamma * max_a { Q(s', a) } - Q(s, a))
]
Parameters
----------
s : int as returned by ``self._obs2num``
The id for the state/observation
a : int as returned by ``self._action2num``
The id for the action taken from state `s`
"""
update = 0.0
env_model = self.parameters["model"]
E, D, Q = self.env_info, self.derived_variables, self.parameters["Q"]
n_actions = np.prod(E["n_actions_per_dim"])
# sample rewards from the model
outcome_probs = env_model.outcome_probs(s, a)
for (r, s_), p_rs_ in outcome_probs:
# encourage visiting long-untried actions by adding a "bonus"
# reward proportional to the sqrt of the time since last visit
if self.q_plus:
r += self.explore_weight * np.sqrt(D["steps_since_last_visit"][(s, a)])
max_q = np.max([Q[(s_, a_)] for a_ in range(n_actions)])
update += p_rs_ * (r + self.temporal_discount * max_q - Q[(s, a)])
# update Q value for (s, a) pair
Q[(s, a)] += self.lr * update
def run_episode(self, max_steps, render=False):
"""
Run the agent on a single episode without performing `Q`-function
backups.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
return self._episode(max_steps, render, update=False)
def train_episode(self, max_steps, render=False):
"""
Train the agent on a single episode.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
D = self.derived_variables
total_rwd, n_steps = self._episode(max_steps, render, update=True)
D["episode_num"] += 1
return total_rwd, n_steps
def _episode(self, max_steps, render, update=True):
"""
Run or train the agent on an episode.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during training.
update : bool
Whether to perform the `Q` function backups after each step.
Default is True.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
self.flush_history()
obs = self.env.reset()
env_model = self.parameters["model"]
HS, D = self.episode_history, self.derived_variables
action = self.act(obs)
s = self._obs2num[obs]
a = self._action2num[action]
# store initial (state, action) tuple
HS["state_actions"].append((s, a))
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
# take action
obs, reward, done, info = self.env.step(action)
n_steps += 1
# record rewards
HS["rewards"].append(reward)
total_reward += reward
# generate next state and action
action = self.act(obs)
s_ = self._obs2num[obs] if not done else None
a_ = self._action2num[action]
# update model
env_model[(s, a, reward, s_)] += 1
# update history counter
for k in D["steps_since_last_visit"].keys():
D["steps_since_last_visit"][k] += 1
D["steps_since_last_visit"][(s, a)] = 0
if update:
self.update()
# store next (state, action) tuple
HS["state_actions"].append((s_, a_))
s, a = s_, a_
if done:
break
return total_reward, n_steps
def greedy_policy(self, max_steps, render=True):
"""
Execute a deterministic greedy policy using the current agent
parameters.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during execution.
Returns
-------
total_reward : float
The total reward on the episode.
n_steps : float
The total number of steps taken on the episode.
"""
self.flush_history()
H = self.episode_history
obs = self.env.reset()
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
s = self._obs2num[obs]
action = self._greedy(s)
a = self._action2num[action]
# store (state, action) tuple
H["state_actions"].append((s, a))
# take action
obs, reward, done, info = self.env.step(action)
n_steps += 1
# record rewards
H["rewards"].append(reward)
total_reward += reward
if done:
break
return total_reward, n_steps
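# Illustrative sketch (not part of the original agent): a minimal standalone
# version of the full-backup TD(0) update documented in `_update` above. The Q
# table, outcome table, and hyperparameters below are made-up stand-ins for the
# agent's real attributes.
def _sketch_full_backup_update(Q, outcome_probs, s, a, lr=0.1, gamma=0.9, n_actions=2):
    """Q(s, a) += lr * sum_{r, s'} p(r, s' | s, a) * (r + gamma * max_a' Q(s', a') - Q(s, a))"""
    update = 0.0
    for (r, s_), p_rs_ in outcome_probs[(s, a)]:
        max_q = max(Q[(s_, a_)] for a_ in range(n_actions))
        update += p_rs_ * (r + gamma * max_q - Q[(s, a)])
    Q[(s, a)] += lr * update
    return Q[(s, a)]

_Q = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 1.0, (1, 1): 2.0}
_outcomes = {(0, 1): [((1.0, 1), 0.5), ((0.0, 1), 0.5)]}
print(_sketch_full_backup_update(_Q, _outcomes, s=0, a=1))  # ~0.23 = 0.1 * (0.5*2.8 + 0.5*1.8)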
| 2.84375
| 3
|
wirehome.services.tradfri.gateway_manager/1.0.0/script.py
|
chkr1011/Wirehome.Repository
| 0
|
12779577
|
<reponame>chkr1011/Wirehome.Repository<filename>wirehome.services.tradfri.gateway_manager/1.0.0/script.py
import json
import sys
from time import sleep
TIMER_ID = "wirehome.tradfri.gateway_manager.polling"
config = {}
wirehome = {}
_devices = {}
_gateway_is_connected = False
def initialize():
# wirehome.debugger.enable()
global _devices, _gateway_is_connected
_devices = {}
_gateway_is_connected = False
def start():
wirehome.scheduler.start_timer(TIMER_ID, 2000, __poll_status__)
def stop():
wirehome.scheduler.stop_timer(TIMER_ID)
def get_service_status():
return {
"devices": _devices,
"trace": wirehome.debugger.get_trace(),
"gateway_is_connected": _gateway_is_connected
}
def __find_device_id__(caption):
for device_uid in _devices:
if _devices[device_uid]["9001"] == caption:
return int(device_uid)
return None
def set_device_status(status):
device_id = status.get("device_id", None)
if device_id == None:
device_caption = status.get("device_caption", None)
device_id = __find_device_id__(device_caption)
if device_id == None:
return {"type": "exception.parameter_invalid", "parameter_name": "device_id"}
power_state = status.get("power_state", 0)
brightness = status.get("brightness", 245)
color = status.get("color", "ffffff")
uri = "15001/" + str(device_id)
data = {"5850": power_state}
# Do not add other values if power is off. Otherwise the device will go on and off
    # e.g. when changing the brightness while the device is off.
if power_state == 1:
data["5851"] = brightness
data["5706"] = color
data = {"3311": [data]}
payload = json.dumps(data)
response = __execute_coap_request__("put", uri, payload)
response_status = response.get("status", None)
if response_status == "Changed":
return {"type": "success"}
return response
def __poll_status__(_):
global _gateway_is_connected, _devices
response = None
try:
new_devices = {}
response = __execute_coap_request__("get", "15001")
device_ids = response["payload"]
device_ids = json.loads(device_ids)
for device_id in device_ids:
response = __execute_coap_request__("get", "15001/" + str(device_id))
new_devices[device_id] = json.loads(response["payload"])
_gateway_is_connected = True
__fire_events__(_devices, new_devices)
_devices = new_devices
except:
_gateway_is_connected = False
exception = sys.exc_info()
print("TRADFRI gateway pull failed. {} (Response={})".format(
exception[0], str(response)))
print(str(exception))
sleep(10)
def __fire_events__(old, new):
if old == None or new == None:
return
for device_id in new:
old_power_state = __get_device_status_value__(old, device_id, "5850")
new_power_state = __get_device_status_value__(new, device_id, "5850")
if old_power_state != new_power_state:
wirehome.message_bus.publish({
"type": "tradfri.gateway_manager.event.device_state_changed",
"device_id": device_id,
"property": "power_state",
"old_state": "on" if old_power_state == 1 else "off",
"new_state": "on" if new_power_state == 1 else "off"
})
old_brightness = __get_device_status_value__(old, device_id, "5851")
new_brightness = __get_device_status_value__(new, device_id, "5851")
if old_brightness != new_brightness:
wirehome.message_bus.publish({
"type": "tradfri.gateway_manager.event.device_state_changed",
"device_id": device_id,
"property": "brightness",
"old_state": old_brightness,
"new_state": new_brightness
})
old_color = __get_device_status_value__(old, device_id, "5706")
new_color = __get_device_status_value__(new, device_id, "5706")
if old_color != new_color:
wirehome.message_bus.publish({
"type": "tradfri.gateway_manager.event.device_state_changed",
"device_id": device_id,
"property": "color",
"old_state": old_color,
"new_state": new_color
})
def __get_device_status_value__(source, device_id, status_id):
device = source.get(device_id, None)
if device == None:
return None
status = device.get("3311", None)
if status == None:
return None
if not isinstance(status, list):
return None
if len(status) != 1:
return None
status = status[0]
return status.get(status_id, None)
def __execute_coap_request__(method, uri, payload=""):
# Get the gateway settings from the location where they are stored by the
# Tradfri PSK token generator.
address = wirehome.value_storage.read_string("ikea/tradfri/gateway/address")
identity = wirehome.value_storage.read_string("ikea/tradfri/gateway/identity")
psk = wirehome.value_storage.read_string("ikea/tradfri/gateway/psk")
if address == None or identity == None or psk == None:
return {
"type": "error",
"message": "IKEA tradfri gateway not configured."
}
request = {
"client_uid": "tradfri_gateway_manager",
"host": address,
"identity": identity,
"key": psk,
"method": method,
"path": uri,
"payload": payload
}
response = wirehome.coap.request(request)
# print(response)
return response
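# Illustrative sketch (not part of the original script): the CoAP PUT body that
# set_device_status assembles, shown on made-up values. "5850" is the power
# state, "5851" the brightness and "5706" the color, all nested under "3311"
# and sent to "15001/<device_id>". No request is made here; 65540 is a
# placeholder device id.
_example_body = {"3311": [{"5850": 1, "5851": 128, "5706": "f1e0b5"}]}
print("PUT 15001/65540 " + json.dumps(_example_body))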
| 2.09375
| 2
|
py/optimisation_update_heating_cooling_cop.py
|
Tokarzewski/db-scripts
| 0
|
12779578
|
<reponame>Tokarzewski/db-scripts
import ctypes
from eppy import modeleditor
from eppy.modeleditor import IDF
def show_message(title, text):
ctypes.windll.user32.MessageBoxW(0, text, title, 0)
def before_energy_simulation():
IDF.setiddname(api_environment.EnergyPlusInputIddPath)
idf_file = IDF(api_environment.EnergyPlusInputIdfPath)
# extract values from optimisation table
site = api_environment.Site
table = site.GetTable("OptimisationVariables")
heating_coils = idf_file.idfobjects['Coil:WaterHeating:AirToWaterHeatPump:Pumped'.upper()]
heating_cop_row = table.Records["heatingCOP"]
heating_cop = heating_cop_row["VariableCurrentValue"]
for coil in heating_coils:
if coil.Name == 'HP Water Heater HP Water Heating Coil':
if heating_cop != "UNKNOWN":
coil.Rated_COP = heating_cop
else:
show_message("ERROR", "Cannot set heating COP, unknown value in table OptimisationVariables")
chillers = idf_file.idfobjects['Chiller:Electric:EIR'.upper()]
cooling_eer_row = table.Records["coolingEER"]
cooling_eer = cooling_eer_row["VariableCurrentValue"]
for chiller in chillers:
if cooling_eer != "UNKNOWN":
chiller.Reference_COP = cooling_eer
else:
show_message("ERROR", "Cannot set cooling COP, unknown value in table OptimisationVariables")
idf_file.save()
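# Illustrative sketch (not part of the original script): the minimal eppy
# editing pattern assumed above. idfobjects is keyed by the upper-cased IDF
# class name, and IDF field names map to attributes with spaces replaced by
# underscores (e.g. "Rated COP" -> Rated_COP). Paths below are hypothetical,
# so the lines are left commented out.
# IDF.setiddname("C:/EnergyPlusV9-4-0/Energy+.idd")
# idf = IDF("building.idf")
# for coil in idf.idfobjects["Coil:WaterHeating:AirToWaterHeatPump:Pumped".upper()]:
#     coil.Rated_COP = 3.2
# idf.save()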
| 2.484375
| 2
|
basic-python/code.py
|
abidraza451/greyatom-python-for-data-science
| 0
|
12779579
|
# --------------
class_1 = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
class_2 = ['<NAME>', '<NAME>', '<NAME>']
new_class = class_1 + class_2
print(new_class)
new_class.append('<NAME>')
print(new_class)
new_class.remove('<NAME>')
print(new_class)
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
total = sum(courses.values())
percentage = (total * 100) / 500
print(percentage)
mathematics = {'<NAME>': 78, '<NAME>': 95, '<NAME>': 65, '<NAME>': 50, '<NAME>': 70, '<NAME>': 66, '<NAME>': 75}
topper = max(mathematics, key=mathematics.get)
print(topper)
x = topper.split()
first_name = x[0]
last_name = x[1]
full_name = last_name + " " + first_name
certificate_name = full_name.upper()
print(certificate_name)
| 3.625
| 4
|
month01/day06/exercise01.py
|
Amiao-miao/all-codes
| 1
|
12779580
|
<filename>month01/day06/exercise01.py
"""
Given a month and day, compute which day of the year it is.
Formula: total days of the preceding months + day of the current month.
Example: May 10
Calculation: 31 + 29 + 31 + 30 + 10
"""
year = int(input("Enter year: "))
month = int(input("Enter month: "))
day = int(input("Enter day: "))
day_of_month02 = 29 if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0 else 28
days_of_month = (31, day_of_month02, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
# result=0
# for i in range(month-1):
# result+=days_of_month[i]
result=sum(days_of_month[:month-1])
result+=day
print(result)
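# Cross-check sketch (not part of the original exercise): the standard library
# computes the same day-of-year directly; 2020-05-10 is day 131 = 31 + 29 + 31
# + 30 + 10, matching the formula in the docstring.
import datetime
print(datetime.date(2020, 5, 10).timetuple().tm_yday)  # 131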
| 3.75
| 4
|
runtests.py
|
prohfesor/tapiriik
| 1,445
|
12779581
|
<reponame>prohfesor/tapiriik
import tapiriik.database
tapiriik.database.db = tapiriik.database._connection["tapiriik_test"]
tapiriik.database.cachedb = tapiriik.database._connection["tapiriik_cache_test"]
from tapiriik.testing import *
import unittest
unittest.main()
tapiriik.database._connection.drop_database("tapiriik_test")
tapiriik.database._connection.drop_database("tapiriik_cache_test")
| 1.703125
| 2
|
django_job/users/admin.py
|
Mohitkaushal97/File
| 0
|
12779582
|
<filename>django_job/users/admin.py
# from allauth.account.models import EmailAddress
# from django.contrib import admin
# from django.contrib.auth import admin as auth_admin
# from django.contrib.auth import get_user_model
# from django_job.users.forms import UserCreationForm, UserChangeForm
#
# User = get_user_model()
#
# class EmailAddressInline(admin.TabularInline):
# model = EmailAddress
# extra = 1
# exclude = ('primary',)
#
# @admin.register(User)
# class UserAdmin(auth_admin.UserAdmin):
# inlines = (EmailAddressInline,)
#
# form = UserChangeForm
# add_form = UserCreationForm
# fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
# list_display = ["username", "name", "is_superuser"]
# search_fields = ["name"]
#
# # @admin.register(User)
# # class UserAdmin(nested_admin.UserAdmin):
# # inlines = (EmailAddressInline,)
| 2.15625
| 2
|
hpvm/test/dnn_benchmarks/keras/resnet50_imagenet.py
|
vzyrianov/hpvm-autograd
| 0
|
12779583
|
import os
import sys
import glob
import numpy as np
import tensorflow as tf
import scipy
import scipy.io
import keras
from keras.models import Model, Sequential
from keras.layers import *
from keras.optimizers import Adam
from keras import regularizers
from keras import backend as K
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from Benchmark import Benchmark
from Config import MODEL_PARAMS_DIR
class ResNet50(Benchmark):
def buildModel(self):
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
bn_axis = 1
x = Conv2D(filters1, (1, 1))(input_tensor)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same')(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1))(x)
x = BatchNormalization(axis=bn_axis)(x)
x = add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2)):
filters1, filters2, filters3 = filters
bn_axis = 1
x = Conv2D(filters1, (1, 1), strides=strides)(input_tensor)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same')(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1))(x)
x = BatchNormalization(axis=bn_axis)(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides)(input_tensor)
shortcut = BatchNormalization(
axis=bn_axis)(shortcut)
x = add([x, shortcut])
x = Activation('relu')(x)
return x
img_input = Input(shape=(3, 224, 224))
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2))(x)
# x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = BatchNormalization(axis=bn_axis)(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7))(x)
x = Flatten()(x)
x = Dense(1000)(x)
x = Activation('softmax')(x)
model = Model(img_input, x)
return model
def data_preprocess(self):
X_train, y_train = None, None
X_test = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_input.bin', dtype=np.float32)
X_test = X_test.reshape((-1, 3, 224, 224))
y_test = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_labels.bin', dtype=np.uint32)
X_tuner = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_input.bin', dtype=np.float32)
X_tuner = X_tuner.reshape((-1, 3, 224, 224))
y_tuner = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_labels.bin', dtype=np.uint32)
return X_train, y_train, X_test, y_test, X_tuner, y_tuner
def trainModel(self, model):
assert False, "ImageNet training not supported - use Pretrained weights"
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Changing to NCHW format
K.set_image_data_format('channels_first')
### Parameters specific to each benchmark
reload_dir = MODEL_PARAMS_DIR + '/resnet50_imagenet/'
keras_model_file = MODEL_PARAMS_DIR + '/keras/resnet50_imagenet.h5'
data_dir = 'data/resnet50_imagenet/'
src_dir = 'src/resnet50_imagenet_src/'
num_classes = 1000
batch_size = 50
ResNet50 = ResNet50('ResNet50_imagenet', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
ResNet50.exportToHPVM(sys.argv)
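# Shape-arithmetic sketch (not part of the original benchmark): why
# AveragePooling2D((7, 7)) fits at the end of buildModel above. With 'valid'
# padding each downsampling step shrinks H/W as (hw - k) // s + 1, and only the
# stem conv, the max pool and the stride-2 conv_blocks of stages 3-5 downsample.
def _down(hw, k, s):
    return (hw - k) // s + 1

_hw = _down(224 + 6, 7, 2)      # ZeroPadding2D((3, 3)) + 7x7/2 conv -> 112
_hw = _down(_hw, 3, 2)          # 3x3/2 max pool -> 55
for _ in range(3):              # stages 3, 4, 5 each open with a stride-2 conv_block
    _hw = _down(_hw, 1, 2)      # 55 -> 28 -> 14 -> 7
print(_hw)                      # 7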
| 2.546875
| 3
|
parser_cnpj_raw_files_c.py
|
cadu-leite/CNPJ_cad_pub
| 0
|
12779584
|
from resource import *
import psutil
from collections import defaultdict
# FILE_IN_PATH = 'K3241.K03200DV.D00422.L00001'
FILE_IN_PATH = '/Users/cadu/Downloads/datasets/dadosabertos_CNPjs/K3241.K03200DV.D00422.L00001'
FILE_OUT_PATH = 'output.txt'
# head
# start position of the line and LENGTH (width) of each record field
# ini = acc(all previous positions) + pos_initial
# position 1 is '0' for python strings; the widths stay as given.
LINE_HEAD_0_METRICS = ((0), (1, 16, 11, 8, 8, 1155, 1))  # tuple: (pos_initial, field widths)
# registration data (dados cadastrais)
LINE_DETAL_1_METRICS = ((0), (
1, 1, 1, 14, 1, 150, 55, 2, 8, 2, 55, 3, 70, 4, 8, 7, 20, 60, 6, 156,
50, 8, 2, 4, 50, 12, 4, 8, 12, 4, 8, 12, 4, 8, 115, 2, 14, 2, 1, 8, 8,
1, 23, 8, 243, 1)
)
# partner (socio) data
LINE_SOCIO_2_METRICS = ((0), (1, 1, 1, 14, 1, 150, 14, 2, 5, 8, 3, 70, 11, 60, 2, 855, 1))
# CNAE data
LINE_CNAES_6_METRICS = ((0), (1, 1, 1, 14, 693, 489, 1))
ROWTYPE_HEAD = '0'
ROWTYPE_DACAD = '1'
ROWTYPE_SOCIO = '2'
ROWTYPE_CNAES = '6'
def get_row_data_list(row):
ROWTYPE_HEAD = '0'
ROWTYPE_DACAD = '1'
ROWTYPE_SOCIO = '2'
ROWTYPE_CNAES = '6'
l = []
def _recorta(row, row_metrics):
acc = row_metrics[0]
l = []
index = 0
for i in row_metrics[1]:
l.append((row[acc:acc + row_metrics[1][index]]).strip())
acc = acc + row_metrics[1][index]
#print(f'--> {row[acc:(acc + row_metrics[1][index])]}')
index += 1
return (";".join(l)) + '\n'
if row[0] == ROWTYPE_HEAD:
return _recorta(row, LINE_HEAD_0_METRICS)
if row[0] == ROWTYPE_DACAD:
return _recorta(row, LINE_DETAL_1_METRICS)
if row[0] == ROWTYPE_SOCIO:
return _recorta(row, LINE_SOCIO_2_METRICS)
if row[0] == ROWTYPE_CNAES:
return _recorta(row, LINE_CNAES_6_METRICS)
def main():
linhas_lidas = 0
linhas_gravadas = 0
l=list()
with open(FILE_IN_PATH, 'rt',encoding="latin-1") as f, open(FILE_OUT_PATH, 'w') as fo:
for row in f:
linhas_lidas += 1
try:
fo.write(get_row_data_list(row))
linhas_gravadas += 1
except:
print (f'FAIL at |{row}|')
print(f'Linha lidas: {linhas_lidas}')
print(f'Linha gravadas: {linhas_gravadas}')
# with open(FILE_OUT_PATH, 'w') as f, :
# print(l)
# f.writelines(l)
if __name__ == "__main__":
import time
start_time = time.time()
main()
print(f'----------- ESTATS-----------------------')
print(f'Python seconds: {time.time()-start_time}')
print(f'Python CPU: {getrusage(RUSAGE_SELF)}')
print (f'PSUTIL CPU: {psutil.cpu_percent()}')
print (f'PSUTIL VitMem: {psutil.virtual_memory()[2]}')
print(f'----------- ESTATS END-----------------------')
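# Worked example (not part of the original parser): how _recorta cuts a
# fixed-width row. Each metrics tuple is (start_offset, field_widths) and the
# fields are sliced cumulatively. The toy row and widths below are made up; the
# real layouts are the LINE_*_METRICS constants above.
_toy_metrics = (0, (1, 3, 5))
_toy_row = "1ABCHELLO..."
_fields, _acc = [], _toy_metrics[0]
for _width in _toy_metrics[1]:
    _fields.append(_toy_row[_acc:_acc + _width].strip())
    _acc += _width
print(";".join(_fields))  # 1;ABC;HELLO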
| 2.140625
| 2
|
connect_youtube_uploader/core.py
|
linaro-marketing/connect_youtube_uploader
| 0
|
12779585
|
#!/usr/bin/python3
from apiclient.http import MediaFileUpload
from apiclient.errors import HttpError
from apiclient.discovery import build
import time
import sys
import random
import os
import httplib2
import json
import boto3
import requests
try:
import httplib
except ImportError: # python3 compatibility
from http import client
httplib = client
from oauth2client import client
from oauth2client import file
from oauth2client import tools
class cmd_flags(object):
"""
Used to provide command-line level authentication rather than
working through a web browser.
"""
def __init__(self):
self.auth_host_name = 'localhost'
self.auth_host_port = [8080, 8090]
self.logging_level = 'ERROR'
self.noauth_local_webserver = True
class ConnectYoutubeUploader:
"""
    The ConnectYoutubeUploader enables a video to be downloaded from S3 based on a session_id
    and uploaded to YouTube, with the title/description populated from data provided by the
    SchedDataInterface module.
Attributes
----------
s3_bucket : string
The s3 bucket e.g static-linaro-org
videos_object_prefix: string
The s3 object key prefix to the video objects e.g. connect/SAN19/videos/
session_id: string
The session id for a given video.
Methods
-------
upload()
Uploads a local .mp4 video to YouTube and adds video metadata based on the data
provided by the SchedDataInterface.
"""
def __init__(self, secrets_dir, client_secrets_file_name):
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Always retry when these exceptions are raised.
self.RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
self.RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# Maximum number of times to retry before giving up.
self.MAX_RETRIES = 10
        # The secrets directory
self.SECRETS_DIRECTORY = secrets_dir
# The clients secrets file to use when authenticating our requests to the YouTube Data API
self.CLIENT_SECRETS_FILE = client_secrets_file_name
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
self.MISSING_CLIENT_SECRETS_MESSAGE = ""
# WARNING: Please configure OAuth 2.0
# To make this sample run you will need to populate the client_secrets.json file
# found at:
# %s
# with information from the {{ Cloud Console }}
# {{ https://cloud.google.com/console }}
# For more information about the client_secrets.json file format, please visit:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# """ % os.path.abspath(os.path.join(os.path.dirname(__file__),
# self.CLIENT_SECRETS_FILE))
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
self.YOUTUBE_UPLOAD_SCOPE = ['https://www.googleapis.com/auth/youtube']
# Name of the API service
self.YOUTUBE_API_SERVICE_NAME = 'youtube'
# Version of the YouTube API
self.YOUTUBE_API_VERSION = 'v3'
# Privacy statuses we can use to set on YouTube videos
self.VALID_PRIVACY_STATUSES = ('public', 'private', 'unlisted')
# The ID of the playlist for the current Connect
# In the future this playlist ID should be retrieved dynamically based on the
# connect code
self.playlist_id = "PLKZSArYQptsOzc0kBoWyVSC3f0sHbJdBK"
# Get the authenticated service to use in requests to the API
self.service = self.get_authenticated_service()
# Authorize the request and store authorization credentials.
def get_authenticated_service(self):
"""
Gets an authenticated service object for requests to the
YouTube Data API
"""
store = file.Storage(self.SECRETS_DIRECTORY +
"connect_youtube_uploader-oauth2.json")
creds = store.get()
if creds is None or creds.invalid:
flow = client.flow_from_clientsecrets(self.SECRETS_DIRECTORY + self.CLIENT_SECRETS_FILE,
scope=self.YOUTUBE_UPLOAD_SCOPE,
message=self.MISSING_CLIENT_SECRETS_MESSAGE)
creds = tools.run_flow(flow, store, cmd_flags())
return build(self.YOUTUBE_API_SERVICE_NAME, self.YOUTUBE_API_VERSION,
http=creds.authorize(httplib2.Http()))
def get_video_id_based_on_session_id(self, session_id):
"""
Retrieve a video id of a YouTube video based on a session_id
"""
current_videos = self.get_current_youtube_videos_based_on_string(
session_id)
if len(current_videos) == 1:
return current_videos[0][1]
else:
return False
def download_video(self, video_url, output_folder):
"""Downloads a video from video_url and outputs to output_path"""
response = requests.get(video_url, stream=True)
filename = os.path.split(video_url)[1]
output_path = output_folder + filename
        if not os.path.exists(output_folder):
print("Creating {}".format(output_folder))
os.makedirs(output_folder)
print("Downloading {} to {}".format(filename, output_folder))
with open(output_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return output_path
def update_video_status(self, video_id, status):
"""
This method updates the status of a video based on the video_id and status provided.
"""
# Call the API's videos.list method to retrieve the video resource.
# Get the current video details
videos_list_response = self.service.videos().list(
id=video_id,
part='status'
).execute()
# If the response does not contain an array of 'items' then the video was
# not found.
if not videos_list_response['items']:
return False
# Since the request specified a video ID, the response only contains one
# video resource. This code extracts the snippet from that resource.
video_list_status = videos_list_response['items'][0]['status']
print(video_list_status)
input()
# Set the privacy status of the video
if status:
video_list_status['privacyStatus'] = status
# Update the video resource by calling the videos.update() method.
self.service.videos().update(
part='status',
body=dict(
status=video_list_status,
id=video_id
)).execute()
return True
def get_current_youtube_videos_based_on_string(self, string):
"""
Gets the current videos on YouTube that contain the specified string in
in the title or description
"""
# Get the channels on Youtube and their ID's i.e uploads
channels_response = self.service.channels().list(
mine=True,
part="contentDetails"
).execute()
# From the API response, extract the playlist ID that identifies the list
# of videos uploaded to the authenticated user's channel.
youtube_uploads_id = channels_response["items"][0]["contentDetails"]["relatedPlaylists"]["uploads"]
# Retrieve the list of videos uploaded to the authenticated user's channel.
playlistitems_list_request = self.service.playlistItems().list(
playlistId=youtube_uploads_id,
part="snippet",
maxResults=50
)
videos = []
while playlistitems_list_request:
playlistitems_list_response = playlistitems_list_request.execute()
# Print information about each video.
for playlist_item in playlistitems_list_response["items"]:
title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
if string.lower() in title:
print("%s (%s)" % (title, video_id))
videos.append([title, video_id])
playlistitems_list_request = self.service.playlistItems().list_next(
playlistitems_list_request, playlistitems_list_response)
if len(videos) > 0:
return videos
else:
return False
def build_resource(self, properties):
"""
# Build a resource based on a list of properties given as key-value pairs.
# Leave properties with empty values out of the inserted resource.
"""
resource = {}
for p in properties:
# Given a key like "snippet.title", split into "snippet" and "title", where
# "snippet" will be an object and "title" will be a property in that object.
prop_array = p.split('.')
ref = resource
for pa in range(0, len(prop_array)):
is_array = False
key = prop_array[pa]
# For properties that have array values, convert a name like
# "snippet.tags[]" to snippet.tags, and set a flag to handle
# the value as an array.
if key[-2:] == '[]':
key = key[0:len(key)-2:]
is_array = True
if pa == (len(prop_array) - 1):
# Leave properties without values out of inserted resource.
if properties[p]:
if is_array:
ref[key] = properties[p].split(',')
else:
ref[key] = properties[p]
elif key not in ref:
# For example, the property is "snippet.title", but the resource does
# not yet have a "snippet" object. Create the snippet object here.
# Setting "ref = ref[key]" means that in the next time through the
# "for pa in range ..." loop, we will be setting a property in the
# resource's "snippet" object.
ref[key] = {}
ref = ref[key]
else:
# For example, the property is "snippet.description", and the resource
# already has a "snippet" object.
ref = ref[key]
return resource
def upload_video(self, options):
"""
Takes a dictionary of all video details e.g
{
"file":"/home/kyle.kirkby/Documents/Marketing/Connect/YVR18/videos/yvr18-100k.mp4",
"title": "YVR18-100k: Opening Keynote by <NAME>",
"description": "The Opening Keynote by <NAME> at Linaro Connect Vancouver 2018",
"keywords": "Keynote,yvr18,Open Source,Arm, Vancouver",
"category": "28",
"privacyStatus": "private"
}
"""
request = self.get_upload_request(options)
# Output Details while uploading
video_id = self.resumable_upload(request, options["title"])
return video_id
def add_video_to_playlist(self, playlistId, videoId):
"""Adds a video(videoId) to a playlist(playlistId)"""
# Create the body of the request
bodyData = {'snippet.playlistId': playlistId,
'snippet.resourceId.kind': 'youtube#video',
'snippet.resourceId.videoId': videoId,
'snippet.position': ''}
resource = self.build_resource(bodyData)
add_to_playlist = self.service.playlistItems().insert(
body=resource,
part='snippet'
).execute()
return add_to_playlist
def set_custom_thumbnail(self, local_thumbnail_url, video_id):
"""
Sets the specified custom thumbnail for a given video(video_id)
"""
request = self.service.thumbnails().set(
videoId=video_id,
media_body=MediaFileUpload(local_thumbnail_url)
)
response = request.execute()
return response
def get_upload_request(self, options):
"""
Create the request to initialize the upload of a video.
Takes a service and a dictionary containing the various options
"""
# Get the Youtube Tags from the keywords
tags = None
try:
if options["tags"]:
tags = options["tags"].split(',')
except Exception as e:
tags = []
# Create the body of the request
body = dict(
snippet=dict(
title=options["title"][0:70],
description=options["description"],
tags=tags,
categoryId=28
),
status=dict(
privacyStatus=options["privacyStatus"]
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = self.service.videos().insert(
part=','.join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting 'chunksize' equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(
options["file"], chunksize=-1, resumable=True)
)
return insert_request
def resumable_upload(self, request, title):
"""
Creates a resumable upload
"""
response = None
error = None
retry = 0
while response is None:
try:
print("Uploading {0} file...".format(title))
status, response = request.next_chunk()
if response is not None:
if 'id' in response:
print("Video id '%s' was successfully uploaded." %
response['id'])
video_id = response['id']
return video_id
else:
exit(
"The upload failed with an unexpected response: %s" % response)
except HttpError as e:
if e.resp.status in self.RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except self.RETRIABLE_EXCEPTIONS as e:
error = "A retriable error occurred: %s" % e
if error is not None:
print(error)
retry += 1
if retry > self.MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print("Sleeping %f seconds and then retrying..." %
sleep_seconds)
time.sleep(sleep_seconds)
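# Worked example (not part of the original module): what build_resource does
# with dotted keys, on made-up values. Keys like "snippet.resourceId.videoId"
# become nested dicts, empty values are dropped, and "[]"-suffixed keys are
# split on commas into lists. __new__ is used only to skip the OAuth setup in
# __init__, since build_resource needs no instance state.
_uploader = ConnectYoutubeUploader.__new__(ConnectYoutubeUploader)
print(_uploader.build_resource({
    "snippet.playlistId": "PL123",
    "snippet.resourceId.kind": "youtube#video",
    "snippet.resourceId.videoId": "abc123",
    "snippet.position": "",
    "snippet.tags[]": "keynote,connect",
}))
# -> {'snippet': {'playlistId': 'PL123', 'resourceId': {'kind': 'youtube#video',
#    'videoId': 'abc123'}, 'tags': ['keynote', 'connect']}}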
| 2.515625
| 3
|
koku/providers/azure/client.py
|
rubik-ai/koku
| 157
|
12779586
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Azure Client Configuration."""
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.costmanagement import CostManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage.blob import BlobServiceClient
from msrestazure.azure_cloud import AZURE_CHINA_CLOUD
from msrestazure.azure_cloud import AZURE_GERMAN_CLOUD
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
from msrestazure.azure_cloud import AZURE_US_GOV_CLOUD
class AzureClientFactory:
"""Azure client factory.
This class holds the Azure credentials and can create Service Clients for
querying the Azure Service APIs.
Args:
subscription_id (str): Subscription ID
tenant_id (str): Tenant ID for your Azure Subscription
client_id (str): Service Principal Application ID
client_secret (str): Service Principal Password
cloud (str): Cloud selector, must be one of ['china', 'germany', 'public', 'usgov']
"""
def __init__(self, subscription_id, tenant_id, client_id, client_secret, cloud="public"):
"""Constructor."""
self._subscription_id = subscription_id
clouds = {
"china": AZURE_CHINA_CLOUD,
"germany": AZURE_GERMAN_CLOUD,
"public": AZURE_PUBLIC_CLOUD,
"usgov": AZURE_US_GOV_CLOUD,
}
self._credentials = ServicePrincipalCredentials(
            client_id=client_id, secret=client_secret, tenant=tenant_id, cloud_environment=clouds.get(cloud, AZURE_PUBLIC_CLOUD)
)
@property
def credentials(self):
"""Service Principal Credentials property."""
return self._credentials
@property
def cost_management_client(self):
"""Get cost management client with subscription and credentials."""
return CostManagementClient(self.credentials, self.subscription_id)
@property
def resource_client(self):
"""Return a resource client."""
return ResourceManagementClient(self.credentials, self.subscription_id)
@property
def storage_client(self):
"""Get storage client with subscription and credentials."""
return StorageManagementClient(self.credentials, self.subscription_id)
@property
def subscription_id(self):
"""Subscription ID property."""
return self._subscription_id
def cloud_storage_account(self, resource_group_name, storage_account_name):
"""Get a BlobServiceClient."""
storage_account_keys = self.storage_client.storage_accounts.list_keys(
resource_group_name, storage_account_name
)
# Add check for keys and a get value
key = storage_account_keys.keys[0]
connect_str = (
f"DefaultEndpointsProtocol=https;"
f"AccountName={storage_account_name};"
f"AccountKey={key.value};"
f"EndpointSuffix=core.windows.net"
)
return BlobServiceClient.from_connection_string(connect_str)
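# Usage sketch (not part of the original module); all identifiers below are
# placeholders, not real credentials. The factory wraps
# ServicePrincipalCredentials for the management clients, and
# cloud_storage_account() builds a standard blob connection string of the form
# "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net".
# factory = AzureClientFactory(
#     subscription_id="00000000-0000-0000-0000-000000000000",
#     tenant_id="11111111-1111-1111-1111-111111111111",
#     client_id="22222222-2222-2222-2222-222222222222",
#     client_secret="not-a-real-secret",
#     cloud="public",
# )
# blob_service = factory.cloud_storage_account("my-resource-group", "mystorageaccount")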
| 2.1875
| 2
|
year_2020/day23/test_day23.py
|
mjalkio/advent-of-code
| 0
|
12779587
|
import pytest
from year_2020.day23.crab_cups import get_crab_cups
TEST_INPUT = "389125467"
@pytest.mark.parametrize("num_moves, expected", [(10, "92658374"), (100, "67384529")])
def test_get_crab_cups(num_moves, expected):
assert get_crab_cups(TEST_INPUT, num_moves=num_moves) == expected
@pytest.mark.slow
def test_get_crab_cups_part_two():
assert (
get_crab_cups(TEST_INPUT, num_moves=10_000_000, is_part_two=True)
== 149245887792
)
| 2.65625
| 3
|
meetings/osf_oauth2_adapter/adapter.py
|
CenterForOpenScience/osf-meetings
| 0
|
12779588
|
from allauth.account.adapter import DefaultAccountAdapter
from django.conf import settings
import urlparse
class OsfMeetingsAdapter(DefaultAccountAdapter):
def get_login_redirect_url(self, request):
try:
refererUrl = request.environ['HTTP_REFERER']
nextUrl = urlparse.parse_qs(
urlparse.urlparse(refererUrl).query)['next'][0]
return nextUrl
except KeyError:
return settings.OSF_MEETINGS_HOME_URL
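# Worked example (not part of the original adapter): the referer parsing above,
# shown on a made-up URL. A missing referer or a missing "next" parameter
# raises KeyError and falls back to settings.OSF_MEETINGS_HOME_URL.
_referer = "https://example.org/accounts/login/?next=/conferences/"
print(urlparse.parse_qs(urlparse.urlparse(_referer).query)["next"][0])  # /conferences/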
| 1.757813
| 2
|
django_digest/test/__init__.py
|
gushil/django-digest
| 5
|
12779589
|
<reponame>gushil/django-digest
from __future__ import absolute_import
from __future__ import unicode_literals
import django.test
from django_digest.test.methods.basic import BasicAuth
from django_digest.test.methods.detect import DetectAuth
from django_digest.test.methods.digest import DigestAuth
class Client(django.test.Client):
AUTH_METHODS = {'Basic': BasicAuth,
'Digest': DigestAuth}
def __init__(self, *args, **kwargs):
super(Client, self).__init__(*args, **kwargs)
self.clear_authorization()
def request(self, **request):
if self.auth_method:
request.update(self.auth_method(request))
# This payload object can only be read once. Since digest auth involves
# two requests, refresh it for the second "request"
payload = None
if 'wsgi.input' in request:
payload = request['wsgi.input'].read()
request['wsgi.input'] = django.test.client.FakePayload(payload)
response = super(Client, self).request(**request)
if response.status_code == 401 and self.auth_method:
# Try to authenticate
request.update(self.auth_method(request, response))
if payload is not None:
request['wsgi.input'] = django.test.client.FakePayload(payload)
response = super(Client, self).request(**request)
return response
def set_authorization(self, username, password, method=None):
self.username = username
self.password = password
if method is None:
self.auth_method = DetectAuth(client=self,
username=username,
password=password)
else:
self.auth_method = self.AUTH_METHODS[method](username=username,
password=password)
def clear_authorization(self):
self.username = None
self.password = None
self.auth_method = None
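# Usage sketch (not part of the original module); URL and credentials below are
# placeholders. With no method given, DetectAuth lets the 401 challenge decide
# between Basic and Digest before the request is replayed with the matching
# Authorization header and a refreshed payload.
# client = Client()
# client.set_authorization("alice", "s3cret")             # detect from the challenge
# client.set_authorization("alice", "s3cret", "Digest")   # or force a method
# response = client.get("/protected/")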
| 2.359375
| 2
|
Deeplabv3_Ensemble/model/backbone/__init__.py
|
jackyjsy/CVPR21Chal-Agrivision
| 5
|
12779590
|
<filename>Deeplabv3_Ensemble/model/backbone/__init__.py
from . import resnet, xception, drn, mobilenet
from .efficientnet import EfficientNet
from .sw import backbones
def build_backbone(backbone, output_stride, ibn_mode, BatchNorm):
if backbone == 'resnet50':
return resnet.ResNet50(output_stride, ibn_mode)
if backbone == 'resnet101':
return resnet.ResNet101(output_stride, ibn_mode)
elif backbone == 'xception':
return xception.AlignedXception(output_stride, BatchNorm)
elif backbone == 'drn':
return drn.drn_d_54(BatchNorm)
elif backbone == 'mobilenet':
return mobilenet.MobileNetV2(output_stride, BatchNorm)
elif backbone == 'efficientnet-b0':
return EfficientNet.from_pretrained('efficientnet-b0')
elif backbone == 'efficientnet-b6':
return EfficientNet.from_pretrained('efficientnet-b6')
elif backbone == 'sw-resnet101':
return backbones.ResNet101(output_stride,
sw_cfg = dict(type='SW',
sw_type=2,
num_pergroup=16,
T=5,
tie_weight=False,
momentum=0.9,
affine=True))
else:
raise NotImplementedError
| 1.90625
| 2
|
build_scene.py
|
hengwang322/small_scale_PV_data_viz
| 1
|
12779591
|
"""
This script is written and tested in Blender 2.83.1 & BlenderGIS 1.0
"""
import bpy, bmesh, json, os, re
from pathlib import Path
def load_data(data_file):
with open(data_file) as f:
data = json.load(f)
f.close()
return data
def clean_mesh(obj_name):
    # Clean up the mesh by deleting some rogue vertices
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
obj = bpy.data.objects[obj_name]
me = obj.data
wm = obj.matrix_world
bpy.context.view_layer.objects.active = obj
bm = bmesh.from_edit_mesh(me)
bm.select_mode = {'VERT'}
for v in bm.verts:
global_v = wm @ v.co # calculate global coordinates for the vertex
v.select = ( global_v.x < -20 and global_v.y <-16)
bm.select_flush_mode()
me.update()
bpy.ops.mesh.delete()
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bpy.ops.object.origin_set(type='ORIGIN_CENTER_OF_MASS', center='MEDIAN')
def add_geo_obj(shp_file):
# the objects & names in collection 'geo' will be referenced throughout the script
try:
bpy.ops.importgis.shapefile(filepath=shp_file,
fieldExtrudeName="base",
extrusionAxis='Z',
separateObjects=True,
fieldObjName="postcode"
)
except AttributeError:
print("Cannot seem to find Blender GIS addon. Make sure it's installed and enabled.")
for obj in bpy.data.collections['geo'].all_objects:
clean_mesh(obj.name)
bpy.ops.object.select_all(action='DESELECT')
bpy.data.objects['59'].select_set(True)
bpy.ops.object.delete()
def add_material(obj_name):
gradient_color0 = (0.05,0.05,0.05,1) # dark grey
    gradient_color1 = (0.1,2,0,1) # green; also controls emission strength, which is why green is > 1
bpy.context.view_layer.objects.active = bpy.data.objects[obj_name]
obj = bpy.data.objects[obj_name]
bpy.context.view_layer.objects.active = obj
mat = bpy.data.materials.new(name=obj_name)
obj.data.materials.append(mat)
mat.use_nodes = True
bpy.context.object.active_material.blend_method = 'BLEND'
nodes = mat.node_tree.nodes
links = mat.node_tree.links
output = nodes.get('Material Output')
output.location = (300,0)
bsdf = nodes.get('Principled BSDF')
bsdf.location = (0,0)
bsdf.inputs[18].default_value = 0.5 # alpha
bsdf.inputs[15].default_value = 1.0 # transmission
links.new(bsdf.outputs[0],output.inputs[0]) # BSDF to material surface
    # add color ramp as input for the main shader to get a color gradient
color_ramp = nodes.new("ShaderNodeValToRGB")
color_ramp.location = (-300,0)
links.new(color_ramp.outputs[0],bsdf.inputs[0]) # color ramp to base color
links.new(color_ramp.outputs[0],bsdf.inputs[17]) # color ramp to emission color/strength
color_ramp.color_ramp.elements[0].color = gradient_color0
color_ramp.color_ramp.elements[1].color = gradient_color1
# the value node will be used for inserting keyframes
color_v = nodes.new("ShaderNodeValue")
color_v.location = (-600,0)
links.new(color_v.outputs[0],color_ramp.inputs[0]) # value node to ramp's color
def add_material_all(collection):
for obj in bpy.data.collections[collection].all_objects:
add_material(obj.name)
def add_shape_key(obj_name,max_height):
obj = bpy.data.objects[obj_name]
me = obj.data
bpy.context.view_layer.objects.active = obj
bpy.ops.object.shape_key_add(from_mix=False) # Base Key
bpy.ops.object.shape_key_add(from_mix=False) # Key 1
bpy.context.object.active_shape_key_index = 1
bpy.data.shape_keys["Key"].name = obj_name
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
bm = bmesh.from_edit_mesh(me)
bm.select_mode = {'VERT'}
for v in bm.verts:
if v.co.z > 0: #since the base is at 0, this will effectively select top faces
v.co.z = max_height
bm.select_flush_mode()
me.update()
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
def add_shape_key_all(max_height,collection):
for obj in bpy.data.collections[collection].all_objects:
add_shape_key(obj.name,max_height=max_height)
def animate_obj_all(frame_step,data):
data_len = len(data['all']['date'])
bpy.context.scene.frame_end = data_len*frame_step
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bpy.context.view_layer.objects.active = None
for keyframe_id in range(data_len):
for obj in bpy.data.collections['geo'].all_objects:
height = data[obj.name[:2]]['height'][keyframe_id]
# height values are scaled between 0 & 1, and are used for inserting keyframes
shapekey = bpy.data.shape_keys[obj.name].key_blocks["Key 1"]
shapekey.value = height
shapekey.keyframe_insert(data_path="value", frame=frame_step*keyframe_id)
def animate_material_all(frame_step,data):
data_len = len(data['all']['date'])
bpy.context.scene.frame_end = data_len*frame_step
for keyframe_id in range(data_len):
for mat in bpy.data.materials:
if mat.name in [obj.name for obj in bpy.data.collections['geo'].all_objects]:
color = data[mat.name]['color'][keyframe_id]
color_value = mat.node_tree.nodes["Value"].outputs[0]
color_value.default_value = color
color_value.keyframe_insert('default_value',frame=frame_step*keyframe_id)
def add_camera(lens):
cam = bpy.data.cameras.new("Camera")
cam.lens = lens
cam_obj = bpy.data.objects.new("Camera", cam)
bpy.context.scene.collection.objects.link(cam_obj)
def animate_camera(frame_step,data):
data_len = len(data['all']['date'])
camera = bpy.data.objects['Camera']
# pan down camera a bit at first, then a lot in the end
camera.location = (0,4,40)
camera.rotation_euler = (0,0,0)
camera.keyframe_insert(data_path="location", frame=0)
camera.keyframe_insert(data_path="rotation_euler", frame=0)
camera.location = (0,-4.6,40.17)
camera.rotation_euler = (0.175,0,0)
camera.keyframe_insert(data_path="location", frame=int(frame_step*data_len*0.5))
camera.keyframe_insert(data_path="rotation_euler", frame=int(frame_step*data_len*0.5))
camera.location = (0,-19.25,30.57)
camera.rotation_euler = (0.534,0,0)
camera.keyframe_insert(data_path="location", frame=int(frame_step*data_len*0.75))
camera.keyframe_insert(data_path="rotation_euler", frame=int(frame_step*data_len*0.75))
camera.location = (0,-22.69,24.64)
camera.rotation_euler = (0.698,0,0)
camera.keyframe_insert(data_path="location", frame=int(frame_step*data_len))
camera.keyframe_insert(data_path="rotation_euler", frame=int(frame_step*data_len))
def add_bg_plane(size):
# Adds a highly reflective background plane
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bpy.ops.mesh.primitive_plane_add(size=size,enter_editmode=False,location=(0,0,0))
plane_mat = bpy.data.materials.new(name='plane_mat')
plane_mat.use_nodes = True
output = plane_mat.node_tree.nodes.get('Material Output')
bsdf = plane_mat.node_tree.nodes.get('Principled BSDF')
bsdf.inputs[0].default_value = (0.01,0.01,0.01,1) # base color, black
bsdf.inputs[7].default_value = 0.1 # roughness
plane_mat.node_tree.links.new(bsdf.outputs[0],output.inputs[0]) # bsdf to material surface
bpy.data.objects['Plane'].data.materials.append(plane_mat)
def animate_text(font_path,frame_step,data):
title_loc = (-38.9,24.5,0)
cap1_loc = (-29.784,-9.944,0)
cap2_loc = (-0.6316,-13.728,0)
cap3_loc = (-22.052,-15.814,0)
cap4_loc = (-3.2,-15.914,0)
foot_loc = (-30.4412,-16.75,0)
data_len=len(data['all']['date'])
title_curve = bpy.data.curves.new(type="FONT",name="title curve")
title_curve.extrude = 0.01
title_curve.font = bpy.data.fonts.load(font_file)
title_curve.body = f"""
Growth of Small-scale Solar PVs in Australia
Quantity & Output by Postcode"""
title_obj = bpy.data.objects.new("title", title_curve)
bpy.context.scene.collection.objects.link(title_obj)
title_obj.location = title_loc
title_obj.scale = (2,2,2)
footnote_curve = bpy.data.curves.new(type="FONT",name="footnote curve")
footnote_curve.extrude = 0.01
footnote_curve.font = bpy.data.fonts.load(font_file)
footnote_curve.body = f"""
Height represents install quantity, color represents output. Data Source: Clean Energy Regulator
"""
footnote_obj = bpy.data.objects.new("footnote", footnote_curve)
bpy.context.scene.collection.objects.link(footnote_obj)
footnote_obj.location = foot_loc
footnote_obj.scale = (0.7,0.7,0.7)
caption1_curve = bpy.data.curves.new(type="FONT",name="caption1")
caption1_curve.extrude = 0.01
caption1_curve.font = bpy.data.fonts.load(font_file)
caption1_curve.space_line = 1.6
caption1_obj = bpy.data.objects.new("caption1", caption1_curve)
bpy.context.scene.collection.objects.link(caption1_obj)
caption1_obj.location = cap1_loc
caption1_obj.scale = (1.1,1.2,1.2)
caption2_curve = bpy.data.curves.new(type="FONT",name="caption2")
caption2_curve.extrude = 0.01
caption2_curve.font = bpy.data.fonts.load(font_file)
caption2_obj = bpy.data.objects.new("caption2", caption2_curve)
bpy.context.scene.collection.objects.link(caption2_obj)
caption2_obj.location = cap2_loc
caption2_obj.scale = (2,2.2,2.2)
caption3_curve = bpy.data.curves.new(type="FONT",name="caption3")
caption3_curve.extrude = 0.01
caption3_curve.font = bpy.data.fonts.load(font_file)
caption3_curve.body = """Raising the total power output to"""
caption3_obj = bpy.data.objects.new("caption3", caption3_curve)
bpy.context.scene.collection.objects.link(caption3_obj)
caption3_obj.location = cap3_loc
caption3_obj.scale = (1.1,1.2,1.2)
caption4_curve = bpy.data.curves.new(type="FONT",name="caption4")
caption4_curve.extrude = 0.01
caption4_curve.font = bpy.data.fonts.load(font_file)
caption4_obj = bpy.data.objects.new("caption4", caption4_curve)
bpy.context.scene.collection.objects.link(caption4_obj)
caption4_obj.location = cap4_loc
caption4_obj.scale = (2,2.2,2.2)
# add white static material
font_mat = bpy.data.materials.new(name='font_mat')
font_mat.use_nodes = True
output = font_mat.node_tree.nodes.get('Material Output')
bsdf = font_mat.node_tree.nodes.get('Principled BSDF')
bsdf.inputs[17].default_value = (2,2,2,1) # emission color/strength
font_mat.node_tree.links.new(bsdf.outputs[0],output.inputs[0]) # bsdf to material surface
bpy.data.objects['title'].data.materials.append(font_mat)
bpy.data.objects['caption1'].data.materials.append(font_mat)
bpy.data.objects['footnote'].data.materials.append(font_mat)
# add green animated material
font_green_mat = bpy.data.materials.new(name='font_green_mat')
font_green_mat.use_nodes = True
output_green = font_green_mat.node_tree.nodes.get('Material Output')
bsdf_green = font_green_mat.node_tree.nodes.get('Principled BSDF')
font_green_mat.node_tree.links.new(bsdf_green.outputs[0],output_green.inputs[0]) # bsdf to material surface
color_ramp_font = font_green_mat.node_tree.nodes.new("ShaderNodeValToRGB")
color_ramp_font.location = (-300,0)
font_green_mat.node_tree.links.new(color_ramp_font.outputs[0],bsdf_green.inputs[0]) # ramp to base color
font_green_mat.node_tree.links.new(color_ramp_font.outputs[0],bsdf_green.inputs[17]) # ramp to emission color/strength
color_ramp_font.color_ramp.elements[0].color = (2,2,2,1) # white
color_ramp_font.color_ramp.elements[1].color = (0.1,2,0,1) # green
color_v_font = font_green_mat.node_tree.nodes.new("ShaderNodeValue")
color_v_font.location = (-600,0)
font_green_mat.node_tree.links.new(color_v_font.outputs[0],color_ramp_font.inputs[0]) # value to ramp's color
bpy.data.objects['title'].data.materials.append(font_mat)
bpy.data.objects['caption1'].data.materials.append(font_mat)
bpy.data.objects['caption3'].data.materials.append(font_mat)
bpy.data.objects['footnote'].data.materials.append(font_mat)
bpy.data.objects['caption2'].data.materials.append(font_green_mat)
bpy.data.objects['caption4'].data.materials.append(font_green_mat)
    # animate the green text: it turns green linearly over the animation
mat_green = bpy.data.materials["font_green_mat"]
color_value = mat_green.node_tree.nodes["Value"].outputs[0]
color_value.default_value = 0
color_value.keyframe_insert('default_value',frame=0)
color_value.default_value = 0.95
color_value.keyframe_insert('default_value',frame=frame_step*data_len)
# update text with frames
def update(self):
caption1 = bpy.data.objects['caption1']
caption2 = bpy.data.objects['caption2']
caption4 = bpy.data.objects['caption4']
frame = bpy.context.scene.frame_current
data_index = int(frame/frame_step)
caption1.location = cap1_loc
caption1.data.body = \
f"""
By {data['all']['date'][data_index]}
The quantity of solar PVs has grown to
"""
caption2.location = cap2_loc
caption2.data.body = f"""{data['all']['install'][data_index]}"""
caption4.location = cap4_loc
caption4.data.body = f"""{data['all']['output'][data_index]} MW"""
if bpy.context.scene.frame_current in range(frame_step*data_len):
bpy.app.handlers.frame_change_post.append(update)
def build_scene(data_file,shp_file,font_file,frame_step,max_height):
data = load_data(data_file=data_file)
# Start scene by deleting all objects
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# Add all objects & animate
add_geo_obj(shp_file=shp_file)
add_material_all(collection='geo')
add_shape_key_all(max_height,collection='geo')
animate_obj_all(frame_step,data)
add_material_all(collection='geo')
animate_material_all(frame_step,data)
add_camera(lens=18)
animate_camera(frame_step,data)
add_bg_plane(size=500)
animate_text(font_file,frame_step,data)
def update_render_setting():
# Tweak the rendering settings
bpy.context.scene.frame_start = 0
bpy.context.scene.render.engine = 'CYCLES'
bpy.context.scene.cycles.use_adaptive_sampling = True
bpy.context.scene.cycles.adaptive_threshold = 0.001
bpy.context.scene.cycles.use_animated_seed = True
bpy.context.scene.cycles.samples = 850
bpy.context.scene.cycles.sample_clamp_direct = 0.2
bpy.context.scene.cycles.sample_clamp_indirect = 10
bpy.context.scene.cycles.blur_glossy = 5
bpy.context.scene.cycles.max_bounces = 4
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.render.image_settings.color_depth = '16'
bpy.context.scene.render.tile_x = 256
bpy.context.scene.render.tile_y = 256
scene = bpy.data.scenes['Scene'].view_layers['View Layer']
scene.cycles.use_denoising = True
# Setup GPU
scene = bpy.context.scene
scene.cycles.device = 'GPU'
prefs = bpy.context.preferences
prefs.addons['cycles'].preferences.get_devices()
cprefs = prefs.addons['cycles'].preferences
print(cprefs)
# Attempt to set GPU device types if available
for compute_device_type in ('CUDA', 'OPENCL', 'NONE'):
try:
cprefs.compute_device_type = compute_device_type
print('Device found',compute_device_type)
break
except TypeError:
pass
# Enable all CPU and GPU devices
for device in cprefs.devices:
if not re.match('intel', device.name, re.I):
print('Activating',device)
device.use = True
if __name__ == '__main__':
frame_step = 4 # the steps between keyframes
max_height = 6
current_dir = Path(bpy.data.filepath).parent # this is where your blend file is saved
data_file = os.path.join(current_dir,'data.json')
shp_file = os.path.join(current_dir,'geo.shp')
# Download the free font at design.ubuntu.com/font/
font_file = os.path.join(current_dir.parent,'resource','UbuntuMono-Regular.ttf')
build_scene(data_file,shp_file,font_file,frame_step,max_height)
update_render_setting()
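# Inferred data sketch (not part of the original script): the shape of
# data.json as consumed above, with made-up values. An "all" summary holds the
# per-frame date/install/output lists used by the captions, plus one entry per
# two-character postcode prefix whose "height" and "color" lists are already
# scaled to [0, 1] for the shape keys and the color ramp.
_example_data = {
    "all": {"date": ["Jan 2010", "Feb 2010"], "install": ["1,234", "2,345"], "output": [3.2, 6.8]},
    "20": {"height": [0.01, 0.02], "color": [0.05, 0.07]},
    "30": {"height": [0.02, 0.03], "color": [0.06, 0.09]},
}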
| 2.109375
| 2
|
workspace/models/GLM/model.py
|
rynpssss/data_science
| 0
|
12779592
|
# encoding utf-8
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.tools import eval_measures
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
class Preprocessing:
def __init__(self, data_raw):
self.data_clean = data_raw
def run(self):
        # EDA showed outliers among the cheaper properties;
        # exclude the bottom 1% for now (not arbitrary, but not exact either)
THRESHOLD = 0.01
self.exclude_outlier(THRESHOLD)
# 上記以外の明らかな外れ値
self.exclude_idx([524, 1299])
        # bring the target closer to a normal distribution to improve the linear regression fit
self.convert_log(["SalePrice"])
        # remove multicollinearity
self.create_adding_column("AllSF", ["GrLivArea", "TotalBsmtSF"])
self.create_adding_column("AllFlrsSF", ["1stFlrSF", "2ndFlrSF"])
def exclude_outlier(self, THRESHOLD):
low_row = round(self.data_clean.shape[0] * THRESHOLD)
low_ids = self.data_clean.iloc[:low_row]
low_ids = list(low_ids['Id'].unique())
self.data_clean = self.data_clean.query("Id not in @low_ids")
def exclude_idx(self, ids):
self.data_clean = self.data_clean.query("Id not in @ids")
def convert_log(self, columns):
for c in columns:
self.data_clean[c] = self.data_clean[c].apply(lambda x: np.log(x))
def create_adding_column(self, create, adding):
c1, c2 = adding
self.data_clean[create] = self.data_clean[c1] + self.data_clean[c2]
class Glm:
def __init__(self, preprocessing, X_columns, y_column):
self.X = preprocessing.data_clean[X_columns]
self.y = preprocessing.data_clean[y_column]
def fit(self):
        TRAIN_SIZE = 0.8  # anything >= 0.7 is fine
        RANDOM_STATE = 0  # not tuned
x_train, x_test, y_train, y_test = \
self.train_test_split(TRAIN_SIZE, RANDOM_STATE)
x_train, x_test = self.normalization(x_train, x_test)
self.model = sm.OLS(y_train, sm.add_constant(x_train))
self.model = self.model.fit()
def train_test_split(self, TRAIN_SIZE, RANDOM_STATE):
x_train, x_test, y_train, y_test = train_test_split(self.X, self.y,
train_size=TRAIN_SIZE,
random_state=RANDOM_STATE)
return x_train, x_test, y_train, y_test
def normalization(self, x_train, x_test):
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
return x_train, x_test
def write_summary(self, write_path):
with open(write_path, "w") as f:
f.write(str(self.model.summary()))
def main():
data_raw = pd.read_csv("./../../data/house_prices/train.csv")
preprocessing = Preprocessing(data_raw)
preprocessing.run()
X_columns = ["OverallQual", "GarageArea", "YearBuilt", "AllSF",
"AllFlrsSF", "YearRemodAdd", "OverallCond"]
y_column = ["SalePrice"]
model = Glm(preprocessing, X_columns, y_column)
model.fit()
model.write_summary("./GLM_summary.txt")
if __name__ == "__main__":
main()
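# Minimal sketch (not part of the original model code): the statsmodels call
# used inside Glm.fit, on made-up perfectly linear data; the fitted params
# recover the intercept and slope (approximately [1., 2.]).
_X = np.arange(10, dtype=float).reshape(-1, 1)
_y = 2.0 * _X.ravel() + 1.0
print(sm.OLS(_y, sm.add_constant(_X)).fit().params)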
| 2.703125
| 3
|
hdrpy/tmo/linear.py
|
popura/hdrpy
| 5
|
12779593
|
<filename>hdrpy/tmo/linear.py
from typing import Union, Optional
import numpy as np
from hdrpy.stats import gmean
from hdrpy.image import get_luminance
from hdrpy.tmo import ColorProcessing, LuminanceProcessing
def multiply_scalar(
intensity: np.ndarray,
factor: float,
nan_sub: Optional[float] = 0,
inf_sub: Union[Optional[float], str] = "Inf",
minus_sub: Optional[float] = 0) -> np.ndarray:
"""Compensates exposure of given image.
Args:
intensity: input array
factor: scale ratio
nan_sub: value for substituting NaN in `intensity`
inf_sub: value for substituting Inf in `intensity`
minus_sub: value for substituting negative values in `intensity`
Returns:
multiplied image
Raise:
ValueError
>>> image = np.ones((5, 5))
>>> multiply_scalar(image, factor=2)
array([[ 2., 2., 2., 2., 2.],
[ 2., 2., 2., 2., 2.],
[ 2., 2., 2., 2., 2.],
[ 2., 2., 2., 2., 2.],
[ 2., 2., 2., 2., 2.]])
>>> multiply_scalar(image, factor=0.5)
array([[ 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5]])
>>> image = np.random.randn(5, 5)
>>> image = multiply_scalar(image, factor=2, minus_sub=0)
>>> np.all(image >= 0)
True
"""
new_intensity = intensity * factor
if nan_sub is not None:
new_intensity[np.isnan(new_intensity)] = nan_sub
if inf_sub is None:
pass
elif isinstance(inf_sub, str) and inf_sub.lower() == "Inf".lower():
new_intensity[np.isinf(new_intensity)] = np.finfo(np.float32).max
elif isinstance(inf_sub, float):
new_intensity[np.isinf(new_intensity)] = inf_sub
else:
raise ValueError()
if minus_sub is not None:
new_intensity[new_intensity <= 0] = minus_sub
return new_intensity
class ExposureCompensation(ColorProcessing, LuminanceProcessing):
"""Adjusts exposure, i.e., brightness, of images based on geometric mean.
Attributes:
ev: target exposure value
eps: small value for stability
Examples:
>>> import hdrpy
>>> hdr = hdrpy.io.read("./data/CandleGlass.exr")
>>> luminance = hdrpy.image.get_luminance(hdr)
>>> f = ExposureCompensation()
>>> new_luminance = f(luminance)
>>> np.isclose(gmean(new_luminance, axis=None), 0.18, atol=1e-3)
True
>>> f = ExposureCompensation(ev=1)
>>> new_luminance = f(luminance)
>>> np.isclose(gmean(new_luminance, axis=None), 0.36, atol=1e-3)
True
>>> f = ExposureCompensation(ev=0)
>>> ldr = f(hdr)
>>> hdrpy.io.write("./data/CandleGlass.jpg", ldr)
"""
def __init__(
self,
ev: Optional[float] = 0,
eps: float = 1e-6) -> None:
super().__init__()
self.ev = ev
self.eps = eps
def __call__(self, image: np.ndarray) -> np.ndarray:
"""Adjusts exposure, i.e., brightness, of images based on geometric mean.
Args:
image: image with a size of (H, W, 3) or
luminance with a size of (H, W)
Returns:
New image or luminance with the same size of the input
"""
g = gmean(image, eps=self.eps)
factor = self.ev2gmean(self.ev) / g
return multiply_scalar(image, factor)
@staticmethod
def ev2gmean(ev: float) -> float:
"""Calculates the geometric mean of an image
        having a relative exposure value of `ev`,
        where 0 EV means that the geometric mean of an image is 0.18
        Args:
            ev: a relative exposure value
Returns:
The geometric mean
Examples:
>>> ExposureCompensation.ev2gmean(0)
0.18
>>> ExposureCompensation.ev2gmean(2)
0.72
>>> ExposureCompensation.ev2gmean(-1)
0.09
"""
return 0.18 * (2**ev)
@staticmethod
    def gmean2ev(g: float) -> float:
        """Calculates a relative exposure value of an image
        whose geometric mean of luminance is `g`,
        where 0 EV means that the geometric mean is 0.18
        Args:
            g: the geometric mean of luminance
        Returns:
            The relative exposure value
Examples:
>>> ExposureCompensation.gmean2ev(0.18)
0.0
>>> ExposureCompensation.gmean2ev(0.72)
2.0
>>> ExposureCompensation.gmean2ev(0.09)
-1.0
"""
return np.log2(g / 0.18)
class NormalizeRange(ColorProcessing, LuminanceProcessing):
"""Scales an image so that
its color or luminance range is in [0, 1] range.
This processing is different from the min-max normalization
because this does not include the shift operation.
Attributes:
mode: "luminance" or "color". Default is "luminance"
Examples:
>>> f = NormalizeRange("color")
>>> image = f(np.random.randn(100, 100, 3))
>>> np.all(image <= 1)
True
"""
def __init__(self, mode: str = "luminance"):
super().__init__()
if mode.lower() not in ("luminance", "color"):
raise ValueError
        self.mode = mode.lower()
def __call__(self, image: np.ndarray) -> np.ndarray:
"""Normalizes an image luminance or color into [0, 1] range.
Args:
image: image with a size of (H, W, C)
or luminance with a size of (H, W)
Returns:
normalized image
"""
if self.mode == "luminance":
            lum = get_luminance(image)
factor = 1. / np.amax(lum)
else:
factor = 1. / np.amax(image)
return multiply_scalar(image, factor)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2.703125
| 3
|
tests/test_latest_window.py
|
omri374/fossa
| 0
|
12779594
|
<gh_stars>0
"""Tests for the LatestWindowAnomalyDetector."""
import pytest
from sklearn.exceptions import NotFittedError
from fossa import LatestWindowAnomalyDetector
from fossa.utils import dummy_data
def test_base():
num_categ = 8
clf = LatestWindowAnomalyDetector(p_threshold=0.00001)
history = dummy_data(
num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
new_day = dummy_data(
num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
clf.fit(history)
prediction = clf.predict(new_day)
assert len(prediction) == num_categ
for x in prediction.values:
assert x in [-1, 0, 1]
num_new_days = 30
many_days = dummy_data(
num_days=num_new_days, num_categories=num_categ, min_val=100,
max_val=1000)
predictions = clf.predict(many_days)
assert len(predictions) == num_categ * num_new_days
for x in predictions.values:
assert x in [-1, 0, 1]
def test_diff_categ():
num_categ_1 = 8
num_categ_2 = 7
clf = LatestWindowAnomalyDetector(p_threshold=0.00001)
history = dummy_data(
num_days=10, num_categories=num_categ_1, min_val=100, max_val=1000)
new_day = dummy_data(
num_days=1, num_categories=num_categ_2, min_val=100, max_val=1000)
clf.fit(history)
prediction = clf.predict(new_day)
assert len(prediction) == max(num_categ_1, num_categ_2)
for x in prediction.values:
assert x in [-1, 0, 1]
def test_errors():
# bad p thresholds
with pytest.raises(ValueError):
LatestWindowAnomalyDetector(p_threshold=2)
# bad p thresholds
with pytest.raises(ValueError):
LatestWindowAnomalyDetector(p_threshold=-1)
clf = LatestWindowAnomalyDetector(p_threshold=0.00001)
new_day = dummy_data(
num_days=1, num_categories=8, min_val=100, max_val=1000)
with pytest.raises(NotFittedError):
clf.predict(new_day)
def test_partial_fit():
num_categ = 8
clf = LatestWindowAnomalyDetector(p_threshold=0.00001)
history = dummy_data(
num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
recent_history = dummy_data(
num_days=6, num_categories=num_categ, min_val=100, max_val=1000)
new_day = dummy_data(
num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
clf.fit(history)
clf.partial_fit(recent_history)
prediction = clf.predict(new_day)
assert len(prediction) == num_categ
for x in prediction.values:
assert x in [-1, 0, 1]
def test_non_def_power():
num_categ = 8
clf = LatestWindowAnomalyDetector(p_threshold=0.00001, power=0)
history = dummy_data(
num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
new_day = dummy_data(
num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
clf.fit(history)
prediction = clf.predict(new_day)
assert len(prediction) == num_categ
for x in prediction.values:
assert x in [-1, 0, 1]
def test_non_def_ddof():
num_categ = 8
clf = LatestWindowAnomalyDetector(p_threshold=0.00001, power=-2, ddof=4)
history = dummy_data(
num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
new_day = dummy_data(
num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
clf.fit(history)
prediction = clf.predict(new_day)
assert len(prediction) == num_categ
for x in prediction.values:
assert x in [-1, 0, 1]
| 2.359375
| 2
|
Aulas_2/Aula45/Aula50a.py
|
Sofista23/Aula2_Python
| 0
|
12779595
|
<filename>Aulas_2/Aula45/Aula50a.py
import sqlite3
from sqlite3 import Error
# Create a connection
def conexaoBanco():
path="C:\\Users\\<NAME>\\Documents\\PROGRAMAÇÃO\\Python\\Aulas_2\\Aula45\\Agenda.db"
con=None
try:
con=sqlite3.connect(path)
except Error as ex:
print(ex)
return con
vcon=conexaoBanco()
id=int(input("ID para DELETAR: "))
vsql=f"DELETE FROM contatos WHERE id={id}"
# Deleting data
def deletar(conexao,sql):
try:
c=conexao.cursor()
c.execute(sql)
conexao.commit()
print("Dados Deletados!")
except Error as ex:
print(ex)
deletar(vcon,vsql)
vcon.close()
| 3.1875
| 3
|
stellar_sdk/signer.py
|
bantalon/py-stellar-base
| 1
|
12779596
|
from .__version__ import __issues__
from .exceptions import ValueError
from .strkey import StrKey
from .xdr import Xdr
__all__ = ["Signer"]
class Signer:
"""The :class:`Signer` object, which represents an account signer on Stellar's network.
:param signer_key: The XDR signer object
    :param weight: The weight of the signer (0 to delete or 1-255)
    """
    def __init__(self, signer_key: Xdr.types.SignerKey, weight: int) -> None:
self.signer_key: Xdr.types.SignerKey = signer_key
self.weight: int = weight
@classmethod
def ed25519_public_key(cls, account_id: str, weight: int) -> "Signer":
"""Create ED25519 PUBLIC KEY Signer from account id.
:param account_id: account id
:param weight: The weight of the signer (0 to delete or 1-255)
:return: ED25519 PUBLIC KEY Signer
:raises:
:exc:`Ed25519PublicKeyInvalidError <stellar_sdk.exceptions.Ed25519PublicKeyInvalidError>`: if ``account_id``
is not a valid ed25519 public key.
"""
signer_key = Xdr.types.SignerKey(
Xdr.const.SIGNER_KEY_TYPE_ED25519,
ed25519=StrKey.decode_ed25519_public_key(account_id),
)
return cls(signer_key, weight)
@classmethod
def pre_auth_tx(cls, pre_auth_tx_hash: bytes, weight: int) -> "Signer":
"""Create Pre AUTH TX Signer from the sha256 hash of a transaction,
click `here <https://www.stellar.org/developers/guides/concepts/multi-sig.html#pre-authorized-transaction>`__ for more information.
:param pre_auth_tx_hash: The sha256 hash of a transaction.
:param weight: The weight of the signer (0 to delete or 1-255)
:return: Pre AUTH TX Signer
"""
signer_key = Xdr.types.SignerKey(
Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX, preAuthTx=pre_auth_tx_hash
)
return cls(signer_key, weight)
@classmethod
def sha256_hash(cls, sha256_hash: bytes, weight: int) -> "Signer":
"""Create SHA256 HASH Signer from a sha256 hash of a preimage,
click `here <https://www.stellar.org/developers/guides/concepts/multi-sig.html#hashx>`__ for more information.
:param sha256_hash: a sha256 hash of a preimage
:param weight: The weight of the signer (0 to delete or 1-255)
:return: SHA256 HASH Signer
"""
signer_key = Xdr.types.SignerKey(
Xdr.const.SIGNER_KEY_TYPE_HASH_X, hashX=sha256_hash
)
return cls(signer_key, weight)
def to_xdr_object(self) -> Xdr.types.Signer:
"""Returns the xdr object for this Signer object.
:return: XDR Signer object
"""
return Xdr.types.Signer(self.signer_key, self.weight)
@classmethod
    def from_xdr_object(cls, signer_xdr_object: Xdr.types.Signer) -> "Signer":
        """Create a :class:`Signer` from an XDR Signer object.
:param signer_xdr_object: The XDR Signer object.
:return: A new :class:`Signer` object from the given XDR Signer object.
"""
weight = signer_xdr_object.weight
if signer_xdr_object.type == Xdr.const.SIGNER_KEY_TYPE_ED25519:
account_id = StrKey.encode_ed25519_public_key(signer_xdr_object.ed25519)
return cls.ed25519_public_key(account_id, weight)
elif signer_xdr_object.type == Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX:
return cls.pre_auth_tx(signer_xdr_object.preAuthTx, weight)
elif signer_xdr_object.type == Xdr.const.SIGNER_KEY_TYPE_HASH_X:
return cls.sha256_hash(signer_xdr_object.hashX, weight)
else:
            raise ValueError(
                "This is an unknown signer type, "
                "please consider creating an issue at {}.".format(__issues__)
            )
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.to_xdr_object().to_xdr() == other.to_xdr_object().to_xdr()
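# A minimal usage sketch (not part of the original module; the account id is a
# placeholder and must be replaced by a valid ed25519 public key for the strkey
# decoding to succeed):
#
#   signer = Signer.ed25519_public_key("G...valid account id...", weight=10)
#   xdr_signer = signer.to_xdr_object()
#   assert Signer.from_xdr_object(xdr_signer) == signer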
| 2.1875
| 2
|
pyplot_eps/offdiag_eps.py
|
marvinlenk/subsystem_entropy_epsplots
| 0
|
12779597
|
<filename>pyplot_eps/offdiag_eps.py
import numpy as np
import os
from mpEntropy import mpSystem
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.integrate import cumtrapz
# This is a workaround until scipy fixes the issue
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Load sysVar
sysVar = mpSystem("../interact_0.ini", plotOnly=True)
# Create plot folder
pltfolder = "./epsplots/"
if not os.path.exists(pltfolder):
os.mkdir(pltfolder)
print("Plotting", end='')
mpl.use('Agg')
# minimum and maximum times to plot
min_time = 0
max_time = 10
log_min_time = 0
log_max_time = 15
inlay_min_time = 00
inlay_max_time = 10
inlay_log_min_time = 0
inlay_log_max_time = 20
# styles and stuff
avgstyle = 'dashed'
avgsize = 0.6
expectstyle = 'solid'
expectsize = 1
legend_size = 10
font_size = 10
# https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inches
golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = [fig_width, fig_height]
# padding in units of fontsize
padding = 0.32
params = {
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'lines.linewidth': 1,
'figure.figsize': fig_size,
'legend.frameon': False,
'legend.loc': 'best',
'mathtext.default': 'rm' # see http://matplotlib.org/users/customizing.html
}
plt.rcParams['agg.path.chunksize'] = 0
plt.rcParams.update(params)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial']})
loavgpercent = sysVar.plotLoAvgPerc # percentage of time evolution to start averaging
loavgind = int(loavgpercent * sysVar.dataPoints) # index to start at when calculating average and stddev
loavgtime = np.round(loavgpercent * (sysVar.deltaT * sysVar.steps * sysVar.plotTimeScale), 2)
# stuff for averaging
if sysVar.boolPlotAverages:
print(' with averaging from Jt=%.2f' % loavgtime, end='')
fwidth = sysVar.plotSavgolFrame
ford = sysVar.plotSavgolOrder
# ## occupation number operator offdiagonals
if os.path.isfile('../data/offdiagocc.txt') or os.path.isfile('../data/offdiagonal.txt'):
# if old path - load old data
if os.path.isfile('../data/offdiagonal.txt'):
offdiagocc = np.loadtxt('../data/offdiagonal.txt')
else:
offdiagocc = np.loadtxt('../data/offdiagocc.txt')
step_array = offdiagocc[:, 0] * sysVar.plotTimeScale
# multiply step array with time scale
min_index = int(min_time / step_array[-1] * len(step_array))
max_index = int(max_time / step_array[-1] * len(step_array))
inlay_min_index = int(inlay_min_time / step_array[-1] * len(step_array))
inlay_max_index = int(inlay_max_time / step_array[-1] * len(step_array))
inlay_log_min_index = int(inlay_log_min_time / step_array[-1] * len(step_array))
inlay_log_max_index = int(inlay_log_max_time / step_array[-1] * len(step_array))
log_min_index = int(log_min_time / step_array[-1] * len(step_array))
log_max_index = int(log_max_time / step_array[-1] * len(step_array))
for i in range(0, sysVar.m):
plt.plot(step_array[min_index:max_index], offdiagocc[min_index:max_index, i + 1],
label=r'$n_' + str(i) + '$', linewidth=0.6)
plt.ylabel(r'$\sum\limits_{k\neq l} \langle E_k | n | E_l \rangle c_k^\ast c_l$')
plt.xlabel(r'$J\,t$')
plt.legend()
plt.tight_layout(padding-0.2)
plt.gca().yaxis.set_label_coords(-0.09, 0.5)
###
plt.savefig(pltfolder + 'offdiag_occupations.eps', format='eps', dpi=1000)
plt.clf()
#for i in range(0, sysVar.m):
for i in [0,2,4]:
plt.semilogy(step_array[log_min_index:log_max_index], np.abs(offdiagocc[log_min_index:log_max_index, i + 1]),
label=r'$n_' + str(i) + '$', linewidth=0.6)
plt.ylim(ymin=2e-2, ymax=9e0)
plt.legend()
plt.ylabel(r'$| \sum\limits_{k\neq l} \langle E_k | n | E_l \rangle c_k^\ast c_l |$')
plt.xlabel(r'$J\,t$')
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'offdiag_occupations_semilog.eps', format='eps', dpi=1000)
plt.clf()
dt = offdiagocc[1, 0] - offdiagocc[0, 0]
nrm = offdiagocc[:, 0] / dt
nrm[1:] = 1 / nrm[1:]
for i in range(0, sysVar.m):
# ##### only sum (subsystem-thermalization)
plt.ylabel(r'$\sum\limits_{n\neq m} n^{%i}_{n,m}$' % i)
plt.xlabel(r'$J\,t$')
plt.plot(offdiagocc[min_index:max_index, 0], offdiagocc[min_index:max_index, i + 1])
plt.tight_layout(padding + 0.2)
# ##inlay with the whole deal
a = plt.axes([0.62, 0.6, 0.28, 0.28])
a.plot(offdiagocc[inlay_min_index:inlay_max_index, 0], offdiagocc[inlay_min_index:inlay_max_index, i + 1])
a.set_xticks([])
a.set_yticks([])
###
plt.savefig(pltfolder + 'offdiag_occupation_%i.eps' % i, format='eps', dpi=1000)
plt.clf()
plt.ylabel(r'$\sum\limits_{k\neq l} n_{E_k,E_l}$')
plt.semilogy(offdiagocc[log_min_index:log_max_index, 0], np.abs(offdiagocc[log_min_index:log_max_index, i + 1]))
plt.ylim(ymin=1e-2)
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'offdiag_occupation_%i_semilog.eps' % i, format='eps', dpi=1000)
plt.clf()
# ##### average (eigenstate-thermalization)
f, (ax1, ax2) = plt.subplots(2, sharex='none', sharey='none')
tmp = cumtrapz(offdiagocc[:, i + 1], offdiagocc[:, 0], initial=offdiagocc[0, i + 1])
tmp = np.multiply(tmp, nrm)
f.text(0.07, 0.5,
r'$\frac{1}{t} \int\limits_{0}^{t} \mathop{dt^\prime} \sum\limits_{n\neq m} n_{n,m}(t^\prime)$',
ha='center', va='center', rotation='vertical')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
ax1.plot(offdiagocc[min_index:max_index, 0], tmp[min_index:max_index])
if min_index == 0:
min_index_opt = 1
else:
min_index_opt = min_index
ax2.plot(1/offdiagocc[min_index_opt:max_index, 0], np.abs(tmp[min_index_opt:max_index]))
# ax2.set_ylim(bottom=1e-4)
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'offdiag_occupation_%i_eth.eps' % i, format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
# ## density matrix offdiagonals
if os.path.isfile('../data/offdiagdens.txt'):
offdiagdens = np.loadtxt('../data/offdiagdens.txt')
step_array = offdiagdens[:, 0] * sysVar.plotTimeScale
# multiply step array with time scale
min_index = int(min_time / step_array[-1] * len(step_array))
max_index = int(max_time / step_array[-1] * len(step_array))
inlay_min_index = int(inlay_min_time / step_array[-1] * len(step_array))
inlay_max_index = int(inlay_max_time / step_array[-1] * len(step_array))
inlay_log_min_index = int(inlay_log_min_time / step_array[-1] * len(step_array))
inlay_log_max_index = int(inlay_log_max_time / step_array[-1] * len(step_array))
plt.plot(step_array[min_index:max_index], offdiagdens[min_index:max_index, 1])
plt.ylabel(r'Sum of off diagonals (dens. mat.)')
plt.xlabel(r'$J\,t$')
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'offdiag_densitymatrix.eps', format='eps', dpi=1000)
plt.clf()
print('\n densavg = %f' % np.average(offdiagdens[loavgind:, 1]))
# ## reduced density matrix offdiagonals
if os.path.isfile('../data/offdiagdensred.txt'):
offdiagdensred = np.loadtxt('../data/offdiagdensred.txt')
step_array = offdiagdensred[:, 0] * sysVar.plotTimeScale
# multiply step array with time scale
min_index = int(min_time / step_array[-1] * len(step_array))
max_index = int(max_time / step_array[-1] * len(step_array))
inlay_min_index = int(inlay_min_time / step_array[-1] * len(step_array))
inlay_max_index = int(inlay_max_time / step_array[-1] * len(step_array))
inlay_log_min_index = int(inlay_log_min_time / step_array[-1] * len(step_array))
inlay_log_max_index = int(inlay_log_max_time / step_array[-1] * len(step_array))
plt.plot(step_array[min_index:max_index], offdiagdensred[min_index:max_index, 1])
plt.ylabel(r'Sum of off diagonals (red. dens. mat.)')
plt.xlabel(r'$J\,t$')
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'offdiag_densitymatrix_reduced.eps', format='eps', dpi=1000)
plt.clf()
    print('\n densredavg = %f' % np.average(offdiagdensred[loavgind:, 1]))
# ## single off diagonals
if os.path.isfile('../data/offdiagsingle.txt') and os.path.isfile('../data/offdiagsingleinfo.txt'):
singlesdat = np.loadtxt('../data/offdiagsingle.txt')
singlesinfo = np.loadtxt('../data/offdiagsingleinfo.txt')
dt = singlesdat[1, 0] - singlesdat[0, 0]
nrm = singlesdat[:, 0] / dt
nrm[1:] = 1 / nrm[1:]
'''
for i in range(0,sysVar.m):
for j in range(0,sysVar.occEnSingle):
infoind = 1+4*j+2 #so we start at the first energy
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=False)
f.suptitle(r'$n_{%i} \; E_1=%.2e \; E_2=%.2e$' % (i, singlesinfo[i,infoind], singlesinfo[i,infoind+1]))
ind = 1+2*j+(i*sysVar.occEnSingle*2)
comp = singlesdat[:,ind] + 1j*singlesdat[:,ind+1]
ax1.set_ylabel(r'$|A_{n,m}|$')
ax1.plot(singlesdat[:,0], np.abs(comp), linewidth = 0.5)
tmp = cumtrapz(comp,singlesdat[:,0]/dt,initial=comp[0])
tmp = np.multiply(tmp,nrm)
ax2.set_ylabel(r'average $|A_{n,m}|$')
ax2.plot(singlesdat[:,0], np.abs(tmp), linewidth = 0.5)
ax3.set_ylabel(r'arg$/\pi$')
plt.xlabel(r'$J\,t$')
ax3.plot(singlesdat[:,0], np.angle(comp)/(np.pi), linewidth = 0.5)
plt.tight_layout()
plt.subplots_adjust(top=0.9, left=0.1)
pp.savefig(f)
f.clf()
# do the double log plot
de = np.abs(singlesinfo[i,infoind] - singlesinfo[i,infoind+1])
linar = np.zeros(len(singlesdat[:,0]), dtype=np.float64)
linar[0] = 0
linar[1:] = 2/(singlesdat[1:,0] * de)
plt.xlabel(r'$J\,t$')
plt.ylabel(r'relative average $|A_{n,m}|$')
plt.loglog(singlesdat[1:,0], np.abs(tmp/np.abs(comp[0]))[1:], singlesdat[1:,0], linar[1:], lw=0.5)
pp.savefig()
plt.clf()
print('.',end='',flush=True)
'''
for i in range(0, sysVar.m):
for j in range(0, sysVar.occEnSingle):
infoind = 1 + 4 * j + 2 # so we start at the first energy
# fetch the exponents. if abs(ordr)==1 set to zero for more readability
f, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=False)
ordr1 = int(np.log10(np.abs(singlesinfo[i, infoind])))
if ordr1 == 1 or ordr1 == -1:
ordr1 = 0
ordr2 = int(np.log10(np.abs(singlesinfo[i, infoind + 1])))
if ordr2 == 1 or ordr2 == -1:
ordr2 = 0
if ordr1 == 0 and ordr2 == 0:
f.suptitle(
r'$n_{%i} \quad E_n=%.2f \; E_m=%.2f$' % (i, singlesinfo[i, infoind], singlesinfo[i, infoind + 1]))
elif ordr1 == 0:
f.suptitle(r'$n_{%i} \quad E_n=%.2f \; E_m=%.2f \cdot 10^{%i}$' % (
i, singlesinfo[i, infoind], singlesinfo[i, infoind + 1] / (10 ** ordr2), ordr2))
elif ordr2 == 0:
f.suptitle(r'$n_{%i} \quad E_n=%.2f \cdot 10^{%i} \; E_m=%.2f$' % (
i, singlesinfo[i, infoind] / (10 ** ordr1), ordr1, singlesinfo[i, infoind + 1]))
else:
f.suptitle(r'$n_{%i} \quad E_n=%.2f \cdot 10^{%i} \; E_m=%.2f \cdot 10^{%i}$' % (
i, singlesinfo[i, infoind] / (10 ** ordr1), ordr1, singlesinfo[i, infoind + 1] / (10 ** ordr2),
ordr2))
#
ind = 1 + 2 * j + (i * sysVar.occEnSingle * 2)
comp = singlesdat[:, ind] + 1j * singlesdat[:, ind + 1]
# order of magnitude of the deviation
if not (np.abs(np.abs(comp[0]) - np.abs(comp[-1])) == 0):
ordr = int(np.log10(np.abs(np.abs(comp[0]) - np.abs(comp[-1])))) - 1
else:
ordr = 0
ax1.set_ylabel(r'$|n(t)| - |n(0)| / 10^{%i}$' % ordr)
ax1.plot(singlesdat[:, 0], (np.abs(comp) - np.abs(comp[0])) / np.abs(comp[-1]), linewidth=0.5)
tmp = cumtrapz(comp, singlesdat[:, 0] / dt, initial=comp[0])
tmp = np.multiply(tmp, nrm)
# order of magnitude of the average
if not (np.abs(tmp[1]) == 0):
ordr = int(np.log10(np.abs(tmp[1]))) - 1
else:
ordr = 0
ax2.set_ylabel(r'$|\overline{n}_{n,m}^{%i}| / 10^{%i}$' % (i, ordr))
ax2.plot(singlesdat[:, 0], np.abs(tmp) / (10 ** ordr), linewidth=0.5)
ax2.set_xlabel(r'$J\,t$')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'offdiag_occupation_single_n%i_%i.eps' % (i, j), format='eps', dpi=1000)
f.clf()
plt.close()
# do the double log plot
de = np.abs(singlesinfo[i, infoind] - singlesinfo[i, infoind + 1])
linar = np.zeros(len(singlesdat[:, 0]), dtype=np.float64)
linar[0] = 0
linar[1:] = 2 / (singlesdat[1:, 0] * de)
plt.xlabel(r'$J\,t$')
plt.ylabel(r'relative average $|n_{n,m}^{%i}|$' % i)
plt.loglog(singlesdat[1:, 0], np.abs(tmp / np.abs(comp[0]))[1:], singlesdat[1:, 0], linar[1:], lw=0.5)
plt.savefig(pltfolder + 'offdiag_occupation_single_n%i_%i_loglog.eps' % (i, j), format='eps', dpi=1000)
plt.clf()
plt.close()
print('.', end='', flush=True)
if os.path.isfile('../data/diagsingles.txt'):
diagdat = np.loadtxt('../data/diagsingles.txt')
if os.path.isfile('../data/energy.txt') and os.path.isfile('../data/hamiltonian_eigvals.txt'):
# ## look for energy - this works because the energies are sorted
engy = np.loadtxt('../data/energy.txt')
eigengy = np.loadtxt('../data/hamiltonian_eigvals.txt')
diff = 0
for l in range(0, sysVar.dim):
if np.abs(eigengy[l, 1] - engy[0, 1]) > diff and l != 0:
eind = l - 1
break
else:
diff = np.abs(eigengy[l, 1] - engy[0, 1])
if eind < 15:
loran = 0
else:
loran = eind - 15
for i in range(0, sysVar.m):
if os.path.isfile('../data/energy.txt') and os.path.isfile('../data/hamiltonian_eigvals.txt'):
plt.title(r'Diagonal weighted elements of $n_{%i}$ in spectral decomp.' % (i))
lo = np.int32(sysVar.dim * i)
hi = np.int32(lo + sysVar.dim)
plt.ylabel(r'$|n%i_{E}|$' % i)
plt.xlabel(r'$E / J$')
# plt.plot(diagdat[lo:hi,1], diagdat[lo:hi,2],linestyle='none',marker='o',ms=0.5)
plt.plot(diagdat[lo + loran:hi, 1][:30], diagdat[lo + loran:hi, 2][:30], marker='o', ms=2)
plt.axvline(x=engy[0, 1], linewidth=0.8, color='red')
###inlay
a = plt.axes([0.18, 0.6, 0.28, 0.28])
a.plot(diagdat[lo:hi - 300, 1], diagdat[lo:hi - 300, 2], marker='o', ms=0.6, ls='none')
a.set_xticks([])
a.set_yticks([])
plt.tight_layout(padding)
plt.savefig(pltfolder + 'diag_occupations_n%i_weighted.eps' % i, format='eps', dpi=1000)
plt.clf()
if os.path.isfile('../data/occ' + str(i) + '_re.txt'):
occmat = np.loadtxt('../data/occ' + str(i) + '_re.txt')
diags = np.zeros(sysVar.dim)
### large plot
plt.title(r'Diagonal elements of $n_{%i}$ in spectral decomposition' % (i))
plt.ylabel(r'$|n%i_{E}|$' % (i))
plt.xlabel(r'$E / J$')
for el in range(0, sysVar.dim):
diags[el] = occmat[el, el]
plt.plot(diagdat[lo + loran:hi, 1][:30], diags[loran:][:30], marker='o', ms=2)
plt.axvline(x=engy[0, 1], linewidth=0.8, color='red')
### inlay
a = plt.axes([0.18, 0.6, 0.28, 0.28])
a.plot(diagdat[lo:hi - 50, 1], diags[:-50], marker='o', ms=0.5, ls='none')
a.set_xticks([])
a.set_yticks([])
plt.savefig(pltfolder + 'diag_occupations_n%i.eps' % i, format='eps', dpi=1000)
plt.clf()
else:
plt.title(r'Diagonal weighted elements of $n_{%i}$ in spectral decomp.' % (i))
lo = np.int32(sysVar.dim * i)
hi = np.int32(lo + sysVar.dim)
plt.ylabel(r'$|n%i_{E}|$' % i)
plt.xlabel(r'$E / J$')
# plt.plot(diagdat[lo:hi,1], diagdat[lo:hi,2],linestyle='none',marker='o',ms=0.5)
plt.plot(diagdat[lo:hi - np.int32(sysVar.dim / 100), 1], diagdat[lo:hi - np.int32(sysVar.dim / 100), 2],
marker='o', ms=0.6, ls='none')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'diag_occupations_n%i_weighted.eps' % i, format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
plt.close()
print(' done!')
| 2.078125
| 2
|
setup.py
|
hanneshapke/pyzillow
| 88
|
12779598
|
<reponame>hanneshapke/pyzillow<gh_stars>10-100
#!/usr/bin/env python
#
"""
Distutils setup script for pyzillow.
"""
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
readme = open("README.rst").read()
doclink = """
Documentation
-------------
The full documentation is at https://pyzillow.readthedocs.io/."""
history = open("HISTORY.rst").read().replace(".. :changelog:", "")
setup(
name="pyzillow",
version="0.7.0",
description="Python API wrapper for Zillow's API",
long_description=readme + "\n\n" + doclink + "\n\n" + history,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/hanneshapke/pyzillow",
packages=["pyzillow"],
package_dir={"pyzillow": "pyzillow"},
include_package_data=True,
install_requires=["requests"],
license="MIT",
zip_safe=False,
keywords=["pyzillow", "zillow", "api", "real estate"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 1.554688
| 2
|
high-availability-endpoint/python/region_lookup.py
|
fortunecookiezen/aws-health-tools
| 825
|
12779599
|
<filename>high-availability-endpoint/python/region_lookup.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import dns.resolver
class RegionLookupError(Exception):
    """Raised when there was a problem looking up the active region"""
pass
def active_region():
qname = 'global.health.amazonaws.com'
try:
answers = dns.resolver.resolve(qname, 'CNAME')
except Exception as e:
raise RegionLookupError('Failed to resolve {}'.format(qname), e)
if len(answers) != 1:
raise RegionLookupError('Failed to get a single answer when resolving {}'.format(qname))
name = str(answers[0].target) # e.g. health.us-east-1.amazonaws.com.
region_name = name.split('.')[1] # Region name is the 1st in split('.') -> ['health', 'us-east-1', 'amazonaws', 'com', '']
return region_name
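# A minimal usage sketch (assumes the dnspython package is installed and the host
# can resolve public DNS); prints the currently active endpoint region, e.g. "us-east-1".
if __name__ == '__main__':
    print(active_region())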
| 2.890625
| 3
|
django_hits/models.py
|
bung87/django_hits
| 0
|
12779600
|
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.db import models
import django
# Use django.VERSION (a tuple) for the version check; django.version does not
# exist and django.get_version() returns a string that is always truthy.
if django.VERSION >= (1, 9):
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db.transaction import atomic
else:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db.transaction import commit_manually as atomic
from django.contrib.contenttypes.models import ContentType
from django.db import transaction,IntegrityError
from datetime import timedelta
from django.utils import timezone
now = timezone.now
from django.db.models import signals
from django.conf import settings
class HitManager(models.Manager):
def get_for(self, obj, bucket=None):
if bucket is None:
bucket_kwargs = {'bucket__isnull': True}
else:
bucket_kwargs = {'bucket': bucket}
if isinstance(obj, models.Model):
content_type = ContentType.objects.get_for_model(obj.__class__)
object_pk = getattr(obj, obj._meta.pk.column)
try:
return self.get_or_create(content_type=content_type, object_pk=object_pk, **bucket_kwargs)[0]
except IntegrityError: # catch race condition
return self.get(content_type=content_type, object_pk=object_pk, **bucket_kwargs)
elif isinstance(obj, (str, unicode)):
try:
return self.get_or_create(content_type__isnull=True, object_pk=obj, **bucket_kwargs)[0]
except IntegrityError: # catch race condition
return self.get(content_type__isnull=True, object_pk=obj, **bucket_kwargs)
else:
raise Exception("Don't know what to do with this obj!?")
def hit(self, obj, user, ip, bucket=None):
hit = self.get_for(obj, bucket=bucket)
hit.hit(user, ip)
return hit
class Hit(models.Model):
content_type = models.ForeignKey(ContentType, null=True)
object_pk = models.CharField(max_length=50) # TextField not possible, because unique_together is needed, must be enough
content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
bucket = models.CharField(max_length=50, blank=True, null=True) # Each object may have multiple buckets hits get counted in
views = models.PositiveIntegerField(default=0) # page hits/views
visits = models.PositiveIntegerField(default=0) # unique visits
objects = HitManager()
def hit(self, user, ip):
try:
with transaction.atomic():
if self.has_hit_from(user, ip):
self.update_hit_from(user, ip)
Hit.objects.filter(pk=self.pk).update(views=models.F('views') + 1)
self.views += 1
return True
else:
self.log.create(user=user, ip=ip)
Hit.objects.filter(pk=self.pk).update(views=models.F('views') + 1, visits=models.F('visits') + 1)
self.views += 1
self.visits += 1
return True
except IntegrityError:
# CATCH RACE CONDITION
# log-extry was already created
# happens when users double-click or reload to fast
# (we ignore this)
return False
def has_hit_from(self, user, ip):
self.clear_log()
if self.log.filter(user=user, ip=ip).count():
return True
else:
return False
def update_hit_from(self, user, ip):
self.log.filter(user=user, ip=ip).update(when=now())
def clear_log(self):
timespan = now() - timedelta(days=30)
for l in self.log.filter(when__lt=timespan).order_by('-when')[25:]:
l.delete()
class Meta:
unique_together = (('content_type', 'object_pk', 'bucket'),)
class HitLog(models.Model):
hit = models.ForeignKey(Hit, related_name='log')
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='hits_log', null=True)
ip = models.GenericIPAddressField(null=True) if hasattr(models,"GenericIPAddressField") else models.IPAddressField(null=True)
when = models.DateTimeField(default=now)
class Meta:
unique_together = (('hit', 'user', 'ip'),)
class HitHistory(models.Model):
hit = models.ForeignKey(Hit, related_name='history')
when = models.DateTimeField(default=now)
views = models.PositiveIntegerField(default=0)
visits = models.PositiveIntegerField(default=0)
views_change = models.PositiveIntegerField(default=0)
visits_change = models.PositiveIntegerField(default=0)
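# A minimal usage sketch (assumes `article` is any saved model instance, `user` is
# an authenticated user or None, and `ip` is the client address; the bucket name is
# an arbitrary caller-chosen string):
#
#   hit = Hit.objects.hit(article, user, ip, bucket="daily")
#   hit.views, hit.visits  # cumulative page views and unique visits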
| 2.0625
| 2
|
understat/constants.py
|
arkjinli/understat
| 0
|
12779601
|
BASE_URL = "https://understat.com/"
LEAGUE_URL = "https://understat.com/league/{}/{}"
PLAYER_URL = "https://understat.com/player/{}"
TEAM_URL = "https://understat.com/team/{}/{}"
PATTERN = r"{}\s+=\s+JSON.parse\(\'(.*?)\'\)"
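# A usage sketch (not part of the original module): PATTERN is a format string whose
# placeholder is the name of the JavaScript variable embedded in the page source,
# e.g. "teamsData"; the captured group is the escaped JSON payload.
#
#   import json, re
#   match = re.search(PATTERN.format("teamsData"), html)
#   data = json.loads(match.group(1).encode("utf8").decode("unicode_escape"))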
| 1.46875
| 1
|
tests/test_add_metaphor.py
|
KyleMaclean/Poetry-Generator
| 0
|
12779602
|
<reponame>KyleMaclean/Poetry-Generator<filename>tests/test_add_metaphor.py<gh_stars>0
from unittest import TestCase
from agents.add_metaphor import do
class Test(TestCase):
# reported
def test_do(self):
actual_line = do('you are beautiful')
self.assertEqual(actual_line, 'you are beautiful as the vivid colors of a painting')
| 2.4375
| 2
|
test/utils/expocat_test.py
|
emthompson-usgs/pager
| 9
|
12779603
|
#!/usr/bin/env python
# stdlib imports
import urllib.request as request
import tempfile
import os.path
import sys
from datetime import datetime
# third party imports
import numpy as np
# local imports
from losspager.utils.expocat import ExpoCat
def commify(value):
if np.isnan(value):
return 'NaN'
return format(int(value), ",d")
def get_max_mmi(tdict, minimum=1000):
indices = ['MMI1', 'MMI2', 'MMI3', 'MMI4',
'MMI5', 'MMI6', 'MMI7', 'MMI8', 'MMI9+']
exparray = np.array([tdict[idx] for idx in indices])
    imax = (exparray > minimum).nonzero()[0].max()
return (imax + 1, exparray[imax])
def test():
homedir = os.path.dirname(os.path.abspath(
__file__)) # where is this script?
expocat = ExpoCat.fromDefault()
clat = 0.37
clon = -79.94
radius = 400
ndeaths = 9
minicat = expocat.selectByRadius(clat, clon, radius)
print('Testing that historical events returned are correct...')
maxmmi = 8
nmaxmmi = 103000
events = minicat.getHistoricalEvents(maxmmi, nmaxmmi, ndeaths, clat, clon)
assert events[0]['EventID'] == '199603282303'
assert events[1]['EventID'] == '197912120759'
assert events[2]['EventID'] == '198703060410'
print('Passed.')
print('Testing that events selected by hazard are correct...')
fire = expocat.selectByHazard('fire')
tsunami = expocat.selectByHazard('tsunami')
liquefaction = expocat.selectByHazard('liquefaction')
landslide = expocat.selectByHazard('landslide')
assert fire._dataframe['Fire'].sum() == len(fire)
assert tsunami._dataframe['Tsunami'].sum() == len(tsunami)
assert liquefaction._dataframe['Liquefaction'].sum() == len(liquefaction)
assert landslide._dataframe['Landslide'].sum() == len(landslide)
# test exclusion method
test_time = datetime(1994, 1, 1)
expocat.excludeFutureEvents(test_time)
assert expocat._dataframe['Time'].max() < test_time
print('Passed.')
if __name__ == '__main__':
test()
| 2.328125
| 2
|
test/python/WMCore_t/JobSplitting_t/Generators_t/AutomaticSeeding_t.py
|
khurtado/WMCore
| 21
|
12779604
|
#!/usr/bin/env python
# encoding: utf-8
"""
AutomaticSeeding_t.py
Created by <NAME> on 2010-08-30.
Copyright (c) 2010 Fermilab. All rights reserved.
"""
from __future__ import print_function
import sys
import os
import unittest
from WMCore.JobSplitting.Generators.AutomaticSeeding import AutomaticSeeding
from WMCore.DataStructs.Job import Job
from PSetTweaks.PSetTweak import PSetTweak
class AutomaticSeeding_tTests(unittest.TestCase):
def testA(self):
"""test creating the plugin"""
try:
seeder = AutomaticSeeding()
except Exception as ex:
msg = "Failed to instantiate an AutomaticSeeder: "
msg += str(ex)
self.fail(msg)
def testB(self):
"""test plugin acts on a Job as expected"""
job = Job("TestJob")
seeder = AutomaticSeeding()
seeder(job)
def testC(self):
"""test building a tweak from the seeds"""
job = Job("TestJob")
seeder = AutomaticSeeding()
job.addBaggageParameter("process.RandomNumberGeneratorService.seed1.initialSeed", 123445)
job.addBaggageParameter("process.RandomNumberGeneratorService.seed2.initialSeed", 123445)
job.addBaggageParameter("process.RandomNumberGeneratorService.seed3.initialSeed", 7464738)
job.addBaggageParameter("process.RandomNumberGeneratorService.seed44.initialSeed", 98273762)
seeder(job)
tweak = PSetTweak()
for x in job.baggage.process.RandomNumberGeneratorService:
parameter = "process.RandomNumberGeneratorService.%s.initialSeed" % x._internal_name
tweak.addParameter(parameter, x.initialSeed)
print(tweak)
if __name__ == '__main__':
unittest.main()
| 2.109375
| 2
|
Z - Tool Box/x2john/ccache2john.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
| 1,290
|
12779605
|
#!/usr/bin/env python2
"""
This script extracts crackable hashes from krb5's credential cache files (e.g.
/tmp/krb5cc_1000).
NOTE: This attack technique only works against MS Active Directory servers.
This was tested with CentOS 7.4 client running krb5-1.15.1 software against a
Windows 2012 R2 Active Directory server.
Usage: python ccache2john.py ccache_file
Upstream: https://github.com/rvazarkar/KrbCredExport
Authors: <NAME> (main author), <NAME> (splitting support), and <NAME> (misc. glue)
Resources,
https://lapo.it/asn1js/
https://tools.ietf.org/html/rfc1510#section-5.8.1
https://github.com/CoreSecurity/impacket/tree/master/impacket/krb5
https://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
https://github.com/wireshark/wireshark/blob/master/epan/dissectors/asn1/kerberos/KerberosV5Spec2.asn
"""
import sys
import os.path
import time
import struct
import datetime
from pyasn1.codec.ber import decoder
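# Typical workflow sketch (the john invocation below is an assumption about the
# reader's cracking setup, not something this script performs itself):
#   $ python2 ccache2john.py /tmp/krb5cc_1000 > krb5.hashes
#   $ john --format=krb5tgs krb5.hashes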
# LB is a single byte representing the length of the rest of the section
# LT is a 3 byte structure consisting of the byte 82 followed by 2 bytes representing the length of the rest of the file
# header {
# uint16 tag
# uint16 taglen
# uint8[taglen] tagdata
# }
class Header:
def __init__(self):
self.tag = None
self.taglen = None
self.deltatime = DeltaTime()
def parsefile(self, f):
self.tag, self.taglen = struct.unpack(">HH", f.read(4))
self.deltatime.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">HH", self.tag, self.taglen)
r += self.deltatime.tostring()
return r
# deltatime {
# uint32 time_offset
# uint32 usec_offset
# }
class DeltaTime:
def __init__(self):
self.usec_offset = None
self.time_offset = None
def parsefile(self, f):
self.time_offset, self.usec_offset = struct.unpack(">LL", f.read(8))
def tostring(self):
r = ''
r += struct.pack(">LL", self.time_offset, self.usec_offset)
return r
# ccacheheader {
# uint16 version
# uint16 header_len
# header[] headers
# principal primary_principal
# }
class CCacheHeader:
def __init__(self):
self.version = None
self.header_length = None
self.header = Header()
def parsefile(self, f):
self.version, = struct.unpack(">H", f.read(2))
self.header_length, = struct.unpack(">H", f.read(2))
# self.header.parsefile(f) # this is perhaps buggy?
f.read(self.header_length)
def tostring(self):
r = ''
r += struct.pack(">HH", self.version, self.header_length)
r += self.header.tostring()
return r
# times {
# uint32 authtime
# uint32 starttime
# uint32 endtime
# uint32 renew_till
# }
class KerbTimes:
def __init__(self):
self.authtime = None
self.starttime = None
self.endtime = None
self.renew_till = None
def parsefile(self, f):
self.authtime, self.starttime, self.endtime, self.renew_till = struct.unpack(">IIII", f.read(16))
def tostring(self):
return struct.pack(">IIII", self.authtime, self.starttime, self.endtime, self.renew_till)
# counted_octet {
# uint32 length
# uint8[char] data
# }
class CountedOctet:
def __init__(self):
self.length = None
self.data = None
def parsefile(self, f):
self.length, = struct.unpack(">L", f.read(4))
self.data, = struct.unpack(">%ds" % self.length, f.read(self.length))
def tostring(self):
r = b''
r += struct.pack(">L", self.length)
r += struct.pack(">%ds" % self.length, self.data)
return r
# keyblock {
# uint16 keytype
# uint16 etype
# uint16 keylen
# uint8[keylen] key
# }
class Keyblock:
def __init__(self):
self.keytype = None
self.etype = None
self.keylen = None
self.key = None
def parsefile(self, f):
self.keytype, self.etype, self.keylen = struct.unpack(">HHH", f.read(6))
self.key, = struct.unpack(">%ds" % self.keylen, f.read(self.keylen))
def tostring(self):
r = ''
r += struct.pack(">HHH", self.keytype, self.etype, self.keylen)
r += struct.pack(">%ds" % self.keylen, self.key)
return r
# principal {
# uint32 name_type
# uint32 num_components
# counted_octet realm
# counted_octet[num_components] components
# }
class Principal:
def __init__(self):
self.name_type = None
self.num_components = None
self.realm = CountedOctet()
self.components = []
def parsefile(self, f):
self.name_type, self.num_components = struct.unpack(">LL", f.read(8))
self.realm.parsefile(f)
for i in range(0, self.num_components):
component = CountedOctet()
component.parsefile(f)
self.components.append(component.data)
def tostring(self):
r = ''
r += struct.pack(">LL", self.name_type, self.num_components)
r += self.realm.tostring()
for i in self.components:
r += struct.pack(">L", len(i))
r += i
return r
# address {
# uint16 address_type
# counted_octet address
# }
class Address:
def __init__(self):
self.address_type = None
self.address = CountedOctet()
def parsefile(self, f):
self.address_type, = struct.unpack(">H", f.read(2))
self.address.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">H", self.address_type)
r += self.address.tostring()
return r
# authdata {
# uint16 authtype
# counted_octet authdata
# }
class AuthData:
def __init__(self):
self.authtype = None
self.authdata = CountedOctet()
def parsefile(self, f):
self.authtype, = struct.unpack(">H", f.read(2))
self.authdata.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">H", self.authtype)
r += self.authdata.tostring()
return r
# credential {
# principal client
# principal server
# keyblock key
# times timedata
# uint8 skey
# uint32 tktFlags (Reverse Byte Order!)
# uint32 num_address
# address[num_address] addresses
# uint32 num_authdata
# authdata[num_authdata] auths
# counted_octet ticket_1
# counted_octet ticket_2 (nothing here in what I've seen)
# }
class Credential:
def __init__(self):
self.client = Principal()
self.server = Principal()
self.keyblock = Keyblock()
self.times = KerbTimes()
self.is_skey = None
self.tktFlags = None
self.num_address = None
self.address = []
self.num_authdata = None
self.authdata = []
self.ticket = CountedOctet()
self.secondticket = CountedOctet()
def parsefile(self, f):
self.client.parsefile(f)
self.server.parsefile(f)
self.keyblock.parsefile(f)
self.times.parsefile(f)
self.is_skey, = struct.unpack(">B", f.read(1))
self.tktFlags, = struct.unpack("<I", f.read(4))
self.num_address, = struct.unpack(">I", f.read(4))
for i in range(0, self.num_address):
self.address.append(Address().parsefile(f))
self.num_authdata, = struct.unpack(">I", f.read(4))
for i in range(0, self.num_authdata):
self.authdata.append(AuthData().parsefile(f))
self.ticket.parsefile(f)
self.secondticket.parsefile(f)
def tostring(self):
r = ''
r += self.client.tostring()
r += self.server.tostring()
r += self.keyblock.tostring()
r += self.times.tostring()
r += struct.pack(">B", self.is_skey)
r += struct.pack("<I", self.tktFlags)
r += struct.pack(">I", self.num_address)
for i in self.address:
r += i.tostring()
r += struct.pack(">I", self.num_authdata)
for i in self.authdata:
r += i.tostring()
r += self.ticket.tostring()
r += self.secondticket.tostring()
return r
# Prepend, shortened for convenience
def p(a, b):
return b + a
# Returns the length of s as a single byte
def clen(s):
return chr(len(s))
# key {
# 0xA0 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 key_type
# 0xA1 LB
# 0x03 LB
# keydata
# }
class Key:
def __init__(self):
self.key = None
self.keytype = None
def parsefile(self, f):
f.read(8)
self.keytype, = struct.unpack('>B', f.read(1))
f.read(3)
keylen, = struct.unpack('>B', f.read(1))
self.key, = struct.unpack(">%ds" % keylen, f.read(keylen))
def tostring(self):
r = ''
r += self.key
r = p(r, clen(r))
r = p(r, '\x04')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.keytype))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA0')
return r
# This section represents the primary principal realm. Corresponds to the domain name
# prealm {
# 0xA1 LB
# 0x1B LB
# Primary Principal Realm
# }
class PRealm:
def __init__(self):
self.principal_realm = None
def parsefile(self, f):
f.read(3)
length, = struct.unpack(">b", f.read(1))
self.principal_realm, = struct.unpack(">%ds" % length, f.read(length))
def tostring(self):
r = ''
r += self.principal_realm
r = p(r, clen(r))
r = p(r, '\x1B')
r = p(r, clen(r))
r = p(r, '\xA1')
return r
# This section represents the primary principal realm
# pname {
# 0xA2 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 name_type
# 0xA1 LB
# 0x30 LB
# 0x1B LB
# Primary Principal Name
# }
class PName:
def __init__(self):
self.principal_components = []
self.principal_name_type = None
def parsefile(self, f):
f.read(8)
self.principal_name_type, = struct.unpack(">B", f.read(1))
f.read(3)
rem_length, = struct.unpack(">B", f.read(1))
while (rem_length > 0):
f.read(1)
l, = struct.unpack(">B", f.read(1))
component, = struct.unpack("%ds" % l, f.read(l))
self.principal_components.append(component)
rem_length -= (2 + l)
def tostring(self):
r = ''
for s in self.principal_components:
r += '\x1B' + chr(len(s)) + s
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.principal_name_type))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA2')
return r
# This section details flags for the ticket
# tktflags {
# 0xA3 LB
# 0x03 LB
# 0x00 Always 0, apparently number of unused bytes. tktFlags is always a uint32
# uint32 Ticket Flags
# }
class TicketFlags:
def __init__(self):
self.ticket_flags = None
def parsefile(self, f):
f.read(5)
self.ticket_flags, = struct.unpack("I", f.read(4))
def tostring(self):
r = ''
r += struct.pack("I", self.ticket_flags)
r = p(r, '\x00')
r = p(r, clen(r))
r = p(r, '\x03')
r = p(r, clen(r))
r = p(r, '\xA3')
return r
# These sections contain the ticket timestamps. Note that the timestamps are in a consistent format, so length tags are always the same
# Timestamp format is YYYYmmddHHMMSSZ and must be UTC!
# 0xA5 is starttime, 0xA6 is endtime, 0xA7 is renew_till
# time {
# uint8 Identifier
# LB (Always 0x11)
# 0x18 LB (Always 0x0F)
# start_time
# }
class Time:
def __init__(self, identifier):
self.identifier = identifier
self.time = None
@staticmethod
def convert_to_unix(timestr):
epoch = datetime.datetime(1970, 1, 1)
t = datetime.datetime.strptime(timestr[:-1], '%Y%m%d%H%M%S')
td = t - epoch
return int((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 1e6)
@staticmethod
def convert_to_kerbtime(unixtime):
t = datetime.datetime.utcfromtimestamp(unixtime)
t = ''.join([t.strftime('%Y'), t.strftime('%m'), t.strftime('%d'),
t.strftime('%H'), t.strftime('%M'), t.strftime('%S'), 'Z'])
return t
def parsefile(self, f):
self.identifier, = struct.unpack(">B", f.read(1))
f.read(3)
strtime, = struct.unpack(">15s", f.read(15))
self.time = Time.convert_to_unix(strtime)
def tostring(self):
r = ''
r += struct.pack(">15s", Time.convert_to_kerbtime(self.time))
r = p(r, '\x11\x18\x0F')
r = p(r, chr(self.identifier))
return r
# This section represents the server realm (domain)
# srealm {
# 0xA8 LB
# 0x1B LB
# server_realm (domain name of server)
# }
class SRealm:
def __init__(self):
self.server_realm = None
def parsefile(self, f):
f.read(3)
length, = struct.unpack(">B", f.read(1))
self.server_realm, = struct.unpack(">%ds" % length, f.read(length))
def tostring(self):
r = ''
r += self.server_realm
r = p(r, clen(r))
r = p(r, '\x1B')
r = p(r, clen(r))
r = p(r, '\xA8')
return r
# This section represents the server name components
# sname {
# 0xA9 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 server_name_type
# 0xA1 LB
# 0x30 LB
# components[]
# }
#
# components {
# 0x1B
# uint8 Component Length
# Component
# }
class SName:
def __init__(self):
self.server_components = []
self.server_name_type = None
def parsefile(self, f):
f.read(8)
self.server_name_type, = struct.unpack(">B", f.read(1))
f.read(3)
rem_length, = struct.unpack(">B", f.read(1))
while rem_length > 0:
f.read(1)
l, = struct.unpack(">B", f.read(1))
component, = struct.unpack(">%ds" % l, f.read(l))
self.server_components.append(component)
rem_length -= (2 + l)
def tostring(self):
r = ''
for s in self.server_components:
r += '\x1B' + chr(len(s)) + s
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.server_name_type))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA9')
return r
# header {
# 0x7D LT
# 0x30 LT
# 0xA0 LT
# 0x30 LT
# 0x30 LT
# }
class KrbCredInfo:
def __init__(self):
self.krbcredinfo = None
self.key = Key()
self.prealm = PRealm()
self.pname = PName()
self.flags = TicketFlags()
self.starttime = Time(165)
self.endtime = Time(166)
self.renew_till = Time(167)
self.srealm = SRealm()
self.sname = SName()
def parsefile(self, f):
f.read(20)
self.key.parsefile(f)
self.prealm.parsefile(f)
self.pname.parsefile(f)
self.flags.parsefile(f)
self.starttime.parsefile(f)
self.endtime.parsefile(f)
self.renew_till.parsefile(f)
self.srealm.parsefile(f)
self.sname.parsefile(f)
self.krbcredinfo = self.key.tostring() + self.prealm.tostring() + self.pname.tostring() + self.flags.tostring() + \
self.starttime.tostring() + self.endtime.tostring() + \
self.renew_till.tostring() + self.srealm.tostring() + \
self.sname.tostring()
def tostring(self):
r = self.krbcredinfo
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA0\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x7D\x82')
return r
def createkrbcrdinfo(self):
self.krbcredinfo = self.key.tostring() + self.prealm.tostring() + self.pname.tostring() + self.flags.tostring() + \
self.starttime.tostring() + self.endtime.tostring() + \
self.renew_till.tostring() + self.srealm.tostring() + \
self.sname.tostring()
# The encpart serves as a sort of header for the EncKrbCredPart
# encpart {
# 0xA0 0x03 0x02 0x01
# uint8 etype (Seems to always be 0 in my testing)
# 0xA2 LT
# 0x04 LT
# }
class EncPart:
def __init__(self):
self.krbcredinfo = KrbCredInfo()
self.etype = None
def parsefile(self, f):
f.read(4)
self.etype, = struct.unpack(">B", f.read(1))
f.read(8)
self.krbcredinfo.parsefile(f)
def tostring(self):
r = self.krbcredinfo.tostring()
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x04\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA2\x82')
r = p(r, chr(self.etype))
r = p(r, '\xA0\x03\x02\x01')
return r
# This section represents the tickets section of the overall KrbCred
# tickets {
# 0xA2 0x82
# uint16 ticket_length + 4
# 0x30 0x82
# uint16 ticket_length
# ticket
# 0xA3 LT
# 0x30 LT
# }
class TicketPart:
def __init__(self):
self.ticket = None
self.encpart = EncPart()
def parsefile(self, f):
f.read(6)
ticketlen, = struct.unpack(">H", f.read(2))
self.ticket, = struct.unpack(">%ds" % ticketlen, f.read(ticketlen))
f.read(8)
self.encpart.parsefile(f)
def tostring(self):
r = self.encpart.tostring()
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA3\x82')
r = p(r, self.ticket)
r = p(r, struct.pack(">H", len(self.ticket)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(self.ticket) + 4))
r = p(r, '\xA2\x82')
return r
# This is the header for the kerberos ticket, and the final section
# header {
# 0x76 LT
# 0x30 LT
# 0xA0 0x03 0x02 0x01
# uint8 pvno (Protocol Version, always 0x05)
# 0xA1 0x03 0x02 0x01
# uint8 msg-type (Always 0x16 for krbcred)
# }
class KrbCredHeader:
def __init__(self):
self.ticketpart = TicketPart()
def parsefile(self, f):
f.read(18)
self.ticketpart.parsefile(f)
def tostring(self):
r = self.ticketpart.tostring()
r = p(r, '\xA1\x03\x02\x01\x16')
r = p(r, '\xA0\x03\x02\x01\x05')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x76\x82')
return r
# borrowed from https://stackoverflow.com
def swap32(i):
return struct.unpack("<I", struct.pack(">I", i))[0]
# src/include/krb5/krb5.h
"""
#define TKT_FLG_FORWARDABLE 0x40000000
#define TKT_FLG_FORWARDED 0x20000000
#define TKT_FLG_PROXIABLE 0x10000000
#define TKT_FLG_PROXY 0x08000000
#define TKT_FLG_MAY_POSTDATE 0x04000000
#define TKT_FLG_POSTDATED 0x02000000
#define TKT_FLG_INVALID 0x01000000
#define TKT_FLG_RENEWABLE 0x00800000
#define TKT_FLG_PRE_AUTH 0x00200000
#define TKT_FLG_HW_AUTH 0x00100000
#define TKT_FLG_TRANSIT_POLICY_CHECKED 0x00080000
#define TKT_FLG_OK_AS_DELEGATE 0x00040000
#define TKT_FLG_ENC_PA_REP 0x00010000
#define TKT_FLG_ANONYMOUS 0x00008000
"""
TKT_FLG_INITIAL = 0x00400000
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: {0} <input credential cache file>".format(sys.argv[0]))
print("\nExample: {0} /tmp/krb5cc_1000".format(sys.argv[0]))
sys.exit(0)
with open(sys.argv[1], 'rb') as f:
fileid, = struct.unpack(">B", f.read(1))
if fileid == 0x5: # Credential Cache (ccache)
f.seek(0)
header = CCacheHeader()
primary_principal = Principal()
credential = Credential()
header.parsefile(f)
primary_principal.parsefile(f)
i = 0
sys.stderr.write("WARNING: Not all the hashes generated by this program are crackable. Please select the relevant hashes manually!\n")
time.sleep(2)
# Check if you've reached the end of the file. If not get the next credential
while(f.read(1) != ''):
f.seek(-1, 1)
credential.parsefile(f)
out = []
KrbCred = KrbCredHeader()
KrbCred.ticketpart.ticket = credential.ticket.data # extract hash from here!
try:
# this code is terrible!
etype = str(decoder.decode(credential.ticket.data)[0][3][0])
data = str(decoder.decode(credential.ticket.data)[0][3][2])
if etype != "23":
sys.stderr.write("Unsupported etype %s found. Such hashes can't be cracked it seems.\n" % etype)
continue
except:
continue
# print(credential.ticket.data.encode("hex"))
KrbCred.ticketpart.encpart.etype = credential.keyblock.etype
krbcredinfo = KrbCred.ticketpart.encpart.krbcredinfo
krbcredinfo.key.key = credential.keyblock.key
krbcredinfo.key.keytype = credential.keyblock.keytype
# print(credential.keyblock.keytype)
krbcredinfo.prealm.principal_realm = primary_principal.realm.data
# print(primary_principal.realm.data)
krbcredinfo.pname.principal_components = primary_principal.components
# print(primary_principal.components)
krbcredinfo.pname.principal_name_type = primary_principal.name_type
krbcredinfo.flags.ticket_flags = credential.tktFlags
tktFlags = swap32(credential.tktFlags)
if tktFlags & TKT_FLG_INITIAL:
continue
krbcredinfo.starttime.time = credential.times.starttime
krbcredinfo.endtime.time = credential.times.endtime
krbcredinfo.renew_till.time = credential.times.renew_till
krbcredinfo.srealm.server_realm = credential.server.realm.data
# print(credential.server.realm.data)
krbcredinfo.sname.server_components = credential.server.components
for c in credential.server.components: # dirty hack
if c not in ['krbtgt', 'krb5_ccache_conf_data', 'pa_type']:
out.append(c)
name = b"-".join(out[-2:])
krbcredinfo.sname.server_name_type = credential.server.name_type
krbcredinfo.createkrbcrdinfo()
sys.stdout.write("%s:$krb5tgs$%s$%s$%s\n" % (os.path.basename(name), etype, data[:16].encode("hex"), data[16:].encode("hex")))
"""
                # Write separate files for each ticket found. postfix is just a number for now.
with open(sys.argv[2] + "_" + str(i), 'wb') as o:
o.write(KrbCred.tostring())
i = i + 1
"""
sys.exit(0)
elif fileid == 0x76: # untested code, don't use!
f.seek(0)
KrbCred = KrbCredHeader()
KrbCred.parsefile(f)
header = CCacheHeader()
primary_principal = Principal()
credential = Credential()
header.version = 0x504
header.header_length = 0xC
header.header.deltatime.time_offset = 4294967295
header.header.deltatime.usec_offset = 0
header.header.tag = 0x01
header.header.taglen = 0x08
KrbCredInfo_ = KrbCred.ticketpart.encpart.krbcredinfo
primary_principal.name_type = KrbCredInfo_.pname.principal_name_type
primary_principal.components = KrbCredInfo_.pname.principal_components
primary_principal.num_components = len(primary_principal.components)
            primary_principal.realm.data = KrbCredInfo_.prealm.principal_realm
            primary_principal.realm.length = len(primary_principal.realm.data)
            credential.client.name_type = KrbCredInfo_.pname.principal_name_type
            credential.client.components = KrbCredInfo_.pname.principal_components
            credential.client.num_components = len(credential.client.components)
            credential.client.realm.data = KrbCredInfo_.prealm.principal_realm
            credential.client.realm.length = len(credential.client.realm.data)
            credential.server.name_type = KrbCredInfo_.sname.server_name_type
            credential.server.components = KrbCredInfo_.sname.server_components
            credential.server.num_components = len(credential.server.components)
            credential.server.realm.data = KrbCredInfo_.srealm.server_realm
            credential.server.realm.length = len(credential.server.realm.data)
            credential.keyblock.etype = KrbCred.ticketpart.encpart.etype
            credential.keyblock.key = KrbCredInfo_.key.key
            credential.keyblock.keylen = len(credential.keyblock.key)
            credential.keyblock.keytype = KrbCredInfo_.key.keytype
            credential.times.authtime = KrbCredInfo_.starttime.time
            credential.times.starttime = KrbCredInfo_.starttime.time
            credential.times.endtime = KrbCredInfo_.endtime.time
            credential.times.renew_till = KrbCredInfo_.renew_till.time
            credential.is_skey = 0
            credential.tktFlags = KrbCredInfo_.flags.ticket_flags
credential.num_address = 0
credential.address = []
credential.num_authdata = 0
credential.authdata = []
credential.ticket.data = KrbCred.ticketpart.ticket
credential.ticket.length = len(credential.ticket.data)
credential.secondticket.length = 0
credential.secondticket.data = ''
with open(sys.argv[2], 'wb') as o:
o.write(header.tostring())
o.write(primary_principal.tostring())
o.write(credential.tostring())
sys.exit(0)
else:
print('Unknown File Type!')
sys.exit(0)
| 2.46875
| 2
|
libs/utils/utils.py
|
niryarden/lyrics_bot
| 0
|
12779606
|
import configparser
def get_base_url():
parser = configparser.ConfigParser()
parser.read('token.cfg')
token = parser.get('creds', 'token')
return f"https://api.telegram.org/bot{token}/"
| 2.5
| 2
|
companies_app/models.py
|
sabuhish/document-transfer
| 0
|
12779607
|
<reponame>sabuhish/document-transfer
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Company(models.Model):
voen = models.IntegerField(verbose_name="Vöen", unique=True)
name = models.CharField(max_length=255, verbose_name="<NAME>")
ceo_first_name = models.CharField(max_length=120, verbose_name="<NAME>")
ceo_lastname = models.CharField(max_length=250)
user = models.OneToOneField(User, on_delete=models.CASCADE)
class AsanImza(models.Model):
company = models.ForeignKey(Company, verbose_name=(""), on_delete=models.CASCADE)
asan_imza = models.IntegerField("Asan imza", unique=True)
asan_nomre =models.CharField("Asan nomre", unique=True, max_length=20)
class CompanyContacts(models.Model):
__choices = (
(1, "Telefon"),
(2, "email"),
(3, "address")
)
company = models.ForeignKey(Company, on_delete=models.CASCADE)
contact_type = models.IntegerField(choices=__choices)
contact = models.CharField(max_length=255, verbose_name="elaqe")
| 2.390625
| 2
|
server.py
|
tunicashashi/IBM_Cloud_Flask
| 0
|
12779608
|
import os
from flask import Flask
from flask import request
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler as Handler
from SocketServer import TCPServer as Server
except ImportError:
from http.server import SimpleHTTPRequestHandler as Handler
from http.server import HTTPServer as Server
# Read port selected by the cloud for our application
PORT = int(os.getenv('PORT', 8000))
# Change current directory to avoid exposure of control files
#os.chdir('static')
# server http port 8000
#httpd = Server(("", PORT), Handler)
#try:
# print("Start pythonserving at port %i" % PORT)
# httpd.serve_forever()
#except KeyboardInterrupt:
# pass
#httpd.server_close()
# flask http port
app = Flask(__name__)
print ("port num", PORT)
@app.route("/flask")
def hello():
return "Hello World! ftom flask \n"
if __name__ == '__main__':
app.run(debug=True, port=PORT)
#app.run(host="", debug=True)
| 2.953125
| 3
|
DailyProgrammer/DP20121103B.py
|
DayGitH/Python-Challenges
| 2
|
12779609
|
"""
[11/3/2012] Challenge #110 [Intermediate] Creepy Crawlies
https://www.reddit.com/r/dailyprogrammer/comments/12k3xt/1132012_challenge_110_intermediate_creepy_crawlies/
**Description:**
The web is full of creepy stories, with Reddit's /r/nosleep at the top of this list. Since you're a huge fan of not
sleeping (we are programmers, after all), you need to amass a collection of creepy stories into a single file for easy
reading access! Your goal is to write a web-crawler that downloads all the text submissions from the top 100 posts on
/r/nosleep and puts it into a simple text-file.
**Formal Inputs & Outputs:**
*Input Description:*
No formal input: the application should simply launch and download the top 100 posts from /r/nosleep into a special
file format.
*Output Description:*
Your application must either save to a file, or print to standard output, the following format: each story should start
with a title line. This line is three equal-signs, the post's name, and then three more equal-signs. An example is
"=== People are Scary! ===". The following lines are the story itself, written in regular plain text. No need to worry
about formatting, HTML links, bullet points, etc.
**Sample Inputs & Outputs:**
If I were to run the application now, the following would be examples of output:
=== Can I use the bathroom? ===
Since tonight's Halloween, I couldn't... (your program should print the rest of the story, I omit that for example
brevity)
=== She's a keeper. ===
I love this girl with all of my... (your program should print the rest of the story, I omit that for example brevity)
"""
def main():
pass
if __name__ == "__main__":
main()
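# A minimal sketch of the crawler described above (illustrative only, not wired into main()):
# it assumes Reddit's public JSON listing endpoint and the `requests` package.
def fetch_nosleep_top(limit=100):
    import requests
    url = "https://www.reddit.com/r/nosleep/top.json"
    headers = {"User-Agent": "dailyprogrammer-110-example"}
    params = {"limit": limit, "t": "all"}
    listing = requests.get(url, headers=headers, params=params).json()
    lines = []
    for child in listing["data"]["children"]:
        post = child["data"]
        lines.append("=== %s ===" % post["title"])
        lines.append(post.get("selftext", ""))
    return "\n".join(lines)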
| 4
| 4
|
sistercities/web/sistercities/sister_graph.py
|
displayn/sistercities
| 2
|
12779610
|
# -*- coding: utf-8 -*-
import networkx as nx
from networkx.readwrite import json_graph
import json
def read_json_file(filename: object) -> object:
# from http://stackoverflow.com/a/34665365
"""
:type filename: object
"""
with open(filename.name) as f:
js_graph = json.load(f)
return json_graph.node_link_graph(js_graph)
def city_build(name_list, qid_list) -> object:
object_list = []
for e in qid_list:
x = [e, name_list[e]]
object_list.append(x)
object_list = sorted(object_list, key=lambda x: x[1])
return object_list
def get(w, d) -> object:
wikipedia = nx.DiGraph(w)
wikidata = nx.DiGraph(d)
root_nodes = nx.get_node_attributes(wikipedia, 'group')
wikidata_root_nodes = nx.get_node_attributes(wikidata, 'group')
assert (len(root_nodes) == len(wikidata_root_nodes)), 'Error: Graph root size should be the same!'
url_wiki = nx.get_node_attributes(wikipedia, 'url')
url_data = nx.get_node_attributes(wikidata, 'url')
revision_id_wikipedia = nx.get_node_attributes(wikipedia, 'revision_id_wikipedia')
revision_id_wikidata = nx.get_node_attributes(wikidata, 'revision_id_wikidata')
city_list = []
for c in root_nodes.keys():
wg_neighbors = wikipedia.successors(c)
wd_neighbors = wikidata.successors(c)
pedia = set(wg_neighbors)
data = set(wd_neighbors)
intersection = set(pedia).intersection(data)
wikipedia_missing = set(data) - set(pedia)
wikidata_missing = set(pedia) - set(data)
city_dict = {'qid': c,
'revision_id_wikipedia': revision_id_wikipedia[c],
'revision_id_wikidata': revision_id_wikidata[c],
'url': url_wiki[c],
'miss_wikipedia': city_build(url_data, wikipedia_missing),
'intersection': city_build(url_wiki, intersection),
'data_cities': city_build(url_wiki, wikidata_missing)
}
city_list.append(city_dict)
city_list = sorted(city_list, key=lambda x: x['url'])
return city_list
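# A minimal usage sketch (illustrative only): the file names are hypothetical, and
# SimpleNamespace is used because read_json_file() above expects an object with a .name attribute.
def _example_city_list():
    from types import SimpleNamespace
    wikipedia_graph = read_json_file(SimpleNamespace(name='wikipedia_graph.json'))
    wikidata_graph = read_json_file(SimpleNamespace(name='wikidata_graph.json'))
    return get(wikipedia_graph, wikidata_graph)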
| 2.84375
| 3
|
source/tagger/dataset/cleaning.py
|
chrka/cip-tagging-exercise
| 0
|
12779611
|
import numpy as np
import pandas as pd
import fasttext
from sklearn.preprocessing import MultiLabelBinarizer
from skmultilearn.model_selection import IterativeStratification, \
iterative_train_test_split
from functools import reduce
CIP_TAGS = list(map(lambda x: x.strip(),
"gratis, mat, musik, kurs, casino, dans, musuem, inlines, "
"båt, barn, film, språk, hockey, bowling, fika, sport, "
"biljard, bingo, bio, opera, kultur, grilla, kubb, "
"festival, cykel, brännboll, picknick, konsert, pub, "
"frisbeegolf, mc, gokart, svamp, bangolf, teater, "
"afterwork, promenad, humor, utmaning, fest, shopping, "
"resa, sällskapsspel, träna, pubquiz, poker, bok, foto, "
"hund, skridskor, karaoke, dart, bada, diskussion, "
"badminton, pyssel, golf, klättring, loppis, boule, mässa, "
"flytthjälp, yoga, innebandy, pingis, handboll, jogga, "
"tennis, högtid, astronomi, fiske, beachvolleyboll, "
"friluftsliv, volleyboll, geocaching, vindsurfing, "
"shuffleboard, SUP, standup, paddel".split(',')))
def load_raw_normalized_dataset(path, drop_missing):
"""Load raw CiP dataset.
Args:
path: Path to raw CSV file
drop_missing: If true, drop events with missing titles or descriptions
Returns:
events_df, tags_df: Event and tag dataframes as tuple
"""
# FIXME: Import 'id' as integer
cip_df = pd.read_csv(path,
header=None,
names=['id', 'weekday', 'time', 'title', 'description',
'tag_status', 'tag'],
na_values=['-01:00:00'])
# Drop any events with missing titles or descriptions
cip_df.dropna(subset=['title', 'description'], inplace=True)
# Convert time strings to actual times
cip_df['time'] = pd.to_datetime(cip_df['time']).dt.time
events_df = cip_df.groupby('id').first().drop(
columns=['tag_status', 'tag']).reset_index()
tags_df = pd.DataFrame({
'id': cip_df['id'],
'tag': cip_df['tag'],
'verified': cip_df['tag_status'] == 1,
'removed': cip_df['tag_status'] == 2
})
# Ignore verified and remove 'removed' tags
tags_df = tags_df[~tags_df['removed']]
tags_df.drop(columns=['verified', 'removed'], inplace=True)
return events_df, tags_df
def calculate_top_tags(tags_df, n_tags, use_cip_tags=True):
"""Calculate top tags from tags dataset
Args:
tags_df: Dataset to extract top tags from
n_tags: Number of topmost tags to get if generating
use_cip_tags: Use pre-defined tags from CiP (ignores `n_tags`)
Returns:
List of topmost tags
"""
tag_counts = tags_df['tag'].value_counts()
if use_cip_tags:
# Not all CiP tags are necessarily present in the dataset
# and not necessarily in sufficient amounts
present_tags = set(tag_counts[tag_counts > 5].index)
return list(filter(lambda t: t in present_tags, CIP_TAGS))
else:
return tag_counts.index[:n_tags]
def tags_to_matrix(events_df, tags_df, top_tags):
"""Converts tags to feature matrix
Args:
events_df: Events dataset
tags_df: Tags dataset
top_tags: Tags to include
Returns:
Feature matrix for tags
"""
# Combine tags into lists
tags = tags_df.groupby('id')['tag'].agg(lambda x: list(x)).reset_index()
# Handle events with no top tags
# TODO: Kludge, write nicer
missing_tags = pd.DataFrame({
'id': events_df[~events_df['id'].isin(tags['id'])]['id'].unique()
})
missing_tags['tag'] = [[] for _ in range(len(missing_tags))]
tags = pd.concat([tags, missing_tags])
# Align tags with events
aligned_tags = events_df.merge(tags, on='id')
# Convert aligned tags to matrix
mlb = MultiLabelBinarizer(classes=top_tags)
return mlb.fit_transform(aligned_tags['tag'])
def matrix_to_tags(tags, top_tags):
top_array = np.array(top_tags)
joined_tags = []
for row in tags:
joined_tags.append(reduce(lambda a, b: a + "," + b, top_array[row > 0]))
return np.array(joined_tags)
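# A minimal round-trip sketch (illustrative only, never called): the tags below are a
# hand-picked subset of CIP_TAGS, just to show how tags_to_matrix/matrix_to_tags pair up.
def _example_tag_round_trip():
    demo_events = pd.DataFrame({'id': [1, 2]})
    demo_tags = pd.DataFrame({'id': [1, 1, 2], 'tag': ['mat', 'musik', 'fika']})
    demo_top = ['mat', 'musik', 'fika']
    matrix = tags_to_matrix(demo_events, demo_tags, demo_top)  # 0/1 matrix of shape (2, 3)
    return matrix_to_tags(matrix, demo_top)  # array(['mat,musik', 'fika'])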
def load_datasets(path, drop_missing=True, n_tags=72,
test_size=0.2, random_state=42):
"""Load and split dataset from raw CiP data.
Args:
path: Path to raw CiP dataset
drop_missing: Drop events with no description or title
n_tags: Number of top tags to use (ignored)
test_size: Fraction of events to include in test set
random_state: Random state for the split
Returns:
(events_train, tags_train, events_test, tags_test, top_tags,
tags_train_stats)
"""
events_df, tags_df = load_raw_normalized_dataset(path,
drop_missing=drop_missing)
top_tags = calculate_top_tags(tags_df, n_tags=n_tags)
# Only keep top tags
tags_df = tags_df[tags_df['tag'].isin(top_tags)]
tag_matrix = tags_to_matrix(events_df, tags_df, top_tags)
# Split data into public training set and private test set
stratifier = IterativeStratification(
n_splits=2, order=2,
sample_distribution_per_fold=[test_size, 1.0 - test_size],
random_state=random_state)
train_indices, test_indices = next(stratifier.split(events_df, tag_matrix))
events_train, tags_train = events_df.iloc[train_indices], \
tag_matrix[train_indices, :]
events_test, tags_test = events_df.iloc[test_indices], \
tag_matrix[test_indices, :]
tags_train_stats = pd.DataFrame({
'tag': top_tags,
'count': tags_train.sum(axis=0)
}).sort_values('count', ascending=False)
return (events_train, tags_train, events_test, tags_test, top_tags,
tags_train_stats)
def extract_corpus(events_df):
"""Extract text corpus from event descriptions.
Args:
events_df: Event dataset
Returns:
List of event descriptions as raw text
"""
from tagger._preprocessing.html import HTMLToText
from tagger._preprocessing.characterset import CharacterSet
from tagger._preprocessing.lowercase import Lowercase
from sklearn.pipeline import Pipeline
cleaning_pipeline = Pipeline([
('html', HTMLToText()),
('cset', CharacterSet(punctuation=False)),
('lcase', Lowercase())
])
return list(cleaning_pipeline.fit_transform(events_df['description']))
def fasttext_wordvectors(corpus_path, model_path):
"""Compute word vectors using FastText.
Args:
corpus_path: Path to corpus
model_path: Path for storing FastText model
Returns:
FastText model
"""
model = fasttext.train_unsupervised(corpus_path)
model.save_model(model_path)
return model
def save_corpus(events_df, path):
"""Extract and store corpus for events.
Args:
events_df: Events dataset
path: Path for storing corpus
"""
corpus = extract_corpus(events_df)
with open(path, 'w') as f:
for doc in corpus:
f.write(doc + '\n')
if __name__ == '__main__':
# Generate static datasets and wordvectors for local dev
import os
print("Current working directory:", os.getcwd())
# Compute word vectors
events_df, tags_df = load_raw_normalized_dataset(
"../../../data/raw/citypolarna_public_events_out.csv",
drop_missing=True)
CORPUS_PATH = "../../../data/corpus.txt"
MODEL_PATH = "../../../data/wordvectors.bin"
save_corpus(events_df, CORPUS_PATH)
model = fasttext_wordvectors(CORPUS_PATH, MODEL_PATH)
# Split datasets
events_train, tags_train, events_test, tags_test, top_tags, tags_train_stats = load_datasets(
"../../../data/raw/citypolarna_public_events_out.csv"
)
print(f"Number of train events: {len(events_train)}")
print(f"Number of test events: {len(events_test)}")
# TODO: Proper path handling
DATA_PATH = "../../../data/"
events_train.to_csv(DATA_PATH + "events_train.csv", index=False)
events_test.to_csv(DATA_PATH + "events_test.csv", index=False)
# A kludge, but convenient: pandas can load from URLs
pd.DataFrame(tags_train).to_csv(DATA_PATH + "tags_train.csv", index=False)
pd.DataFrame(tags_test).to_csv(DATA_PATH + "tags_test.csv", index=False)
pd.DataFrame({'tag': top_tags}).to_csv(DATA_PATH + "top_tags.csv",
index=False)
tags_train_stats.to_csv(DATA_PATH + "tags_train_stats.csv", index=False)
| 2.453125
| 2
|
scripts/parse_plateplan.py
|
jcbird/ppv
| 1
|
12779612
|
<filename>scripts/parse_plateplan.py
from astropy.table import Table
from astropy.io.registry import (register_identifier, register_reader,
register_writer)
from pydl.pydlutils.yanny import (is_yanny, read_table_yanny,
write_table_yanny, yanny)
from pathlib import Path
import numpy as np
import ppv.config
register_identifier('yanny', Table, is_yanny)
register_reader('yanny', Table, read_table_yanny)
register_writer('yanny', Table, write_table_yanny)
platePlans = Table.read('../data/raw/platePlans.par', format='yanny',
tablename='PLATEPLANS')
print('platePlans.par is read')
is_mwm_plate = np.array(['mwm' in prun for prun in platePlans['platerun']])
is_bhm_plate = np.array(['bhm' in prun for prun in platePlans['platerun']])
is_sdss5_plate = np.bitwise_or(is_mwm_plate, is_bhm_plate)
sdss5_plates = platePlans[is_sdss5_plate]
# parent in root directory of repository
dir_ = (Path.cwd().parent / ppv.config._src_dir) / 'data'
out_filename = (dir_ / 'platePlans_sdss5.fits').as_posix()
sdss5_plates.write(out_filename, overwrite=True, format='fits')
print(f'SDSS-V platePlans table written to {out_filename}')
| 2.25
| 2
|
azure/mgmt/monitor/models/monitor_management_client_enums.py
|
EnjoyLifeFund/py36pkgs
| 2
|
12779613
|
<reponame>EnjoyLifeFund/py36pkgs
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class MetricStatisticType(Enum):
average = "Average"
min = "Min"
max = "Max"
sum = "Sum"
class TimeAggregationType(Enum):
average = "Average"
minimum = "Minimum"
maximum = "Maximum"
total = "Total"
count = "Count"
class ComparisonOperationType(Enum):
equals = "Equals"
not_equals = "NotEquals"
greater_than = "GreaterThan"
greater_than_or_equal = "GreaterThanOrEqual"
less_than = "LessThan"
less_than_or_equal = "LessThanOrEqual"
class ScaleDirection(Enum):
none = "None"
increase = "Increase"
decrease = "Decrease"
class ScaleType(Enum):
change_count = "ChangeCount"
percent_change_count = "PercentChangeCount"
exact_count = "ExactCount"
class RecurrenceFrequency(Enum):
none = "None"
second = "Second"
minute = "Minute"
hour = "Hour"
day = "Day"
week = "Week"
month = "Month"
year = "Year"
class ConditionOperator(Enum):
greater_than = "GreaterThan"
greater_than_or_equal = "GreaterThanOrEqual"
less_than = "LessThan"
less_than_or_equal = "LessThanOrEqual"
class TimeAggregationOperator(Enum):
average = "Average"
minimum = "Minimum"
maximum = "Maximum"
total = "Total"
last = "Last"
class ReceiverStatus(Enum):
not_specified = "NotSpecified"
enabled = "Enabled"
disabled = "Disabled"
| 2.09375
| 2
|
tests/test_cmd.py
|
gLhookniano/autoargparse
| 0
|
12779614
|
<filename>tests/test_cmd.py
#!coding:utf-8
from sys import path as sys_path
from os import path as os_path
import subprocess
import pytest
sys_path.append(os_path.abspath(os_path.join(os_path.dirname(__file__), "../")))
import autoargparse
@pytest.mark.inner
@pytest.mark.parametrize(
"input,stdin,expected",
[
("python ./cmd_inner_function.py 1 -b 2", "func_args", "[1, '2', False]"),
(
"python ./cmd_inner_function.py 1 -b 2 -c -d 3 4",
"cmd_args",
"{'a': '1', 'b': '2', 'c': True, 'd': [3, 4]}",
),
(
"python ./cmd_inner_function.py 1 -b 2 -c -d 3 4",
"func_args",
"[1, '2', True, 3, 4]",
),
],
)
def test_cmd_inner_function(input, stdin, expected):
obj = subprocess.Popen(
input.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd="./tests",
)
obj.stdin.writelines(stdin)
out, err = obj.communicate()
# [6:-7] hide 'debug>'
assert out.strip()[6:-7] == expected
@pytest.mark.cmd
@pytest.mark.parametrize(
"input,expected",
[
("python ./cmd_no_deco_use.py 1 2 3 4", "4"),
("python ./cmd_no_deco_use.py --sum 1 2 3 4", "10"),
],
)
def test_cmd_no_deco_use(input, expected):
obj = subprocess.Popen(
input.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd="./tests",
)
out, err = obj.communicate()
assert err == ""
assert out.strip() == expected
@pytest.mark.cmd
@pytest.mark.parametrize(
"input,expected",
[
("python ./cmd_deco_args.py 1 -b 2", "1 2 False ()"),
("python ./cmd_deco_args.py 1 -b 2 -c -d 3 4", "1 2 True (3, 4)"),
],
)
def test_cmd_deco_args(input, expected):
obj = subprocess.Popen(
input.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd="./tests",
)
out, err = obj.communicate()
assert err == ""
assert out.strip() == expected
@pytest.mark.cmd
@pytest.mark.parametrize(
"input,expected",
[
("python ./cmd_deco_kwargs.py -s 1 2 3 4 ", "10"),
("python ./cmd_deco_kwargs.py --max -s 1 2 3 4", "4"),
("python ./cmd_deco_kwargs.py --negative -s 1 2 3 4 --max", "-4"),
],
)
def test_cmd_deco_kwargs(input, expected):
obj = subprocess.Popen(
input.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd="./tests",
)
out, err = obj.communicate()
assert err == ""
assert out.strip() == expected
@pytest.mark.cmd
@pytest.mark.parametrize(
"input,expected", [("python ./cmd_deco_mark.py -a 1 -b 2 -c 3 -d 4 ", "1\n4\n3\n2")]
)
def test_cmd_deco_mark_run_order(input, expected):
obj = subprocess.Popen(
input.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd="./tests",
)
out, err = obj.communicate()
assert err == ""
assert out.strip() == expected
@pytest.mark.cmd
@pytest.mark.parametrize(
"input,expected",
[("python ./cmd_deco_in_class.py 1 -b 2 -c -d 3 4", "1 2 True (3, 4)")],
)
def test_cmd_deco_in_class(input, expected):
obj = subprocess.Popen(
input.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd="./tests",
)
out, err = obj.communicate()
assert err == ""
assert out.strip() == expected
| 2.53125
| 3
|
epidag/monitor.py
|
TimeWz667/PyEpiDAG
| 2
|
12779615
|
<gh_stars>1-10
import logging
import pandas as pd
__author__ = 'TimeWz667'
__all__ = ['Monitor']
class Monitor:
def __init__(self, name):
self.Title = name
self.Logger = logging.getLogger(name)
self.Logger.setLevel(logging.INFO)
self.Records = []
self.Time = 0
self.Last = dict()
def info(self, msg, *arg, **kwargs):
self.Logger.info(msg, *arg, **kwargs)
def warning(self, msg, *arg, **kwargs):
self.Logger.warning(msg, *arg, **kwargs)
def error(self, msg, *arg, **kwargs):
self.Logger.error(msg, *arg, **kwargs)
def set_log_path(self, filename):
fhl = logging.FileHandler(filename)
self.add_handler(fhl)
def add_handler(self, handler):
if not handler.formatter:
handler.setFormatter(
logging.Formatter('%(asctime)s %(levelname)s: %(message)s',
'%d-%m-%Y %H:%M:%S'))
self.Logger.addHandler(handler)
def __getitem__(self, item):
return self.Last[item]
def reset(self, time: int=0):
self.Time = time
self.Records.clear()
self.Last = dict()
def step(self, time=None):
time = time if time else self.Time + 1
if time < self.Time:
raise KeyError('Backward time specified')
elif time == self.Time:
return
self.Last['Time'] = self.Time
self.Records.append(self.Last)
self.Time = time
self.Last = dict()
self.Logger.info('Step to {}'.format(self.Time))
def keep(self, **kwargs):
self.Last.update(kwargs)
@property
def Trajectories(self):
dat = pd.DataFrame(self.Records)
return dat.set_index('Time')
def save_trajectories(self, filename):
self.Trajectories.to_csv(filename)
if __name__ == '__main__':
mon = Monitor('Test')
mon.add_handler(logging.StreamHandler())
mon.keep(Size=4)
mon.step()
mon.keep(Size=6)
mon.step()
print(mon.Trajectories)
| 2.671875
| 3
|
test/dragon/test_autograph.py
|
seetaresearch/Dragon
| 81
|
12779616
|
<filename>test/dragon/test_autograph.py<gh_stars>10-100
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Test the autograph module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import unittest
import dragon
from dragon.core.framework import config
from dragon.core.testing.unittest.common_utils import run_tests
class TestConfig(unittest.TestCase):
"""Test the graph config."""
def test_optimization(self):
dragon.autograph.set_optimization(1)
self.assertEqual(config.config().graph_optimization, 1)
def test_scheduler(self):
for scheduler in ('SIMPLE', 'FUSION', 'KNOWN', 'SIMPLE'):
try:
dragon.autograph.set_scheduler(scheduler)
if scheduler == 'FUSION':
self.assertEqual(config.config().graph_type, 'FusionGraph')
else:
self.assertEqual(config.config().graph_type, '')
except ValueError:
pass
def test_verbosity(self):
dragon.autograph.set_verbosity(1)
self.assertEqual(config.config().graph_verbosity, 1)
dragon.autograph.set_verbosity(0)
class TestFunction(unittest.TestCase):
"""Test the graph function."""
@dragon.function(input_signature=[
dragon.Tensor((1,), dtype='int32'),
dragon.Tensor((1,), dtype='int32'),
dragon.Tensor((1,), dtype='int32'),
])
def func1(self, a, b, c=0, **kwargs):
_ = kwargs
return a + b + c
def test_def_function(self):
@dragon.function(input_signature=[dragon.Tensor(None, symbolic=True)])
def func2(a, b):
return a + b
self.assertEqual(self.func1([1, 2], [3, 4]).numpy().tolist(), [4, 6])
self.assertEqual(self.func1([1, 2], b=[3, 4]).numpy().tolist(), [4, 6])
self.assertEqual(self.func1([1, 2], b=[3, 4], c=1).numpy().tolist(), [5, 7])
self.assertEqual(self.func1([1, 2], b=[3, 4], c=1).numpy().tolist(), [5, 7])
self.assertEqual(self.func1([1, 2], [3, 4], executing_stage='forward').numpy().tolist(), [4, 6])
dragon.function(func=lambda: dragon.optimizers.SGD())()
try:
self.func1(1, 2, 3, 4)
except ValueError:
pass
try:
func2(1, 2)
except ValueError:
pass
def test_update_function(self):
optimizer = dragon.optimizers.SGD(lr=1, momentum=0)
try:
_ = optimizer.op_type
except KeyError:
pass
var = dragon.constant(1, dtype='float32')
grad = dragon.constant(1, dtype='float32')
with dragon.eager_mode():
optimizer.apply_gradients([(grad, var)])
with dragon.graph_mode():
optimizer.apply_gradients([(grad, var)]).run()
self.assertEqual(float(var), -1.)
class TestOpSpec(unittest.TestCase):
"""Test the op spec."""
sym1 = dragon.Tensor(None, None, symbolic=True)
sym2 = dragon.Tensor((1,), symbolic=True)
sym3 = dragon.Tensor((1, None), symbolic=True)
sym4 = dragon.Tensor((1, None, None, None), symbolic=True)
sym5 = dragon.Tensor((1, None, None, None, None), symbolic=True)
def test_accuracy(self):
with dragon.graph_mode():
self.assertEqual(dragon.metrics.accuracy(
[self.sym1, self.sym1]).shape, ())
def test_arg_reduce(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.argmax(
self.sym1, axis=0, keepdims=True).shape, None)
self.assertEqual(dragon.math.argmax(
self.sym1, axis=0, keepdims=False).shape, None)
self.assertEqual(dragon.math.argmax(
self.sym2, axis=0, keepdims=True).shape, (1,))
self.assertEqual(dragon.math.argmax(
self.sym2, axis=0, keepdims=False).shape, ())
def test_binary_ops(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.add(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.math.add(
[self.sym2, self.sym2]).shape, (1,))
self.assertEqual(dragon.math.add(
[self.sym2, self.sym3]).shape, (1, None))
self.assertEqual(dragon.math.add(
[self.sym3, self.sym2]).shape, (1, None))
self.assertEqual(dragon.math.equal(
[self.sym1, self.sym1]).shape, None)
def test_boolean_mask(self):
with dragon.graph_mode():
self.assertEqual(dragon.boolean_mask(
[self.sym1, self.sym1]).shape, (None,))
def test_broadcast(self):
with dragon.graph_mode():
self.assertEqual(dragon.broadcast_to(
self.sym1, shape=(1,)).shape, None)
self.assertEqual(dragon.broadcast_to(
self.sym2, shape=(1, 2)).shape, (1, 2))
self.assertEqual(dragon.broadcast_to(
self.sym3, shape=(2,)).shape, self.sym3.shape[:-1] + (2,))
self.assertEqual(dragon.broadcast_to(
self.sym3, shape=(-1, 2, 2)).shape, (1, 2, 2))
def test_cast(self):
with dragon.graph_mode():
self.assertEqual(dragon.cast(self.sym1, 'float32').shape, None)
def test_concat(self):
with dragon.graph_mode():
self.assertEqual(dragon.concat([self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.concat([self.sym1, self.sym2]).shape, (None,))
self.assertEqual(dragon.concat([self.sym2, self.sym3], axis=0).shape, (2,))
self.assertEqual(dragon.concat([self.sym2, self.sym3], axis=1).shape, None)
def test_conv(self):
w = dragon.Tensor((3, 3, 3, 3))
with dragon.graph_mode():
self.assertEqual(dragon.nn.conv2d(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.nn.conv2d(
[self.sym4, w]).shape, (self.sym4.shape[0], w.shape[0], None, None))
self.assertEqual(dragon.nn.conv2d(
[w, w], kernel_shape=1, out_channels=w.shape[0]).shape, w.shape)
self.assertEqual(dragon.nn.conv2d(
[w, w], kernel_shape=1, padding='SAME').shape, w.shape)
self.assertEqual(dragon.nn.conv2d_transpose(
[self.sym4, w], out_channels=w.shape[1]).shape,
(self.sym4.shape[0], w.shape[1], None, None))
self.assertEqual(dragon.nn.conv2d_transpose(
[w, w], output_padding=(2, 2), kernel_shape=1).shape,
(w.shape[0], w.shape[1], w.shape[2] + 2, w.shape[3] + 2))
self.assertEqual(dragon.nn.conv2d_transpose(
[w, w], output_shape=(4, 4), output_padding=(2, 2), kernel_shape=1).shape,
(w.shape[0], w.shape[1], 6, 6))
def test_depth_to_space(self):
func1 = functools.partial(dragon.nn.depth_to_space, block_size=1)
func2 = functools.partial(dragon.nn.space_to_depth, block_size=1)
with dragon.graph_mode():
for func in (func1, func2):
self.assertEqual(func(self.sym1).shape, None)
self.assertEqual(func(self.sym2).shape, None)
self.assertEqual(func(self.sym4, data_format='NCHW').shape,
(self.sym4.shape[0],) + (None,) * (len(self.sym4.shape) - 1))
self.assertEqual(func(self.sym4, data_format='NCHW').shape,
(self.sym4.shape[0],) + (None,) * (len(self.sym4.shape) - 1))
self.assertEqual(func(dragon.Tensor((1, 2, 3)), data_format='NCHW').shape,
dragon.Tensor((1, 2, 3)).shape)
self.assertEqual(func(dragon.Tensor((1, 2, 3)), data_format='NHWC').shape,
dragon.Tensor((1, 2, 3)).shape)
def test_eltwise_loss(self):
with dragon.graph_mode():
self.assertEqual(dragon.losses.l2_loss(
[self.sym1, self.sym1]).shape, ())
self.assertEqual(dragon.losses.l2_loss(
[self.sym1, self.sym1], reduction='none').shape, None)
def test_expand_dims(self):
with dragon.graph_mode():
self.assertEqual(dragon.expand_dims(
self.sym1, axis=1).shape, None)
self.assertEqual(dragon.expand_dims(
self.sym2, axis=1).shape, (1, 1))
self.assertEqual(dragon.expand_dims(
self.sym2, axis=-1).shape, (1, 1))
self.assertEqual(dragon.expand_dims(
self.sym3, axis=0).shape, (1, 1, None))
self.assertEqual(dragon.expand_dims(
self.sym3, axis=(0, 3)).shape, (1, 1, None, 1))
self.assertEqual(dragon.expand_dims(
self.sym3, axis=(0, 3, 5)).shape, (1, 1, None, 1))
def test_extract_patches(self):
with dragon.graph_mode():
self.assertEqual(dragon.vision.extract_patches(self.sym1).shape, None)
self.assertEqual(dragon.vision.extract_patches(self.sym4).shape,
(self.sym4.shape[0], None, None, None))
def test_init_ops(self):
init_funcs_v1 = [dragon.fill,
dragon.ones,
dragon.random.glorot_normal,
dragon.random.glorot_uniform,
dragon.random.normal,
dragon.random.uniform,
dragon.random.truncated_normal,
dragon.zeros]
for func in init_funcs_v1:
with dragon.graph_mode():
self.assertEqual(func(shape=self.sym1.shape).shape, None)
self.assertEqual(func(shape=self.sym2.shape).shape, self.sym2.shape)
def test_flatten(self):
with dragon.graph_mode():
self.assertEqual(dragon.flatten(
self.sym1, axis=1).shape, None)
self.assertEqual(dragon.flatten(
self.sym4, axis=1, end_axis=3).shape, (1, None))
self.assertEqual(dragon.flatten(
self.sym4, axis=1, end_axis=-1).shape, (1, None))
def test_gather(self):
with dragon.graph_mode():
self.assertEqual(dragon.gather(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.gather(
[self.sym1, self.sym2], axis=-1).shape, None)
self.assertEqual(dragon.gather(
[self.sym3, self.sym2], axis=1).shape, (1, 1))
def test_gather_elements(self):
with dragon.graph_mode():
self.assertEqual(dragon.gather_elements(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.gather_elements(
[self.sym1, self.sym2], axis=0).shape, self.sym2.shape)
self.assertEqual(dragon.gather_elements(
[self.sym1, self.sym3], axis=1).shape, self.sym3.shape)
def test_gemm(self):
w = dragon.Tensor((3, 2), symbolic=True)
with dragon.graph_mode():
self.assertEqual(dragon.math.gemm(
[self.sym1, w]).shape, None)
self.assertEqual(dragon.math.gemm(
[self.sym3, w], transpose_a=True).shape, (None, 2))
self.assertEqual(dragon.math.gemm(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.math.gemm(
[w, self.sym1], transpose_b=True).shape, None)
def test_linspace(self):
with dragon.graph_mode():
self.assertEqual(dragon.linspace(
start=1, stop=5, num=3).shape, (3,))
self.assertEqual(dragon.linspace(
start=(1, 2), stop=(3, 4), num=3, axis=1).shape, (2, 3))
self.assertEqual(dragon.linspace(
start=(1, 2), stop=(3, 4), num=3, axis=0).shape, (3, 2))
def test_matmul(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.matmul(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.math.matmul(
[self.sym1, self.sym2]).shape, None)
self.assertEqual(dragon.math.matmul(
[self.sym1, self.sym3]).shape, None)
self.assertEqual(dragon.math.matmul(
[self.sym2, self.sym3]).shape, (None,))
self.assertEqual(dragon.math.matmul(
[self.sym3, self.sym2]).shape, (1,))
self.assertEqual(dragon.math.matmul(
[self.sym3, self.sym3]).shape, (1, None))
self.assertEqual(dragon.math.matmul(
[self.sym4, self.sym3]).shape, (1, None, None, None))
self.assertEqual(dragon.math.matmul(
[self.sym4, self.sym4]).shape, (1, None, None, None))
def test_moments(self):
with dragon.graph_mode():
self.assertEqual(dragon.nn.moments(self.sym1)[0].shape, ())
self.assertEqual(dragon.nn.moments(self.sym1, axis=0)[0].shape, None)
self.assertEqual(dragon.nn.moments(self.sym1, keepdims=True)[0].shape, (1,))
self.assertEqual(dragon.nn.moments(self.sym2)[0].shape, ())
self.assertEqual(dragon.nn.moments(self.sym2, axis=0)[0].shape, ())
self.assertEqual(dragon.nn.moments(self.sym2, axis=1)[0].shape, (1,))
self.assertEqual(dragon.nn.moments(self.sym2, axis=0, keepdims=True)[0].shape, (1,))
self.assertEqual(dragon.nn.moments(
dragon.Tensor(None, 'float64', symbolic=True))[0].dtype, 'float64')
self.assertEqual(dragon.nn.moments(
dragon.Tensor(None, 'int64', symbolic=True))[0].dtype, 'float64')
def test_multinomial(self):
with dragon.graph_mode():
self.assertEqual(dragon.random.multinomial(self.sym1).shape, None)
self.assertEqual(dragon.random.multinomial(self.sym2, num_samples=2).shape, (2,))
def test_non_zero(self):
with dragon.graph_mode():
self.assertEqual(dragon.nonzero(self.sym1).shape, None)
self.assertEqual(dragon.nonzero(self.sym2).shape, (None, 1))
def test_one_hot(self):
with dragon.graph_mode():
self.assertEqual(dragon.one_hot(self.sym1, depth=2).shape, None)
self.assertEqual(dragon.one_hot(self.sym2, depth=2).shape, (1, 2))
def test_pad(self):
with dragon.graph_mode():
self.assertEqual(dragon.pad(self.sym1, pads=[(1, 1)]).shape, None)
self.assertEqual(dragon.pad(self.sym3, pads=[(1, 1)]).shape, (3, None))
self.assertEqual(dragon.pad(self.sym3, pads=[(1, 1), (1, 1)]).shape, (3, None))
def test_permutation(self):
with dragon.graph_mode():
self.assertEqual(dragon.random.permutation(5).shape, (5,))
def test_pool(self):
func = functools.partial(dragon.nn.pool2d, kernel_shape=3, strides=1, pads=1)
with dragon.graph_mode():
self.assertEqual(func(self.sym1).shape, None)
self.assertEqual(func(self.sym3).shape, (1, None))
self.assertEqual(func(self.sym4).shape, (1, None, None, None))
self.assertEqual(func(self.sym4, global_pool=True).shape, (1, None, 1, 1))
self.assertEqual(func(dragon.Tensor((1, 3, 4, 4))).shape, (1, 3, 4, 4))
self.assertEqual(func(dragon.Tensor((1, 3, 4, 4)), padding='SAME').shape, (1, 3, 4, 4))
def test_predicative(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.is_inf(self.sym1).shape, self.sym1.shape)
self.assertEqual(dragon.math.is_inf(self.sym3).shape, self.sym3.shape)
self.assertEqual(dragon.math.is_nan(self.sym1).shape, self.sym1.shape)
self.assertEqual(dragon.math.is_nan(self.sym3).shape, self.sym3.shape)
def test_range(self):
with dragon.graph_mode():
self.assertEqual(dragon.range(3).shape, (3,))
self.assertEqual(dragon.range(3, 4).shape, (1,))
self.assertEqual(dragon.range(3, delta=0).shape, None)
def test_reduce(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.sum(self.sym1).shape, ())
self.assertEqual(dragon.math.sum(self.sym1, axis=0).shape, None)
self.assertEqual(dragon.math.sum(self.sym1, keepdims=True).shape, ())
self.assertEqual(dragon.math.sum(self.sym2, axis=0).shape, ())
self.assertEqual(dragon.math.sum(self.sym2, axis=1).shape, None)
self.assertEqual(dragon.math.sum(self.sym2, axis=0, keepdims=True).shape, (1,))
def test_repeat(self):
with dragon.graph_mode():
self.assertEqual(dragon.repeat(self.sym1, axis=None, repeats=2).shape, (None,))
self.assertEqual(dragon.repeat(self.sym1, axis=0, repeats=2).shape, None)
self.assertEqual(dragon.repeat(self.sym2, axis=None, repeats=2).shape, (2,))
self.assertEqual(dragon.repeat(self.sym3, axis=0, repeats=2).shape, (2, None))
self.assertEqual(dragon.repeat(self.sym3, axis=1, repeats=2).shape, (1, None))
def test_reshape(self):
with dragon.graph_mode():
self.assertEqual(dragon.reshape(self.sym2, shape=(0, 1)).shape, (1, 1))
self.assertEqual(dragon.reshape(self.sym3, shape=(0, -1)).shape, (1, None))
self.assertEqual(dragon.reshape(self.sym3, shape=(0, 1, 0)).shape, None)
def test_resize(self):
with dragon.graph_mode():
self.assertEqual(dragon.vision.resize(
self.sym4, sizes=(1,)).shape, (1, None, 1, 1))
self.assertEqual(dragon.vision.resize(
self.sym4, sizes=(1, 1)).shape, (1, None, 1, 1))
self.assertEqual(dragon.vision.resize(
self.sym4, sizes=(1, 1, 1, 1)).shape, (1, None, 1, 1))
self.assertEqual(dragon.vision.resize(
self.sym4, scales=(1,)).shape, (1, None, None, None))
self.assertEqual(dragon.vision.resize(
self.sym4, scales=(1, 1)).shape, (1, None, None, None))
self.assertEqual(dragon.vision.resize(
self.sym4, scales=(1, 1, 1, 1)).shape, (1, None, None, None))
self.assertEqual(dragon.vision.resize(
self.sym5, sizes=(1, 1, 1, 1)).shape, None)
def test_roi_pool(self):
rois = dragon.Tensor((2, 5))
func = functools.partial(dragon.vision.roi_pool, pooled_h=7, pooled_w=7)
with dragon.graph_mode():
self.assertEqual(func([self.sym1, rois]).shape, None)
self.assertEqual(func([self.sym4, rois]).shape, (2, None, 7, 7))
self.assertEqual(func([self.sym4, self.sym1]).shape, (None, None, 7, 7))
def test_slice(self):
with dragon.graph_mode():
self.assertEqual(dragon.slice(self.sym1, (1,), (1,)).shape, None)
self.assertEqual(dragon.slice(self.sym3, (1,), (1,)).shape, (1, None))
def test_softmax_loss(self):
with dragon.graph_mode():
self.assertEqual(dragon.losses.softmax_cross_entropy_loss(
[self.sym1, self.sym1]).shape, ())
self.assertEqual(dragon.losses.softmax_cross_entropy_loss(
[self.sym1, self.sym1], reduction='none').shape, None)
self.assertEqual(dragon.losses.softmax_cross_entropy_loss(
[self.sym3, self.sym1], reduction='none').shape, (self.sym3.shape[0],))
def test_sort(self):
with dragon.graph_mode():
self.assertEqual(dragon.sort(self.sym1)[0].shape, None)
self.assertEqual(dragon.sort(self.sym2)[0].shape, self.sym2.shape)
def test_split(self):
with dragon.graph_mode():
self.assertEqual(dragon.split(self.sym1, 2)[0].shape, None)
self.assertEqual(dragon.split(self.sym2, 2)[0].shape, (1,))
self.assertEqual(dragon.split(self.sym2, 2, axis=1)[0].shape, None)
self.assertEqual(dragon.split(self.sym2, (1, 1))[0].shape, (1,))
self.assertEqual(dragon.split(self.sym3, 2, axis=1)[0].shape, (1, None))
self.assertEqual(dragon.unstack(self.sym1, axis=0, num=2)[0].shape, None)
self.assertEqual(dragon.unstack(self.sym2, axis=0)[0].shape, ())
self.assertEqual(dragon.unstack(self.sym3, axis=1, num=2)[0].shape, (1,))
def test_squeeze(self):
with dragon.graph_mode():
self.assertEqual(dragon.squeeze(self.sym1).shape, None)
self.assertEqual(dragon.squeeze(self.sym2).shape, ())
self.assertEqual(dragon.squeeze(self.sym2, axis=-1).shape, ())
self.assertEqual(dragon.squeeze(self.sym3).shape, (None,))
def test_stack(self):
with dragon.graph_mode():
self.assertEqual(dragon.stack([self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.stack([self.sym3, self.sym2]).shape, (2, 1, None))
self.assertEqual(dragon.stack([self.sym3, self.sym3]).shape, (2, 1, None))
self.assertEqual(dragon.stack([self.sym3, self.sym3], axis=-1).shape, (1, None, 2))
def test_tile(self):
with dragon.graph_mode():
self.assertEqual(dragon.tile(
self.sym1, repeats=(1, 2)).shape, None)
self.assertEqual(dragon.tile(
self.sym3, repeats=(1, 2)).shape, (1, None))
def test_topk(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.top_k(self.sym1)[0].shape, None)
self.assertEqual(dragon.math.top_k(self.sym2, k=2)[0].shape, (2,))
self.assertEqual(dragon.math.top_k(self.sym2, axis=1)[0].shape, None)
def test_unchanged(self):
with dragon.graph_mode():
self.assertEqual(dragon.math.negative(self.sym1).shape, None)
def test_unique(self):
with dragon.graph_mode():
self.assertEqual(dragon.unique(self.sym1).shape, (None,))
self.assertEqual(dragon.unique(self.sym1, return_counts=True)[1].shape, (None,))
self.assertEqual(dragon.unique(self.sym1, return_inverse=True)[1].shape, None)
self.assertEqual(dragon.unique(self.sym1,
return_inverse=True,
return_counts=True)[1].shape, None)
class TestOpSpecWithTensorDesc(unittest.TestCase):
"""Test the op spec with tensor descriptors."""
sym1 = dragon.Tensor(None, symbolic=True)
sym2 = dragon.Tensor((1, None), symbolic=True)
sym3 = dragon.Tensor((1, None, None, None), symbolic=True)
with dragon.graph_mode():
shape1 = dragon.shape(sym1)
def test_broadcast_to(self):
with dragon.graph_mode():
self.assertEqual(dragon.broadcast_to(
self.sym1, shape=self.shape1).shape, None)
self.assertEqual(dragon.broadcast_to(
self.sym2, shape=self.shape1).shape, (None,) * len(self.sym2.shape))
def test_channel_norm(self):
func = functools.partial(dragon.nn.channel_norm,
mean=(1., 1., 1.), std=(1., 1., 1.))
with dragon.graph_mode():
self.assertEqual(func(self.sym1).shape, None)
self.assertEqual(func(self.sym1, perm=self.shape1).shape, None)
self.assertEqual(func(self.sym2).shape, self.sym2.shape)
self.assertEqual(func(self.sym2, perm=self.shape1).shape,
(None,) * len(self.sym2.shape))
def test_conv_transpose(self):
w = dragon.Tensor((3, 3, 3, 3))
with dragon.graph_mode():
self.assertEqual(dragon.nn.conv2d_transpose(
[self.sym1, self.sym1]).shape, None)
self.assertEqual(dragon.nn.conv2d_transpose(
[self.sym3, self.sym1]).shape, None)
self.assertEqual(dragon.nn.conv2d_transpose(
[self.sym3, w]).shape, (self.sym3.shape[0], w.shape[0], None, None))
self.assertEqual(dragon.nn.conv2d_transpose(
[w, w], output_padding=self.shape1).shape,
(w.shape[0], w.shape[0], None, None))
self.assertEqual(dragon.nn.conv2d_transpose(
[w, w], output_shape=self.shape1).shape,
(w.shape[0], w.shape[0], None, None))
def test_init_ops(self):
init_funcs_v1 = [dragon.fill,
dragon.ones,
dragon.random.glorot_normal,
dragon.random.glorot_uniform,
dragon.random.normal,
dragon.random.uniform,
dragon.random.truncated_normal,
dragon.zeros]
init_funcs_v2 = [dragon.ones_like,
dragon.random.normal_like,
dragon.random.uniform_like,
dragon.zeros_like]
for func in init_funcs_v1:
with dragon.graph_mode():
self.assertEqual(func(shape=self.shape1).shape, None)
for func in init_funcs_v2:
with dragon.graph_mode():
self.assertEqual(func(self.sym1).shape, None)
self.assertEqual(func(self.sym2).shape, self.sym2.shape)
def test_permutation(self):
with dragon.graph_mode():
self.assertEqual(dragon.random.permutation(self.sym1).shape, (None,))
def test_repeat(self):
with dragon.graph_mode():
self.assertEqual(dragon.repeat(
self.sym1, repeats=self.shape1).shape, None)
self.assertEqual(dragon.repeat(
self.sym2, repeats=self.shape1).shape, None)
def test_reshape(self):
with dragon.graph_mode():
self.assertEqual(dragon.reshape(
self.sym1, shape=self.shape1).shape, None)
self.assertEqual(dragon.reshape(
self.sym2, shape=self.shape1).shape, None)
def test_resize(self):
with dragon.graph_mode():
self.assertEqual(dragon.vision.resize(
self.sym1, sizes=self.shape1).shape, None)
self.assertEqual(dragon.vision.resize(
self.sym1, scales=self.shape1).shape, None)
self.assertEqual(dragon.vision.resize(
self.sym2, sizes=self.shape1).shape, (None,) * len(self.sym2.shape))
self.assertEqual(dragon.vision.resize(
self.sym2, scales=self.shape1).shape, (None,) * len(self.sym2.shape))
def test_slice(self):
with dragon.graph_mode():
self.assertEqual(dragon.slice(
self.sym1, starts=self.shape1, sizes=self.shape1).shape, None)
self.assertEqual(dragon.slice(
self.sym2, starts=self.shape1, sizes=self.shape1).shape, None)
def test_tile(self):
with dragon.graph_mode():
self.assertEqual(dragon.tile(
self.sym1, repeats=self.shape1).shape, None)
self.assertEqual(dragon.tile(
self.sym2, repeats=self.shape1).shape, (None,) * len(self.sym2.shape))
def test_transpose(self):
with dragon.graph_mode():
self.assertEqual(dragon.transpose(self.sym1).shape, None)
self.assertEqual(dragon.transpose(self.sym1, perm=self.shape1).shape, None)
self.assertEqual(dragon.transpose(self.sym2).shape, self.sym2.shape[::-1])
self.assertEqual(dragon.transpose(
self.sym2, perm=self.shape1).shape, (None,) * len(self.sym2.shape))
if __name__ == '__main__':
run_tests()
| 2.4375
| 2
|
ros_messaging/scripts/message_subscriber.py
|
emilyxzhou/github-actions-docker
| 0
|
12779617
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
class MessageSubscriber:
def __init__(
self,
node_name,
topic_name
):
rospy.init_node(node_name)
self._topic_name = topic_name
self._subscriber = rospy.Subscriber(
self._topic_name,
String,
callback=self._callback,
queue_size=1)
def _callback(self, msg):
rospy.loginfo("I heard: {}".format(msg.data))
if __name__ == "__main__":
message_subscriber = MessageSubscriber("message_subscriber", "example_messaging/messages")
rospy.spin()
| 2.5625
| 3
|
app/__init__.py
|
R-Rijnbeek/IFC_WebViewer
| 0
|
12779618
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__init__.py: This module define the the webservice function build with Flask
"""
# =============== IMPORTS ==============
from .utils import DeleteJSONFilesFromDirectory, CreateDirectoryIfItNotExist
from .shared import APP, LOG
# =============== PROCESS ===============
def create_app():
"""
INFORMATION: Function that starts the web service with the URL, port, debug mode and SQL configuration values defined in "config.cfg"
INPUT: None
OUTPUT: BOOLEAN
"""
try:
# CONFIGURE THE FLASK OBJECT with the 'config.cfg' configuration file
APP.config.from_pyfile("config.cfg")
# INITIALIZE LOGGER INSTANCE
LOG.init_app(APP)
LOG.info("Register Blueprints")
from .public import public_bp, js, upload
APP.register_blueprint(public_bp)
APP.register_blueprint(js)
APP.register_blueprint(upload)
LOG.info("Execute methods for initialization")
shape_path = APP.config["SHAPE_DIR"]
CreateDirectoryIfItNotExist(shape_path)
DeleteJSONFilesFromDirectory(shape_path)
LOG.info("Defining 'HOST' and 'PORT'")
host = APP.config["HOST"]
port = APP.config["PORT"]
LOG.info("Run the WebService")
APP.run(host = host, port = port)
return True
except Exception as exc:
message = f"unexpected error activting the webservice process: {exc}"
if (LOG.isLoggerActive()):
LOG.critical(message)
else:
print(f"CRITICAL: {message}")
return False
# =============== EXECUTE TEST CODE ===============
if __name__ == "__main__":
pass
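# A minimal sketch of an external entry point (e.g. a run.py) using this factory
# (illustrative only; the exit handling is an assumption):
def _example_entry_point():
    if not create_app():
        raise SystemExit("web service failed to start")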
| 2.546875
| 3
|
mercury/services/custom_exceptions.py
|
CoffeePerry/mercury-py
| 0
|
12779619
|
<gh_stars>0
# coding=utf-8
from werkzeug.exceptions import HTTPException
class MethodVersionNotFound(HTTPException):
"""*400* `Method Version Not Found` (Bad Request)
Raise if the browser requests a method through an invalid method version
('Accept-Version'), i.e. if it sends something that the application or server
cannot handle.
"""
code = 400
description = (
"Bad Request - Method Version Not Found: The browser (or proxy) request an invalid method's version."
)
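# A minimal usage sketch (illustrative only): the supported versions ('1', '2') and the
# header lookup are assumptions; this module itself only defines the exception.
def _example_accept_version_guard(request_headers):
    if request_headers.get('Accept-Version') not in ('1', '2'):
        raise MethodVersionNotFound()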
| 2.734375
| 3
|
auxein/playgrounds/__init__.py
|
auxein/auxein
| 1
|
12779620
|
# flake8: noqa
from .static import Static
| 0.996094
| 1
|
source/faltas simples no drive/fazerGrafico.py
|
andrevier/mestrado_ElePot
| 0
|
12779621
|
<filename>source/faltas simples no drive/fazerGrafico.py
import scipy.io
import matplotlib.pyplot as plt
# Load the .mat files into a dictionary and, from the dictionary, into a list.
fig1 = plt.figure('Valor da corrente média em faltas simples',figsize = (6.3,6.3))
ax = fig1.add_subplot(111)
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Load each average-current file CMC1..CMC6 and scatter-plot its real and imaginary parts.
for k in range(1, 7):
    CM = scipy.io.loadmat('CMC{}'.format(k))['CM'][0]
    CMreal = [c.real for c in CM]
    CMimag = [c.imag for c in CM]
    plt.scatter(CMreal, CMimag, label='C{}'.format(k))
#ax.set_title('Valor da corrente média em faltas simples')
ax.set_xlabel(r'I$\alpha$ (A)',loc='right')
ax.set_ylabel(r'I$\beta$ (A)', loc='top', rotation = 0)
plt.legend()
plt.savefig('CorrentesMediasFaltasSimples.jpeg', dpi=300)
plt.show()
| 2.90625
| 3
|
examples/her/her_sac_gym_fetch_reach.py
|
Lucaskabela/rlkit
| 0
|
12779622
|
import gym
import math
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer, WeightedObsDictRelabelingBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import GoalConditionedPathCollector
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from robosuite.wrappers import Wrapper, GymWrapper
import robosuite as suite
from robosuite import load_controller_config
import numpy as np
class GoalMountainCar(gym.Wrapper):
def reset(self, **kwargs):
state = self.env.reset(**kwargs)
ag = np.array(self.env.state)
g = np.array([self.env.goal_position, self.env.goal_velocity])
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
return state
def compute_reward(self, achieved_goal, desired_goal, info):
shape = False
dense = 100*((math.sin(3*achieved_goal[0]) * 0.0025 + 0.5 * achieved_goal[1] * achieved_goal[1]) - (math.sin(3*desired_goal[0]) * 0.0025 + 0.5 * desired_goal[1] * desired_goal[1]))
if achieved_goal[0] != desired_goal[0]:
return -1 if not shape else dense
else:
return 0 if achieved_goal[0] >= desired_goal[0] else (-1 if not shape else dense)
def step(self, action):
state, _, done, info = super().step(action)
ag = np.array(self.env.state)
g = np.array([self.env.goal_position, self.env.goal_velocity])
reward = self.compute_reward(ag, g, info)
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
info['is_success'] = reward==0
return state, reward, done, info
class GoalMountainCarContinuous(gym.Wrapper):
def __init__(self, env):
super().__init__(env=env)
env = env.env
print(env)
self.observation_space = gym.spaces.Dict({"observation": env.observation_space, "achieved_goal": env.observation_space, "desired_goal":env.observation_space})
self.action_space = env.action_space
# Default goal_Velocity is 0 - any speed will do (>=)
self.goal = np.array([env.goal_position, 0])
def reset(self, **kwargs):
state = self.env.reset(**kwargs)
ag = np.array(state)
g = self.goal
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
return state
def compute_reward(self, achieved_goal, desired_goal, info):
return 100 if achieved_goal[1] >= desired_goal[1] and achieved_goal[0] >= desired_goal[0] else -1
def step(self, action):
state, _, done, info = super().step(action)
ag = np.array(state)
g = self.goal
reward = self.compute_reward(ag, g, None)
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
info['is_success'] = int(ag[1] >= g[1] and ag[0] >= g[0])
return state, reward, done, info
class DoorWrapper(Wrapper):
"""
Initializes the Gym wrapper. Mimics many of the required functionalities of the Wrapper class
found in the gym.core module
Args:
env (MujocoEnv): The environment to wrap.
keys (None or list of str): If provided, each observation will
consist of concatenated keys from the wrapped environment's
observation dictionary. Defaults to robot-state and object-state.
Raises:
AssertionError: [Object observations must be enabled if no keys]
"""
def __init__(self, env, keys=None):
# Run super method
super().__init__(env=env)
# Create name for gym
robots = "".join([type(robot.robot_model).__name__ for robot in self.env.robots])
self.name = robots + "_" + type(self.env).__name__
# Get reward range
self.reward_range = (0, self.env.reward_scale)
if keys is None:
assert self.env.use_object_obs, "Object observations need to be enabled."
keys = ["object-state"]
# Iterate over all robots to add to state
for idx in range(len(self.env.robots)):
keys += ["robot{}_robot-state".format(idx)]
self.keys = keys
# Gym specific attributes
self.env.spec = None
self.metadata = None
self.goal = np.array([.3])
# set up observation and action spaces
flat_ob = self._flatten_obs(self.env.reset(), verbose=True)
self.obs_dim = flat_ob.size
high = np.inf * np.ones(self.obs_dim)
low = -high
self.observation_space = gym.spaces.Dict({"observation": gym.spaces.Box(low=low, high=high), "achieved_goal": gym.spaces.Box(low=np.zeros(1), high=np.ones(1), shape=(1,)), "desired_goal": gym.spaces.Box(low=np.zeros(1), high=np.ones(1), shape=(1,))})
low, high = self.env.action_spec
self.action_space = gym.spaces.Box(low=low, high=high)
def _flatten_obs(self, obs_dict, verbose=False):
"""
Filters keys of interest out and concatenate the information.
Args:
obs_dict (OrderedDict): ordered dictionary of observations
verbose (bool): Whether to print out to console as observation keys are processed
Returns:
np.array: observations flattened into a 1d array
"""
ob_lst = []
for key in obs_dict:
if key in self.keys:
if verbose:
print("adding key: {}".format(key))
ob_lst.append(obs_dict[key])
return np.concatenate(ob_lst)
def reset(self):
"""
Extends env reset method to return flattened observation instead of normal OrderedDict.
Returns:
np.array: Flattened environment observation space after reset occurs
"""
ob_dict = self.env.reset()
state = self._flatten_obs(ob_dict)
ag = np.array([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])
g = self.goal
return {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
def step(self, action):
"""
Extends vanilla step() function call to return flattened observation instead of normal OrderedDict.
Args:
action (np.array): Action to take in environment
Returns:
4-tuple:
- (np.array) flattened observations from the environment
- (float) reward from the environment
- (bool) whether the current episode is completed or not
- (dict) misc information
"""
ob_dict, reward, done, info = self.env.step(action)
state = self._flatten_obs(ob_dict)
ag = np.array([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])
g = self.goal
ob_dict = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
info['is_success'] = int(ag[0] > g[0])
return ob_dict, reward, done, info
def seed(self, seed=None):
"""
Utility function to set numpy seed
Args:
seed (None or int): If specified, numpy seed to set
Raises:
TypeError: [Seed must be integer]
"""
# Seed the generator
if seed is not None:
try:
np.random.seed(seed)
except:
TypeError("Seed must be an integer type!")
def compute_reward(self, achieved_goal, desired_goal, info):
return 1 if achieved_goal[0] > desired_goal[0] else 0
def make_env():
controller = load_controller_config(default_controller="OSC_POSE")
env = GymWrapper(suite.make(
"PickPlaceCan",
robots="Panda", # use Sawyer robot
use_camera_obs=False, # do not use pixel observations
has_offscreen_renderer=False, # not needed since not using pixel obs
has_renderer=False, # make sure we can render to the screen
reward_shaping=True, # use dense rewards
reward_scale=1.0, # scale max 1 per timestep
control_freq=20, # control should happen fast enough so that simulation looks smooth
horizon=500,
ignore_done=True,
hard_reset=False,
controller_configs=controller
))
return env
# GoalMountainCarContinuous(gym.make("MountainCarContinuous-v0"))
# GoalMountainCar(gym.make(MountainCar-v0))
def experiment(variant):
# unwrap the TimeLimitEnv wrapper since we manually termiante after 50 steps
# eval_env = gym.make('FetchPickAndPlace-v1').env
# expl_env = gym.make('FetchPickAndPlace-v1').env
eval_env = make_env()
expl_env = make_env()
print(eval_env.observation_space)
observation_key = 'observation'
desired_goal_key = 'desired_goal'
# achieved_goal_key = desired_goal_key.replace("desired", "achieved")
# replay_buffer = ObsDictRelabelingBuffer(
# env=eval_env,
# observation_key=observation_key,
# desired_goal_key=desired_goal_key,
# achieved_goal_key=achieved_goal_key,
# **variant['replay_buffer_kwargs']
# )
obs_dim = eval_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
# goal_dim = eval_env.observation_space.spaces['desired_goal'].low.size
print(obs_dim)
print(action_dim)
# print(goal_dim)
qf1 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
qf2 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf1 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf2 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim + goal_dim,
action_dim=action_dim,
**variant['policy_kwargs']
)
eval_policy = MakeDeterministic(policy)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['sac_trainer_kwargs']
)
trainer = HERTrainer(trainer, use_per=False)
eval_path_collector = GoalConditionedPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
expl_path_collector = GoalConditionedPathCollector(
expl_env,
policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
algorithm='HER-SAC',
version='normal',
algo_kwargs=dict(
batch_size=512,
num_epochs=500,
num_eval_steps_per_epoch=5000,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=500,
min_num_steps_before_training=1000,
max_path_length=500,
),
sac_trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
max_size=int(50000),
fraction_goals_rollout_goals=0.2, # equal to k = 4 in HER paper
fraction_goals_env_goals=0,
),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
)
setup_logger('her-sac-door-experiment', variant=variant)
experiment(variant)
| 2.125
| 2
|
dual_encoder/keras_layers_test.py
|
garyxcheng/federated
| 330
|
12779623
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow as tf
from dual_encoder import keras_layers
l2_normalize_fn = lambda x: tf.keras.backend.l2_normalize(x, axis=-1)
class KerasLayersTest(absltest.TestCase):
def test_masked_average_3d(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = tf.constant([[True, True, True],
[False, False, True],
[True, False, False],
[False, False, False]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[1.3 / 3, 0.5 / 3],
[0.4, 0.1],
[0.9, 0.4],
[0.0, 0.0]
])
expected_mask = None
tf.debugging.assert_near(expected_average, output_average)
self.assertEqual(expected_mask, output_mask)
def test_masked_average_4d(self):
masked_average_layer = keras_layers.MaskedAverage(2)
inputs = tf.constant([
[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
])
mask = tf.constant([[[True, True, True], [True, False, True]],
[[False, False, True], [False, False, False]],
[[True, False, False], [True, True, True]],
[[False, False, False], [True, False, False]]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[[1.3 / 3, 0.5 / 3], [0.5, 0.45]],
[[0.4, 0.1], [0.0, 0.0]],
[[0.9, 0.4], [0.5, 1.3 / 3]],
[[0.0, 0.0], [0.6, 0.8]],
])
expected_mask = tf.constant([[True, True],
[True, False],
[True, True],
[False, True]])
tf.debugging.assert_near(expected_average, output_average)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_average_raises_error(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = None
with self.assertRaises(ValueError):
masked_average_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_average_layer.compute_mask(inputs, mask=mask)
def test_masked_reshape(self):
masked_reshape_layer = keras_layers.MaskedReshape((4, 4, 2, 1), (4, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_unknown_batch_size(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_raises_error(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = None
with self.assertRaises(ValueError):
masked_reshape_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_reshape_layer.compute_mask(inputs, mask=mask)
def test_embedding_spreadout_regularizer_dot_product(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=0.0)
# Similarities without diagonal looks like:
# 0.0 2.0 0.1 0.3 0.0
# 2.0 0.0 1.2 1.2 2.0
# 0.1 1.2 0.0 0.1 0.2
# 0.3 1.2 0.1 0.0 0.2
# 0.0 2.0 0.2 0.2 0.0
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.47053161424
tf.debugging.assert_near(expected_loss, loss)
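    # Worked arithmetic for the expected value above: the squared off-diagonal
    # similarities sum to 4*(2.0**2) + 4*(1.2**2) + 4*(0.1**2) + 2*(0.3**2)
    # + 4*(0.2**2) = 22.14, so their L2 (Frobenius) norm is sqrt(22.14) ~= 4.70532,
    # and with spreadout_lambda = 0.1 the expected loss is ~0.4705316.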
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.47053161424 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_cosine_similarity(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=0.0)
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.2890284
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.2890284 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_no_spreadout(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.0)
loss = regularizer(weights)
expected_loss = 0.0
tf.debugging.assert_near(expected_loss, loss)
# Test that L2 normalization behaves normally.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
# Test that normalization_fn has no effect.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
def test_embedding_spreadout_regularizer_get_config(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
config = regularizer.get_config()
expected_config = {
'spreadout_lambda': 0.0,
'normalization_fn': l2_normalize_fn,
'l2_regularization': 0.1
}
new_regularizer = (
keras_layers.EmbeddingSpreadoutRegularizer.from_config(config))
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = new_regularizer(weights)
l2_loss = l2_regularizer(weights)
self.assertEqual(config, expected_config)
tf.debugging.assert_near(l2_loss, loss)
if __name__ == '__main__':
absltest.main()
| 2.109375
| 2
|
src/stub_libraries/stub_numpy/stub_numpy.py
|
sunblaze-ucb/privguard-artifact
| 6
|
12779624
|
<filename>src/stub_libraries/stub_numpy/stub_numpy.py
# MIT License
# Copyright (c) 2021 sunblaze-ucb
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Function summaries for the numpy library. """
import os
import sys
sys.path.append(os.path.join(os.environ.get('PRIVGUARD'), "src/parser"))
import numpy as np
import stub_pandas as pd
import math
from blackbox import Blackbox
from policy_tree import Policy
from utils import UniversalIndex
from tabular import Tabular
int8 = np.int8
int64 = np.int64
nan = np.nan
ptp = np.ptp
float = np.float
newaxis = np.newaxis
class ndarray(Blackbox):
def __getattr__(self, attr):
if attr == 'shape':
return UniversalIndex()
def __format__(self, _):
return str(self)
def __str__(self):
return f'ndarray({self.policy})'
def __getitem__(self, key):
return self
def __setitem__(self, key, newvalue):
return ndarray(policy=self.policy.join(newvalue.policy))
def copy(self):
return ndarray(policy=self.policy.copy())
def arange(shape, *args, **kwargs):
return ndarray()
def array(obj, *args, **kwargs):
if isinstance(obj, Tabular):
return ndarray(policy=obj.policy)
else:
raise NotImplementedError
def sum(a, **kwargs):
newPolicy = Policy()
for x in a:
newPolicy = newPolicy.join(x.policy)
return Blackbox(newPolicy)
def vstack(arr, *args, **kwargs):
policy = arr[0].policy
for i in range(1, len(arr)):
policy = policy.join(arr[i].policy)
return ndarray(policy)
def concatenate(arr, *args, **kwargs):
policy = arr[0].policy
for i in range(1, len(arr)):
policy = policy.join(arr[i].policy)
return ndarray(policy)
def ones(shape, *args, **kwargs):
return ndarray()
def zeros(shape, *args, **kwargs):
return ndarray()
def tanh(x, **kwargs):
return Blackbox(x.policy)
def log1p(x, **kwargs):
return Blackbox(x.policy)
def exp(x, **kwargs):
return Blackbox(x.policy)
def expm1(x, **kwargs):
return Blackbox(x.policy)
def log(x, **kwargs):
return Blackbox(x.policy)
def corrcoef(x, y, **kwargs):
return Blackbox(x.policy.join(y.policy))
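# A hypothetical further stub following the same policy-propagation pattern as
# the `tanh`/`log` summaries above (illustration only, not part of the stub API):
#   def mean(a, **kwargs):
#       return Blackbox(a.policy)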
| 1.984375
| 2
|
main.py
|
Poojap19create/Sampling-distribution
| 0
|
12779625
|
<reponame>Poojap19create/Sampling-distribution
import plotly.figure_factory as ff
import plotly.graph_objects as go
import statistics
import random
import pandas as pd
import csv
df = pd.read_csv("data.csv")
data = df["temp"].tolist()
# Function to get the mean of a random sample of the data;
# pass the number of data points you want as `counter`.
# (Reconstructed from these comments: random_set_of_mean() is called below
# but was never defined in the file.)
def random_set_of_mean(counter):
    dataset = []
    for _ in range(0, counter):
        random_index = random.randint(0, len(data) - 1)
        dataset.append(data[random_index])
    return statistics.mean(dataset)
# Function to plot the mean on the graph (not implemented in this file);
# pass the number of times you want the sample mean to the range() in the for loop.
# Code to find the mean of the raw data ("population data")
population_mean = statistics.mean(data)
print("population mean:- ", population_mean)
# code to find the standard deviation of the sample data
def standard_deviation():
mean_list = []
for i in range(0,1000):
set_of_means= random_set_of_mean(100)
mean_list.append(set_of_means)
std_deviation = statistics.stdev(mean_list)
print("Standard deviation of sampling distribution:- ", std_deviation)
standard_deviation()
| 3.984375
| 4
|
Examples/Multi-Agent_Examples/OpenAI-Repo/multiagent/scenarios/electricty_market/simple_market2.py
|
alexanderkell/reinforcement-learning-examples
| 0
|
12779626
|
<reponame>alexanderkell/reinforcement-learning-examples<filename>Examples/Multi-Agent_Examples/OpenAI-Repo/multiagent/scenarios/electricty_market/simple_market2.py
# import sys
# sys.path.insert(0, '/Users/b1017579/Documents/PhD/Projects/10. ELECSIM')
#
# from src.plants.plant_costs.estimate_costs.estimate_costs import select_cost_estimator
# from src.plants.plant_registry import PlantRegistry
import numpy as np
from multiagent.core import World
from multiagent.scenario import BaseScenario
from multiagent.scenarios.electricty_market.agent import Agent
from random import choice, randint
class MarketAgent(Agent):
def __init__(self):
super().__init__()
self.money = 0
class Scenario(BaseScenario):
def make_world(self):
world = World()
# add agents
world.agents = [MarketAgent() for i in range(10)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = False
agent.silent = True
agent.adversary = False
agent.movable = False
agent.blind = False
agent.u_range = 5000
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# random properties for agents
plant_types = ['Nuclear','Coal','Gas','PV','Offshore']
for i, agent in enumerate(world.agents):
# agent.plants = [self.create_power_plant(2000, choice(plant_types), randint(20,1500))]*5
agent.money = 0
def reward(self, world):
return [x.money for x in world.agents]
def observation(self, agent, world):
# observations whether bids have been accepted or rejected
for agent in world.agents:
if agent.u < 50:
# bid_result = [x.status for x in agent.bids]
result = agent.bid.result
return result
# def create_power_plant(self, start_year, plant_type, capacity):
# estimated_cost_parameters = select_cost_estimator(start_year=start_year,
# plant_type=plant_type,
# capacity=capacity)
# power_plant_obj = PlantRegistry(plant_type).plant_type_to_plant_object()
# power_plant = power_plant_obj(name="test", plant_type=plant_type,
# capacity_mw=capacity, construction_year=start_year,
# **estimated_cost_parameters)
# return power_plant
| 3.453125
| 3
|
eltyer_investing_algorithm_framework/configuration/constants.py
|
ELTYER/eltyer-investing-algorithm-framework
| 0
|
12779627
|
<gh_stars>0
ELTYER_CLIENT = "ELTYER_CLIENT"
ELTYER_API_KEY = "ELTYER_API_KEY"
| 0.984375
| 1
|
pcbmode/utils/footprint.py
|
Hylian/pcbmode
| 370
|
12779628
|
#!/usr/bin/python
import os
import re
import json
from lxml import etree as et
import pcbmode.config as config
from . import messages as msg
# pcbmode modules
from . import svg
from . import utils
from . import place
import copy
from .style import Style
from .point import Point
from .shape import Shape
class Footprint():
"""
"""
def __init__(self, footprint):
self._footprint = footprint
self._shapes = {'conductor': {},
'pours': {},
'soldermask': {},
'silkscreen': {},
'assembly': {},
'solderpaste': {},
'drills': {}}
self._processPins()
self._processPours()
self._processShapes()
self._processAssemblyShapes()
def getShapes(self):
return self._shapes
def _processPins(self):
"""
Converts pins into 'shapes'
"""
pins = self._footprint.get('pins') or {}
for pin in pins:
pin_location = pins[pin]['layout']['location'] or [0, 0]
try:
pad_name = pins[pin]['layout']['pad']
except:
msg.error("Each defined 'pin' must have a 'pad' name that is defined in the 'pads' dection of the footprint.")
try:
pad_dict = self._footprint['pads'][pad_name]
except:
msg.error("There doesn't seem to be a pad definition for pad '%s'." % pad_name)
# Get the pin's rotation, if any
pin_rotate = pins[pin]['layout'].get('rotate') or 0
shapes = pad_dict.get('shapes') or []
for shape_dict in shapes:
shape_dict = shape_dict.copy()
# Which layer(s) to place the shape on
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
# Add the pin's location to the pad's location
shape_location = shape_dict.get('location') or [0, 0]
shape_dict['location'] = [shape_location[0] + pin_location[0],
shape_location[1] + pin_location[1]]
# Add the pin's rotation to the pad's rotation
shape_dict['rotate'] = (shape_dict.get('rotate') or 0) + pin_rotate
                # Determine if and which label to show
                show_name = pins[pin]['layout'].get('show-label', True)
                if show_name == True:
                    pin_label = pins[pin]['layout'].get('label') or pin
                else:
                    pin_label = None
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'conductor')
shape.setStyle(style)
try:
self._shapes['conductor'][layer].append(shape)
except:
self._shapes['conductor'][layer] = []
self._shapes['conductor'][layer].append(shape)
for stype in ['soldermask','solderpaste']:
# Get a custom shape specification if it exists
sdict_list = shape_dict.get(stype)
# Not defined; default
if sdict_list == None:
# Use default settings for shape based on
# the pad shape
sdict = shape_dict.copy()
# Which shape type is the pad?
shape_type = shape.getType()
# Apply modifier based on shape type
if shape_type == 'path':
sdict['scale'] = shape.getScale()*config.brd['distances'][stype]['path-scale']
elif shape_type in ['rect', 'rectangle']:
sdict['width'] += config.brd['distances'][stype]['rect-buffer']
sdict['height'] += config.brd['distances'][stype]['rect-buffer']
elif shape_type in ['circ', 'circle']:
sdict['diameter'] += config.brd['distances'][stype]['circle-buffer']
else:
pass
# Create shape based on new dictionary
sshape = Shape(sdict)
# Define style
sstyle = Style(sdict, stype)
# Apply style
sshape.setStyle(sstyle)
# Add shape to footprint's shape dictionary
#self._shapes[stype][layer].append(sshape)
try:
self._shapes[stype][layer].append(sshape)
except:
self._shapes[stype][layer] = []
self._shapes[stype][layer].append(sshape)
# Do not place shape
elif (sdict_list == {}) or (sdict_list == []):
pass
# Custom shape definition
else:
# If dict (as before support of multiple
# shapes) then append to a single element
# list
if type(sdict_list) is dict:
sdict_list = [sdict_list]
# Process list of shapes
for sdict_ in sdict_list:
sdict = sdict_.copy()
shape_loc = utils.toPoint(sdict.get('location') or [0, 0])
# Apply rotation
sdict['rotate'] = (sdict.get('rotate') or 0) + pin_rotate
# Rotate location
shape_loc.rotate(pin_rotate, Point())
sdict['location'] = [shape_loc.x + pin_location[0],
shape_loc.y + pin_location[1]]
# Create new shape
sshape = Shape(sdict)
# Create new style
sstyle = Style(sdict, stype)
# Apply style
sshape.setStyle(sstyle)
# Add shape to footprint's shape dictionary
#self._shapes[stype][layer].append(sshape)
try:
self._shapes[stype][layer].append(sshape)
except:
self._shapes[stype][layer] = []
self._shapes[stype][layer].append(sshape)
# Add pin label
if (pin_label != None):
shape.setLabel(pin_label)
drills = pad_dict.get('drills') or []
for drill_dict in drills:
drill_dict = drill_dict.copy()
drill_dict['type'] = drill_dict.get('type') or 'drill'
drill_location = drill_dict.get('location') or [0, 0]
drill_dict['location'] = [drill_location[0] + pin_location[0],
drill_location[1] + pin_location[1]]
shape = Shape(drill_dict)
style = Style(drill_dict, 'drills')
shape.setStyle(style)
try:
self._shapes['drills']['top'].append(shape)
except:
self._shapes['drills']['top'] = []
self._shapes['drills']['top'].append(shape)
def _processPours(self):
"""
"""
try:
shapes = self._footprint['layout']['pours']['shapes']
except:
return
for shape_dict in shapes:
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'conductor', 'pours')
shape.setStyle(style)
try:
self._shapes['pours'][layer].append(shape)
except:
self._shapes['pours'][layer] = []
self._shapes['pours'][layer].append(shape)
def _processShapes(self):
"""
"""
sheets = ['conductor', 'silkscreen', 'soldermask']
for sheet in sheets:
try:
shapes = self._footprint['layout'][sheet]['shapes']
except:
shapes = []
for shape_dict in shapes:
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
for layer in layers:
# Mirror the shape if it's text and on bottom later,
# but let explicit shape setting override
if layer == 'bottom':
if shape_dict['type'] == 'text':
shape_dict['mirror'] = shape_dict.get('mirror') or 'True'
shape = Shape(shape_dict)
style = Style(shape_dict, sheet)
shape.setStyle(style)
try:
self._shapes[sheet][layer].append(shape)
except:
self._shapes[sheet][layer] = []
self._shapes[sheet][layer].append(shape)
def _processAssemblyShapes(self):
"""
"""
try:
shapes = self._footprint['layout']['assembly']['shapes']
except:
return
for shape_dict in shapes:
layers = utils.getExtendedLayerList(shape_dict.get('layer') or ['top'])
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'assembly')
shape.setStyle(style)
try:
self._shapes['assembly'][layer].append(shape)
except:
self._shapes['assembly'][layer] = []
self._shapes['assembly'][layer].append(shape)
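    # A possible refactoring sketch: the repeated try/except-append blocks above
    # could be collapsed into a small helper based on dict.setdefault, e.g.:
    #   def _addShape(self, sheet, layer, shape):
    #       self._shapes[sheet].setdefault(layer, []).append(shape)
    # with call sites becoming: self._addShape('conductor', layer, shape)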
| 2.75
| 3
|
pysome/maybe.py
|
mdagaro/pysome
| 0
|
12779629
|
from typing import (
Any,
Callable,
cast,
Generic,
Mapping,
NoReturn,
Optional,
TypeVar,
Union,
)
from abc import ABC, abstractmethod
from functools import wraps
import math
import collections
from util import *
__all__ = ["Maybe", "Some", "Nothing", "maybe"]
class Maybe(Generic[T], ABC):
"""
Generic class and type for the pysome Maybe object. Two classes inherit from
this abstract class: Some and Nothing.
"""
def __init__(self, value, is_none=False):
if not is_none:
self._value = value
super().__init__()
@abstractmethod
def get(self) -> Union[T, NoReturn]:
"""
Get the value that is stored if it is a `Some` object, or throw an error
if it is `Nothing` object. Should only be used if you know that it is a
`Some` object. If you are unsure, you can use `Maybe.or_else()`.
"""
pass
@abstractmethod
def or_else(self, value: T) -> T:
"""
Get the value that is stored if it is a `Some` object, or return `value` if it is a `Nothing` object.
value: T
- A default value to be returned if it is a Nothing class
"""
pass
def some_or_else(self, value: T) -> "Maybe[T]":
return maybe(self.or_else(value))
@abstractmethod
def is_some(self) -> bool:
"""
        Return whether or not this is a `Some`. Equivalent to `isinstance(self, Some)`.
"""
pass
@abstractmethod
def is_none(self) -> bool:
"""
        Return whether or not this is a `Nothing`. Equivalent to `isinstance(self, Nothing)`.
"""
pass
@abstractmethod
def comb(self, *funcs: Callable[["Maybe"], "Maybe"]) -> "Maybe":
"""
Given a list of functions, call each function iteratively on self and
return the result. It should be noted that the following two are
equivalent. The functions are assumed to be "maybe" functions, in that
they take in a Maybe object and return a Maybe object. If you have a
function that is not of that type, you can use the @maybefunction
wrapper to convert it.
> something.comb(f1, f2, f3)
> something.comb(f1).comb(f2).comb(f3)
funcs: Callable[[Maybe], Maybe]
- A "maybe" function that takes in a Maybe object and returns
a Maybe object.
"""
pass
class Some(Maybe[T]):
"""
A class that contains something. While it is possible to directly instatiate
a `Some` object, you should instead use the `maybe()` function.
"""
def __init__(self, value):
super().__init__(value)
def get(self) -> T:
return self._value
def or_else(self, value: T) -> T:
return self.get()
def is_some(self):
return True
def is_none(self):
return False
def comb(self, *funcs: Callable[[Maybe[T]], Maybe[U]]) -> Maybe[U]:
value = self.get()
for func in funcs:
value = func(value)
if value == Nothing():
return value
return value
def __magic_wrapper(f):
def wrapper(self, *args):
return f(self.get(), *args)
return wrapper
def __eq__(self, other):
if isinstance(other, Some):
return self.get() == other.get()
elif isinstance(other, Nothing):
return False
return self.get() == other
def __getitem__(self, key: K) -> Maybe[V]:
try:
return maybe(self.get()[key])
except KeyError:
return Nothing()
except IndexError:
return Nothing()
except TypeError:
return Nothing()
def __setitem__(self, key, value):
self.get()[key] = value
__int__ = __magic_wrapper(int)
__complex__ = __magic_wrapper(complex)
__float__ = __magic_wrapper(float)
__bool__ = __magic_wrapper(bool)
__round__ = __magic_wrapper(round)
__trunc__ = __magic_wrapper(math.trunc)
__floor__ = __magic_wrapper(math.floor)
__ceil__ = __magic_wrapper(math.ceil)
__len__ = __magic_wrapper(len)
__hash__ = __magic_wrapper(hash)
def __op_wrapper(func):
@wraps(func)
def wrapper(self, other: Any) -> Maybe:
# Normalize
if isinstance(other, Some):
other = other.get()
try:
return maybe(func(self.get(), other)) # type: ignore
except TypeError:
return Nothing()
# Division case (I don't know how much overhead this adds)
except ZeroDivisionError:
return Nothing()
return wrapper
__add__ = __op_wrapper(lambda x, y: x + y)
__radd__ = __op_wrapper(lambda x, y: y + x)
__sub__ = __op_wrapper(lambda x, y: x - y)
__rsub__ = __op_wrapper(lambda x, y: y - x)
__mul__ = __op_wrapper(lambda x, y: x * y)
__rmul__ = __op_wrapper(lambda x, y: y * x)
__truediv__ = __op_wrapper(lambda x, y: x / y)
__rtruediv__ = __op_wrapper(lambda x, y: y / x)
def __getattr__(self, attr):
try:
if hasattr(self.get(), "__getattr__"):
return self.get().__getattr__(attr)
return self.get().__getattribute__(attr)
except AttributeError:
return Nothing()
def __str__(self):
return str(self.get())
def __repr__(self):
return "Some(%s)" % repr(self.get())
class Nothing(Maybe[T]):
def __init__(self):
super().__init__(None, True)
@staticmethod
def __return_nothing(*args, **kwargs):
return Nothing()
def get(self) -> NoReturn:
        raise ValueError("get() called on Nothing; use or_else() to supply a default")
def or_else(self, value: T) -> T:
return value
def is_some(self):
return False
def is_none(self):
return True
def comb(self, *funcs: Callable[[T], Maybe]) -> Maybe:
return self
def __eq__(self, other):
return isinstance(other, Nothing)
# All operators should return Nothing
__add__ = __return_nothing
__radd__ = __return_nothing
__sub__ = __return_nothing
__rsub__ = __return_nothing
__mul__ = __return_nothing
__rmul__ = __return_nothing
__truediv__ = __return_nothing
__rtruediv__ = __return_nothing
__getitem__ = __return_nothing
__getattr__ = __return_nothing
__call__ = __return_nothing
def __str__(self):
return "None"
def __repr__(self):
return "Nothing"
def maybe(value):
    # Treat both None and Nothing as the empty case so that maybe(None) is Nothing.
    if value is None or isinstance(value, Nothing):
        return Nothing()
    elif isinstance(value, Some):
        return value
    return Some(value)
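# A minimal usage sketch of the API above (illustrative, doctest-style comments):
#   >>> maybe({"a": 1})["a"] + 1
#   Some(2)
#   >>> maybe({"a": 1})["missing"].or_else(0)
#   0
#   >>> Nothing() + 5
#   Nothing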
| 3.46875
| 3
|
PySort/main.py
|
Cet500/PythonSorter
| 1
|
12779630
|
# program for sorting files into folders by their type or extension
import os
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from conf import conf_ext
print( ' PythonSorter by SI ver 0.5' )
print( '-----------------------------------------------------------' )
print( ' Здравствуйте, ' + os.getlogin() + '!' )
print( ' Вас приветствует программа сортировки ваших файлов' )
print( '-----------------------------------------------------------' )
print( ' Давайте начнём с выбора задачи: ' )
print( '-----------------------------------------------------------' )
print( ' 1 | сортировка файлов по папкам' )
print( ' 2 | просмотр конфигурации типов' )
print( '-----------------------------------------------------------' )
task = int( input( ' ' ) )
if not ((task == 1) or (task == 2)):
print( '-----------------------------------------------------------' )
print( ' Ошибка: нет такой задачи, вы вероятно ошиблись, бывает' )
print( ' Совет: перезапустите программу и попробуйте снова' )
print( '-----------------------------------------------------------' )
exit()
if task == 1:
print( '-----------------------------------------------------------' )
print( ' Перед началом работы давайте всё настроим как вам нужно' )
print( '-----------------------------------------------------------' )
print( ' Выберите режим сортировки:' )
print( '-----------------------------------------------------------' )
print( ' 1 | по типу' )
print( ' 2 | по расширению' )
print( '-----------------------------------------------------------' )
regime = int( input( ' ' ) )
if not ((regime == 1) or (regime == 2)):
print( '-----------------------------------------------------------' )
print( ' Ошибка: нет такого режима, вы вероятно ошиблись, бывает' )
print( ' Совет: перезапустите программу и попробуйте снова' )
print( '-----------------------------------------------------------' )
exit()
print( '-----------------------------------------------------------' )
print( ' Введите путь к папке в таком формате:' )
print( ' C:/example/path/to/files/' )
print( '-----------------------------------------------------------' )
folder = str( input( ' ' ) )
print( '-----------------------------------------------------------' )
print( ' Настройки сохранены, всё уже работает' )
print( ' Для выхода введите что угодно или закройте консоль' )
print( '-----------------------------------------------------------' )
print( ' Ход работы:' )
class Handler( FileSystemEventHandler ):
def on_modified( self, event ):
global count
count = 0
for filename in os.listdir( folder ):
extension = filename.split( "." )
if len( extension ) > 1:
if regime == 1:
count += 1
for i in range( 0, len( conf_ext ) ):
if extension[ -1 ].lower() in conf_ext[ i ][ 1 ]:
print( ' ' + str( count ) + ' | ' + conf_ext[ i ][ 0 ][ 0 ] + ' | ' + filename )
try:
os.chdir( folder + conf_ext[ i ][ 0 ][ 0 ] + '/' )
except:
try:
os.makedirs( folder + conf_ext[ i ][ 0 ][ 0 ] + '/' )
except:
pass
file = folder + filename
file_new = folder + conf_ext[ i ][ 0 ][ 0 ] + '/' + filename
try:
os.rename( file, file_new )
except:
file_new = folder + conf_ext[ i ][ 0 ][ 0 ] + '/' + str( count ) + filename
try:
os.rename( file, file_new )
except:
pass
if regime == 2:
count += 1
print( ' ' + str( count ) + ' | ' + extension[ -1 ].lower() + ' | ' + filename )
try:
os.chdir( folder + extension[ -1 ].lower() + '/' )
except:
try:
os.makedirs( folder + extension[ -1 ].lower() + '/' )
except:
pass
file = folder + filename
file_new = folder + extension[ -1 ].lower() + '/' + filename
try:
os.rename( file, file_new )
except:
file_new = folder + extension[ -1 ].lower() + '/' + str( count ) + filename
try:
os.rename( file, file_new )
except:
pass
handle = Handler()
observer = Observer()
observer.schedule( handle, folder, recursive = False )
observer.start()
if input():
observer.stop()
observer.join()
if task == 2:
print( '-----------------------------------------------------------' )
print( ' Запускаю вывод всех соотношений типов и расширений...' )
print( '-----------------------------------------------------------' )
conf_ext.sort()
for i in range(0, len(conf_ext)):
print( f' тип {conf_ext[i][0][0]}:' )
conf_ext[i][1].sort()
for j in range(0, len(conf_ext[i][1])):
print( f' - {conf_ext[i][1][j]}' )
print('')
# M:/FilesDump/
| 2.1875
| 2
|
hepynet/common/common_utils.py
|
Hepynet/hepynet
| 1
|
12779631
|
import glob
import logging
import platform
import re
import socket
from typing import Any
logger = logging.getLogger("hepynet")
def get_current_platform_name() -> str:
"""Returns the name of the current platform.
Returns:
str: name of current platform
"""
return platform.platform()
def get_current_hostname() -> str:
"""Returns the hostname of current machine
Returns:
str: current hostname
"""
return socket.gethostname()
def get_default_if_none(input_var: Any, default_value: Any):
if input_var is None:
return default_value
else:
return input_var
def get_newest_file_version(
path_pattern: str,
n_digit: int = 2,
ver_num: int = None,
use_existing: bool = False,
):
"""Check existed file and return last available file path with version.
Version range 00 -> 99 (or 999)
If reach limit, last available version will be used. 99 (or 999)
"""
# Return file path if ver_num is given
if ver_num is not None:
return {
"ver_num": ver_num,
"path": path_pattern.format(str(ver_num).zfill(n_digit)),
}
# Otherwise try to find ver_num
path_list = glob.glob(path_pattern.format("*"))
path_list = sorted(path_list)
if len(path_list) < 1:
if use_existing:
logger.debug(
f"Can't find existing file with path pattern: {path_pattern}, returning empty."
)
return {}
else:
ver_num = 0
path = path_pattern.format(str(0).zfill(n_digit))
else:
path = path_list[-1] # Choose the last match
        version_tag_search = re.compile(r"v(" + r"\d" * n_digit + ")")
ver_num = int(version_tag_search.search(path).group(1))
if not use_existing:
ver_num += 1
path = path_pattern.format(str(ver_num).zfill(n_digit))
return {
"ver_num": ver_num,
"path": path,
}
def get_significant_digits(number: float, n_digits: int):
if round(number) == number:
m = len(str(number)) - 1 - n_digits
if number / (10 ** m) == 0.0:
return number
else:
return float(int(number) / (10 ** m) * (10 ** m))
if len(str(number)) > n_digits + 1:
return round(number, n_digits - len(str(int(number))))
else:
return number
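# A minimal usage sketch; the path pattern below is hypothetical:
#   info = get_newest_file_version("models/model_v{}.h5", n_digit=2)
#   # -> {'ver_num': 0, 'path': 'models/model_v00.h5'} when no match exists yet,
#   #    otherwise the next free version number (or the latest existing one
#   #    with use_existing=True).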
| 2.8125
| 3
|
Binary_Search/162_Find_Peak_Element.py
|
hren-ron/LeetCode
| 0
|
12779632
|
'''
A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞.
Example 1:
Input: nums = [1,2,3,1]
Output: 2
Explanation: 3 is a peak element and your function should return the index number 2.
Example 2:
Input: nums = [1,2,1,3,5,6,4]
Output: 1 or 5
Explanation: Your function can return either index number 1 where the peak element is 2,
or index number 5 where the peak element is 6.
Note:
Your solution should be in logarithmic complexity.
'''
class Solution {
public:
int findPeakElement(vector<int>& nums) {
int n=nums.size();
if(n==0)
return -1;
if(n==1)
return 0;
/*
        Brute-force search: it suffices to check nums[i] > nums[i+1], because at position (i-1) we already established nums[i-1] < nums[i], so there is no need to compare with the preceding element.
int res=-1;
for(int i=0;i<n;i++){
if(i==0){
if(nums[i]>nums[i+1])
return i;
}
if(i+1==n){
if(nums[i]>nums[i-1])
return i;
}else{
if(nums[i]>nums[i-1] && nums[i]>nums[i+1])
return i;
}
}
return res;
*/
/*
        Binary search: if nums[mid] > nums[mid+1], a peak lies in the front half (including mid); if nums[mid] < nums[mid+1], a peak lies in the back half, excluding mid.
*/
int left=0,right=n-1;
while(left<right){
int mid=left+(right-left)/2;
if(nums[mid]>nums[mid+1])
right=mid;
else
left=mid+1;
}
return right;
}
};
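# A Python sketch of the same O(log n) binary-search idea (the solution above is C++):
#   def find_peak_element(nums):
#       left, right = 0, len(nums) - 1
#       while left < right:
#           mid = (left + right) // 2
#           if nums[mid] > nums[mid + 1]:
#               right = mid          # a peak is at mid or to its left
#           else:
#               left = mid + 1       # a peak lies strictly to the right of mid
#       return right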
| 3.546875
| 4
|
aether/sdk/auth/utils.py
|
eHealthAfrica/aether-django-sdk-library
| 1
|
12779633
|
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.contrib.auth import get_user_model
from aether.sdk.multitenancy.utils import get_current_realm, add_user_to_realm
UserModel = get_user_model()
user_objects = UserModel.objects
def get_or_create_user(request, username):
# gets the existing user or creates a new one
_username = parse_username(request, username)
try:
user = user_objects.get(username=_username)
except UserModel.DoesNotExist:
realm = get_current_realm(request)
user = user_objects.create_user(
username=_username,
first_name=username,
last_name=realm or '',
password=user_objects.make_random_password(length=100),
)
# only add user if it doesn't exist.
add_user_to_realm(request, user)
return user
def parse_username(request, username):
# the internal username prepends the realm name
realm = get_current_realm(request)
if realm and not username.startswith(f'{realm}__'):
username = f'{realm}__{username}'
return username
def unparse_username(request, username):
# the internal username prepends the realm name
realm = get_current_realm(request)
if realm and username.startswith(f'{realm}__'):
username = username[len(f'{realm}__'):]
return username
def user_to_string(user, request=None):
'''
Returns a readable name of the user.
- ``first_name`` + ``last_name``
- ``username``
'''
if user.first_name and user.last_name:
return f'{user.first_name} {user.last_name}'
if request:
return unparse_username(request, user.username)
return user.username
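# Illustrative round-trip, assuming the request's current realm is 'acme' (hypothetical):
#   parse_username(request, 'jane')           # -> 'acme__jane'
#   unparse_username(request, 'acme__jane')   # -> 'jane'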
| 2.109375
| 2
|
tencent/tencent/pipelines.py
|
huzing2524/spider
| 0
|
12779634
|
<reponame>huzing2524/spider<gh_stars>0
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
client = MongoClient()
collection = client["tencent"]["hr"]
class TencentPipeline(object):
def process_item(self, item, spider):
# print(type(item)) # dict
collection.insert_one(item)
# return item
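# As the header comment above notes, the pipeline must be enabled in settings.py, e.g.:
#   ITEM_PIPELINES = {"tencent.pipelines.TencentPipeline": 300}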
| 2.5
| 2
|
src/mdv/plugins/theme_base16.py
|
AXGKl/terminal_markdown_viewer
| 0
|
12779635
|
<filename>src/mdv/plugins/theme_base16.py
rules = [
('em', {'font-style': 'italic'}),
('strong', {'font-weight': 'bold'}),
('code', {'font-style': 'italic', 'background-color': 'grey'}),
]
hcs = ['lime', 'maroon', 'purple', 'teal', 'yellow', 'red', 'green', 'silver', 'navy']
for level, hc in zip(range(1, len(hcs) + 1), hcs):
rules.insert(level - 1, (f'h{level}', {'font-weight': 'bold', 'color': hc}))
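# After the loop above, `rules` begins with the heading styles followed by the
# inline styles, i.e.
#   ('h1', {'font-weight': 'bold', 'color': 'lime'}),
#   ('h2', {'font-weight': 'bold', 'color': 'maroon'}),
#   ...,
#   ('h9', {'font-weight': 'bold', 'color': 'navy'}),
#   ('em', {'font-style': 'italic'}), ('strong', ...), ('code', ...)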
| 2.109375
| 2
|
core/function.py
|
vcowwy/CvT_paddle
| 0
|
12779636
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
import paddle
from paddle.amp import auto_cast
from .mixup import Mixup
from core.evaluate import accuracy
from utils.comm import comm
def train_one_epoch(config, train_loader, model, criterion, optimizer,
epoch, output_dir, tb_log_dir, writer_dict, scaler=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
logging.info('=> switch to train mode')
model.train()
aug = config.AUG
mixup_fn = Mixup(mixup_alpha=aug.MIXUP, cutmix_alpha=aug.MIXCUT,
cutmix_minmax=aug.MIXCUT_MINMAX if aug.MIXCUT_MINMAX else None,
prob=aug.MIXUP_PROB, switch_prob=aug.MIXUP_SWITCH_PROB,
mode=aug.MIXUP_MODE, label_smoothing=config.LOSS.LABEL_SMOOTHING,
num_classes=config.MODEL.NUM_CLASSES) if aug.MIXUP_PROB > 0.0 else None
end = time.time()
for i, (x, y) in enumerate(train_loader):
data_time.update(time.time() - end)
if mixup_fn:
x, y = mixup_fn(x, y)
with auto_cast(enable=config.AMP.ENABLED):
outputs = model(x)
loss = criterion(outputs, y)
optimizer.clear_grad()
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
scaler.scale(loss).backward(create_graph=is_second_order)
if config.TRAIN.CLIP_GRAD_NORM > 0.0:
scaler.unscale_(optimizer)
paddle.fluid.layers.nn.clip_by_norm(
model.parameters(), config.TRAIN.CLIP_GRAD_NORM
)
scaler.step(optimizer)
scaler.update()
losses.update(loss.item(), x.shape[0])
if mixup_fn:
y = paddle.argmax(y, axis=1)
prec1, prec5 = accuracy(outputs, y, (1, 5))
top1.update(prec1, x.shape[0])
top5.update(prec5, x.shape[0])
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = (
'=> Epoch[{0}][{1}/{2}]: '
'Time {batch_time.val:.3f}s\t '
'({batch_time.avg:.3f}s)\t'
'Speed {speed:.1f} samples/s\t'
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t'
'Loss {loss.val:.5f} ({loss.avg:.5f})\t'
'Accuracy@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Accuracy@5 {top5.val:.3f} ({top5.avg:.3f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
speed=x.shape[0] / batch_time.val,
data_time=data_time, loss=losses,
top1=top1, top5=top5))
logging.info(msg)
if writer_dict and comm.is_main_process():
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.avg, global_steps)
writer.add_scalar('train_top1', top1.avg, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
@paddle.no_grad()
def test(config, val_loader, model, criterion, output_dir, tb_log_dir,
writer_dict=None, distributed=False, real_labels=None,
valid_labels=None):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
logging.info('=> switch to eval mode')
model.eval()
end = time.time()
for i, (x, y) in enumerate(val_loader):
outputs = model(x)
if valid_labels:
outputs = outputs[:, valid_labels]
loss = criterion(outputs, y)
if real_labels and not distributed:
real_labels.add_result(outputs)
losses.update(loss.item(), x.shape[0])
prec1, prec5 = accuracy(outputs, y, (1, 5))
top1.update(prec1, x.shape[0])
top5.update(prec5, x.shape[0])
batch_time.update(time.time() - end)
end = time.time()
logging.info('=> synchronize...')
comm.synchronize()
top1_acc, top5_acc, loss_avg = map(_meter_reduce if distributed else lambda x: x.avg,
[top1, top5, losses])
if real_labels and not distributed:
real_top1 = real_labels.get_accuracy(k=1)
real_top5 = real_labels.get_accuracy(k=5)
msg = ('=> TEST using Reassessed labels:\t'
'Error@1 {error1:.3f}%\t'
'Error@5 {error5:.3f}%\t'
'Accuracy@1 {top1:.3f}%\t'
'Accuracy@5 {top5:.3f}%\t'
.format(top1=real_top1, top5=real_top5,
error1=100 - real_top1,
error5=100 - real_top5))
logging.info(msg)
if comm.is_main_process():
msg = ('=> TEST:\t'
'Loss {loss_avg:.4f}\t'
'Error@1 {error1:.3f}%\t'
'Error@5 {error5:.3f}%\t'
'Accuracy@1 {top1:.3f}%\t'
'Accuracy@5 {top5:.3f}%\t'
.format(loss_avg=loss_avg, top1=top1_acc,
top5=top5_acc, error1=100 - top1_acc,
error5=100 - top5_acc))
logging.info(msg)
if writer_dict and comm.is_main_process():
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('valid_loss', loss_avg, global_steps)
writer.add_scalar('valid_top1', top1_acc, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
logging.info('=> switch to train mode')
model.train()
return top1_acc
def _meter_reduce(meter):
rank = comm.local_rank
meter_sum = paddle.to_tensor([meter.sum], dtype=paddle.float32)#.cuda(rank)
meter_count = paddle.to_tensor([meter.count], dtype=paddle.float32)#.cuda(rank)
paddle.distributed.reduce(meter_sum, 0)
paddle.distributed.reduce(meter_count, 0)
meter_avg = meter_sum / meter_count
return meter_avg.item()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
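# A minimal usage sketch: AverageMeter keeps a running, sample-weighted average
# across batches:
#   meter = AverageMeter()
#   meter.update(0.5, n=32)   # mean loss 0.5 over a 32-sample batch
#   meter.update(0.3, n=32)
#   meter.avg                 # -> 0.4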
| 2.15625
| 2
|
app/controllers/commands.py
|
pedroermarinho/Dominik
| 0
|
12779637
|
<filename>app/controllers/commands.py
# -*- coding:utf-8 -*-
import logging
from datetime import datetime
from app.controllers import key_words, functions_db
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from threading import Thread
class Comando:
logging.warning(__name__)
def __init__(self, _arduino):
print(str(__name__) + '__init__')
self.arduino_comando = _arduino
self.palavra_chave = key_words.KeyWords()
self.base_de_dados = functions_db.Database()
self.comandos = None
self.dicionario_cmd = self.base_de_dados.get_cmds
        # dicionario_cmd = {}  # creating a dictionary
    def comando(self, cmd):
        """
        Check the text and transcribe it into a command
        :param cmd: Text to be checked and transcribed into a command
        :return: The matched command if the confidence is high enough, otherwise None
        """
        try:  # guard against lookup problems
            result = process.extract(cmd, self.dicionario_cmd.keys(), scorer=fuzz.token_sort_ratio, limit=1)
            comando, confianca = result[0]  # extract() returns a list of (match, score) pairs
            logging.warning(str(__name__) + ':comando->' + str(comando))
            logging.warning(str(__name__) + ':nivel de confiança->' + str(confianca))
            if int(confianca) > 80:
                return self.dicionario_cmd[comando]  # result of the command
            else:
                return None
        except:  # in case there is no match
            print("erro piada")
            return None  # return nothing
def Lista_comandos(self):
"""
        Function to list the available commands
        :return: String
"""
result = None
for cmd, msg in self.base_de_dados.get_mensagem_cmd():
if cmd == 'msg_cmd':
result = msg
return result
def executar_cmd(self, cmd):
"""
        Function to execute the commands
        :param cmd: Command to be executed
        :return: String
"""
if cmd == '/start':
resultado = None
for cmd, msg in self.base_de_dados.get_mensagem_cmd():
if (cmd == 'msg_start'):
resultado = msg
return resultado
if cmd == 'cmd_hora':
data = datetime.now()
return 'São ' + str(data.hour) + ' horas e ' + str(data.minute) + ' minutos'
elif cmd == 'cmd_data':
data = datetime.now()
return 'Hoje é ' + str(data.day) + ' de ' + str(data.month)
elif cmd == 'cmd_lista_comandos':
return self.Lista_comandos()
elif cmd == 'cmd_treinar':
# main.treinar()
return 'treinado'
elif cmd == 'cmd_piadas':
return self.base_de_dados.get_piada()
elif cmd == 'cmd_Charadas':
return self.palavra_chave.Charada()
elif cmd == 'cmd_citacoes':
return self.palavra_chave.citacao()
elif cmd == 'cmd_curiosidades':
return self.base_de_dados.get_curiosidade()
# ------------------------------------------------------------------------------
# Thread(target=arduino_comando.cmd_televisao_off()).start()
        elif cmd == 'cmd_luz_quarto_1_on':
            # Pass the bound methods themselves as Thread targets so the command
            # runs on the worker thread instead of being called synchronously here.
            Thread(target=self.arduino_comando.cmd_luz_quarto_1_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_quarto_1_off':
            Thread(target=self.arduino_comando.cmd_luz_quarto_1_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_quarto_2_on':
            Thread(target=self.arduino_comando.cmd_luz_quarto_2_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_quarto_2_off':
            Thread(target=self.arduino_comando.cmd_luz_quarto_2_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_quarto_3_on':
            Thread(target=self.arduino_comando.cmd_luz_quarto_3_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_quarto_3_off':
            Thread(target=self.arduino_comando.cmd_luz_quarto_3_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_banheiro_1_on':
            Thread(target=self.arduino_comando.cmd_luz_banheiro_1_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_banheiro_1_off':
            Thread(target=self.arduino_comando.cmd_luz_banheiro_1_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_banheiro_2_on':
            Thread(target=self.arduino_comando.cmd_luz_banheiro_2_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_banheiro_2_off':
            Thread(target=self.arduino_comando.cmd_luz_banheiro_2_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_sala_on':
            Thread(target=self.arduino_comando.cmd_luz_sala_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_sala_off':
            Thread(target=self.arduino_comando.cmd_luz_sala_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_sala_jantar_on':
            Thread(target=self.arduino_comando.cmd_luz_sala_jantar_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_sala_jantar_off':
            Thread(target=self.arduino_comando.cmd_luz_sala_jantar_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_cozinha_on':
            Thread(target=self.arduino_comando.cmd_luz_cozinha_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_cozinha_off':
            Thread(target=self.arduino_comando.cmd_luz_cozinha_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_externa_entrada_on':
            Thread(target=self.arduino_comando.cmd_luz_externa_entrada_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_externa_entrada_off':
            Thread(target=self.arduino_comando.cmd_luz_externa_entrada_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_externa_saida_on':
            Thread(target=self.arduino_comando.cmd_luz_externa_saida_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_externa_saida_off':
            Thread(target=self.arduino_comando.cmd_luz_externa_saida_off).start()
            return 'OK'
        elif cmd == 'cmd_luz_externa_lateral_on':
            Thread(target=self.arduino_comando.cmd_luz_externa_lateral_on).start()
            return 'OK'
        elif cmd == 'cmd_luz_externa_lateral_off':
            Thread(target=self.arduino_comando.cmd_luz_externa_lateral_off).start()
            return 'OK'
        elif cmd == 'cmd_televisao_on':
            Thread(target=self.arduino_comando.cmd_televisao_on).start()
            return 'OK'
        elif cmd == 'cmd_televisao_off':
            Thread(target=self.arduino_comando.cmd_televisao_off).start()
            return 'OK'
else:
return None
| 2.421875
| 2
|
train/losses.py
|
m2lines/subgrid
| 1
|
12779638
|
<filename>train/losses.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 23:08:26 2020
@author: arthur
In this module we define custom loss functions. In particular we define
a loss function based on the Gaussian likelihood with two parameters,
mean and precision.
"""
import torch
from torch.nn.modules.loss import _Loss
from enum import Enum
from abc import ABC
import numpy as np
class VarianceMode(Enum):
variance = 0
precision = 1
# DEPRECATED
class HeteroskedasticGaussianLoss(_Loss):
def forward(self, input: torch.Tensor, target: torch.Tensor):
# Split the target into mean (first half of channels) and scale
mean, precision = torch.split(input, 2, dim=1)
if not torch.all(precision > 0):
raise ValueError('Got a non-positive variance value. \
Pre-processed variance tensor was: \
{}'.format(torch.min(precision)))
term1 = - 1 / 2 * torch.log(precision)
term2 = 1 / 2 * (target - mean)**2 * precision
return (term1 + term2).mean()
class StudentLoss(_Loss):
def __init__(self, nu: float = 30, n_target_channels: int = 1):
super().__init__()
self.n_target_channels = n_target_channels
@property
def n_required_channels(self):
"""Return the number of input channel required per target channel.
In this case, two, one for the mean, another one for the precision"""
return 2 * self.n_target_channels
def pointwise_likelihood(self, input: torch.Tensor, target: torch.Tensor):
# Temporary fix
input, nu = input
mean, precision = torch.split(input, self.n_target_channels, dim=1)
term1 = - torch.lgamma((nu + 1) / 2)
term2 = 1 / 2 * torch.log(nu) + torch.lgamma(nu / 2)
term3 = - torch.log(precision)
temp = (target - mean) * precision
term4 = (nu + 1) / 2 * torch.log(1 + 1 / nu * temp**2)
return term1 + term2 + term3 + term4
def forward(self, input: torch.Tensor, target: torch.Tensor):
lkhs = self.pointwise_likelihood(input, target)
# Ignore nan values in targets.
lkhs = lkhs[~torch.isnan(target)]
return lkhs.mean()
def predict(self, input: torch.Tensor):
input, nu = input
mean, precision = torch.split(input, self.n_target_channels, dim=1)
return mean
def predict_mean(self, input: torch.Tensor):
input, nu = input
"""Return the mean of the conditional distribution"""
mean, precision = torch.split(input, self.n_target_channels, dim=1)
return mean
@property
def precision_indices(self):
return list(range(self.n_target_channels, self.n_required_channels))
class CauchyLoss(_Loss):
def __init__(self, n_target_channels: int = 1):
super().__init__()
self.n_target_channels = n_target_channels
@property
def n_required_channels(self):
"""Return the number of input channel required per target channel.
In this case, two, one for the mean, another one for the precision"""
return 2 * self.n_target_channels
def pointwise_likelihood(self, input: torch.Tensor, target: torch.Tensor):
mean, scale = torch.split(input, self.n_target_channels, dim=1)
term1 = - torch.log(scale)
term2 = torch.log((target - mean)**2 + scale**2)
return term1 + term2
def forward(self, input: torch.Tensor, target: torch.Tensor):
lkhs = self.pointwise_likelihood(input, target)
# Ignore nan values in targets.
lkhs = lkhs[~torch.isnan(target)]
return lkhs.mean()
def predict(self, input: torch.Tensor):
mean, precision = torch.split(input, self.n_target_channels, dim=1)
return mean
def predict_mean(self, input: torch.Tensor):
"""Return the mean of the conditional distribution"""
mean, precision = torch.split(input, self.n_target_channels, dim=1)
return mean
@property
def precision_indices(self):
return list(range(self.n_target_channels, self.n_required_channels))
class HeteroskedasticGaussianLossV2(_Loss):
"""Class for Gaussian likelihood"""
def __init__(self, n_target_channels: int = 1, bias: float = 0.,
mode=VarianceMode.precision):
super().__init__()
self.n_target_channels = n_target_channels
self.bias = bias
self.mode = mode
@property
def n_required_channels(self):
"""Return the number of input channel required per target channel.
In this case, two, one for the mean, another one for the precision"""
return 2 * self.n_target_channels
@property
def channel_names(self):
return ['S_x', 'S_y', 'S_xscale', 'S_yscale']
@property
def precision_indices(self):
return list(range(self.n_target_channels, self.n_required_channels))
def pointwise_likelihood(self, input: torch.Tensor, target: torch.Tensor):
# Split the target into mean (first half of channels) and scale
mean, precision = torch.split(input, self.n_target_channels, dim=1)
if not torch.all(precision > 0):
raise ValueError('Got a non-positive variance value. \
Pre-processed variance tensor was: \
{}'.format(torch.min(precision)))
if self.mode is VarianceMode.precision:
term1 = - torch.log(precision)
term2 = 1 / 2 * (target - (mean + self.bias))**2 * precision**2
elif self.mode is VarianceMode.variance:
term1 = torch.log(precision)
term2 = 1 / 2 * (target - (mean + self.bias))**2 / precision**2
return term1 + term2
def forward(self, input: torch.Tensor, target: torch.Tensor):
lkhs = self.pointwise_likelihood(input, target)
# Ignore nan values in targets.
lkhs = lkhs[~torch.isnan(target)]
return lkhs.mean()
def predict(self, input: torch.Tensor):
mean, precision = torch.split(input, self.n_target_channels, dim=1)
return mean + self.bias
def predict_mean(self, input: torch.Tensor):
"""Return the mean of the conditional distribution"""
mean, precision = torch.split(input, self.n_target_channels, dim=1)
return mean + self.bias
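# A minimal usage sketch for the mean/precision channel layout described above
# (assumption: shapes and the exponential used to keep the precision channels
# strictly positive are illustrative only).
def _example_heteroskedastic_gaussian_usage():
    import torch
    loss_fn = HeteroskedasticGaussianLossV2(n_target_channels=2,
                                            mode=VarianceMode.precision)
    raw = torch.randn(4, loss_fn.n_required_channels, 8, 8)
    # First half of the channels: means; second half: precisions (must be > 0).
    prediction = torch.cat([raw[:, :2, ...], torch.exp(raw[:, 2:, ...])], dim=1)
    target = torch.randn(4, 2, 8, 8)
    return loss_fn(prediction, target), loss_fn.predict_mean(prediction)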
class HeteroskedasticGaussianLossV3(_Loss):
"""Loss to be used with transform2 from models/submodels.py"""
def __init__(self, *args, **kargs):
super().__init__()
self.base_loss = HeteroskedasticGaussianLossV2(*args, **kargs)
def __getattr__(self, name: str):
try:
# This is necessary as the class Module defines its own __getattr__
return super().__getattr__(name)
except AttributeError:
return getattr(self.base_loss, name)
def forward(self, input: torch.Tensor, target: torch.Tensor):
return self.base_loss.forward(input, target)
def pointwise_likelihood(self, input: torch.Tensor, target: torch.Tensor):
        raw_loss = self.base_loss(input, target[:, :self.n_target_channels, ...])
return raw_loss + torch.log(target[:, self.n_target_channels: self.n_target_channels + 1, ...])
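# A small sketch of the delegation implemented above: attributes that are not
# defined on the wrapper resolve on the wrapped HeteroskedasticGaussianLossV2
# instance via __getattr__ (illustrative only, not part of the training code).
def _example_v3_delegation():
    loss_fn = HeteroskedasticGaussianLossV3(n_target_channels=2)
    # n_required_channels is looked up on the underlying V2 loss and equals 4.
    return loss_fn.n_required_channels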
class MultimodalLoss(_Loss):
"""General class for a multimodal loss. Each location on
each channel can choose its mode independently."""
def __init__(self, n_modes, n_target_channels, base_loss_cls,
base_loss_params=[], share_mode='C'):
super().__init__()
self.n_modes = n_modes
self.n_target_channels = n_target_channels
self.target_names = ['target' + str(i) for i in range(
n_target_channels)]
self.losses = []
for i in range(n_modes):
if i < len(base_loss_params):
params = base_loss_params[i]
self.losses.append(base_loss_cls(n_target_channels, **params))
else:
self.losses.append(base_loss_cls(n_target_channels))
self.share_mode = share_mode
@property
def target_names(self):
return self._target_names
@target_names.setter
def target_names(self, value):
assert len(value) == self.n_target_channels
self._target_names = value
@property
def n_required_channels(self):
if self.share_mode == 'C':
return sum(self.splits)
@property
def channel_names(self):
"""Automatically assigns names to output channels depending on the
target names. For now not really implemented"""
return [str(i) for i in range(self.n_required_channels)]
@property
def precision_indices(self):
indices = []
for i, loss in enumerate(self.losses):
sub_indices = loss.precision_indices
for j in range(len(sub_indices)):
sub_indices[j] += self.n_modes * self.n_target_channels + i * loss.n_required_channels
indices.extend(sub_indices)
return indices
@property
def splits(self):
"""Return how to split the input to recover the different parts:
- probabilities of the modes
        - quantities defining each mode
"""
return ([self.n_modes, ] * self.n_target_channels
+ [loss.n_required_channels for loss in self.losses])
def forward(self, input: torch.Tensor, target: torch.Tensor):
splits = torch.split(input, self.splits, dim=1)
probas, inputs = (splits[:self.n_target_channels],
splits[self.n_target_channels:])
probas = [torch.softmax(proba, dim=1) for proba in probas]
losses_values = []
for i, (loss, input) in enumerate(zip(self.losses, inputs)):
proba_i = torch.stack([proba[:, i, ...] for proba in probas], dim=1)
loss_i = torch.log(proba_i) - loss.pointwise_likelihood(input, target)
losses_values.append(loss_i)
loss = torch.stack(losses_values, dim=2)
final_loss = -torch.logsumexp(loss, dim=2)
final_loss = final_loss.mean()
return final_loss
def predict(self, input: torch.Tensor):
splits = torch.split(input, self.splits, dim=1)
probas, inputs = (splits[:self.n_target_channels],
splits[self.n_target_channels:])
probas = [torch.softmax(proba, dim=1) for proba in probas]
predictions = [loss.predict(input) for loss, input in
zip(self.losses, inputs)]
weighted_predictions = []
for i, pred in enumerate(predictions):
proba_i = torch.stack([proba[:, i, ...] for proba in probas], dim=1)
weighted_predictions.append(proba_i * pred)
final_predictions = sum(weighted_predictions)
return final_predictions
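# A minimal sketch of the channel layout implied by `splits` above, for two modes
# and two target channels (assumption: shapes and the exponential keeping the
# per-mode precisions positive are illustrative only).
def _example_multimodal_usage():
    import torch
    loss_fn = MultimodalLoss(n_modes=2, n_target_channels=2,
                             base_loss_cls=HeteroskedasticGaussianLossV2)
    # splits == [2, 2, 4, 4]: one block of mode logits per target channel,
    # then one 4-channel Gaussian parameterisation (means + precisions) per mode.
    raw = torch.randn(4, loss_fn.n_required_channels, 8, 8)
    mode_logits = raw[:, :4, ...]
    gauss_0 = torch.cat([raw[:, 4:6, ...], torch.exp(raw[:, 6:8, ...])], dim=1)
    gauss_1 = torch.cat([raw[:, 8:10, ...], torch.exp(raw[:, 10:12, ...])], dim=1)
    prediction = torch.cat([mode_logits, gauss_0, gauss_1], dim=1)
    target = torch.randn(4, 2, 8, 8)
    return loss_fn(prediction, target), loss_fn.predict(prediction)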
class BimodalGaussianLoss(MultimodalLoss):
"""Class for a bimodal Gaussian loss."""
def __init__(self, n_target_channels: int):
super().__init__(2, n_target_channels,
base_loss_cls=HeteroskedasticGaussianLossV2)
class BimodalStudentLoss(MultimodalLoss):
def __init__(self, n_target_channels: int):
super().__init__(2, n_target_channels, base_loss_cls=StudentLoss)
class TrimodalGaussianLoss(MultimodalLoss):
def __init__(self, n_target_channels: int):
super().__init__(3, n_target_channels,
base_loss_cls=HeteroskedasticGaussianLossV2)
class PentamodalGaussianLoss(MultimodalLoss):
def __init__(self, n_target_channels: int):
super().__init__(5, n_target_channels,
base_loss_cls=HeteroskedasticGaussianLossV2)
| 2.8125
| 3
|
rpython/jit/backend/x86/detect_sse2.py
|
kantai/passe-pypy-taint-tracking
| 2
|
12779639
|
<filename>rpython/jit/backend/x86/detect_sse2.py
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rmmap import alloc, free
def detect_sse2():
data = alloc(4096)
pos = 0
for c in ("\xB8\x01\x00\x00\x00" # MOV EAX, 1
"\x53" # PUSH EBX
"\x0F\xA2" # CPUID
"\x5B" # POP EBX
"\x92" # XCHG EAX, EDX
"\xC3"): # RET
data[pos] = c
pos += 1
fnptr = rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), data)
code = fnptr()
free(data, 4096)
return bool(code & (1<<25)) and bool(code & (1<<26))
if __name__ == '__main__':
if detect_sse2():
print 'Processor supports sse2.'
else:
print 'Missing processor support for sse2.'
| 2.4375
| 2
|
src/sympy_utilities.py
|
daschaich/SUSY_QuantumComputing
| 0
|
12779640
|
<reponame>daschaich/SUSY_QuantumComputing
import src.HamiltonianTerms as hmats
import sympy as sp
import numpy as np
p, q = sp.symbols('p, q', commutative=False)
a, adag = sp.symbols('a, ad', commutative=False)
b, bdag = sp.symbols('b, bd', commutative=False)
m = sp.Symbol('m')
g = sp.Symbol('g')
qp_to_ada = {q: 0.5*sp.sqrt(2/m)*(a + adag),
p: complex(0,1)*sp.sqrt(2*m)*(adag - a)/2}
#ada_matrices = {a*adag: sp.Matrix(hmats.a(5)*hmats.adag(5)),
# adag*a: sp.Matrix(hmats.adag(5)*hmats.a(5))}
adaga_sub = {a: sp.Matrix(hmats.a(9)), adag: sp.Matrix(hmats.adag(9))}
def convert(expr):
new_expr = sp.Matrix(sp.zeros(9))
for elem in expr.args:
new_expr += convert_term_to_matrix(elem)
return new_expr
def convert_term_to_matrix(term):
new_elem = 1
has_aadag = False
for elem in term.args:
for i in sp.preorder_traversal(elem):
if( (i is a ) or (i is adag) ):
has_aadag=True
if has_aadag:
for elem in term.args:
is_operator = False
for i in sp.preorder_traversal(elem):
if( (i is a) or (i is adag) ):
is_operator = True
if is_operator:
lst = elem.args
if(len(lst)>1):
for i in range(lst[1]):
new_elem*=lst[0].subs(adaga_sub)
else:
new_elem*=elem.subs(adaga_sub)
else:
new_elem*=elem
return new_elem
else:
for elem in term.args:
new_elem*=elem
return new_elem*sp.Matrix(np.eye(9))
# TODO: Some (all?) of these need to be moved to the MatrixToPauliString function
# as they are not generic and for sympy expressions of a certain type.
# Maybe I should make a PauliStringSympy class to hold all of this stuff safely.
def unique_sympy_symbols(expr):
return set([ str(elem).split('^')[0] for elem in expr.free_symbols ])
def max_sympy_exponent(expr):
return max(set([ int(str(elem).split('^')[1]) for elem in expr.free_symbols ]))
def sympy_expr_to_list(expr):
arg_list=[]
for a in sp.preorder_traversal(expr):
if(a!=expr):
arg_list.append(a)
return arg_list
def identity_qubit_padded_H(ham):
new_h = 0
qubits = [i for i in range(max_sympy_exponent(ham)+1)]
for elem in ham.args:
qubits_with_pauli = []
for e in elem.free_symbols:
qubits_with_pauli.append(int(str(e).split('^')[1]))
#np.setdiff1d doesn't seem to work
ids_to_pad = []
for qi in qubits:
if qi not in qubits_with_pauli:
ids_to_pad.append(qi)
for iden in ids_to_pad:
elem*=sp.Symbol('I^'+str(iden))
new_h+=elem
return new_h
| 2.21875
| 2
|
app/cesium/router.py
|
code-lab-org/tatc
| 0
|
12779641
|
<reponame>code-lab-org/tatc<filename>app/cesium/router.py
from fastapi import APIRouter
from fastapi.responses import PlainTextResponse
def get_cesium_router(token):
router = APIRouter()
@router.get("/token", response_class=PlainTextResponse)
async def get_cesium_token():
return token
return router
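# A minimal usage sketch (assumption: the surrounding application and the
# "/cesium" prefix are illustrative, not part of this module).
if __name__ == "__main__":
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(get_cesium_router("my-cesium-token"), prefix="/cesium")
    # GET /cesium/token now responds with the plain-text token.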
| 2.3125
| 2
|
tests/test_examples.py
|
banesullivan/localtileserver
| 105
|
12779642
|
from localtileserver import examples
def test_get_blue_marble():
client = examples.get_blue_marble()
assert client.metadata()
def test_get_virtual_earth():
client = examples.get_virtual_earth()
assert client.metadata()
def test_get_arcgis():
client = examples.get_arcgis()
assert client.metadata()
def test_get_elevation():
client = examples.get_elevation()
assert client.metadata()
def test_get_bahamas():
client = examples.get_bahamas()
assert client.metadata()
def test_get_pine_gulch():
client = examples.get_pine_gulch()
assert client.metadata()
def test_get_landsat():
client = examples.get_landsat()
assert client.metadata()
def test_get_san_francisco():
client = examples.get_san_francisco()
assert client.metadata()
def test_get_oam2():
client = examples.get_oam2()
assert client.metadata()
def test_get_elevation_us():
client = examples.get_elevation_us()
assert client.metadata()
| 1.71875
| 2
|
migrations/versions/4dbf686f4380_added_location_to_pr.py
|
jace/goafunnel
| 0
|
12779643
|
"""Added location to proposal
Revision ID: 4dbf686f4380
Revises: <PASSWORD>
Create Date: 2013-11-08 23:35:43.433963
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '1<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('location', sa.Unicode(length=80), server_default=sa.text(u"''"), nullable=False))
op.alter_column('proposal', 'location', server_default=None)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'location')
### end Alembic commands ###
| 1.335938
| 1
|
DP/minimumTotal.py
|
saai/LeetcodePythonSolutions
| 0
|
12779644
|
import sys
class Solution:
# @param triangle, a list of lists of integers
# @return an integer
def minimumTotal(self, triangle):
n = len(triangle)
row = [0 for i in xrange(n)]
row[0] = triangle[0][0]
for i in range(1,n):
m = i+1
pre = row[0] + triangle[i][0]
for j in range(1,m):
cur = min(row[j-1],(sys.maxint if j==m-1 else row[j])) + triangle[i][j]
if j-1 >= 0:
row[j-1] = pre
pre = cur
row[m-1] = pre
return min(row)
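# A quick sanity check on the classic LeetCode example: the minimum path
# 2 -> 3 -> 5 -> 1 sums to 11.
if __name__ == '__main__':
    triangle = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]
    print Solution().minimumTotal(triangle)  # expected output: 11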
| 3.078125
| 3
|
words.py
|
ColinTing/Python-Specialization
| 2
|
12779645
|
<gh_stars>1-10
# name = input('Enter file:')
# handle = open(name)
# counts = dict()
# for line in handle:
# words = line.split()
# for word in words:
# counts[word] = counts.get(word,0)+1
# bigcount = None
# bigword = None
# for word,count in counts.items():
# if bigcount is None or count > bigcount:
# bigword = word
# bigcount = count
# print(bigword, bigcount)
name = input('Enter file:')
handle = open(name)
counts = dict()
for line in handle:
words = line.split()
for word in words:
counts[word] = counts.get(word,0)+1
wordscount = None
#word = None
for word, count in counts.items():
# if wordscount is None or count < smallcount:
# smallword = word
# smallcount = count
if wordscount is None:
wordscount = count
else:
wordscount += count
print(wordscount)
| 3.546875
| 4
|
0095_Unique_Binary_Search_Trees_II.py
|
imguozr/LC-Solutions
| 0
|
12779646
|
import functools
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
"""
Same thought as LC96, we can generate trees recursively.
If the root of tree is i
The left subtree has a sequence of [start ... i - 1]
The right subtree has a sequence of [i + 1 ... end]
Can use cache to improve performance.
"""
def generateTrees(self, n: int) -> List[TreeNode]:
if not n:
return []
return self.generate_subtrees(1, n)
@functools.lru_cache(None)
def generate_subtrees(self, start, end):
res = []
if end < start:
return [None]
for i in range(start, end + 1):
# More concise than declare left/right list. Have same performance.
for left in self.generate_subtrees(start, i - 1):
for right in self.generate_subtrees(i + 1, end):
node = TreeNode(i)
node.left = left
node.right = right
res.append(node)
return res
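# A quick sanity check: for n = 3 there are Catalan(3) = 5 unique BSTs.
if __name__ == "__main__":
    print(len(Solution().generateTrees(3)))  # expected output: 5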
| 4.0625
| 4
|
src/transformers/criterions/entropic_regularizer.py
|
ashim95/wordsalad
| 13
|
12779647
|
<filename>src/transformers/criterions/entropic_regularizer.py<gh_stars>10-100
import torch
import numpy as np
import math
import torch.nn as nn
class EntropicRegularizer(nn.Module):
def __init__(self, entropy_lambda, ignore_index=None):
super().__init__()
# Specifies a target value that is ignored and
# does not contribute to the input gradient
self.ignore_index = ignore_index
self.entropy_lambda = entropy_lambda
self.log_softmax = torch.nn.LogSoftmax(dim=-1)
def forward(self, scores, target, mask, num_classes, entropy_lambda=None, reduce=True, return_crossentropy=False, return_kl_loss=False):
lprobs = self.log_softmax(scores)
#print(lprobs.shape)
#print(target.shape)
cross_ent = torch.nn.NLLLoss(reduction='none')
loss = cross_ent(lprobs, target)
kl_div = nn.KLDivLoss(reduction='none')
uniform = (1/num_classes)*torch.ones(scores.shape, dtype=torch.float32).to(scores.device)
kl_loss = torch.sum(kl_div(lprobs, uniform), dim=1)
#print('KL Shape is ')
#print(kl_loss.shape)
if entropy_lambda is None:
entropy_lambda = self.entropy_lambda
if mask is None:
if reduce:
return loss.sum()
else:
return loss
cross_loss = mask * loss
kl = (1 - mask) * entropy_lambda * kl_loss
final_loss = cross_loss + kl
#print(final_loss.shape)
if reduce:
final_loss = final_loss.sum()
cross_loss = cross_loss.sum()
kl = kl.sum()
if return_crossentropy and return_kl_loss:
return final_loss, cross_loss, kl
else:
return final_loss
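# A minimal usage sketch (assumption: a batch of 4 examples over 5 classes; the
# mask marks which positions use the cross-entropy term and which are instead
# regularized towards the uniform distribution).
def _example_entropic_regularizer():
    scores = torch.randn(4, 5)
    target = torch.randint(0, 5, (4,))
    mask = torch.tensor([1., 1., 0., 0.])
    regularizer = EntropicRegularizer(entropy_lambda=0.1)
    return regularizer(scores, target, mask, num_classes=5)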
| 2.546875
| 3
|
anasymod/viewer/scansion.py
|
SubjeBilisim/anasymod
| 20
|
12779648
|
<gh_stars>10-100
from anasymod.viewer.viewer import Viewer
from anasymod.util import call
class ScansionViewer(Viewer):
def view(self):
# build command
#cmd = ['open', '/Applications/Scansion.app', self.target.cfg['vcd_path']]
cmd = ['open', '/Applications/Scansion.app']
# run command
call(cmd)
| 2.203125
| 2
|
example.py
|
flywheel-apps/safe-python
| 0
|
12779649
|
<filename>example.py
#!/usr/bin/env python
# Note:
# The above shebang, and setting the file's executable bit, allows the manifest's "command" key to be "./example.py".
# This is ideal as it removes any further bash processing of the gear command.
import json
invocation = json.loads(open('config.json').read())
# Note:
# The above load command robustly parses the /flywheel/v0/config.json file.
# This is ideal as it prevents any hand-parsing of the JSON syntax in bash or similar.
import copy
safe_invocation = copy.deepcopy(invocation)
safe_invocation["inputs"]["api-key"]["key"] = "--- OMITTED ---"
print("Gear invocation:")
print(json.dumps(safe_invocation, indent=4, sort_keys=True))
print()
# Note:
# The above demonstrates how to print the gear's invocation.
# Because there is an API key input provided to this gear, deep-copy is used to blank the key out.
# This is ideal as it prevents another user from reading the gear logs and gaining (temporary) credentials.
if invocation["config"]["fast"]:
print("I feel the need... the need, for speed!")
else:
print("We are not going fast today.")
print()
# Note:
# The above conditional demonstrates checking a gear configuration setting.
# This is ideal because it allows robust, typed config options to be loaded from the JSON file.
import flywheel
import sys
fw = flywheel.Client(invocation["inputs"]["api-key"]["key"])
def interact_with_server():
print("Interacting with the server...")
user = fw.get_current_user()
print('Your name in Flywheel is', user.firstname, user.lastname + '.')
try:
interact_with_server()
except Exception as e:
# Expected, runsc does not respect networking settings
print("Could not interact with server, error was:")
print(e, )
print()
# Note:
# The above demonstrates how to best interact with remote services, such as the flywheel API.
# After creating a flywheel.Client object, it is best to define a function that performs your operation.
# When calling functions that interact with remote services, wrap them in a try/catch as shown.
# With this try/catch, a failure produces 2 lines of output. Without it, a failure produces 70 lines.
# This is ideal because it provides far more readable logs to the user.
import subprocess
def interact_with_system():
print("Running some commands...")
sys.stdout.flush()
sys.stderr.flush()
# Note:
# Flushing print buffers is important before running any system command.
# Otherwise, important log statements may not appear until after the command returns.
# This is ideal because some gears can fork off commands that take hours or days to complete.
subprocess.run(["uname", "-sr"], check=True)
# Expected, runsc does not respect networking settings
# subprocess.run(["curl", "-sI", "google.com"], check=True)
result = subprocess.run(["wc", "-w", "example.py"], capture_output=True, check=True)
word_count = result.stdout.decode("utf-8").split(' ')[0]
print("There are", word_count, "words in example.py.")
try:
interact_with_system()
except Exception as e:
print("Could not interact with system, error was:", file=sys.stderr)
print(e, file=sys.stderr)
print()
# Note: The above demonstrates how to best run system commands.
# The subprocess.run is ideal for requiring an explicit command array.
# The "check" parameter is strongly recommended as it raises an exception on non-zero return codes.
# The "shell" paramter is not recommended; instead, use the subprocess module as documented here:
# https://docs.python.org/3/library/subprocess.html#replacing-older-functions-with-the-subprocess-module
# As before, a function is declared and wrapped in a try/catch.
# This is ideal because it minimizes opportunities to run unintended commands.
print("Complete!")
# Note:
# In either of the above try/catches, it may be desirable to add "exit(1)" to the "except Exception" stanzas.
# This depends on if you want the gear to fail or continue as success when a function fails.
# This is ideal because it allows you to tightly control if a gear is marked as red or green.
| 2.5
| 2
|
coworker/services/views.py
|
upstar77/spacemap
| 0
|
12779650
|
<reponame>upstar77/spacemap<filename>coworker/services/views.py
from django.shortcuts import render, get_object_or_404, redirect
# Create your views here.
from django.template.response import TemplateResponse
from django.views.generic import DetailView, ListView
from .models import Category, Service
def category_index(request, path, category_id):
category = get_object_or_404(Category, id=category_id)
actual_path = category.get_full_path()
if actual_path != path:
return redirect('service:category', permanent=True, path=actual_path, category_id=category_id)
return TemplateResponse(request, 'services/index.html', {'category': category})
class ServiceSearchView(ListView):
template_name = 'services/search_services.html'
model = Service
class ServiceDetailView(DetailView):
model = Service
template_name = 'services/service_detail.html'
def get_object(self, queryset=None):
return get_object_or_404(self.model, id=self.kwargs["service_id"])
def product_details(request, slug, service_id, form=None):
# """Product details page
#
# The following variables are available to the template:
#
# product:
# The Product instance itself.
#
# is_visible:
# Whether the product is visible to regular users (for cases when an
# admin is previewing a product before publishing).
#
# form:
# The add-to-cart form.
#
# price_range:
# The PriceRange for the product including all discounts.
#
# undiscounted_price_range:
# The PriceRange excluding all discounts.
#
# discount:
# Either a Price instance equal to the discount value or None if no
# discount was available.
#
# local_price_range:
# The same PriceRange from price_range represented in user's local
# currency. The value will be None if exchange rate is not available or
# the local currency is the same as site's default currency.
# """
# products = products_with_details(user=request.user)
# product = get_object_or_404(products, id=product_id)
# if product.get_slug() != slug:
# return HttpResponsePermanentRedirect(product.get_absolute_url())
# today = datetime.date.today()
# is_visible = (
# product.available_on is None or product.available_on <= today)
# if form is None:
# form = handle_cart_form(request, product, create_cart=False)[0]
# availability = get_availability(product, discounts=request.discounts,
# local_currency=request.currency)
# template_name = 'product/details_%s.html' % (
# type(product).__name__.lower(),)
# templates = [template_name, 'product/details.html']
# product_images = get_product_images(product)
# variant_picker_data = get_variant_picker_data(
# product, request.discounts, request.currency)
# product_attributes = get_product_attributes_data(product)
# show_variant_picker = all([v.attributes for v in product.variants.all()])
# json_ld_data = product_json_ld(product, availability, product_attributes)
return TemplateResponse(request, 'services/service_detail.html', {})
| 2.375
| 2
|