max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
ergo/conditions/maxentropy.py | NixGD/ergo | 93 | 12773851 | from . import condition
class MaxEntropyCondition(condition.Condition):
    """Condition that rewards high-entropy distributions.

    The loss is the (weighted) negative entropy, so a fitter that
    minimizes the loss maximizes the entropy of the distribution.
    """

    def loss(self, dist) -> float:
        """Return the weighted negative entropy of `dist`."""
        entropy_penalty = -self.weight * dist.entropy()
        return entropy_penalty

    def destructure(self):
        """Return a (classes, weights) pair used to rebuild this condition."""
        classes = (MaxEntropyCondition,)
        weights = (self.weight,)
        return (classes, weights)

    def __str__(self):
        return "Maximize the entropy of the distribution"

    def __repr__(self):
        return f"MaxEntropyCondition(weight={self.weight})"
| 3.140625 | 3 |
examples/supervised/neuralnets+svm/example_fnn.py | rueckstiess/pybrain | 3 | 12773852 | #!/usr/bin/env python
# Example script for feed-forward network usage in PyBrain.
__author__ = "<NAME>"
__version__ = '$Id$'
from pylab import figure, ioff, clf, contourf, ion, draw, show
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from datasets import generateGridData, generateClassificationData, plotData
# load the training data set
trndata = generateClassificationData(250)

# neural networks work better if classes are encoded using
# one output neuron per class
trndata._convertToOneOfMany(bounds=[0, 1])

# same for the independent test data set
tstdata = generateClassificationData(100)
tstdata._convertToOneOfMany(bounds=[0, 1])

# build a feed-forward network with 5 hidden units, plus a corresponding
# trainer (the old comment claimed 20 hidden units; the code uses 5)
fnn = buildNetwork(trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, verbose=True,
                          weightdecay=0.01)

# generate a grid of data points for visualization
griddata, X, Y = generateGridData([-3., 6., 0.2], [-3., 6., 0.2])

# repeat 20 times
for _ in range(20):
    # train the network for 1 epoch
    trainer.trainEpochs(1)

    # evaluate the result on the training and test data
    trnresult = percentError(trainer.testOnClassData(),
                             trndata['class'])
    tstresult = percentError(trainer.testOnClassData(
        dataset=tstdata), tstdata['class'])

    # print the result — a single-string print() call works on both
    # Python 2 and 3, unlike the original Python-2-only print statement
    print("epoch: %4d  train error: %5.2f%%  test error: %5.2f%%"
          % (trainer.totalepochs, trnresult, tstresult))

    # run our grid data through the FNN, get the most likely class
    # and shape it into an array
    out = fnn.activateOnDataset(griddata)
    out = out.argmax(axis=1)
    out = out.reshape(X.shape)

    # plot the test data and the underlying grid as a filled contour
    figure(1)
    ioff()  # interactive graphics off
    clf()
    # plot the datapoints
    plotData(tstdata)
    # overlay a contour plot of the functional margin
    # (contourf fails when all grid values are identical)
    if out.max() != out.min():
        CS = contourf(X, Y, out)
    ion()   # interactive graphics on
    draw()  # update the plot

# show the plot until user kills it
ioff()
show()
| 3.109375 | 3 |
line-bot-tutorial-master/app.py | chungoppa/test | 0 | 12773853 | from flask import Flask, request, abort
import json
import datetime
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
app = Flask(__name__)

# Channel Access Token
# NOTE(review): credentials were redacted by an anonymizer, which also dropped
# the closing quote/paren on this line (a syntax error). Restored here; real
# secrets should come from environment variables, not source code.
line_bot_api = LineBotApi('<KEY>')
# Channel Secret
handler = WebhookHandler('0<PASSWORD>')
# 監聽所有來自 /callback 的 Post Request
# 監聽所有來自 /callback 的 Post Request
@app.route("/callback", methods=['POST'])
def callback():
    """Webhook endpoint: verify the LINE signature, then dispatch the event."""
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']

    # get request body as text
    payload = request.get_data(as_text=True)
    app.logger.info("Request body: " + payload)

    # handle webhook body; a bad signature means the request is not from LINE
    try:
        handler.handle(payload, signature)
    except InvalidSignatureError:
        abort(400)

    return 'OK'
# --- Event handlers ---
@handler.add(FollowEvent)
def sendGreetingms(event):
    """Reply with a greeting when a user follows the bot."""
    greeting = TextSendMessage(text='hello')
    line_bot_api.reply_message(event.reply_token, greeting)
def gettime(event):
    """Reply with a one-column image carousel exposing a time picker."""
    time_picker = DatetimePickerAction(label='time',
                                       data='time_postback',
                                       mode='time')
    clock_column = ImageCarouselColumn(
        image_url='https://www.clipartly.com/wp-content/uploads/2018/10/Cartoon-Alarm-Clock-Clipart-Png.png',
        action=time_picker)
    carousel = ImageCarouselTemplate(columns=[clock_column])
    template_message = TemplateSendMessage(alt_text='ImageCarousel alt text',
                                           template=carousel)
    line_bot_api.reply_message(event.reply_token, template_message)
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Dispatch a reply based on the incoming text message.

    Recognized inputs: '1' (date/time picker carousel), '2' (confirm
    template), and several Japanese keywords for the restaurant bot
    (reservation, delivery, contact info, menu image, opening-hours flex
    bubble). Unrecognized text gets no reply.
    """
    # NOTE(review): `message` is only used as a fallback name; the
    # '営業時間' branch rebinds it before sending.
    message = TextSendMessage(text=event.message.text)
    text = event.message.text
    if text == '1':
        # Two-column image carousel: datetime picker + time picker.
        image_carousel_template = ImageCarouselTemplate(columns=[
            ImageCarouselColumn(image_url='https://via.placeholder.com/1024x1024',
                                action=DatetimePickerAction(label='datetime',
                                                            data='datetime_postback',
                                                            mode='datetime')),
            ImageCarouselColumn(image_url='https://via.placeholder.com/1024x1024',
                                action=DatetimePickerAction(label='date',
                                                            data='date_postback',
                                                            mode='time'))
        ])
        template_message = TemplateSendMessage(
            alt_text='ImageCarousel alt text', template=image_carousel_template)
        line_bot_api.reply_message(event.reply_token, template_message)
    elif text == '2':
        # Simple yes/no confirmation dialog.
        confirm_template = ConfirmTemplate(text='Do it?', actions=[
            MessageAction(label='Yes', text='Yes!'),
            MessageAction(label='No', text='No!'),
        ])
        template_message = TemplateSendMessage(
            alt_text='Confirm alt text', template=confirm_template)
        line_bot_api.reply_message(event.reply_token, template_message)
    # NOTE(review): this starts a second, independent if-chain, so '1'/'2'
    # and the keywords below are checked separately.
    if text == 'レストラン予約':
        # Reservation flow: needs the user's profile, which is only
        # available when the source is a single user (not a group/room).
        if isinstance(event.source, SourceUser):
            profile = line_bot_api.get_profile(event.source.user_id)
            line_bot_api.reply_message(
                event.reply_token, [
                    TextSendMessage(
                        text='Hello ' + profile.display_name + '-san :) , you want to book a table ? \n please tell me'),
                    TextSendMessage(text='何名様でお越しでしょうか?', quick_reply=QuickReply(
                        items=[
                            QuickReplyButton(
                                action=PostbackAction(label="label1", data="data1")
                            ),
                            QuickReplyButton(
                                action=MessageAction(label="label2", text="text2")
                            ),
                            QuickReplyButton(
                                action=DatetimePickerAction(label="label3",
                                                            data="data3",
                                                            mode="date")
                            ),
                            QuickReplyButton(
                                action=CameraAction(label="label4")
                            ),
                            QuickReplyButton(
                                action=CameraRollAction(label="label5")
                            ),
                            QuickReplyButton(
                                action=LocationAction(label="label6")
                            ),
                        ]))
                ]
            )
        else:
            line_bot_api.reply_message(
                event.reply_token,
                TextSendMessage(text="Bot can't use profile API without user ID"))
    elif text == '食材・弁当デリバリー':
        # Delivery keyword: currently a fixed "not available" response.
        line_bot_api.reply_message(
            event.reply_token, [
                TextSendMessage(text='Want to oder ?'),
                TextSendMessage(text='Sorry delivery oder is not avalable rightnow !')
            ]
        )
    elif text == 'お問合せ':
        # Contact information.
        line_bot_api.reply_message(
            event.reply_token, [
                TextSendMessage(
                    text='・居酒屋「くーろん」 \n・原田商店 \n 63 Pham Viet Chanh street.,District Binh Thanh,Ho Chi Minh \n TEL:08 3840 9826 \n 携帯:090 829 5470')
            ]
        )
    elif text == 'メニュー':
        # Menu keyword: reply with the menu image.
        line_bot_api.reply_message(
            event.reply_token, [
                ImageSendMessage(
                    original_content_url='https://www.arclandservice.co.jp/katsuya/menu/1_1.jpg',
                    preview_image_url='https://www.arclandservice.co.jp/katsuya/menu/1_1.jpg'
                )
            ]
        )
    elif text == '営業時間':
        # Opening-hours keyword: Flex Message bubble defined as raw JSON
        # and parsed below with json.loads.
        bubble_string = """
        {
          "type": "bubble",
          "body": {
            "type": "box",
            "layout": "vertical",
            "contents": [
              {
                "type": "image",
                "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/6/60/Sushi_platter.jpg/1200px-Sushi_platter.jpg",
                "position": "relative",
                "size": "full",
                "aspectMode": "cover",
                "aspectRatio": "1:1",
                "gravity": "center"
              },
              {
                "type": "box",
                "layout": "horizontal",
                "contents": [
                  {
                    "type": "box",
                    "layout": "vertical",
                    "contents": [
                      {
                        "type": "text",
                        "text": "Sushi",
                        "weight": "bold",
                        "size": "xl",
                        "color": "#ffffff"
                      },
                      {
                        "type": "box",
                        "layout": "baseline",
                        "margin": "md",
                        "contents": [
                          {
                            "type": "icon",
                            "size": "sm",
                            "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
                          },
                          {
                            "type": "icon",
                            "size": "sm",
                            "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
                          },
                          {
                            "type": "icon",
                            "size": "sm",
                            "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
                          },
                          {
                            "type": "icon",
                            "size": "sm",
                            "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
                          },
                          {
                            "type": "icon",
                            "size": "sm",
                            "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png"
                          },
                          {
                            "type": "text",
                            "text": "4.0",
                            "size": "sm",
                            "color": "#d6d6d6",
                            "margin": "md",
                            "flex": 0
                          }
                        ]
                      }
                    ]
                  },
                  {
                    "type": "box",
                    "layout": "vertical",
                    "contents": [
                      {
                        "type": "text",
                        "text": "¥62,000",
                        "color": "#a9a9a9",
                        "decoration": "line-through",
                        "align": "end"
                      },
                      {
                        "type": "text",
                        "text": "¥42,000",
                        "color": "#ebebeb",
                        "size": "xl",
                        "align": "end"
                      }
                    ]
                  }
                ],
                "position": "absolute",
                "offsetBottom": "0px",
                "offsetStart": "0px",
                "offsetEnd": "0px",
                "backgroundColor": "#00000099",
                "paddingAll": "20px"
              },
              {
                "type": "box",
                "layout": "vertical",
                "contents": [
                  {
                    "type": "text",
                    "text": "OFF - 20 %",
                    "color": "#ffffff"
                  }
                ],
                "position": "absolute",
                "backgroundColor": "#ff2600",
                "cornerRadius": "20px",
                "paddingAll": "5px",
                "offsetTop": "10px",
                "offsetEnd": "10px",
                "paddingStart": "10px",
                "paddingEnd": "10px"
              }
            ],
            "paddingAll": "0px"
          }
        }
        """
        message = FlexSendMessage(alt_text="hello", contents=json.loads(bubble_string))
        line_bot_api.reply_message(
            event.reply_token,
            message
        )
import os  # NOTE(review): imports conventionally belong at the top of the file

if __name__ == "__main__":
    # Bind to the platform-assigned port (e.g. Heroku's $PORT), default 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| 2.34375 | 2 |
pkgs/core/bdsim/core/tunable.py | CallumJHays/bdsim.micropython | 0 | 12773854 | <reponame>CallumJHays/bdsim.micropython
from numbers import Real
from collections import OrderedDict
from collections.abc import Iterable
from abc import ABC, abstractmethod
from typing import Set
import numpy as np
class Tunable:
    """
    A parameter is a variable used by the block diagram that can be
    modified during the runtime of the engine. It is mostly used for
    real-time execution. Many methods of changing these parameters
    are provided with BDSim, from Qt Apps or over a ROS network via
    dynamic_reconfigure ROS params
    """

    def __new__(cls, val, **kwargs):
        "if val is already a param, update its unset attributes by kwargs, otherwise actually create a new Tunable"
        if isinstance(val, Tunable):
            if not issubclass(val.__class__, cls):
                # set the class so that super() doesn't complain when passing in a Tunable as a 'new TunableNum' etc
                val.__class__ = cls
            return val
        else:
            # choose the correct paramtype based on the kwargs passed
            # always check for optionalparam first because it also has kwargs describing the underlying type
            # TODO: add range to commonly used tunables
            param_cls = OptionalTunable if 'default' in kwargs else \
                cls if cls is not Tunable else \
                TunableEnum if 'oneof' in kwargs else \
                TunableNum if _is_num(val) else \
                TunableVec if _is_vectorlike(val) else \
                cls
            param = super().__new__(param_cls)
            # __init__ will not run after this unless param is of class cls,
            # For more specific param types such as TunableRange with 'default' in the kwargs,
            # the OptionalTunable should wrap the TunableRange but doesn't get it's __init__ called, unless:
            if not isinstance(param, cls):
                # we do it manually
                param_cls.__init__(param, val, cls=cls, **kwargs)
            return param

    def __init__(
            self,
            val,
            *,  # force the use of keyword arguments
            name=None,
            on_change=None,
            created_by_user=False,
            **_kwargs):
        # attr() records each attribute name and keeps any value already set
        # (__init__ may re-run on an existing Tunable via __new__ above).
        self.attrs = getattr(self, 'attrs', set())
        self.val = self.attr('val', val)
        self.name = self.attr('name', name)
        # attributes whose change requires the GUI controls to be rebuilt
        self.gui_attrs: Set[str] = self.attr('gui_attrs', set(('name',)))
        self.on_change_cbs = self.attr(
            'on_change_cbs', [on_change] if on_change else [])
        # if a new callback is being added
        if on_change is not None and on_change not in self.on_change_cbs:
            self.on_change(on_change)
        self.gui_reconstructor_cbs = self.attr('gui_reconstructor_cbs', [])
        self.created_by_user = self.attr('created_by_user', created_by_user)
        # this should only be added to in TunableBlock.param
        # list of (TunableBlock, arg_name: str) tuples
        self.used_in = self.attr('used_in', [])
        # TODO: potentially add validate()?

    def override(self, **kwargs):
        """Overwrite attributes in bulk; 'on_change' adds a callback instead."""
        for attr, val in kwargs.items():
            if attr == 'on_change':
                self.on_change(val)
            else:
                setattr(self, attr, val)

    def full_name(self):
        # Fall back to the "block.arg" usage list when no explicit name is set.
        return self.name or ', '.join('%s.%s' % (block, arg_name)
                                      for block, arg_name in self.used_in)

    # add a callback for val change
    def on_change(self, cb):
        # insert it to the start so that setup callbacks (called last) happen first
        self.on_change_cbs.insert(0, cb)

    # add a functional API too, to enable triggering with exclusions (to prevent infinite recursion)
    def set_val(self, val, exclude_cb=None):
        # coalesce single exclude_cb and multiple into tuple
        if not isinstance(exclude_cb, Iterable):
            exclude_cb = (exclude_cb, )
        if val is not self.val:  # only trigger if the value actually changed
            # bypass __setattr__ so the callbacks aren't fired twice
            super().__setattr__('val', val)
            # copy the list so that if more cbs are added to self.on_change_cbs during
            # callback execution, they don't get run (leading to infinite callbacks)
            cbs = list(cb for cb in self.on_change_cbs if cb not in exclude_cb)
            for cb in cbs:
                cb(val)

    def __setattr__(self, attr, val):
        # don't trigger the callbacks on the first val 'set', but do on all others
        if attr == 'val' and hasattr(self, attr):
            self.set_val(val)
        else:
            super().__setattr__(attr, val)
            # gui_attrs may not exist in the first Tunable constructor call
            # pylint: disable=unsupported-membership-test
            if attr in getattr(self, 'gui_attrs', ()):
                self.reconstruct_gui()

    def reconstruct_gui(self):
        """
        Let the gui know that the sub_params have changed in such a way that
        the GUI controls must be reconstructed to reflect the sub_param structure
        """
        for cb in self.gui_reconstructor_cbs:
            cb(self)

    def register_gui_reconstructor(self, cb):
        """
        The gui registers a callback with this so that we can let it know if it must be rerendered
        """
        self.gui_reconstructor_cbs.append(cb)

    @classmethod
    def map(cls, maybe_param, fn):
        "helper function to monad-map a variable that may or may not be a parameter"
        if isinstance(maybe_param, cls):
            maybe_param.val = fn(maybe_param.val)
        else:
            maybe_param = fn(maybe_param)
        return maybe_param

    def attr(self, attr, default):
        "returns attr if self has attr or it's none, otherwise return default"
        self.attrs.add(attr)
        val = getattr(self, attr, default)
        if val is None:
            val = default
        return val
class TunableNum(Tunable):
    """A scalar numeric Tunable with optional bounds and GUI slider config."""

    def __init__(self, val, min=None, max=None, step=None, log_scale=False, **kwargs):
        "step only works if log_scale is False, and only affects gui's"
        super().__init__(val, **kwargs)
        self.min = self.attr('min', min)
        self.max = self.attr('max', max)
        self.step = self.attr('step', step)
        self.log_scale = self.attr('log_scale', log_scale)
        if self.log_scale:
            # A log-scaled slider needs a strictly positive lower bound.
            # Fixes: the original message wrongly said "cannot have a value
            # greater than 1", and `None > 0` raised TypeError when no min
            # was supplied.
            assert self.min is not None and self.min > 0, \
                "log_scaled parameters must have a min greater than 0"
        self.gui_attrs.update({'min', 'max', 'log_scale', 'step'})
class TunableVec(TunableNum):
    """A vector-valued TunableNum: value and bounds become numpy arrays."""

    def __init__(self, val, min=None, max=None, **kwargs):
        def to_array(bound):
            # bounds are optional — only convert when one was supplied
            return None if bound is None else np.array(bound)

        super().__init__(val=np.array(val),
                         min=to_array(min),
                         max=to_array(max),
                         **kwargs)
class TunableEnum(Tunable):
    """A Tunable restricted to one of a fixed set of choices."""

    def __init__(self, val, oneof=None, **kwargs):
        super().__init__(val, **kwargs)
        # TODO: support enums or dict mappings "choicename" -> value. Perhaps a bidict?
        self.oneof = self.attr('oneof', oneof)
        self.gui_attrs.add('oneof')
class TunableObject(Tunable, ABC):
    """
    A TunableObject is a parameter that is made up of a group of other parameters.
    Unlike Tunables, which are typically instantiated directly, TunableObject
    implementations should subclass this.
    Examples of a TunableObject could include a kernel object that could be
    just an ndarray either a description (type, width, height), of which each
    is a parameter.
    """

    def __init__(self, val, **kwargs):
        # The composite value is derived from sub-params, so start from None.
        super().__init__(val=None, **kwargs)
        # hyperparam setups can be intricate so better to hard reset these attrs every time
        # sub-parameters - shouldn't change after instantiation
        self.params = self.attr('params', OrderedDict())
        # names of sub-params currently hidden from the GUI
        self.hidden = self.attr('hidden', set())
        self.gui_attrs.update({'hidden'})
        # bind the method to a single object so we can exclude it from param update recursion later
        # in python each self.func bound method is a different object so it can't
        # be checked for equality unless saved like so.
        # Need to think of a cleaner way to do this
        # I know this looks very strange, but it works
        self.update = self.update  # type: ignore

    @abstractmethod
    def update(self, _updated_val=None):
        # Subclasses recompute self.val from their sub-parameter values here.
        pass

    def param(self, name, val, on_change=None, cls=Tunable, **kwargs):
        """
        Registers a sub-parameter for control generation.
        The order in which these are called determine the order of the GUI controls displayed.
        """
        # by default call the update function
        if not on_change:
            on_change = self.update
        if name in self.params:
            # already registered (e.g. __init__ re-ran): reuse the instance
            param = self.params[name]
        else:
            param = cls(val, **kwargs)
            # set this afterwards to avoid a callback after the initial self.val = ...
            param.on_change(on_change)
            # mirror the sub-param's value onto an attribute of this object
            param.on_change(lambda val: setattr(self, name, val))
            param.used_in.append((self.full_name(), name))
            self.params[name] = param
        return param.val

    def show_only(self, *params):
        # Show the given sub-params and hide every other registered one.
        self.show(*params)
        prev = self.hidden
        self.hidden = (prev - set(params)
                       ).union(set(p for p in self.params if p not in params))
        if prev != self.hidden:
            self.reconstruct_gui()

    def show(self, *params):
        # Remove the given sub-params from the hidden set.
        prev = self.hidden
        self.hidden = prev - set(params)  # god I love python sometimes
        if prev != self.hidden:
            self.reconstruct_gui()

    def hide(self, *params):
        # Add the given sub-params to the hidden set.
        prev = self.hidden
        self.hidden = prev.union(set(params))
        if prev != self.hidden:
            self.reconstruct_gui()
class OptionalTunable(TunableObject):
    """Wraps a Tunable whose value may be None; a GUI guards it with a checkbox.

    Fix: use explicit `is None` checks so falsy-but-valid values
    (0, 0.0, False, "") are not silently replaced by the default.
    """

    def __init__(self, val, default=None, **kwargs):
        super().__init__(val, **kwargs)
        self.default = default
        self.enabled = self.param('enabled', val is not None)
        # forward type-describing kwargs to the wrapped sub-parameter,
        # stripping the ones that belong to this wrapper itself
        subvalue_kwargs = {k: v for k, v in kwargs.items(
        ) if k not in ('on_change', 'name', 'default')}
        self.enabled_value = self.param(
            'enabled_value',
            # was `val if val else default`, which dropped falsy values
            val if val is not None else default,
            **subvalue_kwargs)
        self.update()

    def update(self, _=None):
        """Recompute self.val from the checkbox state and the wrapped value."""
        if self.enabled:
            self.show('enabled_value')
            # was `self.enabled_value or self.default`, which broke for 0/False
            self.val = (self.enabled_value
                        if self.enabled_value is not None else self.default)
        else:
            self.hide('enabled_value')
            self.val = None
class TunableRange(TunableObject):
    """A (lower, upper) pair of numeric Tunables sharing min/max/step config."""

    def __init__(self, val, min, max, step=None, log_scale=False, **kwargs):
        super().__init__(val, **kwargs)
        # Explicit None check (was `val if val else ...`), so a falsy-but-valid
        # pair such as (0, 0) is not discarded.
        lower, upper = val if val is not None else (None, None)
        shared_kwargs = dict(min=min, max=max, step=step, log_scale=log_scale)
        self.lower = self.param('lower', lower, **shared_kwargs)
        self.upper = self.param('upper', upper, **shared_kwargs)
        self.update()

    def update(self, _=None):
        """Keep self.val in sync with the two sub-parameters."""
        self.val = self.lower, self.upper
def _is_vectorlike(x):
return isinstance(x, np.ndarray) and x.ndim == 1 \
or isinstance(x, Iterable) and all(isinstance(x_i, Real) for x_i in x)
def _is_num(x):
# bool is a real number too according to python, we don't want that
return isinstance(x, Real) and not isinstance(x, bool)
| 3.328125 | 3 |
oth-chain/tests/test_pow_chain.py | McSido/oth-chain | 0 | 12773855 | """ Testing module for the Proof-Of-Work implementation
of the blockchain client.
"""
import hashlib
import math
import time
from queue import Queue
from queue import Empty
import nacl.encoding
import nacl.signing
from chains import Transaction, Block, Header, PoW_Blockchain
import utils
VERSION = 0.7
class TestPOW(object):
    """ Testcase used to bundle all tests for the
    Proof-Of-Work blockchain
    """

    def setup(self):
        """ Setup of the blockchain for the tests.
        """
        utils.set_debug()
        # Outbound network messages land in this queue instead of a socket.
        self.sends = Queue()
        self.gui_queue = Queue()
        self.blockchain = PoW_Blockchain(VERSION, self.sends, self.gui_queue)
        # Fixed seeds -> deterministic keypairs, so signatures are stable.
        self.sender_sign = nacl.signing.SigningKey(seed=b'a' * 32)
        self.sender_verify = self.sender_sign.verify_key.encode(
            nacl.encoding.HexEncoder)
        self.receiver_sign = nacl.signing.SigningKey(seed=b'b' * 32)
        self.receiver_verify = self.receiver_sign.verify_key.encode(
            nacl.encoding.HexEncoder)

    def test_block(self):
        """ Test that the block creation works as intended.
        """
        proof = self.blockchain.create_proof(self.sender_verify)
        block = self.blockchain.create_block(proof)
        # Coinbase-style transaction: sender '0' mints the mining reward.
        mining_transaction = \
            Transaction(sender='0',
                        recipient=self.sender_verify,
                        amount=50,
                        fee=0,
                        timestamp=time.time(),
                        signature='0')
        block.transactions.append(mining_transaction)
        root_hash = self.blockchain.create_merkle_root(block.transactions)
        # Rebuild the header with the real Merkle root before submitting.
        real_header = Header(
            block.header.version,
            block.header.index,
            block.header.timestamp,
            block.header.previous_hash,
            root_hash,
            block.header.proof
        )
        real_block = Block(real_header, block.transactions)
        self.blockchain.new_block(real_block)
        assert self.blockchain.latest_block() == real_block
        assert (mining_transaction in
                self.blockchain.latest_block().transactions)
        assert self.blockchain.check_balance(
            self.sender_verify, time.time()) == 50

    def test_transaction_invalid_balance(self):
        """ Test that the transactions with invalid balances are recognized and
        not added to the blockchain.
        """
        # No block was mined, so the sender has no balance to spend.
        transaction = self.create_transaction()
        assert not self.blockchain.validate_transaction(transaction, False)
        self.blockchain.new_transaction(transaction)
        assert transaction not in self.blockchain.transaction_pool
        assert self.sends.empty()

    def test_transaction_invalid_signature(self):
        """ Test that the transactions with invalid signatures are recognized
        and not added to the blockchain.
        """
        self.mine_block(self.blockchain)
        transaction = self.create_transaction()
        # Re-sign with the *receiver's* key: the signature no longer matches
        # the sender's verify key and must be rejected.
        transaction = Transaction(
            transaction.sender,
            transaction.recipient,
            transaction.amount,
            transaction.fee,
            transaction.timestamp,
            self.receiver_sign.sign(
                self.create_transaction_hash(
                    transaction.amount,
                    transaction.fee,
                    transaction.timestamp
                ).encode()
            )
        )
        assert not self.blockchain.validate_transaction(transaction, False)
        self.blockchain.new_transaction(transaction)
        assert transaction not in self.blockchain.transaction_pool
        assert self.sends.empty()

    def test_transaction_invalid_double(self):
        """ Test that the same transaction is not added twice to the blockchain.
        """
        self.mine_block(self.blockchain)
        transaction = self.create_transaction()
        # First submission is valid ...
        assert self.blockchain.validate_transaction(transaction, False)
        self.blockchain.new_transaction(transaction)
        assert transaction in self.blockchain.transaction_pool
        assert not self.sends.empty()
        # ... the identical transaction must then be rejected.
        assert not self.blockchain.validate_transaction(transaction, False)

    def test_transaction_valid(self):
        """ Test that a valid transaction is recognized and added to the
        blockchain.
        """
        self.mine_block(self.blockchain)
        transaction = self.create_transaction()
        assert self.blockchain.validate_transaction(transaction, False)
        self.blockchain.new_transaction(transaction)
        assert transaction in self.blockchain.transaction_pool
        assert not self.sends.empty()

    def test_new_header(self, capsys):
        """ Test that a new incoming header is processed accordingly.
        """
        # NOTE(review): capture is assumed to be disabled only while mining the
        # proof (which prints progress); the later readouterr() needs capture
        # active to see the debug output — confirm scope against upstream.
        with capsys.disabled():
            proof = self.blockchain.create_proof(self.sender_verify)
            last_header = self.blockchain.latest_header()
        # Valid
        new_header = Header(0,
                            1,
                            time.time(),
                            last_header.root_hash,
                            123,
                            proof
                            )
        self.blockchain.process_message(('new_header',
                                         new_header,
                                         ''))
        assert self.sends.get() == ('get_block', new_header, 'broadcast')
        # Invalid
        new_header = Header(0,
                            1,
                            time.time(),
                            321,
                            123,
                            proof
                            )
        self.blockchain.process_message(('new_header',
                                         new_header,
                                         ''))
        captured = capsys.readouterr()
        assert captured.out == '### DEBUG ### Invalid header\n'
        # Farther away
        new_header = Header(0,
                            123,
                            time.time(),
                            321,
                            last_header.root_hash,
                            proof
                            )
        self.blockchain.process_message(('new_header',
                                         new_header,
                                         ''))
        assert self.sends.get() == ('get_chain', '', 'broadcast')

    def test_get_block(self):
        """ Test that get_block works.
        Uses latest_block for comparison.
        """
        b = self.blockchain.latest_block()
        assert b == self.blockchain.get_block(b.header)
        # Invalid header -> return None
        assert not self.blockchain.get_block('')

    def test_send_block(self):
        """ Test that send_block works.
        """
        self.mine_block(self.blockchain)
        b = self.blockchain.latest_block()
        self.blockchain.send_block(b.header, '123')
        assert self.sends.get() == ('new_block', b, '123')

    def test_merkle_root(self):
        """ Test that Merkle root is independent of transaction order.
        Only factor for the Merkle root should be timestamp of the transaction.
        """
        t = [self.create_transaction() for i in range(15)]
        assert self.blockchain.create_merkle_root(t) == \
            self.blockchain.create_merkle_root(list(reversed(t)))

    def test_msg_transaction(self):
        """ Test that the process message can process new transactions
        """
        self.mine_block(self.blockchain)
        t = self.create_transaction()
        self.blockchain.process_message(('new_transaction', t, ''))
        assert t in self.blockchain.transaction_pool

    def test_resolve_conflict(self):
        """ Test that resolve conflict works
        """
        # Initial chain
        self.mine_block(self.blockchain)
        t = self.create_transaction()
        self.blockchain.new_transaction(t)
        self.mine_block(self.blockchain)
        # Secondary chain
        bchain2 = PoW_Blockchain(VERSION,
                                 Queue(),
                                 Queue()
                                 )
        # Fill secondary chain (longer than the initial one)
        for _ in range(3):
            self.mine_block(bchain2)
        bchain2.new_transaction(t)
        bchain2.process_message(('mine', self.sender_verify, 'local'))
        # Check new_chain of the initial blockchain
        self.blockchain.resolve_conflict(bchain2.get_header_chain())
        assert bchain2.latest_header() == self.blockchain.nc_latest_header()
        # Add to secondary chain, to test "pre-filling" of new_chain
        for _ in range(3):
            self.mine_block(bchain2)
        self.blockchain.resolve_conflict(bchain2.get_header_chain())
        assert bchain2.latest_header() == self.blockchain.nc_latest_header()
        # Chain exchange
        for b in bchain2.get_block_chain():
            self.blockchain.new_block(b)
        assert bchain2.latest_block() == self.blockchain.latest_block()

    # ####################### HELPER FUNCTIONS ###########################

    def mine_block(self, chain):
        """ Mine an initial block to add a balance to the test account.

        Args:
            chain: Chain to mine on
        """
        proof = chain.create_proof(self.sender_verify)
        block = chain.create_block(proof)
        # Mining reward transaction (sender '0' = coinbase).
        block.transactions.append(
            Transaction(sender='0',
                        recipient=self.sender_verify,
                        amount=50,
                        fee=0,
                        timestamp=time.time(),
                        signature='0'))
        root_hash = chain.create_merkle_root(block.transactions)
        real_header = Header(
            block.header.version,
            block.header.index,
            block.header.timestamp,
            block.header.previous_hash,
            root_hash,
            block.header.proof
        )
        real_block = Block(real_header, block.transactions)
        chain.new_block(real_block)
        try:
            self.sends.get(block=False)  # Remove new_block message
        except Empty:
            pass

    def create_transaction(self):
        """ Create simple transaction used in tests.

        Returns:
            A new transaction.
        """
        amount = 10
        timestamp = time.time()
        fee = math.ceil(amount * 0.05)
        transaction_hash = self.create_transaction_hash(amount, fee, timestamp)
        return Transaction(
            self.sender_verify,
            self.receiver_verify,
            amount,
            fee,
            timestamp,
            self.sender_sign.sign(transaction_hash.encode())
        )

    def create_transaction_hash(self, amount, fee, timestamp):
        """ Creates the transaction-hash used in tests.

        Args:
            amount: Amount of coins for transaction.
            fee: Fee for the transaction.
            timestamp: Time of the transaction.

        Returns:
            Hash for the given transaction data
        """
        return hashlib.sha256(
            (str(self.sender_verify) + str(self.receiver_verify) +
             str(amount) + str(fee) + str(timestamp)).encode()
        ).hexdigest()
| 2.828125 | 3 |
14.py | vandorjw/ProjectEuler | 0 | 12773856 |
def collatz(n, count):
    """Return `count` plus the number of Collatz steps needed to reach 1.

    With count=1 the result is the length of the Collatz chain starting at
    `n` (number of terms including `n` and the final 1).
    """
    if n == 1:
        return count
    count = count + 1
    if n % 2 == 0:
        # Floor division keeps n an int; the original `n / 2` silently
        # switched to floats on Python 3.
        n = n // 2
    else:
        n = 3 * n + 1
    return collatz(n, count)
# Project Euler 14: find the starting number below one million that
# produces the longest Collatz chain.
max_collatz = 1
iteration = 1
for i in range(1, 1000000):
    # (removed the unused `count = 1` that was reassigned every iteration)
    contesting = collatz(i, 1)
    if contesting > max_collatz:
        max_collatz = contesting
        iteration = i

print("Final winner is integer {}".format(iteration))
| 3.65625 | 4 |
scripts/data.py | ldself/covidweb_api_dashboard | 0 | 12773857 | <filename>scripts/data.py
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 20 11:30:55 2021
@author: lself
"""
import pandas as pd
from collections import OrderedDict
import requests
import plotly.graph_objects as go
from plotly.offline import plot
states_default = OrderedDict([('Alabama', 'AL')])
def return_figures(states=states_default):
    """Creates a plotly visualization using the COVID tracking API
    (http://covidtracking.com/data/api)

    Example of the COVID API endpoint:
    https://api.covidtracking.com/v1/states/{state}/daily.json

    Args:
        states (dict): mapping of state name -> two-letter code; only the
            first entry is used. Falls back to `states_default` when empty.

    Returns:
        list (dict): list containing the (possibly) multiple visualizations;
        an empty list when the API data could not be loaded (the original
        returned an empty DataFrame here, violating the documented contract).
    """
    # when the states variable is empty, use the states_default dictionary
    if not states:
        states = states_default

    # prepare filter data for the COVID API (codes must be lower case)
    states_filter = [code.lower() for code in states.values()]

    url = "https://api.covidtracking.com/v1/states/" + states_filter[0] + "/daily.json"

    try:
        r = requests.get(url)
        data = pd.DataFrame(r.json())
    except Exception:  # was a bare `except:`, which also swallowed SystemExit
        print('could not load data ', )
        return []

    # sort values by date ascending
    data = data.sort_values('date').reset_index()

    # convert date column to a date data type (yyyy-mm-dd)
    data['date'] = pd.to_datetime(data['date'], format='%Y%m%d')

    # create 7-day moving average of new positives
    data['positives_SMA_07'] = data['positiveIncrease'].rolling(window=7).mean()

    # create 7-day moving average of new hospitalizations
    data['hospitalizations_SMA_07'] = data['hospitalizedIncrease'].rolling(window=7).mean()

    # create 7-day moving average of new deaths
    data['deaths_SMA_07'] = data['deathIncrease'].rolling(window=7).mean()

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=data['date'],
                             y=data['positives_SMA_07'],
                             fill='tozeroy',
                             name='Positives'))
    fig.add_trace(go.Scatter(x=data['date'],
                             y=data['hospitalizations_SMA_07'],
                             fill='tozeroy',
                             name="Hospitalizations"))
    fig.add_trace(go.Scatter(x=data['date'],
                             y=data['deaths_SMA_07'],
                             fill='tozeroy',
                             name='Deaths'))
    fig.update_layout(title='7-day moving averages for ' + list(states.keys())[0])

    figures = [fig]
    return figures
if __name__ == "__main__":
    # Quick manual check: render the figure for a single state.
    # (Removed dataset residue `| 3.421875 | 3 |` that was fused onto the
    # last line and made the file unparseable.)
    state_dict = {'Washington': 'WA'}
    figures = return_figures(state_dict)
    plot(figures[0])
tecladu.py | wellingtonfs/fsek-pessoal | 0 | 12773858 | <reponame>wellingtonfs/fsek-pessoal
#!/usr/bin/env python3
# so that script can be run from Brickman
import termios, tty, sys
from ev3dev.ev3 import *
# attach large motors to ports B and C, medium motor to port A
motor_left = LargeMotor('outC')
motor_right = LargeMotor('outD')
motor_a = MediumMotor('outA')
motor_b = MediumMotor('outB')
def getch():
    """Read a single character from stdin without waiting for Enter.

    Temporarily puts the terminal into cbreak mode; the try/finally
    guarantees the previous settings are restored even if the read raises
    (e.g. KeyboardInterrupt) — the original could leave the terminal in
    cbreak mode in that case.
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setcbreak(fd)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def forward():
    # Drive both wheel motors forward at the same speed.
    motor_left.run_forever(speed_sp=200)
    motor_right.run_forever(speed_sp=200)
#==============================================
def back():
    # Drive both wheel motors in reverse.
    motor_left.run_forever(speed_sp=-200)
    motor_right.run_forever(speed_sp=-200)
#==============================================
def left():
    # Spin in place to the left (wheels turn in opposite directions).
    motor_left.run_forever(speed_sp=-200)
    motor_right.run_forever(speed_sp=200)
#==============================================
def right():
    # Spin in place to the right.
    motor_left.run_forever(speed_sp=200)
    motor_right.run_forever(speed_sp=-200)
#==============================================
def stop():
    # Halt everything by commanding zero speed on all four motors.
    motor_left.run_forever(speed_sp=0)
    motor_right.run_forever(speed_sp=0)
    motor_a.run_forever(speed_sp=0)
    motor_b.run_forever(speed_sp=0)
#==============================================
def up():
    # Run the two medium motors in opposite directions (raise mechanism).
    motor_a.run_forever(speed_sp=200)
    motor_b.run_forever(speed_sp=-200)

def down():
    # Reverse of up().
    motor_a.run_forever(speed_sp=-200)
    motor_b.run_forever(speed_sp=200)
# Main teleoperation loop: WASD drives the wheels, o/p runs the medium
# motors, space stops all motors, q quits. (The original final line was
# corrupted with dataset residue `| 2.375 | 2 |`; it is removed here.)
while True:
    k = getch()
    print(k)
    if k == 'w':
        forward()
    elif k == 's':
        back()
    elif k == 'a':
        left()
    elif k == 'd':
        right()
    elif k == 'o':
        up()
    elif k == 'p':
        down()
    elif k == ' ':
        stop()
    elif k == 'q':
        # sys.exit raises SystemExit cleanly; sys is imported at file top
        sys.exit()
pages/migrations/0001_initial.py | 501code/fletcher-street-urban-riding-club | 1 | 12773859 | <filename>pages/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-31 07:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the `pages` app.

    Creates the Page and Section models and links Page -> Section.
    Generated by Django 1.9 `makemigrations`; avoid hand-editing the
    operations below.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300, verbose_name='Page Title')),
                ('body', models.TextField(max_length=5000, verbose_name='Body')),
                ('published', models.BooleanField(default=True, verbose_name='Published')),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date Published')),
            ],
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300, verbose_name='Section Title')),
                ('description', models.CharField(max_length=400, verbose_name='Description')),
                ('key', models.CharField(max_length=50)),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date Published')),
                ('visible', models.BooleanField(default=True, verbose_name='Visible')),
            ],
        ),
        # Deleting a Section cascades to its Pages.
        migrations.AddField(
            model_name='page',
            name='section',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pages.Section'),
        ),
    ]
| 1.6875 | 2 |
utils/dataset.py | NaCl-Ocean/Anchor_free_detection_rotation | 12 | 12773860 | import os
import torch
from torchvision import datasets
from utils.boxlist import BoxList
import cv2
import numpy as np
import random
def has_only_empty_bbox(annot):
    """Return True when every annotation in `annot` has a degenerate box.

    A box is "empty" when its width or height (bbox[2:]) is <= 1.
    Vacuously True for an empty annotation list.
    """
    def _is_degenerate(entry):
        return any(side <= 1 for side in entry['bbox'][2:])

    return all(_is_degenerate(entry) for entry in annot)
def has_valid_annotation(annot):
    """An annotation list is usable iff it is non-empty and not all-degenerate."""
    return bool(annot) and not has_only_empty_bbox(annot)
class DOTADataset(datasets.CocoDetection):
    """DOTA aerial-image dataset in COCO format with rotated (8-point) boxes.

    Each sample is returned as (image, BoxList target in 'xywha_d' mode,
    index, file name). Images are loaded with OpenCV (BGR).
    """
    # Category names; index 0 is the background class.
    NAME_TAB = ('__background__', 'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
                'small-vehicle', 'large-vehicle', 'ship',
                'tennis-court', 'basketball-court',
                'storage-tank', 'soccer-ball-field',
                'roundabout', 'harbor',
                'swimming-pool', 'helicopter')
    def __init__(self, path, split,image_folder_name, anno_folder_name,transform=None):
        """
        path : dataset folder path
        dataset structure:
            ├── dataset_path
            │   ├── annotations
            │   │   ├── anno_folder_name +'train'.json
            │   │   ├── anno_folder_name + 'val'.json
            │   │   ├── anno_folder_name + 'test'.json
            │   ├── image_folder_name+'train'
            │   ├── image_folder_name+'val'
            │   ├── image_folder_name+'test'
        """
        root, annot = self.get_root_annotation_path(path,split,image_folder_name,anno_folder_name)
        super().__init__(root, annot)
        self.ids = sorted(self.ids)
        if split == 'train' or split == 'val_loss':
            # Keep only images whose annotations are non-empty and not
            # all-degenerate (see has_valid_annotation).
            ids = []
            for id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=id, iscrowd=None)
                annot = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(annot):
                    ids.append(id)
            self.ids = ids
        # Contiguous label ids starting at 1 (0 is reserved for background).
        self.category2id = {v: i + 1 for i, v in enumerate(self.coco.getCatIds())}
        self.id2category = {v: k for k, v in self.category2id.items()}
        self.id2img = {k: v for k, v in enumerate(self.ids)}
        self.transformer = transform
    def set_transform(self,transform):
        # Replace the (image, target) transform applied in __getitem__.
        self.transformer = transform
    def get_root_annotation_path(self,path,split,image_folder_name,anno_folder_name):
        '''
        root : image dir
        annot: annotation file path
        '''
        self.split = split
        self.anno_folder_name = anno_folder_name
        self.image_folder_name = image_folder_name
        '''split: train, val, test'''
        # 'val_loss' reuses the validation images/annotations.
        if split == 'val_loss':
            annot = os.path.join(path, 'annotations', f"{self.anno_folder_name}val.json")
            root = os.path.join(path, f'{self.image_folder_name}val')
        else:
            annot = os.path.join(path, 'annotations', f'{self.anno_folder_name}{split}.json')
            root = os.path.join(path, f'{self.image_folder_name}{split}')
        return root, annot
    def __getitem__(self, index, transform_enable=True):
        # A (index, flag) tuple/list can be passed to control the transform.
        # NOTE(review): for a bare int index the transform_enable parameter
        # is overwritten to True below, so the keyword argument only has an
        # effect via the tuple form -- confirm this is intended.
        if isinstance(index, tuple) or isinstance(index, list):
            transform_enable = index[1]
            index = index[0]
        else:
            transform_enable = True
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        annots = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = cv2.imread(os.path.join(self.root, path), cv2.IMREAD_UNCHANGED)
        if img.ndim == 2:
            # if single channel image, then convert to BGR
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.ndim == 3:
            pass
        else:
            raise RuntimeError("{} channel image not supported".format(img.ndim))
        height, width,_ = img.shape
        # Drop crowd annotations; boxes are 8 values (4 corner points).
        annots = [o for o in annots if o['iscrowd'] == 0]
        boxes = [o['bbox'] for o in annots]
        boxes = torch.as_tensor(boxes).reshape(-1, 8)
        #target = BoxList(boxes, (width,height), mode='xyxyxyxy').convert('xywha')
        target = BoxList(boxes, (width,height), mode='xyxyxyxy')
        # Normalise corner order, then convert to (cx, cy, w, h, angle-deg).
        target = target.change_order_to_clockwise()
        target = target.convert('xywha_d')
        #target = target.convert('xywha')
        classes = [o['category_id'] for o in annots]
        classes = [self.category2id[c] for c in classes]
        classes = torch.tensor(classes)
        # target.fields['labels'] = classes
        target.add_field('labels', classes)
        target = target.clip_to_image(remove_empty=True)
        if self.transformer is not None and transform_enable:
            img, target = self.transformer(img, target)
        return img, target, index, path
    def get_image_meta(self, index):
        # Return the raw COCO image record (file_name, width, height, ...).
        id = self.id2img[index]
        img_data = self.coco.imgs[id]
        return img_data
class ImageList:
    """A padded batch tensor paired with each image's original (h, w) size."""

    def __init__(self, tensors, sizes):
        # `tensors`: the stacked, padded batch; `sizes`: per-image true sizes.
        self.tensors = tensors
        self.sizes = sizes

    def to(self, *args, **kwargs):
        """Move/cast the underlying tensor; sizes are metadata and unchanged."""
        moved = self.tensors.to(*args, **kwargs)
        return ImageList(moved, self.sizes)
def image_list(tensors, size_divisible=0):
    """Zero-pad a list of C×H×W tensors to a common size and stack them.

    Args:
        tensors: list of 3-D image tensors, possibly with differing H/W.
        size_divisible: if > 0, round the padded H and W up to a multiple
            of this stride (e.g. so feature-map strides divide evenly).

    Returns:
        ImageList holding the padded batch and each image's true (h, w).
    """
    max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
    if size_divisible > 0:
        stride = size_divisible
        max_size = list(max_size)
        # Round H and W up to the next multiple of `stride` with ceil
        # division. The previous `(x | (stride - 1)) + 1` bit trick was
        # only correct for power-of-two strides; this form matches it for
        # power-of-two values and is correct for any positive stride.
        max_size[1] = ((max_size[1] + stride - 1) // stride) * stride
        max_size[2] = ((max_size[2] + stride - 1) // stride) * stride
        max_size = tuple(max_size)
    shape = (len(tensors),) + max_size
    batch = tensors[0].new(*shape).zero_()
    for img, pad_img in zip(tensors, batch):
        # Copy each image into the top-left corner of its padded slot.
        pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    sizes = [img.shape[-2:] for img in tensors]
    return ImageList(batch, sizes)
def collate_fn(config):
    """Build a DataLoader collate function that pads images into one batch."""
    def collate_data(batch):
        # Transpose [(img, target, idx, ...), ...] into parallel columns.
        columns = list(zip(*batch))
        padded = image_list(columns[0], config.size_divisible)
        return padded, columns[1], columns[2]
    return collate_data
| 2.46875 | 2 |
tests/Internship_app/test_urls_ads/urls_ad.py | StefanDimitrovDimitrov/Internship | 1 | 12773861 | <reponame>StefanDimitrovDimitrov/Internship
from django.urls import reverse,resolve
from django.test import SimpleTestCase
from Internship.internship_app.views import Home, about, CatalogCompanies, catalog_ad, create_ad, details_ad, edit_ad, \
delete_ad, deactivate_ad, activate_ad, apply
class TestUrls(SimpleTestCase):
    """Verify each named URL pattern resolves to the expected view.

    NOTE(review): assertEquals is a deprecated alias of assertEqual.
    """
    def test_home_url_resolves(self):
        url = reverse('home')
        self.assertEquals(resolve(url).func.view_class,Home)
    def test_about_url_resolves(self):
        url = reverse('about')
        self.assertEquals(resolve(url).func, about)
    def test_catalog_companies_url_resolves(self):
        url = reverse('catalog companies')
        self.assertEquals(resolve(url).func.view_class,CatalogCompanies)
    def test_ads_url_resolves(self):
        url = reverse('catalog ads')
        self.assertEquals(resolve(url).func, catalog_ad)
    def test_create_ad_url_resolves(self):
        url = reverse('create ad')
        self.assertEquals(resolve(url).func, create_ad)
    def test_details_ad_url_resolves(self):
        # URL takes a single positional ad id.
        url = reverse('details ad', args=[1])
        self.assertEquals(resolve(url).func,details_ad)
    def test_edit_ad_url_resolves(self):
        url = reverse('edit ad', args=[1])
        self.assertEquals(resolve(url).func,edit_ad)
    def test_delete_ad_url_resolves(self):
        url = reverse('delete ad', args=[1])
        self.assertEquals(resolve(url).func,delete_ad)
    def test_deactivate_ad_url_resolves(self):
        url = reverse('deactivate ad', args=[1])
        self.assertEquals(resolve(url).func,deactivate_ad)
    def test_activate_ad_url_resolves(self):
        url = reverse('activate ad', args=[1])
        self.assertEquals(resolve(url).func,activate_ad)
    def test_apply_ad_url_resolves(self):
        url = reverse('apply', args=[1])
        self.assertEquals(resolve(url).func,apply)
examples/colors.py | edouard-lopez/colorful | 517 | 12773862 | # -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import sys
import colorful
def show():
    """
    Show the modifiers and colors
    """
    write = sys.stdout.write

    # modifiers
    modifiers = ('bold', 'dimmed', 'italic', 'underlined',
                 'inversed', 'concealed', 'struckthrough')
    for name in modifiers[:-1]:
        write(getattr(colorful, name)(name) + ' ')
    write(getattr(colorful, modifiers[-1])(modifiers[-1]) + '\n')

    # foreground colors
    colors = ('red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
    for name in colors[:-1]:
        write(getattr(colorful, name)(name) + ' ')
    write(getattr(colorful, colors[-1])(colors[-1]) + '\n')

    # background colors (on_<color> styles the background instead)
    for name in colors[:-1]:
        write(getattr(colorful, 'on_' + name)(name) + ' ')
    write(getattr(colorful, 'on_' + colors[-1])(colors[-1]) + '\n')
# Run the demo only when executed as a script.
if __name__ == '__main__':
    show()
| 3.125 | 3 |
test/echeck_test.py | gitter-badger/easy_echeck | 1 | 12773863 | <reponame>gitter-badger/easy_echeck<filename>test/echeck_test.py
# -*- coding: utf-8 -*-
class TestECurl():
    """Exercise Curlclient against live URLs and print per-request timings.

    NOTE(review): this test hits external sites, so results depend on
    network availability; it prints metrics rather than asserting them.
    """
    def test_ecurl(self):
        from echeck.Curlclient import Curlclient
        url_list = ['https://www.baidu.com','http://www.pathcurve.cn']
        client = Curlclient(url_list, 'indexfile')
        res_list = client.docheck()
        for res in res_list:
            print(" *URL_LIST:%s" % (res['url']))
            if res['res'] == 1:
                # Curl timing metrics are in seconds; printed as milliseconds.
                print(" *HTTP状态码:%s" % (res['HTTP_CODE']))
                print(" *DNS解析时间:%.2f ms" % (res['NAMELOOKUP_TIME'] * 1000))
                print(" *建立连接时间:%.2f ms" % (res['CONNECT_TIME'] * 1000))
                print(" *准备传输时间:%.2f ms" % (res['PRETRANSFER_TIME'] * 1000))
                print(" *传输开始时间:%.2f ms" % (res['STARTTRANSFER_TIME'] * 1000))
                print(" *传输结束总时间:%.2f ms" % (res['TOTAL_TIME'] * 1000))
                print(" *下载数据包大小:%d bytes/s" % (res['SIZE_DOWNLOAD']))
                print(" *HTTP头部大小:%d byte" % (res['HEADER_SIZE']))
                print(" *平均下载速度:%d bytes/s" % (res['SPEED_DOWNLOAD']))
            else:
                print(" *失败信息:%s" % (res['mesg']))
partitions/registry.py | eldarion/django-partitions | 1 | 12773864 | from django.conf import settings
class Registry(object):
    """Maps a partition key to a {"app_label.ModelName": expression} dict."""

    def __init__(self):
        self._partitions = {}

    def register(self, key, app_model, expression):
        """Register `expression` for `app_model` under partition `key`.

        `app_model` may be a model class or an "app_label.ModelName"
        string. Raises if the pair is already registered or the app is
        not listed in settings.INSTALLED_APPS.
        """
        # `basestring` only exists on Python 2 and raised NameError on
        # Python 3; `str` covers the string case on Python 3.
        if not isinstance(app_model, str):
            # Normalise a model class to its "app_label.ObjectName" form.
            app_model = "%s.%s" % (
                app_model._meta.app_label,
                app_model._meta.object_name
            )
        if key in self._partitions and app_model in self._partitions[key]:
            raise Exception("'%s' is already registered." % key)
        if app_model.split(".")[0] not in settings.INSTALLED_APPS:
            raise Exception("'%s' is not in INSTALLED_APPS" % app_model.split(".")[0])
        # setdefault replaces the equivalent if/else update/insert pair.
        self._partitions.setdefault(key, {})[app_model] = expression

    def expression_for(self, key, app_model):
        """Return the registered expression, or None when absent."""
        return self._partitions.get(key, {}).get(app_model)
# Module-level singleton shared by all importers of this module.
registry = Registry()
def register(key, app_model, expression):
    # Convenience wrapper around the singleton `registry`.
    registry.register(key, app_model, expression)
| 2.21875 | 2 |
events/admin.py | MufasaTheMusician/livelobby | 0 | 12773865 | <gh_stars>0
from django.contrib import admin
from events.models import Event, Participant
# Expose Event and Participant in the Django admin with default options.
admin.site.register(Event)
admin.site.register(Participant)
| 1.429688 | 1 |
ml_tutorial/rnn.py | sci2lab/ml_tutorial | 38 | 12773866 | # AUTOGENERATED! DO NOT EDIT! File to edit: 09_rnn.ipynb (unless otherwise specified).
__all__ = ['generate_data', 'encode', 'decode']
# Cell
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, TimeDistributed, Dense, RepeatVector
#export
def generate_data(training_size=10):
    """Generate `training_size` unique addition problems as padded strings.

    Returns (X, y): each X[i] is "a+b" left-padded to width MAXLEN and
    each y[i] is str(a + b) left-padded to DIGITS + 1 characters.
    NOTE(review): relies on module-level DIGITS / MAXLEN and numpy as
    `np` being defined elsewhere in this generated file.
    """
    X = []
    y = []
    duplicates = set()
    p_bar = tqdm(total=training_size)
    while len(X) < training_size:
        # Draw two random integers with 1..DIGITS digits each.
        a = int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1))))
        b = int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1))))
        # Sort so (a, b) and (b, a) count as the same problem.
        pair = tuple(sorted((a, b)))
        if pair in duplicates:
            continue
        duplicates.add(pair)
        pair_str = '{}+{}'.format(a,b)
        pair_str = ' ' * (MAXLEN - len(pair_str)) + pair_str
        ans = str(a + b)
        ans = ' ' * ((DIGITS + 1) - len(ans)) + ans
        X.append(pair_str)
        y.append(ans)
        p_bar.update(1)
    return X,y
#export
def encode(questions, answers, alphabet):
    """One-hot encode question/answer strings over `alphabet`.

    Returns (x, y) with shapes (N, MAXLEN, len(alphabet)) and
    (N, DIGITS + 1, len(alphabet)) respectively.
    NOTE(review): relies on module-level MAXLEN / DIGITS and `np`.
    """
    char_to_index = dict((c, i) for i, c in enumerate(alphabet))
    x = np.zeros((len(questions), MAXLEN, len(alphabet)))
    y = np.zeros((len(questions), DIGITS + 1, len(alphabet)))
    for q_counter, pair in enumerate(questions):
        # One-hot row per character position of the question string.
        encoded_pair = np.zeros((MAXLEN, len(alphabet)))
        for i, c in enumerate(pair):
            encoded_pair[i, char_to_index[c]] = 1
        x[q_counter] = encoded_pair
    for a_counter, ans in enumerate(answers):
        encoded_ans = np.zeros((DIGITS + 1, len(alphabet)))
        for i, c in enumerate(ans):
            encoded_ans[i, char_to_index[c]] = 1
        y[a_counter] = encoded_ans
    return x, y
#export
def decode(seq, alphabet, calc_argmax=True):
    """Turn a sequence of class indices (or one-hot rows) into a string.

    When `calc_argmax` is True, `seq` holds per-step class scores and is
    collapsed with argmax along the last axis before lookup.
    """
    lookup = {index: char for index, char in enumerate(alphabet)}
    indices = np.argmax(seq, axis=-1) if calc_argmax else seq
    return ''.join(lookup[index] for index in indices)
kelkoo/parsefields.py | sachiel/kelkoo_jsontoxml | 0 | 12773867 | # -*- coding: utf-8 -*-
"""
Actually, i want to design a django model-like with fields and inline validators,
of course time is against me, so i wrote this; close enough (not really) xD
If 'f' is None means that validator generate the content of the field
"""
# Each descriptor maps a JSON key ('f', None when synthesised by the
# validator) to an XML element name ('t'), with validation metadata.
MAIN_FIELDS = [
    {
        'f': None,  # From JSON
        't': 'offer-id',  # To XML
        'type': 'uuid',
        'required': True
    },
    {
        'f': 'name',
        't': 'title',
        'type': 'text',
        'max_length': 80,
        'required': True
    },
    {
        'f': None,
        't': 'product-url',
        'type': 'url',
        'max_length': None,
        'required': True
    },
    {
        'f': 'price',
        't': 'price',
        'type': 'number',
        'required': True
    },
    {
        'f': 'stock',
        't': 'availability',
        'type': 'check_stock',
        'required': True
    },
    {
        'f': 'shipping',
        't': 'deliver-cost',
        'type': 'check_shipping',
        'required': True
    },
    {
        'f': 'brand',
        't': 'brand',
        'type': 'text',
        'max_length': None
    },
    {
        'f': 'description',
        't': 'description',
        'type': 'text',
        'max_length': 300
    },
    {
        'f': 'images',
        't': 'image-url',
        'type': 'image_array'
    },
    {
        'f': 'id_category',
        't': 'merchant-category',
        'type': 'category_array'
    },
    {
        'f': 'sku',
        't': 'sku',
        'type': 'text',
        # Fixed: this entry used the key 'max-length' (hyphen) while every
        # other descriptor uses 'max_length', so length checks keyed on
        # 'max_length' would never see it.
        'max_length': None
    },
    {
        'f': None,
        't': 'currency',
        'type': 'currency'
    }
]
# TODO: create custom validator for custom product
"""
FASHION:
fashion-type
fashion-gender
fashion-size
color
"""
| 2.828125 | 3 |
index-server/index-server/indexed_chunks.py | doc22940/DarkDarkGo | 1 | 12773868 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
query_processing.py - Matches the query against the saved indexed chunks and returns a list of dictionaries with docID
author: <NAME>
email: <EMAIL>
date: 12/1/2017
"""
from bs4 import BeautifulSoup
sample_indexed_chunks_dict = {"'s": {'doc_id': {'0': [6, 39]}, 'word_count': 2},
'.onion': {'doc_id': {'4': [4, 27]}, 'word_count': 2},
'//dirnxxdraygbifgc.onion/': {'doc_id': {'0': [25]}, 'word_count': 1},
'10years.debconf.org': {'doc_id': {'4': [27]}, 'word_count': 1},
'ammo': {'doc_id': {'4': [3, 9, 19, 25]}, 'word_count': 4},
'android': {'doc_id': {'0': [24]}, 'word_count': 1},
'back': {'doc_id': {'0': [10]}, 'word_count': 1},
'backends': {'doc_id': {'4': [23]}, 'word_count': 1},
'bank': {'doc_id': {'3': [9]}, 'word_count': 1},
'become': {'doc_id': {'2': [8, 34]}, 'word_count': 2},
'best': {'doc_id': {'3': [7]}, 'word_count': 1},
'biggest': {'doc_id': {'4': [2, 25]}, 'word_count': 2},
'bitcoi': {'doc_id': {'2': [22]}, 'word_count': 1},
'bitcoin': {'doc_id': {'0': [1, 5, 8, 28, 31]}, 'word_count': 5},
'bitcoins': {'doc_id': {'0': [13]}, 'word_count': 1},
'bitpharma': {'doc_id': {'4': [0, 23]}, 'word_count': 2},
'buy': {'doc_id': {'2': [3, 29]}, 'word_count': 2},
'cannabis': {'doc_id': {'2': [18]}, 'word_count': 1},
'check': {'doc_id': {'0': [11]}, 'word_count': 1},
'ci': {'doc_id': {'2': [37]}, 'word_count': 1},
'citizen': {'doc_id': {'2': [11]}, 'word_count': 1},
'co': {'doc_id': {'3': [18]}, 'word_count': 1},
'cocai': {'doc_id': {'0': [47]}, 'word_count': 1},
'cocaine': {'doc_id': {'0': [14], '4': [8]}, 'word_count': 2},
'counterfeit': {'doc_id': {'3': [8]}, 'word_count': 1},
'counterfeits': {'doc_id': {'3': [5]}, 'word_count': 1},
'darkweb': {'doc_id': {'0': [5, 38]}, 'word_count': 2},
'debian': {'doc_id': {'4': [13]}, 'word_count': 1},
'dedope': {'doc_id': {'2': [0, 24]}, 'word_count': 2},
'deep': {'doc_id': {'0': [15]}, 'word_count': 1},
'device': {'doc_id': {'0': [20]}, 'word_count': 1},
'drug': {'doc_id': {'0': [1, 9, 27, 34, 42], '4': [5, 28]}, 'word_count': 7},
'easycoin': {'doc_id': {'0': [0, 30]}, 'word_count': 2},
'etc': {'doc_id': {'0': [25]}, 'word_count': 1},
'euro': {'doc_id': {'3': [4, 17]}, 'word_count': 2},
'europe': {'doc_id': {'3': [12]}, 'word_count': 1},
'european': {'doc_id': {'4': [3, 26]}, 'word_count': 2},
'fake': {'doc_id': {'2': [6, 19]}, 'word_count': 2},
'free': {'doc_id': {'0': [4]}, 'word_count': 1},
'für': {'doc_id': {'2': [11, 21]}, 'word_count': 2},
'german': {'doc_id': {'2': [2]}, 'word_count': 1},
'get': {'doc_id': {'2': [4, 17]}, 'word_count': 2},
'guns': {'doc_id': {'4': [1, 7, 17, 23]}, 'word_count': 4},
'heroin': {'doc_id': {'0': [22]}, 'word_count': 1},
'hidden': {'doc_id': {'0': [18]}, 'word_count': 1},
'high': {'doc_id': {'3': [2, 15]}, 'word_count': 2},
'hqer': {'doc_id': {'3': [0, 13]}, 'word_count': 2},
'http': {'doc_id': {'0': [23]}, 'word_count': 1},
'identity': {'doc_id': {'2': [1, 11, 14, 24]}, 'word_count': 4},
'iphone': {'doc_id': {'0': [22]}, 'word_count': 1},
'kaufen': {'doc_id': {'2': [8, 16, 20]}, 'word_count': 3},
'laundry': {'doc_id': {'0': [9]}, 'word_count': 1},
'list': {'doc_id': {'4': [6]}, 'word_count': 1},
'location': {'doc_id': {'0': [16]}, 'word_count': 1},
'maintainance': {'doc_id': {'0': [5]}, 'word_count': 1},
'manage': {'doc_id': {'0': [11]}, 'word_count': 1},
'marijuana': {'doc_id': {'2': [14]}, 'word_count': 1},
'mdma': {'doc_id': {'0': [20]}, 'word_count': 1},
'mixer': {'doc_id': {'0': [6]}, 'word_count': 1},
'n/a': {'doc_id': {'0': [0]}, 'word_count': 1},
'new': {'doc_id': {'2': [10, 23]}, 'word_count': 2},
'notes': {'doc_id': {'3': [10]}, 'word_count': 1},
'ns': {'doc_id': {'2': [23]}, 'word_count': 1},
'onion': {'doc_id': {'2': [0, 13]}, 'word_count': 2},
'onion.debian.org': {'doc_id': {'4': [0, 1, 2]}, 'word_count': 3},
'onionbalance': {'doc_id': {'4': [25]}, 'word_count': 1},
'oniondir': {'doc_id': {'0': [12]}, 'word_count': 1},
'online': {'doc_id': {'0': [27]}, 'word_count': 1},
'passport': {'doc_id': {'2': [7, 20]}, 'word_count': 2},
'passports': {'doc_id': {'2': [1, 6, 15, 19, 24, 27, 32]}, 'word_count': 7},
'pay': {'doc_id': {'0': [30]}, 'word_count': 1},
'peoples': {'doc_id': {'0': [0, 26, 33]}, 'word_count': 3},
'prescriptions': {'doc_id': {'4': [16]}, 'word_count': 1},
'privacy': {'doc_id': {'4': [3]}, 'word_count': 1},
'project': {'doc_id': {'4': [14]}, 'word_count': 1},
'psychedelics': {'doc_id': {'4': [12]}, 'word_count': 1},
'quality': {'doc_id': {'3': [3, 16]}, 'word_count': 2},
'real': {'doc_id': {'2': [4, 23, 30]}, 'word_count': 3},
'run': {'doc_id': {'4': [10]}, 'word_count': 1},
'search': {'doc_id': {'1': [3, 8]}, 'word_count': 2},
'served': {'doc_id': {'4': [20]}, 'word_count': 1},
'service': {'doc_id': {'0': [19]}, 'word_count': 1},
'services': {'doc_id': {'2': [2, 15]}, 'word_count': 2},
'several': {'doc_id': {'4': [22]}, 'word_count': 1},
'shop': {'doc_id': {'2': [4]}, 'word_count': 1},
'site': {'doc_id': {'0': [1]}, 'word_count': 1},
'speed': {'doc_id': {'0': [16]}, 'word_count': 1},
'sto': {'doc_id': {'4': [29]}, 'word_count': 1},
'store': {'doc_id': {'0': [2, 28, 35], '4': [6]}, 'word_count': 4},
'supplier': {'doc_id': {'0': [10, 43]}, 'word_count': 2},
'today': {'doc_id': {'2': [12, 25]}, 'word_count': 2},
'tor': {'doc_id': {'4': [0]}, 'word_count': 1},
'torch': {'doc_id': {'1': [0, 5]}, 'word_count': 2},
'uk': {'doc_id': {'2': [0, 5, 10, 26, 31, 36]}, 'word_count': 6},
'untill': {'doc_id': {'0': [7]}, 'word_count': 1},
'using': {'doc_id': {'4': [24]}, 'word_count': 1},
'viagra': {'doc_id': {'4': [20]}, 'word_count': 1},
'wall': {'doc_id': {'0': [32]}, 'word_count': 1},
'wallet': {'doc_id': {'0': [2, 29]}, 'word_count': 2},
'web': {'doc_id': {'0': [16]}, 'word_count': 1},
'websites': {'doc_id': {'0': [21]}, 'word_count': 1},
'weed': {'doc_id': {'2': [3, 6, 10]}, 'word_count': 3},
'xtc': {'doc_id': {'0': [18]}, 'word_count': 1}}
sample_content_chunk = [{'doc_id': 0,
'html': 'Site is down for maintainance.<br>\r\n<br>\r\nUntill we are back check OnionDir for other Deep Web Tor hidden service .onion websites:<br>\r\n<br>\r\n<a href="http://dirnxxdraygbifgc.onion/">http://dirnxxdraygbifgc.onion/</a>',
'link': 'http://6w6vcynl6dumn67c.onion',
'title': 'N/A'},
{'doc_id': 1,
'html': '',
'link': 'http://p3igkncehackjtib.onion',
'title': ''},
{'doc_id': 2,
'html': '',
'link': 'http://54flq67kqr5wvjqf.onion',
'title': ''},
{'doc_id': 3,
'html': '',
'link': 'http://dppmfxaacucguzpc.onion',
'title': ''},
{'doc_id': 4,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="UK Guns and ammo store, buy guns and ammo on the deep web with bitcoin at our Tor store." />\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>UK Guns and Ammo Store - Buy guns and ammo in the UK for Bitcoi',
'link': 'http://tuu66yxvrnn3of7l.onion',
'title': 'UK Guns and Ammo Store - Buy guns and ammo in the UK for Bitcoin.'},
{'doc_id': 0,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="Peoples drug store, the number one deep web drug vendor. Buy drugs with Bitcoin"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>Peoples Drug Store - The Darkweb\'s Best Online Drug Supplier! - Buy cocai',
'link': 'http://newpdsuslmzqazvr.onion',
'title': "Peoples Drug Store - The Darkweb's Best Online Drug Supplier! - Buy cocaine, speed, xtc, mdma, heroin and more at peoples drug store, pay with Bitcoin"},
{'doc_id': 1,
'html': '<html xmlns="http://www.w3.org/1999/xhtml"> \n<head> \n<title>TORCH: Tor Search!</title> \n<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/> \n<meta name="description" content=""/> \n<meta name="keywords" content=""/> \n<link rel="shortcut icon" href="favicon.png" type="image/png" />\n \n<style type="text/css"> \nbody{\n\ttext-align: center;\n\tfont-family:Verdana, Arial, Helvetica, sans-serif;\n\tfont-size:.7em;\n\tmargin: 10px;\n\tcolor: #000;\n\tbackground: #fff;\n\tmin-width: 520px;\n}\na{\n\tcolor: #009;\n\tt',
'link': 'http://xmh57jrzrnw6insl.onion',
'title': 'TORCH: Tor Search!'},
{'doc_id': 2,
'html': 'ns<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="DeDope - Deutscher Weed und Hash Shop, weed online kaufen, weed für bitcoins, marijuana online kaufen, cannabis online kaufen für Bitcoins" />\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>DeDope - ',
'link': 'http://kbvbh4kdddiha2ht.onion',
'title': 'DeDope - German Weed Shop - weed online kaufen, weed für bitcoins, marijuana online kaufen, cannabis online kaufen für Bitcoi'},
{'doc_id': 3,
'html': '',
'link': 'http://fogcore5n3ov3tui.onion',
'title': ''},
{'doc_id': 4,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="Bitpharma - Cocaine for Bitcoins, Psychedelics for Bitcoins, Prescriptions for Bitcoins, Viagra for Bitcoins"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>BitPharma - biggest european .onion drug sto',
'link': 'http://s5q54hfww56ov2xc.onion',
'title': 'BitPharma - biggest european .onion drug store - Cocaine for Bitcoins, Psychedelics for Bitcoins, Prescriptions for Bitcoins, Viagra for Bitcoins'},
{'doc_id': 0,
'html': '',
'link': 'https://www.facebookcorewwwi.onion',
'title': ''},
{'doc_id': 1,
'html': '',
'link': 'http://xdagknwjc7aaytzh.onion',
'title': ''},
{'doc_id': 2,
'html': '',
'link': 'http://wvk32thojln4gpp4.onion',
'title': ''},
{'doc_id': 3,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="High quality counterfeit euro banknotes for bitcoin - buy fake euros with bitcoin - best quality counterfeits on the deep web"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>HQER - High Quality Euro Co',
'link': 'http://y3fpieiezy2sin4a.onion',
'title': 'HQER - High Quality Euro Counterfeits - best counterfeit bank notes in europe'},
{'doc_id': 4,
'html': '<!DOCTYPE html>\n <html>\n <head>\n <meta charset="utf-8">\n <meta http-equiv="X-UA-Compatible" content="IE=edge">\n <meta name="viewport" content="width\\=device-width, initial-scale=1">\n <meta name="author" content="The Tor Project, Inc.">\n <meta name="description" content="The Tor Project\'s free software protects your privacy online. Site blocked? Email [mailto:<EMAIL>] for help downloading Tor Browser.">\n <meta name="keywords" content="tor, tor project, tor browser, avoid censorsh',
'link': 'http://expyuzz4wqqyqhjn.onion',
'title': 'Tor Project | Privacy Online'},
{'doc_id': 0,
'html': '',
'link': 'http://storegsq3o5mfxiz.onion',
'title': ''},
{'doc_id': 1,
'html': '',
'link': 'http://jvrnuue4bvbftiby.onion',
'title': ''},
{'doc_id': 2,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="UKPassports - Buy passport from the United Kingdom UK, real passports from the UK, no fake passports"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>UK Passports - Buy real UK passports, become a UK ci',
'link': 'http://vfqnd6mieccqyiit.onion',
'title': 'UK Passports - Buy real UK passports, become a UK citizen now. Our passports are no fake passports, they are real passports.'},
{'doc_id': 3,
'html': '',
'link': 'http://5plvrsgydwy2sgce.onion',
'title': ''},
{'doc_id': 4,
'html': '<!DOCTYPE html>\n<HTML lang="en">\n<HEAD>\n <TITLE>onion.debian.org</TITLE>\n <meta charset="UTF-8">\n</HEAD>\n<BODY>\n\n<H1>onion.debian.org</H1>\n\nThis is a list of <a href="https://www.torproject.org/docs/hidden-services">onion services</a>\nrun by the <a href="https://www.debian.org/">Debian project</a>. Most of them are served\nfrom several backends using\n<a href="https://github.com/DonnchaC/onionbalance">OnionBalance</a>.\n\n<ul>\n\n<li id="10years.debconf.org"><strong>10years.debconf.org</strong>: <a href="',
'link': 'http://5nca3wxl33tzlzj5.onion',
'title': 'onion.debian.org'},
{'doc_id': 0,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="EasyCoin.net is a Bitcoin Wallet and Bitcoin Laundry service, we offer bitcoin laundry without any fees, use on Iphone, Android." />\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>\r\nEasyCoin Bitcoin Wall',
'link': 'http://easycoinsayj7p5l.onion',
'title': '\r\nEasyCoin Bitcoin Wallet and free Bitcoin Mixer / Bitcoin Laundry, manage your Bitcoins from any location, from any device: Iphone, Android etc - Online Bitcoin Wallet'},
{'doc_id': 1,
'html': '',
'link': 'http://5mvm7cg6bgklfjtp.onion',
'title': ''},
{'doc_id': 2,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="Onion Identity Store - buy european fake ids, fake passports with Bitcoin"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>Onion Identity Services - Get your fake passport and a new identity today</titl',
'link': 'http://abbujjh5vqtq77wg.onion',
'title': 'Onion Identity Services - Get your fake passport and a new identity today'},
{'doc_id': 3,
'html': '',
'link': 'http://lw4ipk5choakk5ze.onion',
'title': ''},
{'doc_id': 4,
'html': '',
'link': 'http://e2qizoerj4d6ldif.onion',
'title': ''}]
# Collect the doc_id of every chunk that has one. Use a membership test
# rather than a truthiness test: the previous `if k.get('doc_id')`
# silently dropped every entry with doc_id == 0 because 0 is falsy.
key_docs = [k['doc_id'] for k in sample_content_chunk if 'doc_id' in k]
N = len(key_docs)
print(N)
def snippet_builder(doc_ID):
    """Build a search-result snippet dict (title/href/desc) for a document.

    Returns None implicitly when doc_ID is not in key_docs.
    NOTE(review): search_list_of_dict keys on 'doc_ID' while the sample
    data uses 'doc_id' -- verify the key casing matches before relying
    on this.
    """
    if doc_ID in key_docs:
        a = search_list_of_dict(doc_ID, sample_content_chunk)
        snippet = {}
        soup = BeautifulSoup(a['html'])
        s = soup.getText()
        # First 299 visible characters; starts at index 1, presumably to
        # skip a leading newline -- TODO confirm the offset is intended.
        desc = s[1:300]
        snippet['title'] = a['title']
        snippet['href'] = a['link']
        snippet['desc'] = desc + '...'
        return snippet
def search_list_of_dict(doc_ID, content_chunk_list):
    """Return the first dict in content_chunk_list whose doc id matches.

    Returns None when no entry matches.
    """
    for item in content_chunk_list:
        # The chunk dicts use the lowercase key 'doc_id'; the previous
        # lookup of item['doc_ID'] raised KeyError on every entry.
        # .get also tolerates entries without the key.
        if item.get('doc_id') == doc_ID:
            return item
| 2.796875 | 3 |
apps/usermgmt/serializers.py | ecognize-hub/ecognize | 1 | 12773869 | from rest_captcha.serializers import RestCaptchaSerializer
from rest_framework import serializers
from .models import OrgAdditionRequest
from apps.profiles.models import UserProfile
class OrgAdditionRequestSerializer(serializers.ModelSerializer):
    """DRF serializer for requests to add a new organisation."""
    class Meta:
        model = OrgAdditionRequest
        fields = ('org_name', 'org_website', 'parent_org_name', 'supplicant_email_address', 'supplicant_name')
class AnonymousUserCreationSerializer(RestCaptchaSerializer, serializers.ModelSerializer):
    """Creates a UserProfile from an anonymous request, guarded by a captcha."""
    # NOTE(review): max_length == min_length == 24 forces an exactly
    # 24-character password -- confirm this matches the client contract.
    password = serializers.CharField(max_length=24, min_length=24, allow_blank=False, allow_null=False, write_only=True)
    class Meta:
        model = UserProfile
        fields = ('country', 'captcha_value', 'captcha_key', 'password')
| 2.1875 | 2 |
Chapter 7/args-copy.py | JoeBugajski/python-examples | 0 | 12773870 | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Functions allow variable-length argument lists
def main():
    """Demo entry point: call kitten() with a few sample sounds."""
    sounds = ('meow', 'grrr', 'purr')
    kitten(*sounds)
# *args collects any number of positional arguments into a tuple.
def kitten(*args):
    """Print each given sound on its own line, or 'Meow.' when called bare."""
    if not args:
        print('Meow.')
        return
    for sound in args:
        print(sound)
if __name__ == '__main__': main()
# You can call the kitten function like this (unpacking a tuple with *):
# NOTE(review): these lines sit OUTSIDE the __main__ guard, so they also
# run when this module is imported.
x = ('hiss', 'howl', 'roar', 'screech')
kitten(*x) # note the star
cn_dpm/train.py | ryanlindeborg/CN-DPM | 1 | 12773871 | <filename>cn_dpm/train.py<gh_stars>1-10
import os
import pickle
from typing import Optional
import torch
from sequoia.settings.sl import ContinualSLSetting, SLEnvironment
from torch import Tensor
from .data import DataScheduler
from .models import NdpmModel
def _make_collage(samples, config, grid_h, grid_w):
s = samples.view(grid_h, grid_w, config["x_c"], config["x_h"], config["x_w"])
collage = (
s.permute(2, 0, 3, 1, 4)
.contiguous()
.view(config["x_c"], config["x_h"] * grid_h, config["x_w"] * grid_w)
)
return collage
def train_model_with_sequoia_env(
    config, model: NdpmModel, sequoia_env: SLEnvironment
):
    """Train *model* online on batches streamed from a Sequoia SL environment.

    Each environment step yields ``(observations, rewards)``; the batch is fed
    to ``model.learn``.  Labels must always be available (asserted below).
    """
    model.train()
    # NOTE: This is just here to show the types of the `observations` and `rewards` that
    # are yielded below.
    observations: ContinualSLSetting.Observations
    rewards: Optional[ContinualSLSetting.Rewards]
    for step, (observations, rewards) in enumerate(sequoia_env):
        x: Tensor = observations.x
        t: Optional[Tensor] = observations.task_labels
        y: Optional[Tensor] = rewards.y if rewards is not None else None
        # Steps are reported 1-based.
        step += 1
        # Progress line: short-term-memory fill level and expert count
        # (expert 0 is a placeholder, hence the "- 1").
        print(
            "\r[Step {:4}] STM: {:5}/{} | #Expert: {}".format(
                step,
                len(model.ndpm.stm_x),
                config["stm_capacity"],
                len(model.ndpm.experts) - 1,
            ),
            end="",
        )
        # TODO: (low-priority) If `rewards` is None, it means that we need to get the
        # "action" (y_pred) for the current observations and send it to the environment
        # in order to receive the "rewards". This will probably require fine-grain
        # modifications in the `learn` method of the `Ndpm` class, so we can leave this
        # for later.
        assert (
            y is not None
        ), "Assuming that we have access to both `x` and `y` at the same time for now."
        # learn the model
        model.learn(x, y, t, step)
def train_model(config, model: NdpmModel, scheduler: DataScheduler):
    """Train *model* on batches from *scheduler*, periodically evaluating it
    and (optionally) summarizing generated samples.

    Cadence comes from config's ``summary_step`` / ``eval_step``; sample-grid
    summaries are controlled by ``summarize_samples`` and ``sample_grid``.
    """
    model.train()
    for step, (x, y, t) in enumerate(scheduler):
        # Steps are reported 1-based.
        step += 1
        if isinstance(model, NdpmModel):
            print(
                "\r[Step {:4}] STM: {:5}/{} | #Expert: {}".format(
                    step,
                    len(model.ndpm.stm_x),
                    config["stm_capacity"],
                    len(model.ndpm.experts) - 1,
                ),
                end="",
            )
        else:
            print("\r[Step {:4}]".format(step), end="")
        summarize = step % config["summary_step"] == 0
        summarize_experts = summarize and isinstance(model, NdpmModel)
        summarize_samples = summarize and config["summarize_samples"]
        # learn the model
        model.learn(x, y, t, step)
        # Evaluate the model (an NDPM model is only evaluatable once it has at
        # least one real expert beyond the placeholder at index 0).
        evaluatable = not isinstance(model, NdpmModel) or len(model.ndpm.experts) > 1
        if evaluatable and step % config["eval_step"] == 0:
            scheduler.eval(model, None, step, "model")
        # Evaluate experts of the model's DPMoE
        # if summarize_experts:
        #     writer.add_scalar('num_experts', len(model.ndpm.experts) - 1, step)
        # Summarize samples
        if summarize_samples:
            is_ndpm = isinstance(model, NdpmModel)
            # Generative components: one per expert (skipping the placeholder),
            # or the single component of a non-NDPM model.
            comps = (
                [e.g for e in model.ndpm.experts[1:]] if is_ndpm else [model.component]
            )
            if len(comps) == 0:
                continue
            grid_h, grid_w = config["sample_grid"]
            total_samples = []
            # Sample from each expert
            for i, expert in enumerate(comps):
                with torch.no_grad():
                    samples = expert.sample(grid_h * grid_w)
                total_samples.append(samples)
                collage = _make_collage(samples, config, grid_h, grid_w)
                # writer.add_image('samples/{}'.format(i + 1), collage, step)
            if is_ndpm:
                # Build a mixed collage where each expert contributes samples
                # in proportion to its prior weight.
                counts = model.ndpm.prior.counts[1:]
                expert_w = counts / counts.sum()
                num_samples = (
                    torch.distributions.multinomial.Multinomial(
                        grid_h * grid_w, probs=expert_w
                    )
                    .sample()
                    .type(torch.int)
                )
                to_collage = []
                for i, samples in enumerate(total_samples):
                    to_collage.append(samples[: num_samples[i]])
                to_collage = torch.cat(to_collage, dim=0)
                collage = _make_collage(to_collage, config, grid_h, grid_w)
                # writer.add_image('samples/ndpm', collage, step)
| 2.1875 | 2 |
app/app.py | ibm-skills-network/next_instagram_pinterest | 0 | 12773872 | from posixpath import dirname
from flask import Flask, request, render_template,redirect, url_for,abort,send_from_directory
from werkzeug.utils import secure_filename
import os.path
import tempfile
import io
import os
import base64
from datetime import datetime
from pathlib import Path
import torchvision
from torchvision import transforms
import torch
from torch import no_grad
import cv2
import numpy as np
from PIL import Image
# The 91 COCO class names, indexed by the label ids that torchvision's
# detection models emit.  Index 0 is the background class; 'N/A' marks ids
# that are unused in the COCO dataset.
OBJECTS = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Class names shown in the UI dropdown: 'all' plus every real COCO class.
# The 'N/A' placeholder ids from OBJECTS are filtered out (a stray leftover
# 'N/A' entry after 'bottle' has been removed).
OBJECTS_html = ['all', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
    'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
# Maps object-class name -> list of cropped-image filenames saved for that
# class (filled in by find(), read by the gallery views).
FILE_OBJ={}
def get_predictions(pred, threshold=0.8, objects=None):
    """Convert raw detector output into (class_name, score, box) tuples.

    :param pred: torchvision detection output: a list whose first element is a
        dict with 'labels', 'scores' and 'boxes' tensors.
    :param threshold: minimum confidence a detection must have to be kept.
    :param objects: optional class name (str) or collection of class names to
        keep; all classes above the threshold are kept when falsy.
    :return: list of (name, probability, [(x0, y0), (x1, y1)]) tuples.
    """
    predicted_classes = [
        (OBJECTS[i], p, [(box[0], box[1]), (box[2], box[3])])
        for i, p, box in zip(
            list(pred[0]['labels'].numpy()),
            pred[0]['scores'].detach().numpy(),
            list(pred[0]['boxes'].detach().numpy()),
        )
    ]
    predicted_classes = [stuff for stuff in predicted_classes if stuff[1] > threshold]
    if objects and predicted_classes:
        # BUG FIX: callers pass a single class name as a *string*, and
        # `name in objects` then does substring matching (e.g. 'dog' matched
        # 'hot dog').  Normalize a lone string to a one-element list so that
        # membership means exact class-name equality.
        if isinstance(objects, str):
            objects = [objects]
        predicted_classes = [
            (name, p, box) for name, p, box in predicted_classes if name in objects
        ]
    return predicted_classes
def draw_box(predicted_classes, image, rect_th=30, text_size=3, text_th=3):
    """Draw a labelled bounding box for each prediction onto *image*.

    :param predicted_classes: list of (name, probability, [(x0, y0), (x1, y1)])
        tuples as produced by get_predictions().
    :param image: image tensor (C, H, W) with float values in [0, 1].
    :param rect_th: rectangle line thickness (previously ignored: hardcoded 4).
    :param text_size: font scale for the label text.
    :param text_th: text line thickness (previously ignored: hardcoded 3).
    :return: BGR uint8 numpy image (H, W, C) with boxes and labels drawn.
    """
    # Tensor (C, H, W) in [0, 1] -> uint8 BGR (H, W, C) for OpenCV drawing.
    img = (np.clip(cv2.cvtColor(np.clip(image.numpy().transpose((1, 2, 0)), 0, 1),
                                cv2.COLOR_RGB2BGR), 0, 1) * 255).astype(np.uint8).copy()
    for name, probability, box in predicted_classes:
        label = str(name) + " likelihood"
        top_left = (int(box[0][0]), int(box[0][1]))
        bottom_right = (int(box[1][0]), int(box[1][1]))
        # Honor the caller-supplied thicknesses.  The old code hardcoded 4/3
        # and also drew the bare label only to immediately overdraw it with
        # the label+probability string at the same position.
        cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), rect_th)
        cv2.putText(img, label + ": " + str(round(probability, 2)), top_left,
                    cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), thickness=text_th)
    return img
# Faster R-CNN: predicts bounding boxes and class scores for objects in an
# image; weights are pre-trained on COCO (downloaded on first use).
model_ = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# Inference-only: switch layers like batch-norm/dropout to eval behavior.
model_.eval()
# Freeze all weights so autograd never allocates gradient buffers (saves RAM).
for name, param in model_.named_parameters():
    param.requires_grad = False
# Thin wrapper around the frozen Faster R-CNN that avoids gradient tracking.
def model(x):
    """Run Faster R-CNN inference on *x* inside a no-grad context (saves RAM)."""
    with torch.no_grad():
        return model_(x)
# Converts a PIL image (H, W, uint8) into a float tensor (C, H, W) in [0, 1].
transform = transforms.Compose([transforms.ToTensor()])
app=Flask(__name__)
# NOTE(review): `dostuff` is never read anywhere in this file; looks like dead code.
dostuff=None
# Upload file extensions that are accepted.
app.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.gif','.jpeg']
# Folder for raw uploads and folder for processed/cropped object images.
app.config['UPLOAD_PATH'] = 'uploads'
app.config['OBJECTS_PATH'] = 'objects'
# Confidence threshold (0..1), set per request by find().
app.config['CONFIDENT_RANG'] = None
# Path and name of the most recently uploaded image.
app.config['FILE_PATH']=None
app.config['FILE_NAME']=[]
# Flask instance directory (currently unused beyond this assignment).
dir_name = Path(app.instance_path)
@app.route('/')
def home():
    """Render the main page: last uploaded file, class dropdown and processed images."""
    # Images that have already been processed (boxes drawn / objects cropped).
    object_files = os.listdir(app.config['OBJECTS_PATH'])
    object_files = [file for file in object_files
                    if os.path.splitext(file)[1] in app.config['UPLOAD_EXTENSIONS']]
    # The template receives the most recently uploaded file name
    # (app.config['FILE_NAME']); the old directory listing of UPLOAD_PATH was
    # computed and then never used, so it has been removed.
    return render_template('index.html', files=app.config['FILE_NAME'],
                           objects_list=OBJECTS_html, object_files=object_files)
@app.route('/', methods=['POST'])
def upload_file():
    """Accept an image upload, store it under UPLOAD_PATH, and redirect home.

    Rejects (HTTP 400) any extension not in UPLOAD_EXTENSIONS.
    """
    # Uploaded file object from the form.
    uploaded_file = request.files['file']
    # Sanitized file name (prevents path traversal).
    filename= secure_filename(uploaded_file.filename)
    # File extension (including the dot).
    file_ext = os.path.splitext(filename)[1]
    # An empty name means no file was selected; just redirect home.
    if filename != '':
        # Reject anything that is not an allowed image extension.
        if file_ext not in app.config['UPLOAD_EXTENSIONS']:
            abort(400)
        # Destination path: uploads/<filename>
        file_path=Path(app.config['UPLOAD_PATH']).joinpath(filename)
        # Save the file name to be used in other parts of the app.
        app.config['FILE_NAME']=[filename]
        # Save the file path to be used in the app.
        app.config['FILE_PATH']=file_path
        uploaded_file.save(file_path)
    return redirect(url_for('home'))
@app.route('/find_object', methods=['POST'])
def find():
    """Run object detection on the last uploaded image.

    Saves a copy with boxes drawn, crops each detected object into
    OBJECTS_PATH (recorded in FILE_OBJ), then renders the result page.
    """
    # NOTE(review): this redirect's return value is discarded -- the statement
    # has no effect.
    redirect(url_for('home'))
    # Object class to find (a class name from OBJECTS_html, or 'all').
    object=request.form.get("objects")
    confident_range = request.form.get("confident_range")
    # Convert the 0-100 slider value to a 0..1 threshold.
    app.config['CONFIDENT_RANG'] = int(confident_range) / int(100)
    print("++++++++", confident_range)
    # Guard flag so the image is only processed once per request.
    object_=object
    if object_:
        half = 0.5
        print(app.config['FILE_PATH'])
        image = Image.open(app.config['FILE_PATH'])
        # NOTE(review): `arr` is never used.
        arr = []
        # NOTE(review): PIL's resize() returns a NEW image; this result is
        # discarded, so the half-size downscale never takes effect.
        image.resize( [int(half * s) for s in image.size] )
        img = transform(image)
        pred = model(torch.unsqueeze(img,0))
        if object=='all':
            pred_thresh=get_predictions(pred,threshold=app.config['CONFIDENT_RANG'])
        else:
            pred_thresh=get_predictions(pred,threshold=app.config['CONFIDENT_RANG'],objects=object)
        object_=None
        # Draw boxes on the image.
        image_numpy=draw_box(pred_thresh,img,rect_th= 1,text_size= 1,text_th=1)
        # Save the boxed image under a new name.
        filename, file_extension = os.path.splitext(app.config['FILE_NAME'][0])
        print(filename, file_extension)
        app.config['FILE_NAME'] = []
        # Name of the file with labels drawn on it.
        new_file_name=filename+"_object"+file_extension
        new_file_path=Path(app.config['OBJECTS_PATH']).joinpath(new_file_name)
        # Save with OpenCV (the drawn boxes look better that way).
        cv2.imwrite(str(new_file_path), image_numpy)
        # Crop each detected object and save it as its own image.
        for obj in pred_thresh:
            # Top-left and bottom-right corners of the box.
            x_0,y_0=obj[2][0]
            x_1,y_1=obj[2][1]
            # Unique file name: class name plus a timestamp.
            now = datetime.now()
            dt_string = now.strftime("_%d_%m_%Y_%H_%M_%S_%f").strip()
            obj_file_name=obj[0]+dt_string+file_extension
            object_file_ext=Path(app.config['OBJECTS_PATH']).joinpath(obj_file_name)
            # Record the crop under its class name in the gallery index.
            if not(obj[0] in set(FILE_OBJ.keys())):
                FILE_OBJ[obj[0]]=[obj_file_name ]
            else:
                FILE_OBJ[obj[0]].append(obj_file_name)
            new_image=image.copy().crop((x_0,y_0,x_1,y_1))
            new_image.save(object_file_ext)
    if (request.form.get("Find_New")):
        os.remove(app.config['FILE_PATH'])
        return redirect(url_for('home'))
    # NOTE(review): if `object_` was falsy above, `new_file_name` is unbound
    # here and this line raises NameError.
    return render_template("find_object.html" ,objects=object,file=new_file_name, title=object, range1=confident_range)
@app.route('/your_object')
def your_gallery():
    """Render the gallery of cropped object images, grouped by class name."""
    # (A leftover debug print of FILE_OBJ was removed here.)
    return render_template("your_object.html", obj_files=FILE_OBJ)
# Serve raw uploaded files from the uploads/ folder (used by index.html).
@app.route('/uploads/<filename>')
def upload(filename):
    """Serve an uploaded image by filename."""
    return send_from_directory(app.config['UPLOAD_PATH'], filename)
# Serve processed/cropped images from the objects/ folder (used by templates).
@app.route('/objects/<filename>')
def upload_objects(filename):
    """Serve a processed (boxed or cropped) image by filename."""
    return send_from_directory(app.config['OBJECTS_PATH'], filename)
@app.route('/your_object/<galleryName>')
def view_obejct(galleryName):
    """Render all saved crops for one object class (*galleryName*).

    NOTE(review): the misspelled name ("obejct") is kept because the route and
    any url_for() callers reference it.
    """
    return render_template("view_obejct.html" ,obj_files=FILE_OBJ[galleryName], title=galleryName)
@app.route('/your_galary')
def view_gallery():
    """Render the gallery of all raw uploaded images."""
    files = os.listdir(app.config['UPLOAD_PATH'])
    # (A leftover debug print was removed here.)
    return render_template("your_galary.html", obj_files=files)
# Script entry point: listen on all interfaces, port 8080.
if __name__=="__main__":
    app.run(host="0.0.0.0", port=8080)
| 1.914063 | 2 |
play02.py | hnishi/hnishi_test_multiprocessing | 0 | 12773873 | # -*- coding: utf-8 -*-
from multiprocessing import Pool
import os
import time
# Wall-clock start time, captured at import; inherited by worker processes.
start = time.time()
def f(x):
    """Square *x* after a one-second pause, logging elapsed time and worker pid."""
    time.sleep(1)
    result = x * x
    elapsed = int(time.time() - start)
    print('{}s passed...\t{}\t(pid:{})'.format(elapsed, result, os.getpid()))
    return result
timeout = time.time() + 10  # sec: run batches for roughly ten seconds
# NOTE(review): a brand-new Pool is created on every loop iteration and
# close() is called without join(); creating one Pool outside the loop
# would be cheaper.
while True:
    with Pool(processes=2) as p:
        if time.time() > timeout:
            p.close()
            break
        print(p.map(f, [1, 2]))
        p.close()
# You could also write it without the context manager, like this:
# Pool(3).map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
| 3.5625 | 4 |
mayan/apps/rest_api/urls.py | YingWang-Clare/mayan-edms-with-elasticsearch | 2 | 12773874 | from __future__ import unicode_literals
from django.conf.urls import url
from .api_views import APIResourceTypeListView
from .views import APIBase, BrowseableObtainAuthToken
# Left empty here on purpose: the API routes below are meant to be included
# by the project-level URL configuration.
urlpatterns = []
api_urls = [
    # API root / entry point.
    url(r'^$', APIBase.as_view(), name='api_root'),
    # List of available API resource types.
    url(
        r'^resources/$', APIResourceTypeListView.as_view(),
        name='resource-list'
    ),
    # Browsable endpoint for obtaining an auth token.
    url(
        r'^auth/token/obtain/$', BrowseableObtainAuthToken.as_view(),
        name='auth_token_obtain'
    ),
]
| 1.578125 | 2 |
gradefast/grader/grader.py | jhartz/gradefast | 5 | 12773875 | """
GradeFast Grader - Runs commands on submissions and controls the grading process.
Licensed under the MIT License. For more, see the LICENSE file.
Author: <NAME> <<EMAIL>>
"""
import difflib
import os
import random
import re
from collections import defaultdict
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg
from pyprovide import Injector, inject
from gradefast import events
from gradefast.grader.banners import BANNERS
from gradefast.hosts import BackgroundCommand, CommandRunError, CommandStartError, Host
from gradefast.loggingwrapper import get_logger
from gradefast.models import Command, CommandItem, Path, Settings
from gradefast.submissions import Submission, SubmissionManager
# Module-level logger for the grader subsystem.
_logger = get_logger("grader")
class Grader:
    """
    Control the grading process and run commands on submissions.
    """
    @inject(injector=Injector.CURRENT_INJECTOR)
    def __init__(self, injector: Injector, channel: Channel, host: Host,
                 event_manager: events.EventManager, settings: Settings,
                 submission_manager: SubmissionManager) -> None:
        """Store the injected collaborators; no other work is done here."""
        # The injector is kept so run_commands() can build CommandRunner
        # instances from the same dependency graph.
        self.injector = injector
        self.channel = channel
        self.host = host
        self.event_manager = event_manager
        self.settings = settings
        self.submission_manager = submission_manager
    def prompt_for_submissions(self) -> bool:
        """
        Nag the user into choosing at least one folder of submissions. The user is prompted
        (repeatedly) to choose the folder.
        :return: True if there's some submissions to go on; False if they got nuthin'.
        """
        if self.submission_manager.has_submissions():
            # Go easy on them
            while self.channel.prompt("Want to add another folder of submissions?",
                                      ["y", "N"], "n") == "y":
                self.add_submissions(None)
        else:
            # They don't have anything yet; they're in for quite a treat
            self.channel.input("Press Enter to choose a folder containing the submissions...")
            while True:
                if not self.add_submissions(None):
                    # They've actually hit "cancel"; I guess we can give up
                    break
                if not self.submission_manager.has_submissions():
                    self.channel.error("No submissions found")
                    continue
                self.channel.print()
                if self.channel.prompt("Add another folder?", ["y", "N"], "n") == "n":
                    break
        return self.submission_manager.has_submissions()
    def add_submissions(self, base_folder: Path = None) -> bool:
        """
        Add a folder of submissions to our list of submissions. The user is prompted to choose the
        folder.
        The regular expression is used to limit which folders are picked up. Also, the first
        matched group in the regex is used as the name of the submission.
        For more info on some of the parameters, see the documentation on the GradeFast wiki:
        https://github.com/jhartz/gradefast/wiki
        :param base_folder: The path to a base folder to use when prompting the user to choose a
            folder. If it does not exist, then it falls back to the Host::choose_dir default.
        :return: True if the user actually tried to pick something (even if we couldn't find any
            submissions in the folder they picked); False if they cancelled.
        """
        check_file_extensions = self.settings.check_file_extensions
        if check_file_extensions is None:
            check_file_extensions = []
        # Step 1: Prompt the user for a folder
        path = self.host.choose_folder(base_folder)
        if not path:
            self.channel.error("No folder provided")
            return False
        # Step 2: Find matching submissions
        regex = None
        if self.settings.submission_regex:
            regex = re.compile(self.settings.submission_regex)
        for name, type, is_link in sorted(self.host.list_folder(path)):
            submission_match = False  # type: Any
            folder_path = None  # type: Path
            valid_submission = False
            if type == "folder":
                # A folder is a submission if it matches the regex (or no
                # regex was configured).
                if regex:
                    submission_match = regex.fullmatch(name)
                else:
                    submission_match = True
                if submission_match:
                    self.channel.print("Found submission folder: {}", name)
                    folder_path = path.append(name)
                    valid_submission = True
            elif type == "file" and name.find(".") > 0:
                # A loose file (zip or single source file) becomes a
                # submission folder of the same base name -- but only if no
                # such folder already exists.
                name, ext = name.rsplit(".", maxsplit=1)
                if regex:
                    submission_match = regex.fullmatch(name)
                else:
                    submission_match = True
                if submission_match and not self.host.exists(path.append(name)):
                    folder_path = path.append(name)
                    file_path = path.append(name + "." + ext)
                    if self.settings.check_zipfiles and ext == "zip":
                        self.channel.print("Found submission zipfile: {}.zip", name)
                        self.host.unzip(file_path, folder_path)
                        self.channel.print("  extracted to {}/", name)
                        valid_submission = True
                    elif ext in check_file_extensions:
                        self.channel.print("Found submission file: {}.{}", name, ext)
                        self.host.move_to_folder(file_path, folder_path)
                        self.channel.print("  moved into {}/", name)
                        valid_submission = True
            if valid_submission:
                # The submission's display name is the first non-empty regex
                # group, falling back to the folder/file base name.
                submission_name = name
                if regex:
                    for group in submission_match.groups():
                        if group:
                            submission_name = group
                            break
                # Add the submission, but don't send the event yet
                # (we'll send one big one at the end)
                self.submission_manager.add_submission(submission_name, name, folder_path,
                                                       send_event=False)
        # Step 3: Tell the world
        if self.submission_manager.has_submissions():
            self.event_manager.dispatch_event(events.NewSubmissionsEvent())
        return True
    def run_commands(self) -> None:
        """
        Run some commands on each of the previously added submissions.
        For details on what should be in the list of commands, see the GradeFast wiki:
        https://github.com/jhartz/gradefast/wiki/Command-Structure
        """
        self.channel.print()
        self.channel.print()
        self.channel.error_bordered(random.choice(BANNERS))
        self.channel.print()
        submission_id = self.submission_manager.get_first_submission_id()
        # Background commands started by any submission; waited on at the end.
        background_commands = []  # type: List[BackgroundCommand]
        while True:
            if not self.submission_manager.has_submissions():
                # Ideally, this shouldn't ever happen, but...
                self.channel.error_bordered("No submissions!")
                break
            if submission_id is None:
                # Special case: we're at the end
                self.channel.print()
                self.channel.status_bordered("End of submissions!")
                loop = self.channel.prompt(
                    "Loop back around to the front?", ["y", "n"],
                    empty_choice_msg="C'mon, you're almost done; you can make a simple choice "
                                     "between `yes' and `no'")
                if loop == "y":
                    submission_id = self.submission_manager.get_first_submission_id()
                else:
                    # Well, they said they're done
                    break
            submission = self.submission_manager.get_submission(submission_id)
            self.channel.print()
            self.channel.status_bordered("Next Submission: {} ({}/{})",
                                         submission.get_name(), submission.get_id(),
                                         self.submission_manager.get_last_submission_id())
            what_to_do = self.channel.prompt(
                "Press Enter to begin; (g)oto, (b)ack, (s)kip, (l)ist, (a)dd, (d)rop, "
                "(q)uit, (h)elp",
                ["", "g", "goto", "b", "back", "s", "skip", "l", "list", "a", "add", "d", "drop",
                 "q", "quit", "h", "help", "?"],
                show_choices=False)
            if what_to_do == "?" or what_to_do == "h" or what_to_do == "help":
                # Print more help
                self.channel.print("(Enter): Start the next submission")
                self.channel.print("g/goto: Go to a specific submission")
                self.channel.print("b/back: Go to the previous submission (goto -1)")
                self.channel.print("s/skip: Skip the next submission (goto +1)")
                self.channel.print("l/list: List all the submissions and corresponding indices")
                self.channel.print("a/add: Add another folder of submissions")
                self.channel.print("d/drop: Drop the next submission from the list of submissions")
                self.channel.print("q/quit: Give up on grading")
            elif what_to_do == "g" or what_to_do == "goto":
                # Go to a user-entered submission
                self.channel.print("Enter index of submission to jump to.")
                self.channel.print("n Jump to submission n")
                self.channel.print("+n Jump forward n submissions")
                self.channel.print("-n Jump back n submissions")
                new_id = self.channel.input("Go:")
                if new_id:
                    try:
                        if new_id[0] == "+":
                            new_submission_id = submission_id + int(new_id[1:])
                        elif new_id[0] == "-":
                            new_submission_id = submission_id - int(new_id[1:])
                        else:
                            new_submission_id = int(new_id)
                    except (ValueError, IndexError):
                        self.channel.error("Invalid index!")
                    # NOTE(review): if int() raised above, new_submission_id is
                    # unbound here (NameError) or stale from a previous loop.
                    if new_submission_id not in self.submission_manager.get_all_submission_ids():
                        self.channel.error("Invalid index: {}", new_submission_id)
                    else:
                        submission_id = new_submission_id
            elif what_to_do == "b" or what_to_do == "back":
                # Go back to the last-completed submission
                new_submission_id = self.submission_manager.get_previous_submission_id(
                    submission_id)
                if new_submission_id is not None:
                    submission_id = new_submission_id
            elif what_to_do == "s" or what_to_do == "skip":
                # Skip to the next submission
                # (skipping the last submission will trigger the "end of submissions" branch at the
                # beginning of the infinite while loop)
                submission_id = self.submission_manager.get_next_submission_id(submission_id)
            elif what_to_do == "l" or what_to_do == "list":
                # List all the submissions
                id_len = len(str(self.submission_manager.get_last_submission_id()))
                for submission in self.submission_manager.get_all_submissions():
                    self.channel.print("{:{}}: {}",
                                       submission.get_id(), id_len, submission.get_name())
            elif what_to_do == "a" or what_to_do == "add":
                # Add another folder of submissions
                self.add_submissions(None)
            elif what_to_do == "d" or what_to_do == "drop":
                # Drop the next submission, moving on to the one after it
                if self.channel.prompt("Are you sure you want to drop " + submission.get_name() +
                                       "?", ["y", "n"]) == "y":
                    new_submission_id = self.submission_manager.get_next_submission_id(
                        submission_id)
                    self.submission_manager.drop_submission(submission_id)
                    submission_id = new_submission_id
            elif what_to_do == "q" or what_to_do == "quit":
                # Give up on the rest
                if self.channel.prompt("Are you sure you want to quit grading?", ["y", "n"]) == "y":
                    break
            else:
                # Run the next submission
                # Set up logs for the submission
                html_log = HTMLMemoryLog()
                text_log = MemoryLog()
                self.channel.add_delegate(html_log, text_log)
                submission.add_logs(html_log, text_log)
                timer_context = submission.start_timer()
                self.event_manager.dispatch_event(events.SubmissionStartedEvent(submission_id))
                # Delegate the actual command execution to a CommandRunner.
                runner = CommandRunner(self.injector, self.channel, self.host, self.settings,
                                       submission)
                runner.run()
                # Stop the logs and clean up
                html_log.close()
                text_log.close()
                background_commands += runner.get_background_commands()
                submission.stop_timer(timer_context)
                self.event_manager.dispatch_event(events.SubmissionFinishedEvent(submission_id))
                # By default, we want to move on to the next submission in the list
                submission_id = self.submission_manager.get_next_submission_id(submission_id)
        # All done with everything
        self.event_manager.dispatch_event(events.EndOfSubmissionsEvent())
        for background_command in background_commands:
            # NOTE(review): self-assignment below has no effect.
            background_command = background_command
            self.channel.print()
            self.channel.output(Msg().status("Waiting for background command")
                                .print("{}", background_command.get_description()))
            background_command.wait()
            self.channel.print()
            if background_command.get_error():
                self.channel.error("ERROR: {}", background_command.get_error())
            if background_command.get_output():
                self.channel.status("Background command output:")
                self.channel.print(background_command.get_output())
class CommandRunner:
"""
Class that actually handles running commands on a submission.
"""
    def __init__(self, injector: Injector, channel: Channel, host: Host, settings: Settings,
                 submission: Submission) -> None:
        """
        Initialize a new CommandRunner to use for running commands on a submission.
        """
        self.injector = injector
        self.channel = channel
        self.host = host
        self.settings = settings
        self._submission = submission
        # Background commands started while running this submission; the
        # Grader collects these afterwards via get_background_commands().
        self._background_commands = []  # type: List[BackgroundCommand]
self._background_commands = [] # type: List[BackgroundCommand]
    def _check_folder(self, path: Path) -> Optional[Path]:
        """
        Check whether the user is satisfied with a folder, and, if not, allow them to choose a
        different one.
        :param path: The path to the folder to check.
        :return: Either the original folder (if they're satisfied), a different folder of their
            choice, or None if they're feeling particularly unagreeable today.
        """
        self.channel.print()
        # Show the folder contents (relative to the submission root) first.
        self.host.print_folder(path, self._submission.get_path())
        choice = self.channel.prompt("Does this folder satisfy your innate human needs?",
                                     ["Y", "n"], "y")
        if choice == "y":
            return path
        else:
            # May return None if the user cancels the folder chooser.
            return self.host.choose_folder(path)
    def _find_folder_from_regex(self, base_path: Path, folder_regex: str) -> Optional[Path]:
        """
        Find a folder, relative to an existing folder, based on a regular expression.
        :param base_path: The path to the current folder.
        :param folder_regex: The regex to match to a subfolder of base_path (full match).
        :return: The path to a valid subfolder, or None if none was found.
        """
        regex = re.compile(folder_regex)
        matches = []
        for name, type, is_link in self.host.list_folder(base_path):
            if type == "folder":
                match = regex.fullmatch(name)
                if match is not None:
                    matches.append(name)
        folder = None
        if len(matches) == 1:
            folder = matches[0]
        elif len(matches) > 1:
            # Ambiguous: let the user pick among the matching subfolders.
            self.channel.status("Multiple folders found when looking for {} in {}:", folder_regex,
                                base_path.relative_str(self._submission.get_path()))
            for name in matches:
                self.channel.print("  ", name)
            choice = self.channel.input("Make a choice:", matches)
            if choice and choice in matches:
                folder = choice
        if folder is None:
            return None
        return base_path.append(folder)
    def _find_folder(self, base_path: Path, subfolder: Union[str, Sequence[str]]) -> Optional[Path]:
        """
        Find a new path to a folder based on a current folder and either a subfolder or a list of
        regular expressions representing subfolders.
        :param base_path: The path to the base folder to start the search from.
        :param subfolder: The name of a subfolder (or relative path to a subfolder), or a list of
            regular expressions matched one level at a time.
        :return: The path to a valid (sub)*folder, falling back to base_path if the target
            does not exist.
        """
        path = base_path
        if isinstance(subfolder, str):
            # A literal (relative) folder name/path.
            path = path.append(subfolder)
        else:
            # A chain of regexes: descend one matched level per regex.
            for folder_regex in subfolder:
                new_path = self._find_folder_from_regex(path, folder_regex)
                if new_path is None:
                    break
                path = new_path
        if not self.host.folder_exists(path):
            self.channel.error("Folder not found: {}", path.relative_str(base_path))
            # Fall back to where we started rather than returning a bad path.
            path = base_path
        return path
def _get_modified_command(self, command: CommandItem) -> CommandItem:
"""
Prompt the user for a modified version of a command.
:param command: The command to modify.
:return: A copy of "command" with "name" and "command" changed.
"""
self.channel.print("Existing command: {}", command.command)
new_command = self.channel.input("Enter new command (TAB to input old):", command.command)
if not new_command:
self.channel.print("No change :(")
return command
return command.get_modified(new_command)
    def run(self) -> None:
        """
        Run the commands on the submission.

        Confirms the submission's base folder with the user, then executes the
        configured command tree.  A Ctrl-C only aborts the current submission.
        """
        _logger.info("Running commands for: {}", self._submission)
        try:
            base_path = self._check_folder(self._submission.get_path())
            if base_path is None:
                _logger.info("Skipping submission because user didn't pick a folder")
                self.channel.error("Skipping submission")
                return
            self._do_command_set(self.settings.commands, base_path, self.settings.base_env or {})
        except (InterruptedError, KeyboardInterrupt):
            self.channel.print("")
            self.channel.error("Submission interrupted")
            self.channel.print("")
    def get_background_commands(self) -> List[BackgroundCommand]:
        """
        Get any background commands that were started. (They're not necessarily still running.)
        """
        # Returns the live internal list (not a copy); the Grader accumulates
        # these across submissions and waits on them at the end.
        return self._background_commands
    def _do_command_set(self, commands: Sequence[Command], path: Path,
                        environment: Mapping[str, str]) -> bool:
        """
        Run a group of commands on the submission (recursing into nested sets).
        :param commands: The commands to run.
        :param path: The initial working directory for the commands.
        :param environment: A base dictionary of environment variables for the commands.
        :return: True if we made it through successfully, or False if we should skip the rest of
            this submission.
        """
        if not self.host.folder_exists(path):
            _logger.warning("_do_command_set: Folder not found: {}", path)
            self.channel.print()
            self.channel.error("Folder not found: {}", path)
            self.channel.error("Skipping {} commands: {}",
                               len(commands),
                               [command.name for command in commands])
            return False
        _logger.debug("_do_command_set: in {}", path)
        for command in commands:
            # Duck-typed dispatch: a "commands" attribute marks a nested set.
            if hasattr(command, "commands"):
                # It's a command set
                msg = Msg(sep="").print("\n").status("Command Set")
                if command.name:
                    msg.status(": {}", command.name)
                if command.folder:
                    msg.print(" ({})", command.folder)
                self.channel.output(msg)
                new_path = path
                if command.folder:
                    new_path = self._find_folder(path, command.folder)
                if command.confirm_folder:
                    new_path = self._check_folder(new_path)
                if new_path is None:
                    # The user didn't let us get a path; cancel this bit
                    self.channel.print()
                    self.channel.error("Skipping {} commands: {}",
                                       len(command.commands),
                                       [command.name for command in command.commands])
                    self.channel.input("Press Enter to continue...")
                    continue
                # Child sets see the parent environment with their own
                # variables layered on top.
                new_environment = dict(environment)
                new_environment.update(command.environment)
                # Run the command set (recursively)
                # If it returns False, then we want to skip the rest of this submission
                if not self._do_command_set(command.commands, new_path, new_environment):
                    return False
                self.channel.print()
                self.channel.status("End Command Set", end="")
                if command.name:
                    self.channel.status(": {}", command.name, end="")
                self.channel.print()
            else:
                # It's a command item
                # Run the command
                # If it returns False, then we want to skip the rest of this submission
                if not self._do_command(command, path, environment):
                    return False
        # Everything went well!
        return True
    def _do_command(self, command: CommandItem, path: Path, environment: Mapping[str, str]) -> bool:
        """
        Run an individual command on the submission.

        Prints a banner for the command, then loops on an interactive prompt
        that lets the user open a shell or folder, modify the command, or skip
        it, before actually running it (foreground or background). Afterwards
        the user may repeat the command.

        :param command: The command to run.
        :param path: The working directory for the command.
        :param environment: A base dictionary of environment variables for the command.
        :return: True to move on to the next command, False to skip the rest of this submission.
        """
        _logger.debug("_do_command: {}", command)
        msg = Msg(sep="\n").print()
        # Banner line: "--- <submission name> ---...", padded out to 56 chars.
        status_title = ("-" * 3) + " " + self._submission.get_name()
        if len(status_title) < 56:
            status_title += " "
            status_title += "-" * (56 - len(status_title))
        msg.status(status_title)
        msg.status("::: {}", command.name)
        if command.is_background:
            msg.status("    (background command)")
        for line in command.command.split("\n"):
            if line:
                msg.bright("    {}", line)
        self.channel.output(msg.print())
        # Set up the command environment dictionary
        # (This is used for running the command, and if we open a shell)
        env = dict(environment)
        env.update(command.environment)
        env.update({
            "SUBMISSION_NAME": self._submission.get_name()
        })
        # Before starting, ask the user what they want to do
        while True:
            choice = self.channel.prompt("What now?", ["o", "f", "m", "s", "ss", "?", ""])
            if choice == "o":
                # Open a shell in the current folder
                self.host.open_shell(path, env)
            elif choice == "f":
                # Open the current folder
                self.host.open_folder(path)
            elif choice == "m":
                # Modify the command (only for this submission)
                command = self._get_modified_command(command)
            elif choice == "s":
                # Skip this command
                return True
            elif choice == "ss":
                # Skip the rest of this submission
                return False
            elif choice == "?":
                # Show help
                msg = Msg(sep="\n")
                msg.print("    o: Open a shell in the current folder")
                msg.print("    f: Open the current folder")
                msg.print("    m: Modify the command (just for this submission)")
                msg.print("    s: Skip this command")
                msg.print("    ss: Skip the rest of this submission")
                msg.print("    ?: Show this help message")
                msg.print("    Enter: Run the command")
                self.channel.output(msg)
            else:
                # Run the command (user pressed Enter)
                self.channel.print("")
                break
        # Alrighty, it's command-running time!
        if command.is_background:
            self._run_background_command(command, path, env)
        else:
            self._run_foreground_command(command, path, env)
        # All done with the command!
        # Ask user what they want to do
        while True:
            self.channel.print("")
            choice = self.channel.prompt("Repeat command?", ["y", "N"], "n")
            self.channel.print("")
            if choice == "y":
                # Repeat the command (recursive call re-shows the banner/prompt)
                return self._do_command(command, path, environment)
            else:
                # Move on to the next command
                return True
def _run_background_command(self, command: CommandItem, path: Path,
environment: Mapping[str, str]) -> None:
"""
Actually run an individual background command.
:param command: The command to run.
:param path: The working directory for the command.
:param environment: A dictionary of environment variables for the command.
"""
try:
self._background_commands.append(self.host.start_background_command(
command.command, path, environment, command.stdin))
except CommandStartError as e:
self.channel.print()
self.channel.error("Error starting background command: {}", e.message)
else:
self.channel.print()
self.channel.status("Background command started.")
    def _run_foreground_command(self, command: CommandItem, path: Path,
                                environment: Mapping[str, str]) -> None:
        """
        Actually run an individual foreground command.

        If the command's config has a "diff" section, the reference text is
        gathered first (from inline content, a local file, a submission file,
        or another command's output), then the command runs, and finally the
        captured output is diffed against the reference.

        :param command: The command to run.
        :param path: The working directory for the command.
        :param environment: A dictionary of environment variables for the command.
        """
        # Filled with the text content to compare the command's output to (if any)
        diff_reference = None
        # Human-readable description of where the reference text came from
        diff_reference_source = None
        if command.diff:
            if command.diff.content:
                # Inline reference text straight from the command config
                diff_reference = command.diff.content
                diff_reference_source = "content from command config"
            elif command.diff.file and self.settings.diff_file_path:
                # Reference text read from a local file under diff_file_path
                local_diff_path = os.path.join(self.settings.diff_file_path.get_local_path(),
                                               command.diff.file)
                try:
                    with open(local_diff_path) as f:
                        diff_reference = f.read()
                    diff_reference_source = "local file ({})".format(command.diff.file)
                except FileNotFoundError:
                    self.channel.error("Diff file not found: {} ({})", command.diff.file,
                                       self.settings.diff_file_path)
            elif command.diff.submission_file:
                # Reference text read from a file inside the submission itself
                diff_path = path.append(command.diff.submission_file)
                try:
                    diff_reference = self.host.read_text_file(diff_path)
                    diff_reference_source = "submission file ({})".format(
                        command.diff.submission_file)
                except FileNotFoundError:
                    self.channel.error("Diff file not found: {} ({})",
                                       command.diff.submission_file, path)
            elif command.diff.command:
                # Reference text is the output of another (quiet) command run
                try:
                    diff_reference = self.host.run_command(command.diff.command, path, environment,
                                                           print_output=False)
                    diff_reference_source = "command ({})".format(command.diff.command)
                except CommandStartError as e:
                    self.channel.error("Error starting diff command: {}", e.message)
                except CommandRunError as e:
                    self.channel.error("Error running diff command: {}", e.message)
            else:
                self.channel.error("Diff object doesn't include "
                                   "\"content\", \"file\", \"submission_file\", or \"command\"")
        # Output stays None for passthrough commands, so no diff is printed then
        output = None
        try:
            if command.is_passthrough:
                self.host.run_command_passthrough(command.command, path, environment)
            else:
                output = self.host.run_command(command.command, path, environment, command.stdin)
        except CommandStartError as e:
            self.channel.print()
            self.channel.error("Error starting command: {}", e.message)
            return
        except CommandRunError as e:
            self.channel.print()
            self.channel.error("Error running command: {}", e.message)
            return
        if diff_reference is not None:
            self.channel.print()
            self.channel.status("DIFF with reference from {}", diff_reference_source)
            self.channel.print()
            self._print_diff(output, diff_reference, command.diff)
@staticmethod
def _clean_lines(lines: Sequence[str], collapse_whitespace: bool = False) \
-> Tuple[List[str], Dict[str, List[str]]]:
"""
Clean up some lines of output to make diffing work better. In particular, make an
entirely-lowercase version and optionally collapse whitespace.
:return: A tuple with (list of str, dict) representing the list of cleaned-up lines
(each ending with a newline) and a dictionary mapping each cleaned-up line to a list
of the original line(s) that it came from (none ending with a newline).
"""
clean_to_orig = defaultdict(lambda: []) # type: Dict[str, List[str]]
clean_lines = [] # type: List[str]
for line in lines:
if collapse_whitespace:
line = re.sub(r'\s+', " ", line.strip())
else:
line = line.rstrip()
clean_line = line.lower() + "\n"
clean_lines.append(clean_line)
clean_to_orig[clean_line].append(line)
return clean_lines, clean_to_orig
    def _print_diff(self, output: str, reference: str, options: CommandItem.Diff) -> None:
        """
        Print the results of performing a diff between "output" and "reference".

        The comparison runs on lowercased (and optionally whitespace-collapsed)
        lines, but the lines displayed are the originals, recovered via the
        clean-line -> original-lines maps from _clean_lines.
        """
        # Legend for the colorized diff output
        self.channel.bg_happy("- Reference")
        self.channel.bg_sad  ("+ Output")
        self.channel.bg_meh  ("  Both")
        self.channel.print   ("-----------")
        self.channel.print   ("")
        # Split everything by lines
        output_lines = output.splitlines()
        reference_lines = reference.splitlines()
        # Clean up both sides so the diff ignores case (and, optionally,
        # whitespace differences), keeping maps back to the original lines.
        reference_clean, reference_orig = CommandRunner._clean_lines(
            reference_lines, options.collapse_whitespace)
        output_clean, output_orig = CommandRunner._clean_lines(
            output_lines, options.collapse_whitespace)
        # Print that diff!
        for line in difflib.ndiff(reference_clean, output_clean):
            # ndiff lines look like "<signal> <content>"; content keeps its "\n"
            signal = line[0]
            content = line[2:]
            self.channel.bright("{}", line[0:2], end="")
            if signal == "-":
                # Line from reference only
                self.channel.bg_happy("{}", reference_orig[content].pop(0))
            elif signal == "+":
                # Line from output only
                self.channel.bg_sad("{}", output_orig[content].pop(0))
            elif signal == "?":
                # Extra line (to mark locations, etc.)
                self.channel.bright("{}", content.rstrip("\n"))
            else:
                # Line from both reference and output
                # Pop the reference side
                reference_orig[content].pop(0)
                # Pop and print the output side
                self.channel.bg_meh("{}", output_orig[content].pop(0))
| 2.65625 | 3 |
tests/test-source.py | Maryan23/News-On-The-Go | 0 | 12773876 | <filename>tests/test-source.py
import unittest
class Sources:
    '''
    News source value object: holds the identifier, display name,
    category and description of a single news source.
    '''
    def __init__(self, id, name, category, description):
        # Bind all four fields in one unpacking assignment.
        self.id, self.name, self.category, self.description = (
            id, name, category, description)
class TestSources(unittest.TestCase):
'''
Test class that defines test cases for the Sources class behaviours.
Args:
unittest.TestCase: TestCase class that helps in creating test cases
'''
def setUp(self):
'''
Set up method to run before each test cases.
'''
self.new_source = Sources("abc-news","ABC-NEWS","sports","") # create a source object
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_source.id,"abc-news")
self.assertEqual(self.new_source.name,"ABC-NEWS")
self.assertEqual(self.new_source.category,"sports")
self.assertEqual(self.new_source.description,"")
if __name__ == '__main__':
unittest.main() | 3.84375 | 4 |
storm_analysis/jupyter_examples/multiplane_psfs_to_splines.py | bintulab/storm-analysis | 0 | 12773877 | <reponame>bintulab/storm-analysis<filename>storm_analysis/jupyter_examples/multiplane_psfs_to_splines.py
#!/usr/bin/env python
"""
Helper functions for Multiplane PSF to spline conversion.
Hazen 10/17
"""
import storm_analysis.sa_library.parameters as parameters
# Camera pixel size (presumably nanometers, the storm-analysis convention
# — TODO confirm against the analysis settings).
pixel_size = 100.0
# Half-range in z over which the splines are fit (symmetric about focus).
spline_z_range = 0.75
# Z values at which localization fitting is initialized (one per plane).
z_value = [-0.3, 0.0, 0.3]
def multiplaneXML():
    """
    Create a Multiplane parameters object and write it out as "multiplane.xml".
    """
    params = parameters.ParametersMultiplaneArb()
    # (attribute name, attribute type, value) triples, applied in order.
    attribute_table = [
        ("max_frame", "int", -1),
        ("start_frame", "int", -1),
        ("background_sigma", "float", 8.0),
        ("find_max_radius", "int", 2),
        ("independent_heights", "int", 0),
        ("iterations", "int", 20),
        ("mapping", "filename", "map.map"),
        ("no_fitting", "int", 0),
        ("pixel_size", "float", pixel_size),
        ("sigma", "float", 1.5),
        ("threshold", "float", 6.0),
        ("weights", "filename", "weights.npy"),
        ("z_value", "float-array", z_value),
        ("channel0_cal", "filename", "calib.npy"),
        ("channel1_cal", "filename", "calib.npy"),
        ("channel0_ext", "string", "_c1.dax"),
        ("channel1_ext", "string", "_c2.dax"),
        ("channel0_offset", "int", 0),
        ("channel1_offset", "int", 0),
        ("spline0", "filename", "c1_psf.spline"),
        ("spline1", "filename", "c2_psf.spline"),
        # Don't do tracking.
        ("descriptor", "string", "1"),
        ("radius", "float", "0.0"),
        ("max_z", "float", str(spline_z_range + 0.001)),
        ("min_z", "float", str(-(spline_z_range - 0.001))),
        # Don't do drift-correction.
        ("d_scale", "int", 2),
        ("drift_correction", "int", 0),
        ("frame_step", "int", 500),
        ("z_correction", "int", 0),
    ]
    for attr_name, attr_type, attr_value in attribute_table:
        params.setAttr(attr_name, attr_type, attr_value)
    params.toXMLFile("multiplane.xml")
if (__name__ == "__main__"):
multiplaneXML()
| 2.09375 | 2 |
test/functional/f5_plugins/test_deploy_lb.py | F5Networks/f5-openstack-heat | 9 | 12773878 | # Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import pytest
@pytest.fixture
def F5PluginTemplateLoc(SupportedDir):
    """Path to the deploy_lb.yaml template under the supported f5_plugins dir."""
    # The original wrapped this in a second single-argument os.path.join,
    # which is a no-op; one join call is sufficient.
    return os.path.join(SupportedDir, 'f5_plugins', 'deploy_lb.yaml')
def test_deploy_lb(
        HeatStack,
        symbols,
        F5PluginTemplateLoc
):
    """Deploy the load-balancer heat template with parameters from symbols."""
    # Collect all template parameters up front for readability.
    stack_parameters = {
        'client_server_image': symbols.ubuntu_image,
        'client_server_flavor': symbols.ubuntu_flavor,
        'client_server_sec_group': symbols.secgroup,
        'key_name': symbols.ssh_key,
        'client_network': symbols.client_net,
        'server_network': symbols.server_net,
        'bigip_pw': symbols.bigip_admin_password,
        'bigip_fip': symbols.bigip_ip,
        'vs_vip': symbols.vs_vip
    }
    hc, stack = HeatStack(
        F5PluginTemplateLoc,
        'func_test_deploy_lb',
        parameters=stack_parameters
    )
| 1.609375 | 2 |
py/plot_afefeh.py | jobovy/apogee-maps | 1 | 12773879 | ###############################################################################
# plot_afefeh: the basic [a/Fe] vs. [Fe/H] plot for the data section
###############################################################################
import sys
import matplotlib
import numpy
from scipy import special
matplotlib.use('Agg')
from galpy.util import bovy_plot
from matplotlib import pyplot
import define_rcsample
def _plot_region_outline(lowfeh_f, highfeh_f, lowafe_f, highafe_f):
    """
    Overplot the dashed outline of one abundance-selected sub-sample region.

    Each argument is one of define_rcsample's boundary functions of [Fe/H];
    the outline's vertical edges sit at the region's low/high [Fe/H] and the
    horizontal edges follow the (possibly sloped) low/high [a/Fe] boundaries.
    """
    lowfeh = lowfeh_f(0.)
    highfeh = highfeh_f(0.)
    # Left, right, bottom, and top edges of the region box
    pyplot.plot([lowfeh, lowfeh],
                [lowafe_f(lowfeh), highafe_f(lowfeh)], 'k--', lw=2.)
    pyplot.plot([highfeh, highfeh],
                [lowafe_f(highfeh), highafe_f(highfeh)], 'k--', lw=2.)
    pyplot.plot([lowfeh, highfeh],
                [lowafe_f(lowfeh), lowafe_f(highfeh)], 'k--', lw=2.)
    pyplot.plot([lowfeh, highfeh],
                [highafe_f(lowfeh), highafe_f(highfeh)], 'k--', lw=2.)
def plot_afefeh(plotfilename):
    """
    Make the basic [a/Fe] vs. [Fe/H] plot of the red-clump sample, overplot
    the four sub-sample selection regions with labels, and save the figure.

    :param plotfilename: name of the file to save the plot to
    """
    # Load the data
    data= define_rcsample.get_rcsample()
    # Plot the data
    bovy_plot.bovy_print()
    bovy_plot.scatterplot(data[define_rcsample._FEHTAG],
                          data[define_rcsample._AFETAG],
                          'k.',ms=.8,
                          levels=special.erf(numpy.arange(1,2)/numpy.sqrt(2.)),
                          xrange=[-1.,0.4],
                          yrange=[-0.15,0.35],
                          xlabel=r'$[\mathrm{Fe/H}]$',
                          ylabel=define_rcsample._AFELABEL)
    # Overplot the sub-sample selection regions; the per-region outline code
    # was previously duplicated four times and is now in _plot_region_outline.
    # low alpha, low feh
    _plot_region_outline(define_rcsample._lowlow_lowfeh,
                         define_rcsample._lowlow_highfeh,
                         define_rcsample._lowlow_lowafe,
                         define_rcsample._lowlow_highafe)
    # high alpha
    _plot_region_outline(define_rcsample._highalpha_lowfeh,
                         define_rcsample._highalpha_highfeh,
                         define_rcsample._highalpha_lowafe,
                         define_rcsample._highalpha_highafe)
    # solar
    _plot_region_outline(define_rcsample._solar_lowfeh,
                         define_rcsample._solar_highfeh,
                         define_rcsample._solar_lowafe,
                         define_rcsample._solar_highafe)
    # high [Fe/H]
    _plot_region_outline(define_rcsample._highfeh_lowfeh,
                         define_rcsample._highfeh_highfeh,
                         define_rcsample._highfeh_lowafe,
                         define_rcsample._highfeh_highafe)
    # Label them
    bovy_plot.bovy_text(-0.4,0.265,r'$\mathrm{high}\ [\alpha/\mathrm{Fe}]$',
                        size=15.,backgroundcolor='w')
    bovy_plot.bovy_text(-0.975,0.05,r'$\mathrm{low\ [Fe/H]}$',
                        size=15.,backgroundcolor='w')
    bovy_plot.bovy_text(0.,-0.125,r'$\mathrm{high\ [Fe/H]}$',
                        size=15.,backgroundcolor='w')
    bovy_plot.bovy_text(-0.225,-0.125,r'$\mathrm{solar}$',
                        size=15.,backgroundcolor='w')
    # Loci (deliberately disabled; flip to True to overplot them)
    if False:
        haloc= define_rcsample.highalphalocus()
        bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'k-',lw=2.,overplot=True)
        haloc= define_rcsample.lowalphalocus()
        bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'k-',lw=2.,overplot=True)
    bovy_plot.bovy_end_print(plotfilename)
    return None
# Usage: python plot_afefeh.py <output plot filename>
if __name__ == '__main__':
    plot_afefeh(sys.argv[1])
| 2.5625 | 3 |
get-lib-sizes.py | bmajoros/BIGGER | 0 | 12773880 | #!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Author:<NAME>
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import statistics
import os
import ProgramName
import gzip
#=========================================================================
# main()
#=========================================================================
if len(sys.argv)!=3:
    exit(ProgramName.get()+" <in:mRNA.mtx.gz> <out:library-sizes.txt>\n")
(mtxFile,outFile)=sys.argv[1:]

# Per-cell library sizes: cell barcode -> sum of counts over all guides.
d={}
# "with" guarantees the gzip handle is closed even on a parse error.
with gzip.open(mtxFile,"r") as matr:
    # Skip the three MatrixMarket header lines (banner, comment, dimensions).
    matr.readline()
    matr.readline()
    matr.readline()
    for line in matr:
        line = line.decode("utf8")
        (guide,cell,lib)=line.strip().split()
        # dict.get with a default replaces the if/else accumulation idiom.
        d[cell]=d.get(cell,0)+int(lib)

# Write one "cell<TAB>library-size" line per cell; "with" replaces the
# manual open()/close() pair so the file is closed on any exception too.
with open(outFile,"wt") as OUT:
    for key in d:
        print(key,d[key],sep="\t",file=OUT)
| 2.109375 | 2 |
bejmy/transactions/apps.py | bejmy/backend | 0 | 12773881 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class TransactionsConfig(AppConfig):
    """Django app configuration for the bejmy.transactions application."""
    # Dotted module path of the app.
    name = 'bejmy.transactions'
    # NOTE(review): "category" is not a standard AppConfig attribute;
    # presumably read by project-specific code — confirm before removing.
    category = 'transactions'
    # Translatable human-readable name shown in the admin.
    verbose_name = _("transactions")
    def ready(self):
        # apply signal receivers after all apps are ready
        import bejmy.transactions.receivers # noqa
| 1.507813 | 2 |
fxi/prompt.py | cleberzavadniak/fxi | 1 | 12773882 | <filename>fxi/prompt.py
import time
import tkinter
from tkinter.ttk import Label
class Prompt(Label):
    """
    Tkinter label acting as an interactive prompt line.

    ask() blocks (polling) until answer_callback() delivers the user's input
    via the associated command line — it is presumably called from a worker
    thread, not the Tk main loop (a blocked main loop would never deliver the
    callback); confirm against callers.
    """
    def __init__(self, command_line, parent):
        self.command_line = command_line
        # Text currently displayed by the label.
        self.content = tkinter.StringVar()
        self.reset()
        # Latest answer delivered by answer_callback(); None while waiting.
        self.answer = None
        super().__init__(
            parent,
            textvariable=self.content,
            anchor=tkinter.W,
            justify=tkinter.LEFT,
            font=("Terminus", 14)
        )
    def reset(self):
        # Restore the idle prompt marker.
        self.content.set('> ')
    def ask(self, question, hidden=False):
        """
        Show *question* and block until an answer arrives or the app dies.

        :param hidden: if True, echo typed characters as "-" (e.g. passwords).
        :return: the answer string, or None if the app exited while waiting.
        """
        self.content.set(f'{question}: ')
        self.command_line.callback = self.answer_callback
        if hidden:
            self.command_line.configure(show="-")
        self.answer = None
        # Busy-wait with a short sleep until the callback fires or the
        # application signals shutdown via parent.alive.
        while self.command_line.parent.alive and self.answer is None:
            time.sleep(0.1)
        self.command_line.configure(show='')
        return self.answer
    def answer_callback(self, answer):
        # Invoked by the command-line widget when the user submits input.
        self.command_line.callback = None
        self.answer = answer
        self.reset()
| 3.25 | 3 |
rafi/rv.py | fjt7tdmi/rafi-emu-python | 1 | 12773883 | # Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from fixedint import *
from . import util
# =============================================================================
# General register definitions
#
# ABI (assembler) names for the 32 RISC-V integer registers, indexed by
# register number x0..x31.
INT_REG_NAMES = [
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6",
]
# =============================================================================
# CSR definitions
#
class CsrAddr(Enum):
    """
    RISC-V CSR addresses (values are the 12-bit CSR numbers).

    Changes from the original:
    - SATP (0x180) added; it was missing here although CSR_NAMES below maps it.
    - PMPCFG0-3 added as correctly spelled aliases of the existing PMPCMG0-3
      members (same values, so they are Enum aliases and fully
      backward-compatible).
    """
    USTATUS = 0x000
    FFLAGS = 0x001
    FRM = 0x002
    FCSR = 0x003
    UIE = 0x004
    UTVEC = 0x005
    USCRATCH = 0x040
    UEPC = 0x041
    UCAUSE = 0x042
    UTVAL = 0x043
    UIP = 0x044
    SSTATUS = 0x100
    SEDELEG = 0x102
    SIDELEG = 0x103
    SIE = 0x104
    STVEC = 0x105
    SCOUNTEREN = 0x106
    SSCRATCH = 0x140
    SEPC = 0x141
    SCAUSE = 0x142
    STVAL = 0x143
    SIP = 0x144
    SATP = 0x180
    MSTATUS = 0x300
    MISA = 0x301
    MEDELEG = 0x302
    MIDELEG = 0x303
    MIE = 0x304
    MTVEC = 0x305
    MCOUNTEREN = 0x306
    MHPMEVENT0 = 0x320
    MHPMEVENT1 = 0x321
    MHPMEVENT2 = 0x322
    MHPMEVENT3 = 0x323
    MHPMEVENT4 = 0x324
    MHPMEVENT5 = 0x325
    MHPMEVENT6 = 0x326
    MHPMEVENT7 = 0x327
    MHPMEVENT8 = 0x328
    MHPMEVENT9 = 0x329
    MHPMEVENT10 = 0x32a
    MHPMEVENT11 = 0x32b
    MHPMEVENT12 = 0x32c
    MHPMEVENT13 = 0x32d
    MHPMEVENT14 = 0x32e
    MHPMEVENT15 = 0x32f
    MHPMEVENT16 = 0x330
    MHPMEVENT17 = 0x331
    MHPMEVENT18 = 0x332
    MHPMEVENT19 = 0x333
    MHPMEVENT20 = 0x334
    MHPMEVENT21 = 0x335
    MHPMEVENT22 = 0x336
    MHPMEVENT23 = 0x337
    MHPMEVENT24 = 0x338
    MHPMEVENT25 = 0x339
    MHPMEVENT26 = 0x33a
    MHPMEVENT27 = 0x33b
    MHPMEVENT28 = 0x33c
    MHPMEVENT29 = 0x33d
    MHPMEVENT30 = 0x33e
    MHPMEVENT31 = 0x33f
    MSCRATCH = 0x340
    MEPC = 0x341
    MCAUSE = 0x342
    MTVAL = 0x343
    MIP = 0x344
    PMPCMG0 = 0x3a0
    PMPCMG1 = 0x3a1
    PMPCMG2 = 0x3a2
    PMPCMG3 = 0x3a3
    # Correctly spelled aliases for the PMP configuration registers; the
    # PMPCMG* names above look like typos but are kept for compatibility.
    PMPCFG0 = 0x3a0
    PMPCFG1 = 0x3a1
    PMPCFG2 = 0x3a2
    PMPCFG3 = 0x3a3
    PMPADDR0 = 0x3b0
    PMPADDR1 = 0x3b1
    PMPADDR2 = 0x3b2
    PMPADDR3 = 0x3b3
    PMPADDR4 = 0x3b4
    PMPADDR5 = 0x3b5
    PMPADDR6 = 0x3b6
    PMPADDR7 = 0x3b7
    PMPADDR8 = 0x3b8
    PMPADDR9 = 0x3b9
    PMPADDR10 = 0x3ba
    PMPADDR11 = 0x3bb
    PMPADDR12 = 0x3bc
    PMPADDR13 = 0x3bd
    PMPADDR14 = 0x3be
    PMPADDR15 = 0x3bf
    TSELECT = 0x7a0
    TDATA1 = 0x7a1
    TDATA2 = 0x7a2
    TDATA3 = 0x7a3
    DCSR = 0x7b0
    DPC = 0x7b1
    DSCRATCH = 0x7b2
    MCYCLE = 0xb00
    MTIME = 0xb01
    MINSTRET = 0xb02
    MHPMCOUNTER3 = 0xb03
    MHPMCOUNTER4 = 0xb04
    MHPMCOUNTER5 = 0xb05
    MHPMCOUNTER6 = 0xb06
    MHPMCOUNTER7 = 0xb07
    MHPMCOUNTER8 = 0xb08
    MHPMCOUNTER9 = 0xb09
    MHPMCOUNTER10 = 0xb0a
    MHPMCOUNTER11 = 0xb0b
    MHPMCOUNTER12 = 0xb0c
    MHPMCOUNTER13 = 0xb0d
    MHPMCOUNTER14 = 0xb0e
    MHPMCOUNTER15 = 0xb0f
    MCYCLEH = 0xb80
    MTIMEH = 0xb81
    MINSTRETH = 0xb82
    MHPMCOUNTER3H = 0xb83
    MHPMCOUNTER4H = 0xb84
    MHPMCOUNTER5H = 0xb85
    MHPMCOUNTER6H = 0xb86
    MHPMCOUNTER7H = 0xb87
    MHPMCOUNTER8H = 0xb88
    MHPMCOUNTER9H = 0xb89
    MHPMCOUNTER10H = 0xb8a
    MHPMCOUNTER11H = 0xb8b
    MHPMCOUNTER12H = 0xb8c
    MHPMCOUNTER13H = 0xb8d
    MHPMCOUNTER14H = 0xb8e
    MHPMCOUNTER15H = 0xb8f
    CYCLE = 0xc00
    TIME = 0xc01
    INSTRET = 0xc02
    HPMCOUNTER3 = 0xc03
    HPMCOUNTER4 = 0xc04
    HPMCOUNTER5 = 0xc05
    HPMCOUNTER6 = 0xc06
    HPMCOUNTER7 = 0xc07
    HPMCOUNTER8 = 0xc08
    HPMCOUNTER9 = 0xc09
    HPMCOUNTER10 = 0xc0a
    HPMCOUNTER11 = 0xc0b
    HPMCOUNTER12 = 0xc0c
    HPMCOUNTER13 = 0xc0d
    HPMCOUNTER14 = 0xc0e
    HPMCOUNTER15 = 0xc0f
    CYCLEH = 0xc80
    TIMEH = 0xc81
    INSTRETH = 0xc82
    HPMCOUNTER3H = 0xc83
    HPMCOUNTER4H = 0xc84
    HPMCOUNTER5H = 0xc85
    HPMCOUNTER6H = 0xc86
    HPMCOUNTER7H = 0xc87
    HPMCOUNTER8H = 0xc88
    HPMCOUNTER9H = 0xc89
    HPMCOUNTER10H = 0xc8a
    HPMCOUNTER11H = 0xc8b
    HPMCOUNTER12H = 0xc8c
    HPMCOUNTER13H = 0xc8d
    HPMCOUNTER14H = 0xc8e
    HPMCOUNTER15H = 0xc8f
    MVENDORID = 0xf11
    MARCHID = 0xf12
    MIMPID = 0xf13
    MHARTID = 0xf14
# Lowercase spec names for the CSR addresses, keyed by CSR number.
# (Note this table includes satp/0x180 and spells pmpcfg* correctly.)
CSR_NAMES = {
    0x000: "ustatus",
    0x001: "fflags",
    0x002: "frm",
    0x003: "fcsr",
    0x004: "uie",
    0x005: "utvec",
    0x040: "uscratch",
    0x041: "uepc",
    0x042: "ucause",
    0x043: "utval",
    0x044: "uip",
    0x100: "sstatus",
    0x102: "sedeleg",
    0x103: "sideleg",
    0x104: "sie",
    0x105: "stvec",
    0x106: "scounteren",
    0x140: "sscratch",
    0x141: "sepc",
    0x142: "scause",
    0x143: "stval",
    0x144: "sip",
    0x180: "satp",
    0x300: "mstatus",
    0x301: "misa",
    0x302: "medeleg",
    0x303: "mideleg",
    0x304: "mie",
    0x305: "mtvec",
    0x306: "mcounteren",
    # Machine hardware performance-monitoring event selectors
    0x320: "mhpmevent0",
    0x321: "mhpmevent1",
    0x322: "mhpmevent2",
    0x323: "mhpmevent3",
    0x324: "mhpmevent4",
    0x325: "mhpmevent5",
    0x326: "mhpmevent6",
    0x327: "mhpmevent7",
    0x328: "mhpmevent8",
    0x329: "mhpmevent9",
    0x32a: "mhpmevent10",
    0x32b: "mhpmevent11",
    0x32c: "mhpmevent12",
    0x32d: "mhpmevent13",
    0x32e: "mhpmevent14",
    0x32f: "mhpmevent15",
    0x330: "mhpmevent16",
    0x331: "mhpmevent17",
    0x332: "mhpmevent18",
    0x333: "mhpmevent19",
    0x334: "mhpmevent20",
    0x335: "mhpmevent21",
    0x336: "mhpmevent22",
    0x337: "mhpmevent23",
    0x338: "mhpmevent24",
    0x339: "mhpmevent25",
    0x33a: "mhpmevent26",
    0x33b: "mhpmevent27",
    0x33c: "mhpmevent28",
    0x33d: "mhpmevent29",
    0x33e: "mhpmevent30",
    0x33f: "mhpmevent31",
    0x340: "mscratch",
    0x341: "mepc",
    0x342: "mcause",
    0x343: "mtval",
    0x344: "mip",
    # Physical memory protection configuration and address registers
    0x3a0: "pmpcfg0",
    0x3a1: "pmpcfg1",
    0x3a2: "pmpcfg2",
    0x3a3: "pmpcfg3",
    0x3b0: "pmpaddr0",
    0x3b1: "pmpaddr1",
    0x3b2: "pmpaddr2",
    0x3b3: "pmpaddr3",
    0x3b4: "pmpaddr4",
    0x3b5: "pmpaddr5",
    0x3b6: "pmpaddr6",
    0x3b7: "pmpaddr7",
    0x3b8: "pmpaddr8",
    0x3b9: "pmpaddr9",
    0x3ba: "pmpaddr10",
    0x3bb: "pmpaddr11",
    0x3bc: "pmpaddr12",
    0x3bd: "pmpaddr13",
    0x3be: "pmpaddr14",
    0x3bf: "pmpaddr15",
    # Debug/trigger registers
    0x7a0: "tselect",
    0x7a1: "tdata1",
    0x7a2: "tdata2",
    0x7a3: "tdata3",
    0x7b0: "dcsr",
    0x7b1: "dpc",
    0x7b2: "dscratch",
    # Machine counters (low and high halves)
    0xb00: "mcycle",
    0xb01: "mtime",
    0xb02: "minstret",
    0xb03: "mhpmcounter3",
    0xb04: "mhpmcounter4",
    0xb05: "mhpmcounter5",
    0xb06: "mhpmcounter6",
    0xb07: "mhpmcounter7",
    0xb08: "mhpmcounter8",
    0xb09: "mhpmcounter9",
    0xb0a: "mhpmcounter10",
    0xb0b: "mhpmcounter11",
    0xb0c: "mhpmcounter12",
    0xb0d: "mhpmcounter13",
    0xb0e: "mhpmcounter14",
    0xb0f: "mhpmcounter15",
    0xb80: "mcycleh",
    0xb81: "mtimeh",
    0xb82: "minstreth",
    0xb83: "mhpmcounter3h",
    0xb84: "mhpmcounter4h",
    0xb85: "mhpmcounter5h",
    0xb86: "mhpmcounter6h",
    0xb87: "mhpmcounter7h",
    0xb88: "mhpmcounter8h",
    0xb89: "mhpmcounter9h",
    0xb8a: "mhpmcounter10h",
    0xb8b: "mhpmcounter11h",
    0xb8c: "mhpmcounter12h",
    0xb8d: "mhpmcounter13h",
    0xb8e: "mhpmcounter14h",
    0xb8f: "mhpmcounter15h",
    # User-visible counters (low and high halves)
    0xc00: "cycle",
    0xc01: "time",
    0xc02: "instret",
    0xc03: "hpmcounter3",
    0xc04: "hpmcounter4",
    0xc05: "hpmcounter5",
    0xc06: "hpmcounter6",
    0xc07: "hpmcounter7",
    0xc08: "hpmcounter8",
    0xc09: "hpmcounter9",
    0xc0a: "hpmcounter10",
    0xc0b: "hpmcounter11",
    0xc0c: "hpmcounter12",
    0xc0d: "hpmcounter13",
    0xc0e: "hpmcounter14",
    0xc0f: "hpmcounter15",
    0xc80: "cycleh",
    0xc81: "timeh",
    0xc82: "instreth",
    0xc83: "hpmcounter3h",
    0xc84: "hpmcounter4h",
    0xc85: "hpmcounter5h",
    0xc86: "hpmcounter6h",
    0xc87: "hpmcounter7h",
    0xc88: "hpmcounter8h",
    0xc89: "hpmcounter9h",
    0xc8a: "hpmcounter10h",
    0xc8b: "hpmcounter11h",
    0xc8c: "hpmcounter12h",
    0xc8d: "hpmcounter13h",
    0xc8e: "hpmcounter14h",
    0xc8f: "hpmcounter15h",
    # Machine information registers
    0xf11: "mvendorid",
    0xf12: "marchid",
    0xf13: "mimpid",
    0xf14: "mhartid",
}
class MSTATUS(util.BitField32):
    """Field accessors for the mstatus CSR (bit positions per the RISC-V privileged spec)."""
    def get_MPP(self):
        # MPP (previous privilege mode) occupies bits [12:11].
        return self.get_value(12, 11)
    def set_MPP(self, value):
        self.set_value(value, 12, 11)
    def get_MPIE(self):
        # MPIE (previous interrupt enable) is bit 7.
        return self.get_bit(7)
    def set_MPIE(self, value):
        self.set_bit(value, 7)
    def get_MIE(self):
        # MIE (machine interrupt enable) is bit 3.
        return self.get_bit(3)
    def set_MIE(self, value):
        self.set_bit(value, 3)
class MTVEC(util.BitField32):
    """Field accessors for the mtvec CSR: BASE in bits [31:2], MODE in bits [1:0]."""
    def get_BASE(self):
        return self.get_value(31, 2)
    def set_BASE(self, value):
        self.set_value(value, 31, 2)
    def get_MODE(self):
        return self.get_value(1, 0)
    def set_MODE(self, value):
        self.set_value(value, 1, 0)
| 1.773438 | 2 |
src/homeautomate.py | THaeckel/PyHomeAutomate | 0 | 12773884 | """
Copyright (c) 2021 <NAME>
"""
import statedb
import traceback
import time
import datetime
from devicedetectionskill import DetectDevicePresenceSkill
from wheatherskill import WeatherSkill
from daytimeskill import DaytimeSkill
from raumfeldskill import RaumfeldTVWakeup
from hueskill import HueDaytimeAndWeatherSkill
# logFile = str(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) + ".log"
# Shared JSON configuration file read by every skill.
jsonSettingsFile = "my_skills_config.json"
# NOTE: rebinds the module name "statedb" to the shared database instance.
statedb = statedb.StateDataBase()
# All skill threads managed by this script; each shares the state DB
# and the settings file.
skillList = [
    DetectDevicePresenceSkill(statedb=statedb, settingsFile=jsonSettingsFile),
    WeatherSkill(statedb=statedb, settingsFile=jsonSettingsFile),
    DaytimeSkill(statedb=statedb, settingsFile=jsonSettingsFile),
    RaumfeldTVWakeup(statedb=statedb, settingsFile=jsonSettingsFile),
    HueDaytimeAndWeatherSkill(statedb=statedb, settingsFile=jsonSettingsFile)
]
def startSkills():
    """Start every configured skill thread, then give them time to come up."""
    for each_skill in skillList:
        each_skill.start()
    # Grace period so the skill threads can finish their startup work.
    time.sleep(10)
def joinSkills():
    """Block until every skill thread has terminated."""
    for each_skill in skillList:
        each_skill.join()
def interruptSkills():
    """Ask every skill thread to stop by setting its stop event."""
    for each_skill in skillList:
        each_skill.stopEvent.set()
if __name__ == "__main__":
try:
print("Starting")
startSkills()
joinSkills()
except KeyboardInterrupt:
print("terminating...")
interruptSkills()
except Exception:
print("Error... " + str(traceback.format_exc))
finally:
joinSkills()
print("Terminated")
| 2.46875 | 2 |
fluence/models/siamese_model.py | prajjwal1/fluence2 | 64 | 12773885 | import logging
import torch
from torch import nn
from transformers import AutoModel
from ..pooling import MeanPooling
logger = logging.getLogger(__name__)
class SiameseTransformer(nn.Module):
    """
    Siamese sentence-pair classifier: two frozen pretrained encoders, mean
    pooling of token embeddings, and a linear head over [a; b; a-b].
    """

    def __init__(self, args, config):
        super(SiameseTransformer, self).__init__()
        self.model_a = AutoModel.from_pretrained(args.model_name, config=config, cache_dir=args.cache_dir)
        self.model_b = AutoModel.from_pretrained(args.model_name, config=config, cache_dir=args.cache_dir)
        # Use the module-level logger (the original called the root logger,
        # bypassing this module's configured logger).
        logger.info("**** Encoder will not be trained ****")
        for param in self.model_a.parameters():
            param.requires_grad = False
        for param in self.model_b.parameters():
            param.requires_grad = False
        # The head sees the concatenation [a; b; a-b], hence hidden_size * 3.
        self.classifier = nn.Linear(config.hidden_size*3, config.num_labels)
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, a, b):
        """
        :param a: dict of encoder inputs for side A; must contain "labels".
            NOTE(review): "labels" is popped, so the caller's dict is mutated.
        :param b: dict of encoder inputs for side B ("labels" removed if present).
        :return: (loss, logits) tuple.
        """
        labels = a.pop('labels')
        # Membership test on the dict directly (no list(b.keys()) copy needed).
        if 'labels' in b:
            b.pop('labels')
        output_a = self.model_a(**a)
        output_b = self.model_b(**b)
        # Mean-pool the last hidden states, masking out padding tokens.
        embeddings_a = MeanPooling(output_a[0], a['attention_mask'])
        embeddings_b = MeanPooling(output_b[0], b['attention_mask'])
        output = torch.cat([embeddings_a, embeddings_b, embeddings_a-embeddings_b], dim=1)
        logits = self.classifier(output)
        loss = self.criterion(logits, labels)
        return loss, logits
| 2.609375 | 3 |
dep/reportlab/tests/test_graphics_render.py | csterryliu/Legal-Attest-Letter-Generator | 52 | 12773886 | <reponame>csterryliu/Legal-Attest-Letter-Generator<filename>dep/reportlab/tests/test_graphics_render.py<gh_stars>10-100
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
"""
Tests for renderers
"""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import unittest, os, sys, glob
from reportlab.lib.utils import isPy3
try:
    from reportlab.graphics import _renderPM
except ImportError:
    # _renderPM is an optional compiled extension; the renderPM test below is
    # skipped when it is unavailable. Catching ImportError (instead of the
    # original bare "except:") avoids hiding unrelated errors.
    _renderPM = None
class RenderTestCase(unittest.TestCase):
    "Run the self-tests of the renderPS/renderPDF/renderPM/renderSVG backends."
    @classmethod
    def setUpClass(cls):
        # Create (or empty) the shared output directory once for all tests.
        cls.outDir = outDir = outputfile('render-out')
        if not os.path.isdir(outDir):
            os.makedirs(outDir)
        for x in glob.glob(os.path.join(outDir,'*')):
            os.remove(x)
    def test0(self):
        # renderPS self-test; imported lazily so import errors fail only here.
        from reportlab.graphics.renderPS import test
        assert test(self.outDir) is None
    def test1(self):
        from reportlab.graphics.renderPDF import test
        assert test(self.outDir) is None
    @unittest.skipIf(not _renderPM,'no _renderPM')
    def test2(self):
        # Only runs when the optional _renderPM extension imported successfully.
        from reportlab.graphics.renderPM import test
        assert test(self.outDir) is None
    def test3(self):
        from reportlab.graphics.renderSVG import test
        assert test(self.outDir) is None
def makeSuite():
    # Build this module's test suite (used by the runner below).
    return makeSuiteForClasses(RenderTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| 2.25 | 2 |
handlers/web.py | wangdi1024/wangdi | 1 | 12773887 | import bcrypt
from tornado.escape import json_encode
from handlers.base import BaseHandler
class WebHandler(BaseHandler):
    """Serve the main poker page, issuing a CSRF cookie when missing."""
    def get(self):
        if not self.get_cookie("_csrf"):
            self.set_cookie("_csrf", self.xsrf_token)
        # user = xhtml_escape(self.current_user or '')
        # SECURITY(review): the escaping above is commented out — if the
        # template does not autoescape, "user" is rendered unescaped (XSS
        # risk). Confirm the template's autoescape setting.
        user = self.current_user or ''
        self.render('poker.html', user=user)
class RegHandler(BaseHandler):
    """Register a new account and log the user in.

    NOTE: the source here was corrupted by redaction placeholders
    ("<PASSWORD>"); the password handling is reconstructed with the standard
    bcrypt hashpw/gensalt idiom.
    """
    def post(self):
        email = self.get_argument('email', self.get_argument('username'))
        account = self.db.get('SELECT * FROM account WHERE email="%s"', email)
        if account:
            # Email already registered: signal failure to the client.
            self.write('1')
            return
        username = self.get_argument('username')
        password = self.get_argument('password')
        # Store only the bcrypt hash (salt embedded), never the plain text.
        password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
        uid = self.db.insert('INSERT INTO account (email, username, password) VALUES ("%s", "%s", "%s")',
                             email, username, password)
        self.set_current_user(uid, username)
        self.set_header('Content-Type', 'application/json')
        info = {
            'uid': uid,
            'username': username,
        }
        self.write(json_encode(info))
class LoginHandler(BaseHandler):
    # Verify credentials and establish the user session, then redirect.
    # NOTE(review): the bcrypt line contains redacted "<PASSWORD>"
    # placeholders; the usual pattern hashes the submitted password with the
    # stored hash as salt and compares — confirm against the original source.
    # NOTE(review): an unknown email leaves `account` as None, which would
    # raise on the `.get` calls below.
    def post(self):
        email = self.get_argument('email')
        password = self.get_argument("password")
        account = self.db.get('SELECT * FROM account WHERE email="%s"', email)
        password = <PASSWORD>(password.encode('<PASSWORD>'), account.get('password'))
        self.set_header('Content-Type', 'application/json')
        if password == account.get('password'):
            self.set_current_user(account.get('id'), account.get('username'))
        self.redirect(self.get_argument("next", "/"))
class LogoutHandler(BaseHandler):
    """Log the user out by clearing the session cookie."""

    def post(self):
        destination = self.get_argument("next", "/")
        self.clear_cookie('user')
        self.redirect(destination)
| 2.59375 | 3 |
kubernetes_typed/client/models/v1_windows_security_context_options.py | sobolevn/kubernetes-typed | 22 | 12773888 | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1WindowsSecurityContextOptionsDict generated type."""
from typing import TypedDict
V1WindowsSecurityContextOptionsDict = TypedDict(
"V1WindowsSecurityContextOptionsDict",
{
"gmsaCredentialSpec": str,
"gmsaCredentialSpecName": str,
"runAsUserName": str,
},
total=False,
)
| 1.539063 | 2 |
app.py | TonyZTYang/nyu_networking_chatapp | 0 | 12773889 | from urllib import parse
from flask import Flask, send_from_directory
from flask_restful import Resource, Api, reqparse, fields, marshal_with
from os import path
from datetime import datetime
import json
app = Flask(__name__)
api = Api(app)
# parser = reqparse.RequestParser()
# In-memory chat state (no persistence; lost on restart).
# users: username -> room id (see Login/Logout/MakeUserRoomChange below).
users = {"test": "1"}
# rooms: room id -> list of member usernames; "Public" is the lobby.
rooms = {
    "Public": [],
    "1": ["test"]
}
# messages = {
#     "Public": [],
#     "1": {0: {"time": "2021.04.25 00:00:00", "name": "test", "text": "hey there"}}
# }
# messages: room id -> list of [timestamp, username, text] triples.
messages = {
    "Public": [],
    "1": [[ "2021.04.25 00:00:00", "test", "hey there"]]
}
pwd = path.dirname(path.realpath(__file__))
# Monotonic counters the long-polling endpoints compare against to detect
# changes: one global for user/room membership, one per room for messages.
user_room_change = 0
message_change = {
    "Public": 0,
    "1": 1
}
# Next numeric id handed out when a private room is created.
next_room = 2
# return the full app
# Static front-end: serve the Vue single-page app and its assets from
# the vue_app directory next to this file.
@app.route('/', methods=['GET'])
def index_get():
    return send_from_directory(pwd, "vue_app/index.html")
@app.route('/vue.js', methods=['GET'])
def vuejs_get():
    return send_from_directory(pwd, "vue_app/vue.js")
@app.route('/script.js', methods=['GET'])
def scriptjs_get():
    return send_from_directory(pwd, "vue_app/script.js")
@app.route('/bootstrap.css', methods=['GET'])
def bootstrapcss_get():
    return send_from_directory(pwd, "vue_app/bootstrap.css")
@app.route('/style.css', methods=['GET'])
def stylecss_get():
    return send_from_directory(pwd, "vue_app/style.css")
class Login(Resource):
    """Register a new user name; reject names that are already taken."""

    def post(self):
        global user_room_change
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument("username", type=str)
        name = arg_parser.parse_args()["username"]
        # Guard clause: duplicate names are rejected outright.
        if name in users:
            return {"status": 0}
        # New users always start in the shared "Public" room.
        users[name] = "Public"
        rooms["Public"].append(name)
        user_room_change += 1
        print("new user " + name)
        return {"status": 1}
class Logout(Resource):
    """Remove a user from the chat, deleting their room if it empties."""

    def post(self):
        global user_room_change
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument("username", type=str)
        name = arg_parser.parse_args()["username"]
        if name in users:
            former_room = users.pop(name)
            rooms[former_room].remove(name)
            user_room_change += 1
            # Private rooms disappear once their last member leaves;
            # the Public lobby is never deleted.
            if former_room != "Public" and not rooms[former_room]:
                del rooms[former_room]
                del message_change[former_room]
        return {"status": 1}
class GetUserRoomChange(Resource):
    """Long-poll endpoint: report whether user/room state has changed."""

    def post(self):
        global user_room_change
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument("user_room_change", type=int)
        # Parsed for parity with the client payload; currently unused.
        arg_parser.add_argument("username", type=str)
        args = arg_parser.parse_args()
        client_counter = args["user_room_change"]
        if client_counter == user_room_change:
            return {"changed": 0}
        # State moved on since the client last asked: ship a fresh snapshot.
        return {
            "changed": 1,
            "server_urc": user_room_change,
            "users": users,
        }
class MakeUserRoomChange(Resource):
    # Move the requesting user ("myName") toward "otherName": either split
    # the pair off into a brand-new private room, or join the other user's
    # existing room.  Empty non-public rooms are garbage-collected.
    def post(self):
        global user_room_change
        global next_room
        parser = reqparse.RequestParser()
        parser.add_argument("myName", type=str)
        parser.add_argument("otherName",type=str)
        myId = parser.parse_args()["myName"]
        otherId = parser.parse_args()["otherName"]
        myRoom = users[myId]
        otherRoom = users[otherId]
        user_room_change += 1
        if myRoom == otherRoom:
            # Both users already share a room: carve out a new private one.
            new_room = str(next_room)
            next_room += 1
            rooms[new_room] = [myId, otherId]
            message_change[new_room] = 0
            messages[new_room] = []
            users[myId] = new_room
            users[otherId] = new_room
            rooms[myRoom].remove(myId)
            rooms[myRoom].remove(otherId)
            if ((myRoom != "Public") and (rooms[myRoom] == [])):
                del rooms[myRoom]
        else:
            # Different rooms: the requester joins the other user's room.
            users[myId] = otherRoom
            rooms[myRoom].remove(myId)
            rooms[otherRoom].append(myId)
            if ((myRoom != "Public") and (rooms[myRoom] == [])):
                del rooms[myRoom]
        return {"status": 1}
class GetMessageChange(Resource):
    """Long-poll endpoint: report new messages in the caller's room."""

    def post(self):
        global message_change
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument("messageChange", type=int)
        arg_parser.add_argument("username", type=str)
        args = arg_parser.parse_args()
        client_counter = args["messageChange"]
        name = args["username"]
        # Anonymous polls (no user name yet) never see a change.
        if name == '':
            return {"changed": 0}
        room = users[name]
        if client_counter == message_change[room]:
            return {"changed": 0}
        return {
            "changed": 1,
            "messageChange": message_change[room],
            "messages": messages[room],
        }
class SendMessage(Resource):
    """Append a chat message, stamped with the current time, to the
    sender's room and bump that room's message-change counter."""

    def post(self):
        global message_change
        parser = reqparse.RequestParser()
        parser.add_argument("message", type=str)
        parser.add_argument("username", type=str)
        args = parser.parse_args()
        msg = args["message"]
        username = args["username"]
        room = users[username]
        # BUG FIX: the original format '%Y.%M.%D %H:%M:%S' used %M (minute)
        # and %D (mm/dd/yy shortcut) where month/day were intended.
        # '%Y.%m.%d' matches the seed data format "2021.04.25 00:00:00".
        time = datetime.now().strftime('%Y.%m.%d %H:%M:%S')
        messages[room].append([time, username, msg])
        message_change[room] += 1
        return {"status": 1}
# REST routes of the chat API, one Resource class each.
api.add_resource(Login,"/login")
api.add_resource(Logout,"/logout")
api.add_resource(GetUserRoomChange, "/getUserRoomChange")
api.add_resource(MakeUserRoomChange, "/makeUserRoomChange")
api.add_resource(GetMessageChange, "/getMessageChange")
api.add_resource(SendMessage, "/sendMessage")
if __name__ == "__main__":
    app.run("127.0.0.1", 5000, True)
datasets/__init__.py | Kevincrh/multi-model_fusion | 33 | 12773890 | from .pc_aug import rotate_point_cloud_by_angle, rotation_point_cloud, jitter_point_cloud, pc_aug_funs, normal_pc
# Dataset split identifiers shared across loaders.
STATUS_TRAIN = "train"
STATUS_TEST = "test"
from .data_pth import *
source/apps/crawl_space/migrations/0001_initial.py | nasa-jpl-memex/memex-explorer | 31 | 12773891 | <filename>source/apps/crawl_space/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import apps.crawl_space.models
import django.db.models.deletion
import django.core.validators
class Migration(migrations.Migration):
    # Auto-generated initial schema for the crawl_space app: creates the
    # Crawl and CrawlModel tables, then wires Crawl's foreign keys.
    # Schema changes should go in a new migration, not be edited here.

    dependencies = [
        ('base', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Crawl',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=64, validators=[django.core.validators.RegexValidator(b'^[a-zA-Z0-9-_ ]+$', b'Only numbers, letters, underscores, dashes and spaces are allowed.')])),
                ('slug', models.SlugField(unique=True, max_length=64)),
                ('description', models.TextField(blank=True)),
                ('crawler', models.CharField(max_length=64, choices=[(b'nutch', b'Nutch'), (b'ache', b'ACHE')])),
                ('status', models.CharField(default=b'NOT STARTED', max_length=64)),
                ('config', models.CharField(default=b'config_default', max_length=64)),
                ('seeds_list', models.FileField(default=None, null=True, upload_to=apps.crawl_space.models.get_seeds_upload_path, blank=True)),
                ('pages_crawled', models.BigIntegerField(default=0)),
                ('harvest_rate', models.FloatField(default=0)),
                ('rounds_left', models.IntegerField(default=1, null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CrawlModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=64, validators=[django.core.validators.RegexValidator(b'^[a-zA-Z0-9-_ ]+$', b'Only numbers, letters, underscores, dashes and spaces are allowed.')])),
                ('slug', models.SlugField(unique=True, max_length=64)),
                ('model', models.FileField(upload_to=apps.crawl_space.models.get_model_upload_path, validators=[apps.crawl_space.models.validate_model_file])),
                ('features', models.FileField(upload_to=apps.crawl_space.models.get_model_upload_path, validators=[apps.crawl_space.models.validate_features_file])),
                ('project', models.ForeignKey(to='base.Project')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # PROTECT on crawl_model/seeds_object: a model or seed list that a
        # crawl still references cannot be deleted.
        migrations.AddField(
            model_name='crawl',
            name='crawl_model',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=None, blank=True, to='crawl_space.CrawlModel', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='crawl',
            name='project',
            field=models.ForeignKey(to='base.Project'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='crawl',
            name='seeds_object',
            field=models.ForeignKey(to='base.SeedsList', on_delete=django.db.models.deletion.PROTECT),
            preserve_default=True,
        ),
    ]
| 1.851563 | 2 |
pstock/utils/quote.py | obendidi/pstock | 5 | 12773892 | import typing as tp
from datetime import date
import numpy as np
import pendulum
def get_latest_price_from_quote(price_data: tp.Dict[str, tp.Any]) -> float:
    """Return the freshest of the regular/pre/post market prices.

    The candidate whose timestamp is closest to "now" wins.
    Raises ValueError when the quote carries no price data at all.
    """
    if not price_data:
        raise ValueError("No price data found.")

    # Regular-market price is always present; pre/post are optional.
    candidates = {
        "regular": (
            pendulum.from_timestamp(price_data["regularMarketTime"]),
            price_data["regularMarketPrice"]["raw"],
        )
    }
    for session, price_key, time_key in (
        ("pre", "preMarketPrice", "preMarketTime"),
        ("post", "postMarketPrice", "postMarketTime"),
    ):
        section = price_data.get(price_key, {})
        price = section.get("raw") if section is not None else None
        if price is not None:
            candidates[session] = (
                pendulum.from_timestamp(price_data[time_key]),
                price,
            )

    # Whichever session's timestamp is nearest to now supplies the price.
    _, (_, latest) = min(
        candidates.items(), key=lambda kv: abs(pendulum.now() - kv[1][0])
    )
    return latest
def get_asset_data_from_quote(quote: tp.Dict[str, tp.Any]) -> tp.Dict[str, tp.Any]:
    """Flatten the summaryProfile/quoteType/price sections of a Yahoo
    quote into a single asset-summary dict."""
    profile = quote.get("summaryProfile") or {}
    quote_type = quote.get("quoteType") or {}
    price_data = quote.get("price") or {}

    symbol = quote.get("symbol") or quote_type.get("symbol") or price_data.get("symbol")
    name = (
        quote_type.get("longName", price_data.get("longName"))
        or quote_type.get("shortName", price_data.get("shortName"))
    )
    return {
        "symbol": symbol,
        "name": name,
        "asset_type": quote_type.get("quoteType") or price_data.get("quoteType"),
        "currency": price_data.get("currency"),
        "latest_price": get_latest_price_from_quote(price_data),
        "sector": profile.get("sector"),
        "industry": profile.get("industry"),
    }
def get_earnings_data_from_quote(
    quote: tp.Dict[str, tp.Any]
) -> tp.List[tp.Dict[str, tp.Union[str, float]]]:
    """Merge quarterly EPS and revenue figures from a Yahoo quote into one
    record per quarter, plus a forward-looking row for the current quarter.
    Missing figures become NaN; a missing/invalid earnings section gives [].
    """
    earnings = quote.get("earnings")
    if not isinstance(earnings, dict) or not earnings:
        return []

    chart = earnings.get("earningsChart", {})

    # Index EPS (actual/estimate) and financials (revenue/earnings) by quarter.
    eps_by_quarter = {}
    for entry in chart.get("quarterly", []):
        if "date" in entry:
            eps_by_quarter[entry["date"]] = {
                "actual": entry.get("actual", {}).get("raw", np.nan),
                "estimate": entry.get("estimate", {}).get("raw", np.nan),
            }
    fin_by_quarter = {}
    for entry in earnings.get("financialsChart", {}).get("quarterly", []):
        if "date" in entry:
            fin_by_quarter[entry["date"]] = {
                "revenue": entry.get("revenue", {}).get("raw", np.nan),
                "earnings": entry.get("earnings", {}).get("raw", np.nan),
            }

    rows = []
    for quarter in set(eps_by_quarter) | set(fin_by_quarter):
        eps = eps_by_quarter.get(quarter, {})
        fin = fin_by_quarter.get(quarter, {})
        rows.append(dict(
            quarter=quarter,
            actual=eps.get("actual", np.nan),
            estimate=eps.get("estimate", np.nan),
            revenue=fin.get("revenue", np.nan),
            earnings=fin.get("earnings", np.nan),
        ))

    # Current-quarter row: only an estimate exists, no actuals yet.
    rows.append(dict(
        quarter=(
            f"{chart.get('currentQuarterEstimateDate', '')}"
            f"{chart.get('currentQuarterEstimateYear', '')}"
        ),
        estimate=chart.get("currentQuarterEstimate", {}).get("raw", np.nan),
        actual=np.nan,
        revenue=np.nan,
        earnings=np.nan,
    ))
    return rows
def get_trends_data_from_quote(
    quote: tp.Dict[str, tp.Any]
) -> tp.List[tp.Dict[str, tp.Any]]:
    """Convert the 'recommendationTrend' section of a Yahoo quote into one
    dict per period, dated relative to today ('0m' -> today, '-1m' -> one
    month ago, ...).  Returns [] when there is no recommendation data.
    """
    if not quote:
        return []
    trends = (quote.get("recommendationTrend") or {}).get("trend", [])
    return [
        {
            "date": date.today() + pendulum.duration(months=int(t["period"][:-1])),
            "strong_buy": t.get("strongBuy", 0),
            "buy": t.get("buy", 0),
            "hold": t.get("hold", 0),
            "sell": t.get("sell", 0),
            # BUG FIX: the key was misspelled "stronSell", so strong-sell
            # counts were silently always 0; Yahoo's field is "strongSell".
            "strong_sell": t.get("strongSell", 0),
        }
        for t in trends
    ]
| 2.84375 | 3 |
riss2018/synonym_stats_style.py | buoyancy99/glove | 2 | 12773893 | <filename>riss2018/synonym_stats_style.py<gh_stars>1-10
"""Author: <NAME>.
Gets the closest neighbors to the given words in embedding space.
"""
import glove
import glove.configuration
import glove.neighbors
import numpy as np
import json
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser("synonyms")
parser.add_argument("--threshold", type=float, default=5.632423353947836)
args = parser.parse_args()
threshold = args.threshold
config = glove.configuration.Configuration(
embedding=300, filedir="./embeddings/", length=70000,
start_word="<S>", end_word="</S>", unk_word="<UNK>")
vocab, embeddings = glove.load(config)
with open("style.captions.json", "r") as f:
captions = json.load(f)
dataset_ids = []
original_ids = []
style_ids = []
for e in captions:
dataset_ids.extend([vocab.word_to_id(w) for w in e["ground_truth"].strip().lower().split()])
original_ids.extend([vocab.word_to_id(w) for w in e["original"].strip().lower().split()])
style_ids.extend([vocab.word_to_id(w) for w in e["styled"].strip().lower().split()])
dataset_ids = set(dataset_ids)
original_ids = set(original_ids)
style_ids = set(style_ids)
def get_synonyms(ids, vocab, embeddings, threshold):
counts = []
for x in ids:
print("Beginning %d of %d" % (x, len(ids)))
x_embeddings = embeddings[x, :]
x_count = 0
for y in range(70000):
y_embeddings = embeddings[y, :]
distance = np.linalg.norm(x_embeddings - y_embeddings)
if distance < threshold:
x_count += 1
counts.append(x_count)
return counts
dataset_synonyms = get_synonyms(dataset_ids, vocab, embeddings, threshold)
original_synonyms = get_synonyms(original_ids, vocab, embeddings, threshold)
styled_synonyms = get_synonyms(style_ids, vocab, embeddings, threshold)
mean_dataset_synonyms = np.mean(dataset_synonyms)
mean_original_synonyms = np.mean(original_synonyms)
mean_styled_synonyms = np.mean(styled_synonyms)
std_dataset_synonyms = np.std(dataset_synonyms)
std_original_synonyms = np.std(original_synonyms)
std_styled_synonyms = np.std(styled_synonyms)
synonyms_dump = {
"mean_dataset_synonyms": mean_dataset_synonyms,
"mean_original_synonyms": mean_original_synonyms,
"mean_styled_synonyms": mean_styled_synonyms,
"std_dataset_synonyms": std_dataset_synonyms,
"std_original_synonyms": std_original_synonyms,
"std_styled_synonyms": std_styled_synonyms}
with open("style.synonym_stats.json", "w") as f:
json.dump(synonyms_dump, f)
| 2.46875 | 2 |
1-getting-started/lessons/4-connect-internet/code-telemetry/pi/nightlight/app.py | kekiel/IoT-For-Beginners | 0 | 12773894 | import time
from grove.grove_light_sensor_v1_2 import GroveLightSensor
from grove.grove_led import GroveLed
import paho.mqtt.client as mqtt
import json
# Hardware setup: Grove light sensor on analog port 0, LED on digital port 5.
light_sensor = GroveLightSensor(0)
led = GroveLed(5)  # NOTE(review): not used in this lesson's loop yet
# NOTE(review): '<ID>' is a placeholder — replace with a unique device id
# before running (it also shadows the `id` builtin).
id = '<ID>'
client_telemetry_topic = 'kekiot/' + id + '/telemetry'
client_name = id + 'nightlight_client'
# Public test broker; anyone can subscribe to this topic.
mqtt_client = mqtt.Client(client_name)
mqtt_client.connect('test.mosquitto.org')
mqtt_client.loop_start()
print("MQTT connected!")
while True:
    # Publish the current light level as JSON every 5 seconds.
    light = light_sensor.light
    telemetry = json.dumps({'light' : light, 'name' : id})
    print("Sending telemetry ", telemetry)
    mqtt_client.publish(client_telemetry_topic, telemetry)
    time.sleep(5)
| 2.5625 | 3 |
tests/factories/site.py | Stormheg/wagtail-bakery | 98 | 12773895 | <gh_stars>10-100
import factory
from wagtail.core.models import Site
class SiteFactory(factory.DjangoModelFactory):
    # Builds (or fetches) the default Wagtail Site record used in tests.
    hostname = 'localhost'
    port = 80
    is_default_site = True
    class Meta:
        model = Site
        # Reuse an existing row with the same hostname/port instead of
        # violating the unique constraint on repeated factory calls.
        django_get_or_create = ('hostname', 'port')
| 1.726563 | 2 |
eval.py | irebai/wav2vec2 | 3 | 12773896 | <filename>eval.py
#!/usr/bin/env python3
from module.data_prep import data_prep
from module.processor import Wav2Vec2Processor
from module.model import Wav2Vec2ForCTC
import torch
import torch.nn.functional as F
from module.trainer import DataCollatorCTCWithPadding, BatchRandomSampler
from module.decoder import KenLMDecoder
from torch.utils.data import DataLoader
from tqdm import tqdm
import datasets
import argparse
import logging
import sys
import os
# Word- and character-error-rate metrics used by main().
wer_metric = datasets.load_metric("wer")
cer_metric = datasets.load_metric("cer")
# Module logger writing timestamped records to stdout.
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO)
def get_data(
    processor,
    dataset,
    name,
    split,
    data_path,
    batch_size=1):
    """Prepare one dataset split and wrap it in a non-shuffling DataLoader
    that pads each batch via the CTC collator."""
    prepared = data_prep(processor, dataset, name, split, batch_size, data_path)
    collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    return DataLoader(
        prepared,
        batch_size=batch_size,
        shuffle=False,
        sampler=None,
        collate_fn=collator
    )
def write_result(
    data,
    file_path):
    """Append each entry of `data` to `file_path`, one stripped line each."""
    with open(file_path, "a+") as out:
        out.writelines(entry.strip() + "\n" for entry in data)
def main(
    model_dir,
    batch_size,
    tokenizer,
    dataset,
    name,
    data_path,
    data_split,
    lm,
    device,
    log_probs=True):
    """Decode a speech dataset with a Wav2Vec2 CTC model and report WER/CER.

    Transcripts are appended to <model_dir>/trans.txt and references to
    <model_dir>/text.txt; when both files already exist, decoding is
    skipped and metrics are computed from them directly.  `lm` is a dict
    {'do': bool, 'lm_path': ..., 'lex_path': ...} controlling KenLM
    rescoring; metrics land in <model_dir>/results.txt.
    """
    if (not os.path.exists(model_dir+"/text.txt")) or (not os.path.exists(model_dir+"/trans.txt")):
        logger.info("################### LOAD PROCESSOR ##################")
        processor = Wav2Vec2Processor.from_pretrained(model_dir, tokenizer_type=tokenizer)
        logger.info("################### LOAD DATASETS ##################")
        eval_dataset = get_data(processor, dataset, name, data_split, data_path, batch_size=batch_size)
        logger.info("################### LOAD MODEL ##################")
        model = Wav2Vec2ForCTC.from_pretrained(model_dir)
        model = model.to(device)
        logger.info("################### DECODE SPEECH DATASETS ##################")
        trans = []
        trans_lm = []  # NOTE(review): collected nowhere — appears to be dead
        text = []
        if lm['do']:
            decoder = KenLMDecoder(
                processor,
                lm['lm_path'],
                lm['lex_path'])
        for data in tqdm(eval_dataset):
            data = data.to(device)
            with torch.no_grad():
                logits = model(data.input_values, attention_mask=data.attention_mask).logits
            # Normalize logits: log-probs for the LM decoder path, plain
            # probs otherwise, per the log_probs flag.
            if log_probs:
                logits = F.log_softmax(logits, dim=-1)
            else:
                logits = F.softmax(logits, dim=-1)
            if lm['do']:
                # KenLM search
                lm_tokens, lm_scores = decoder.decode(logits.cpu().detach())
                trans_batch = processor.batch_decode(lm_tokens[0][:])
                trans.append(trans_batch)
            else:
                # Greedy search
                predicted_ids = torch.argmax(logits, dim=-1)
                trans_batch = processor.batch_decode(predicted_ids)
                trans.append(trans_batch)
            # Groundtruth: -100 is the ignore-index in the labels; map it
            # back to the pad token so batch_decode can handle it.
            data.labels[data.labels == -100] = processor.tokenizer.pad_token_id
            text_batch = processor.batch_decode(data.labels, group_tokens=False)
            text.append(text_batch)
            write_result(text_batch, model_dir+"/text.txt")
            write_result(trans_batch, model_dir+"/trans.txt")
        # Flatten the per-batch lists of strings.
        trans = [item for sublist in trans for item in sublist]
        text = [item for sublist in text for item in sublist]
    else:
        logger.info('Decode is already performed!')
        with open(model_dir+"/trans.txt") as f:
            trans = f.readlines()
        with open(model_dir+"/text.txt") as f:
            text = f.readlines()
    logger.info('################### COMPUTE METRICS ###################')
    wer = wer_metric.compute(predictions=trans, references=text, chunk_size=1000)
    cer = cer_metric.compute(predictions=trans, references=text, chunk_size=1000)
    if lm['do']:
        write_result(["WER_LM="+str(wer*100), "CER_LM="+str(cer*100)], model_dir+"/results.txt")
    else:
        write_result(["WER="+str(wer*100), "CER="+str(cer*100)], model_dir+"/results.txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
help='wav2vec2 model path',
type=str,
required=True)
parser.add_argument(
'--tokenizer',
help='tokenizer type used for training the model',
type=str,
required=True)
parser.add_argument(
'--dataset',
help='dataset (commonvoice, voxforge, etc.)',
type=str,
required=True)
parser.add_argument(
'--name',
help='dataset name',
type=str,
default=None)
parser.add_argument(
'--data',
type=str,
help='evaluation corpus name',
default='test')
parser.add_argument(
'--data_path',
type=str,
help='evaluation corpus path',
default="/workspace/output_models/data")
parser.add_argument(
'--batch',
type=int,
help='batch size',
default=32)
parser.add_argument(
'--device',
help='set the device to be used for computation',
type=str,
default='cpu')
parser.add_argument(
'--lm',
help='Do language model rescoring',
type=dict,
default={'do':False, 'lm_path':None, 'lex_path': None})
eval_args = parser.parse_args()
assert eval_args.tokenizer in ["sp", "char"], "tokenizer type must be either 'sp' or 'char'."
assert eval_args.device in ["cpu", "cuda"], "device must be either 'cuda' or 'cpu'."
logger.info("Evaluation parameters %s", eval_args)
main(
model_dir=eval_args.model,
batch_size=eval_args.batch,
tokenizer=eval_args.tokenizer,
dataset=eval_args.dataset,
name=eval_args.name,
data_path=eval_args.data_path,
data_split=eval_args.data,
lm=eval_args.lm,
device=eval_args.device,
)
| 2.234375 | 2 |
code/report_accuracy.py | lionelmessi6410/Face-Detection-with-a-Sliding-Window | 10 | 12773897 | <filename>code/report_accuracy.py
import numpy as np
# DO NOT MODIFY EVALUATION CODE
def report_accuracy(confidences, label_vector):
    """Print and return detection rates for signed classifier scores.

    confidences: array of scores; >= 0 is treated as a positive prediction.
    label_vector: ground-truth labels; >= 0 positive, < 0 negative.
    Returns (tpr, fpr, tnr, fnr).  Note every rate is normalized by the
    TOTAL number of examples, not by the class size.
    NOTE(review): accuracy uses a strict product > 0 while the rates use
    >= 0 for predictions, so a confidence of exactly 0 is counted
    positive in the rates but wrong in accuracy — left as-is per the
    "DO NOT MODIFY EVALUATION CODE" directive above.
    """
    confidences = confidences.ravel()
    label_vector = label_vector.ravel()
    assert confidences.size==label_vector.size, "Size of confidences and label_vector should be the same"
    # compute accuracy
    accuracy = ((label_vector*confidences)>0).sum() / float(confidences.size)
    print("   accuracy: {:.3f}".format(accuracy))
    # compute true positive rate (TP)
    true_positives = np.logical_and((confidences>=0), (label_vector>=0))
    tpr = true_positives.sum() / float(true_positives.size)
    print("   true  positive rate: {:.3f}".format(tpr))
    # compute false positive rate (FP)
    false_positives = np.logical_and((confidences>=0), (label_vector<0))
    fpr = false_positives.sum() / float(false_positives.size)
    print("   false positive rate: {:.3f}".format(fpr))
    # compute true negative rate (TN)
    true_negatives = np.logical_and((confidences<0), (label_vector<0))
    tnr = true_negatives.sum() / float(true_negatives.size)
    print("   true  negative rate: {:.3f}".format(tnr))
    # compute false negative rate (FN)
    false_negatives = np.logical_and((confidences<0), (label_vector>=0))
    fnr = false_negatives.sum() / float(false_negatives.size)
    print("   false negative rate: {:.3f}".format(fnr))
    return tpr, fpr, tnr, fnr
| 3.234375 | 3 |
var/spack/repos/builtin/packages/lunchbox/package.py | MatMaul/spack | 0 | 12773898 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lunchbox(CMakePackage):
    """A core C++ library for multi-threaded programming."""
    homepage = "https://github.com/Eyescale/Lunchbox"
    git      = "https://github.com/Eyescale/Lunchbox.git"
    # Build with Ninja instead of the default Makefile generator.
    generator = 'Ninja'
    version('develop', submodules=True)
    version('1.17', tag='1.17.0', preferred=True, submodules=True)
    depends_on('cmake@3.1:', type='build')
    depends_on('ninja', type='build')
    depends_on('boost')
    depends_on('servus')
| 1.25 | 1 |
models/HAN/inference.py | wiekern/GenderPerformance | 0 | 12773899 | <filename>models/HAN/inference.py<gh_stars>0
import torch
import torch.nn as nn
from torch import optim
import numpy as np
from torch.nn.utils import rnn
import torch.nn.functional as F
from HierarchicalAttentionNet_pre_embed import createBatches,sortbylength,wordEncoder,sentenceEncoder,text2tensor,createEmbeddingMatrix
import sys
import itertools
import gensim
import pickle
from sklearn.metrics import accuracy_score,confusion_matrix
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
def creatingDatasetIDs(fname, w2i, max_length=15):
    """Read '<fname>_filtered' and bucket encoded reviews by sentence count.

    Each line is '<id>,<review>,<label>' with the label as the final
    character.  Reviews longer than `max_length` sentences, or empty after
    encoding, are dropped.  Returns a dict mapping review length -> list of
    (encoded_review, label, reviewer_id) tuples.
    """
    buckets = {}
    with open(fname + '_filtered') as handle:
        for raw in handle:
            raw = raw.strip()
            comma = raw.find(',')
            reviewer_id = raw[:comma]
            label = int(raw[-1])
            # Review text sits between the first comma and the ',<label>' tail.
            sentences = raw[comma + 1:-2].split('.')
            encoded = text2tensor(sentences, w2i)
            n_sents = len(encoded)
            if n_sents > max_length or n_sents == 0:
                continue
            buckets.setdefault(n_sents, []).append((encoded, label, reviewer_id))
    return buckets
def mergeSentences(batch):
    """Flatten a batch of (review, label, reviewer_id) triples into one
    combined sentence list plus parallel label and id lists."""
    sentences = []
    labels = []
    reviewer_ids = []
    for review, lbl, rid in batch:
        sentences.extend(review)
        labels.append(lbl)
        reviewer_ids.append(rid)
    return sentences, labels, reviewer_ids
def inference(wordEnc,sentEnc,validation_dataset,batch_size):
    """Run the hierarchical attention net over a bucketed dataset, dump
    (true, predicted, reviewer_id) rows to output_han_amazon.csv, print the
    confusion matrix, and return the accuracy score."""
    wordEnc.to(device)
    sentEnc.to(device)
    wordEnc.eval()
    sentEnc.eval()
    true_labels = []
    predicted_labels = []
    review_id = []
    data = createBatches(validation_dataset,batch_size)
    ft = open('output_han_amazon.csv','w')
    ft.write('True_label,Predicted_label,ReviewerID')
    ft.write('\n')
    with torch.no_grad():
        for batch, lengths in data:
            # Only batches of >2 reviews where every review has the same
            # sentence count are scored; everything else is skipped.
            if len(lengths) > 2 and len(set(lengths))==1:
                sent, label, id_ = mergeSentences(batch)
                true_labels += label
                review_id += id_
                label = torch.LongTensor(label)
                sentence_length = [len(s) for s in sent]
                # Zero-pad all sentences to the longest one in the batch.
                sent = np.array(list(itertools.zip_longest(*sent, fillvalue=0))).T
                X = torch.from_numpy(sent)
                X_lengths = torch.LongTensor(sentence_length)
                # Sort by length for the word encoder; mapped_index undoes
                # the sort on its output below.
                X, X_lengths, mapped_index = sortbylength(X, X_lengths)
                batch_s = len(sentence_length)
                X, X_lengths, label = X.to(device), X_lengths.to(device), label.to(device)
                sent_out = wordEnc(X, X_lengths, batch_s)
                sent_out = sent_out.squeeze()[mapped_index, :]
                # Regroup the flat sentence encodings back into per-review
                # chunks of `l` sentences each.
                review_batch = torch.Tensor().to(device)
                r = 0
                c = sent_out.shape[1]
                for l in lengths:
                    review_batch = torch.cat((review_batch, sent_out[r:r + l, :]))
                    r += l
                review_batch = review_batch.view(len(lengths), -1, c)
                review_lengths = torch.LongTensor(lengths).to(device)
                output = sentEnc(review_batch, review_lengths , len(lengths))
                output = output.squeeze()
                output = F.softmax(output,dim=1)
                value,lbl = torch.max(output,1)
                predicted_labels += lbl.cpu().numpy().tolist()
    for t_l,p_l,id_ in zip(true_labels,predicted_labels,review_id):
        ft.write(str(t_l)+','+str(p_l)+','+str(id_))
        ft.write('\n')
    ft.close()
    print(confusion_matrix(true_labels,predicted_labels))
    return accuracy_score(true_labels,predicted_labels)
if __name__=='__main__':
    with open('word2index.pickle','rb') as fs:
        w2i = pickle.load(fs)
    print('Loaded vocabulary - ',len(w2i))
    # NOTE(review): the next block cannot run as written: `os` is never
    # imported in this module, `os.path.exits` is a typo for `exists`, and
    # both `pp` and `test_file` are undefined here (presumably lost in a
    # refactor — confirm against the training script before fixing).
    if os.path.exits(test_file+'_filtered'):
        print('filtered file already exists... skipping creation of filtered file')
    else:
        print('filtered file not found... creating filtered file')
        pp.filterByFrequencyIDs(w2i,test_file=test_file)
    test_dataset = creatingDatasetIDs('../../../amazonUser/User_level_test_with_id.csv',w2i)
    print('Dataset creation complete')
    model = gensim.models.Word2Vec.load('../Embeddings/amazonWord2Vec')
    w_input_size,w_encoding_size = model.wv.vectors.shape
    matrix = createEmbeddingMatrix(model,w2i,w_encoding_size)
    print('embedding matrix obtained.')
    #w_input_size = len(w2i)
    #w_encoding_size = 75
    # Encoder hyperparameters: the sentence encoder consumes the word
    # encoder's output, so its sizes are derived from the word encoder's.
    w_hidden_size = 50
    w_output_size = 100
    s_input_size = w_output_size
    s_hidden_size = w_hidden_size
    s_repr_size = 2*w_hidden_size
    s_output_size = 2
    padding_idx = 0
    wordEnc = wordEncoder(matrix,w_input_size+1,w_encoding_size,w_hidden_size,w_output_size,padding_idx)
    sentEnc = sentenceEncoder(s_input_size,s_hidden_size,s_repr_size,s_output_size)
    wordEnc.load_state_dict(torch.load('wordEncoder_model-pre_embed.pt'))
    sentEnc.load_state_dict(torch.load('sentEncoder_model-pre_embed.pt'))
    print(inference(wordEnc,sentEnc,test_dataset,128))
| 2.15625 | 2 |
codechef.py | TheCez/cp-api | 0 | 12773900 | import requests
from bs4 import BeautifulSoup
import re
'''def fate_proxy():
resp=requests.get('https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list')
#print(resp.text)
a=((resp.text).split('\n'))
#print(a)
p_list=[]
for i in a:
try:
p_list.append(json.loads(i))
except Exception as e:
continue
#print(p_list)
np_list=[]
for i in p_list:
if i['country']=='IN':
np_list.append(i)
proxy=[]
fast_proxy=sorted(np_list,key=lambda k: k['response_time'])
for p in fast_proxy:
proxy.append(str(p['host'])+':'+str(p['port']))
return proxy'''
def _scrape_contest_table(table_index):
    """Fetch the CodeChef contests page and parse one of its three tables
    (0 = present, 1 = future, 2 = past) into a list of dicts with keys
    code, name, link, sdate, edate, stime, etime.

    The original module had three byte-identical copies of this logic
    differing only in the table index; they are consolidated here.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'
    }
    res = requests.get('https://www.codechef.com/contests', headers=headers)
    soup = BeautifulSoup(res.text, "lxml")
    tables = soup.find_all('table', {"class": "dataTable"})
    body = tables[table_index].find_all('tbody')[0]

    link = ['https://www.codechef.com' + a.get('href')
            for a in body.findAll('a', attrs={'href': re.compile("^/")})]
    name = [a.text for a in body.findAll('a')]

    code, sdate, stime, edate, etime = [], [], [], [], []
    # Each row has 4 cells: code, name, start datetime, end datetime.
    # Datetime cells end in " HH:MM:SS" (last 8 chars) preceded by the date.
    for j, cell in enumerate(body.findAll('td')):
        col = j % 4
        if col == 0:
            code.append(cell.text)
        elif col == 2:
            sdate.append(cell.text[:-10])
            stime.append(cell.text[-8:])
        elif col == 3:
            edate.append(cell.text[:-10])
            etime.append(cell.text[-8:])

    return [
        {'code': code[i], 'name': name[i], 'link': link[i],
         'sdate': sdate[i], 'edate': edate[i],
         'stime': stime[i], 'etime': etime[i]}
        for i in range(len(name))
    ]

def present():
    """Contests that are currently running."""
    return _scrape_contest_table(0)

def future():
    """Upcoming contests."""
    return _scrape_contest_table(1)

def past():
    """Recently finished contests."""
    return _scrape_contest_table(2)
python_smaclient/smapi_response.py | jloehel/python_smaclient | 0 | 12773901 | #!/usr/bin/env python
import uuid
class SMAPI_Response(object):
    '''
    Represents a single SMAPI response: an opaque set of output
    parameters plus an optional date set when the response is received.
    '''

    def __init__(self, output_parameters):
        # Unique identifier for this response instance.
        self._uuid = uuid.uuid1()
        # Set later via set_date(); None until then.
        self._date = None
        self._output_parameters = output_parameters

    def get_output_parameters(self):
        """Return the output parameters this response was created with."""
        return self._output_parameters

    def get_date(self):
        """Return the response date, or None if it has not been set."""
        return self._date

    def set_date(self, date):
        """Record the date/time associated with this response."""
        self._date = date

    def __repr__(self):
        # Bug fix: the original built this string without returning it (so
        # repr() yielded None... actually raised) and referenced a
        # non-existent ``self.name`` attribute.
        return "<{} (uuid={}, output parameters={})>".format(
            self.__class__.__name__,
            self._uuid,
            self._output_parameters)
| 2.796875 | 3 |
setup.py | kylef/irctk | 6 | 12773902 | <gh_stars>1-10
from setuptools import setup
# Read the package version from the VERSION file so it is maintained
# in a single place.
with open('VERSION', 'r') as fp:
    version = fp.read().strip()
# Standard setuptools metadata for the irc-toolkit (irctk) library.
setup(
    name='irc-toolkit',
    version=version,
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['irctk'],
    entry_points={},
    install_requires=[],
    url='https://github.com/kylef/irctk/',
    license='BSD',
    description='A Python IRC client library',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Communications :: Chat',
        'Topic :: Software Development :: Libraries',
    ]
)
| 1.28125 | 1 |
tests/python/test_jit_transform.py | ishine/aps | 117 | 12773903 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2021 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import pytest
import torch as th
from aps.io import read_audio
from aps.transform.utils import forward_stft, export_jit
from aps.transform import AsrTransform
# Reference audio clip used by the test below (loaded at 16 kHz).
egs1_wav = read_audio("tests/data/transform/egs1.wav", sr=16000)
@pytest.mark.parametrize("wav", [egs1_wav])
@pytest.mark.parametrize("feats", ["fbank-log-cmvn", "perturb-mfcc-aug-delta"])
def test_asr_transform_jit(wav, feats):
    """Check that the TorchScript-exported transform matches the eager one.

    Computes the STFT separately because the scripted transform consumes the
    packed STFT rather than the raw waveform, then compares outputs.
    """
    wav = th.from_numpy(wav[None, ...])
    # STFT parameters mirror those given to AsrTransform below.
    packed = forward_stft(wav,
                          400,
                          160,
                          mode="librosa",
                          window="hamm",
                          pre_emphasis=0.96,
                          center=False,
                          return_polar=False)
    trans = AsrTransform(feats=feats,
                         stft_mode="librosa",
                         window="hamm",
                         frame_len=400,
                         frame_hop=160,
                         use_power=True,
                         pre_emphasis=0.96,
                         center=False,
                         aug_prob=0.5,
                         aug_mask_zero=False)
    # eval() mode — presumably disables training-time randomness (the
    # aug_prob augmentation) so both paths are deterministic; confirm in
    # the AsrTransform implementation.
    trans.eval()
    scripted_trans = th.jit.script(export_jit(trans.transform))
    ref_out = trans(wav, None)[0]
    jit_out = scripted_trans(packed)
    th.testing.assert_allclose(ref_out, jit_out)
if __name__ == "__main__":
    test_asr_transform_jit(egs1_wav, "fbank-log-cmvn")
| 1.898438 | 2 |
var/spack/repos/builtin/packages/mod2c/package.py | lguyot/spack | 2 | 12773904 | ##############################################################################
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mod2c(CMakePackage):
    """MOD2C is NMODL to C converter adapted for CoreNEURON simulator.
    More information about NMODL can be found in the NEURON simulator
    documentation at Yale University."""
    homepage = "https://github.com/BlueBrain/mod2c"
    url = "https://github.com/BlueBrain/mod2c.git"
    # Track the development branch of the upstream git repository.
    version('develop', git=url, preferred=True)
    depends_on('cmake@2.8.12:', type='build')
    def cmake_args(self):
        """Extra CMake flags: keep install RPATHs on BG/Q with the XL compiler."""
        spec = self.spec
        options = []
        if 'bgq' in spec.architecture and '%xl' in spec:
            options.append('-DCMAKE_BUILD_WITH_INSTALL_RPATH=1')
        return options
    def setup_environment(self, spack_env, run_env):
        """Point MODLUNIT at the installed units database at run time."""
        run_env.set('MODLUNIT', join_path(self.prefix, 'share/nrnunits.lib'))
| 1.570313 | 2 |
src/responsibleai/rai_analyse/create_error_analysis.py | Azure/automl-devplat2-preview | 7 | 12773905 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import json
import logging
from responsibleai import RAIInsights
from constants import RAIToolType
from rai_component_utilities import (
load_rai_insights_from_input_port,
save_to_output_port,
copy_dashboard_info_file,
)
_logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
def parse_args():
    """Parse the command-line arguments of the error-analysis component.

    An explicitly empty ``--filter_features`` list is normalised to None
    (works around an argument-passing quirk in the calling pipeline).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--rai_insights_dashboard", type=str, required=True)
    arg_parser.add_argument("--max_depth", type=int)
    arg_parser.add_argument("--num_leaves", type=int)
    arg_parser.add_argument("--filter_features", type=json.loads, help="List")
    arg_parser.add_argument("--error_analysis_path", type=str)

    parsed = arg_parser.parse_args()

    # Patch issue with argument passing: treat an empty JSON list the same
    # as the flag not being supplied at all.
    if isinstance(parsed.filter_features, list) and not parsed.filter_features:
        parsed.filter_features = None

    return parsed
def main(args):
    """Run the error-analysis step end to end.

    Loads the RAIInsights object from the input port, adds and computes the
    error analysis with the supplied tree parameters, saves the tool output,
    and copies the dashboard info file alongside it.
    """
    # Load the RAI Insights object
    rai_i: RAIInsights = load_rai_insights_from_input_port(args.rai_insights_dashboard)

    # Add the error analysis (filter_features may be None — see parse_args)
    rai_i.error_analysis.add(
        max_depth=args.max_depth,
        num_leaves=args.num_leaves,
        filter_features=args.filter_features,
    )
    _logger.info("Added error analysis")

    # Compute
    rai_i.compute()
    _logger.info("Computation complete")

    # Save the computed analysis to the output port
    save_to_output_port(rai_i, args.error_analysis_path, RAIToolType.ERROR_ANALYSIS)
    _logger.info("Saved to output port")

    # Copy the dashboard info file so downstream steps can locate the dashboard
    copy_dashboard_info_file(args.rai_insights_dashboard, args.error_analysis_path)

    _logger.info("Completing")
# run script
if __name__ == "__main__":
# add space in logs
print("*" * 60)
print("\n\n")
# parse args
args = parse_args()
# run main function
main(args)
# add space in logs
print("*" * 60)
print("\n\n")
| 2.171875 | 2 |
distributed-queue/asyncResult.py | StolerHua/tools | 0 | 12773906 | <reponame>StolerHua/tools<filename>distributed-queue/asyncResult.py
import gevent.monkey as monkey
from celery import Celery
# Patch the stdlib for cooperative (gevent) concurrency before Celery starts.
monkey.patch_all()
# Celery application backed by a local Redis broker/result store (db 0).
app = Celery('asyncResult', backend='redis://localhost:6379/0', broker='redis://localhost:6379/0')
@app.task
def add(x, y):
    """Celery task executed on a worker: return the sum of ``x`` and ``y``."""
    return x + y
# Start a worker with: celery -A tasks worker --loglevel=info
def on_result_ready(result):
    """Promise callback fired once the async task result is available."""
    print('Received result for id %r: %r' % (result.id, result.result,))
# Submit the task asynchronously and attach the completion callback.
add.delay(2, 2).then(on_result_ready)
brickout.py | prake71/Breakout | 0 | 12773907 | # Brickout Game V 0.1
# 2018 by <NAME>
# RGB colour constants ([R, G, B]); names from
# https://www.w3schools.com/colors/colors_converter.asp
GREY = [105, 105, 105]
BLACK = [0, 0, 0]
PINK = [168, 76, 96]
BROWN = [133, 107, 17]
OTHERBROWN = [157, 90, 48]
GREEN = [28, 120, 29]
LIGHTGREEN = [56, 141, 47]
DARKGREEN = [46, 137, 95]
BLUE = [91, 92, 214]
BALL = [145, 100, 71]
# I - Import and Initialize
import pygame
# Pre-configure the mixer (22.05 kHz, 16-bit signed, mono, 2048-byte buffer)
# before pygame.init() so sound latency stays low.
pygame.mixer.pre_init(22050, -16, 1, 2048)
pygame.mixer.init()
pygame.init()
# D - Display: 640x500 window, cleared to black
screen = pygame.display.set_mode([640, 500])
screen.fill(BLACK)
# E - Entities
# classes
class Block(pygame.sprite.Sprite):
    """A plain rectangular sprite filled with a single solid colour."""

    def __init__(self, color, width, height):
        super().__init__()
        surface = pygame.Surface([width, height])
        surface.fill(color)
        self.image = surface
        self.rect = surface.get_rect()
# A - Action
# A - Assign Values to key variables
# Sprite groups: destroyed bricks, live bricks, everything drawable, walls.
blocks_container = pygame.sprite.Group()
blocks = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
walls = pygame.sprite.Group()
# Side and top walls (32 px thick); the play field starts 64 px from the top.
wall_left = Block(GREY, 32, 430)
wall_right = Block(GREY, 32, 430)
wall_horiz = Block(GREY, 640, 32)
wall_left.rect.x = 0
wall_left.rect.y = 64
wall_right.rect.x = 640 - 32
wall_right.rect.y = 64
wall_horiz.rect.x = 0
wall_horiz.rect.y = 64
all_sprites.add(wall_left, wall_right, wall_horiz)
walls.add(wall_left, wall_right, wall_horiz)
# Player paddle, centred horizontally near the bottom of the window.
paddle = Block(OTHERBROWN, 64, 8)
paddle.rect.x = (640 - 64) / 2
paddle.rect.y = 500 - 22
all_sprites.add(paddle)
# Create a ball sprite, parked just above the paddle.
ball = Block(BALL, 10, 10)
ball.rect.x = (640 - 10) / 2
ball.rect.y = paddle.rect.top - 20
all_sprites.add(ball)
# Score display (Atari-2600 style font from a local .ttf file)
font = pygame.font.Font("2600.ttf", 38)
text = font.render("000", False, GREY)
# Sounds: paddle/wall hits plus one sound per brick row.
pygame.mixer.init()
snd_paddle_hit = pygame.mixer.Sound("hit_paddle.ogg")
snd_side_wall_hit = pygame.mixer.Sound("hit_side_wall.ogg")
snd_block_row1 = pygame.mixer.Sound("hit_block_row_1.ogg")
snd_block_row2 = pygame.mixer.Sound("hit_block_row_2.ogg")
snd_block_row3 = pygame.mixer.Sound("hit_block_row_3.ogg")
snd_block_row4 = pygame.mixer.Sound("hit_block_row_4.ogg")
snd_block_row5 = pygame.mixer.Sound("hit_block_row_5.ogg")
snd_block_row6 = pygame.mixer.Sound("hit_block_row_6.ogg")
"""
BxH = 48x20
165 Score 80 Paddles 96 Level
D 16 D 16 D
"""
# Set the initial ball speed / direction (pixels per frame)
ball_dx = 1
ball_dy = 1
ball_speed = 1
ball_speed_max = 1.1
# A couple of 'flags' (Boolean values)
ball_in_play = False
just_bounced = False
# 1 block = 32 pixel x 16 pixel; one brick row per colour, top to bottom.
color_list = [GREEN, LIGHTGREEN, PINK, DARKGREEN, BROWN, BLUE]
def setup_blocks():
    """Fill the play field with one 18-brick row per colour in color_list."""
    for row_index, row_color in enumerate(color_list):
        row_y = 145 + row_index * 16
        for column in range(1, 19):
            brick = Block(row_color, 32, 16)
            # +1 px offset keeps the rows clear of the left wall edge.
            brick.rect.x = column * 32 + 1
            brick.rect.y = row_y
            blocks.add(brick)
            all_sprites.add(brick)
# Create a horizontal row of blocks for each color
setup_blocks()
"""
for block_row, block_color in enumerate(color_list):
for block_column in range(1,19):
# Create a block, leaving 1 pixels around the four edges
block = Block(block_color, 32, 16)
block.rect.x = block_column * 32 + 1
block.rect.y = 145 + block_row * 16
blocks.add(block)
all_sprites.add(block)
"""
pygame.mouse.set_visible(False)
pygame.event.set_grab(True)
clock = pygame.time.Clock()
game_over = False
score = 0
lives = 5
live_text = font.render("{}".format(lives), False, GREY)
mouse_x_old = paddle.rect.x
assert isinstance(paddle.rect.x, object)
mouse_x = paddle.rect.x
# L - Loop (one iteration per frame)
while not game_over:
    # T - Timer: cap the frame rate at 240 fps
    clock.tick(240)
    # E - Event Handling: quit on window close or Escape
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_over = True
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                game_over = True
    # b1, b2, b3 = pygame.mouse.get_pressed()
    # Left click launches the ball (if a life remains and it isn't in play).
    if pygame.mouse.get_pressed()[0]:
        if lives > 0 and ball_in_play == False:
            ball_in_play = True
            ball.rect.x = paddle.rect.centerx
            ball.rect.y = paddle.rect.top - 20
            all_sprites.add(ball)
    # Right click restarts the game once all lives are spent.
    if pygame.mouse.get_pressed()[2] and lives == 0:
        lives = 5
        score = 0
        setup_blocks()
        ball.rect.x = paddle.rect.centerx
        ball.rect.y = paddle.rect.top - 20
        ball_in_play = True
        all_sprites.add(ball)
    # print("Mouse Speed per frame:" , abs(mouse_x_old - mouse_x) )
    mouse_x_old = mouse_x
    mouse_x = pygame.mouse.get_pos()[0]
    # Step the paddle one pixel at a time towards the mouse x position,
    # stopping at the walls.  NOTE(review): this inner loop runs within a
    # single frame, so the paddle effectively snaps to the cursor.
    mouse_pos_equal = True
    while mouse_pos_equal:
        mouse_pos_equal = mouse_x != paddle.rect.left
        # print(mouse_pos_equal)
        if mouse_x < paddle.rect.left:
            if paddle.rect.left > 32:
                paddle.rect.x = paddle.rect.x - 1
            else:
                mouse_pos_equal = False
        elif mouse_x > paddle.rect.left:
            if paddle.rect.right < 640 - 32:
                paddle.rect.x = paddle.rect.x + 1
            else:
                mouse_pos_equal = False
    if ball_in_play:
        # Move the ball
        ball.rect.x += ball_dx
        ball.rect.y += ball_dy
        # Re-arm paddle bouncing once the ball is back above the paddle
        if ball.rect.y < paddle.rect.top:
            just_bounced = False
        # Bounce off the screen edges (walls are 32 px, top bar ends at y=96)
        if ball.rect.left <= 0 + 32:
            snd_side_wall_hit.play()
            ball.rect.x = 0 + 32
            ball_dx = -ball_dx
        if ball.rect.y <= 0 + 96:
            snd_side_wall_hit.play()
            ball.rect.y = 0 + 96
            ball_dy = -ball_dy
        if ball.rect.x > 640 - 32 - 10:
            snd_side_wall_hit.play()
            ball.rect.x = 640 - 32 - 10
            ball_dx = -ball_dx
        # Check if the ball bounced off the paddle
        # Collision detection between two sprites, using rects.
        if pygame.sprite.collide_rect(ball, paddle) and not just_bounced:
            snd_paddle_hit.play()
            ball_dy = -ball_dy
            just_bounced = True
            # While ball and paddle are in contact, don't bounce again
        # Ball got past the paddle - lose a life
        elif ball.rect.y > paddle.rect.top + 10 / 2:
            ball_in_play = False
            all_sprites.remove(ball)
            lives = lives - 1
        # Check if the ball bounced off a block (True removes hit blocks)
        blocks_hit_list = pygame.sprite.spritecollide(ball, blocks, True)
        if blocks_hit_list:
            # Speed the ball up slightly with each brick hit, up to the cap
            if ball_speed < ball_speed_max:
                ball_speed += 0.01
                print(ball_speed)
            for block in blocks_hit_list:
                score = score + 1
                print(block.rect.y)
                # Pick the hit sound from the brick's row (row = y position)
                if block.rect.y == 145 + 6 * 16:
                    snd_block_row1.play()
                elif block.rect.y == 145 + 5 * 16:
                    snd_block_row2.play()
                elif block.rect.y == 145 + 4 * 16:
                    snd_block_row3.play()
                elif block.rect.y == 145 + 3 * 16:
                    snd_block_row4.play()
                elif block.rect.y == 145 + 2 * 16:
                    snd_block_row5.play()
                elif block.rect.y == 145 + 1 * 16:
                    snd_block_row6.play()
                # ball_dy = -ball_dy * ball_speeds
            blocks_container.add(blocks_hit_list)
            ball_dy = -ball_dy  # * ball_speed
    # scorestr = "{:0>3}".format(score)
    # Render the zero-padded score and remaining lives, then redraw the frame
    text = font.render("{:0>3}".format(score), False, GREY)
    live_text = font.render("{}".format(lives), False, GREY)
    all_sprites.update()
    screen.fill(BLACK)
    screen.blit(text, (165, 32))
    screen.blit(live_text, (165 + text.get_width() + 80, 32))
    # R - Refresh Display
    all_sprites.draw(screen)
    pygame.display.update()
pygame.quit()
| 2.828125 | 3 |
ansible_shell_monitoring/bsd_top.py | nortics/python | 0 | 12773908 | <gh_stars>0
#!/usr/local/etc/ansible/venv/top/bin/python
import json,os,psycopg2,time,subprocess,yaml
from datetime import datetime
playbook_file = '/usr/local/etc/ansible/playbooks/tops.yml'
def letter_degree(value=''):
    """Convert a top(1)-style value string into a plain number.

    Suffixes: 'K'/'M'/'G' scale the leading integer by 1e3/1e6/1e9 (float
    result); '%' strips the sign and returns the bare int.  Anything that
    cannot be parsed yields 0.

    Bug fix: the original returned None (fell off the end) when the
    second-to-last character was not a digit, and raised IndexError on
    strings shorter than two characters; both cases now return 0.
    """
    if len(value) < 2 or not value[-2].isdigit():
        return 0
    if value[-1] == 'K':
        return int(value[:-1]) * 1E3
    if value[-1] == 'M':
        return int(value[:-1]) * 1E6
    if value[-1] == 'G':
        return int(value[:-1]) * 1E9
    if value[-1] == '%':
        return int(value[:-1])
    return 0
def get_param_id(param_name='', param_dict=''):
    """Return the id paired with param_name in (name, id) rows, or None."""
    matches = (entry[1] for entry in param_dict if entry[0] == param_name)
    return next(matches, None)
def get_params_dict():
    """Fetch all (param_name, param_id) rows from bsd_stat.parameters."""
    connection = psycopg2.connect("dbname=sys_db user=postgres host=192.168.1.203")
    cursor = connection.cursor()
    cursor.execute("SELECT param_name, param_id FROM bsd_stat.parameters;")
    rows = cursor.fetchall()
    connection.commit()
    cursor.close()
    connection.close()
    return rows
def send_parameters(vals = {}):
    """Insert into bsd_stat.parameters any key of ``vals`` not already present.

    Reads the current parameter names, then inserts each missing key.
    """
    conn = psycopg2.connect("dbname = sys_db user = postgres host=192.168.1.203")
    cur = conn.cursor()
    cur.execute("select param_name from bsd_stat.parameters;")
    sql_param_names = cur.fetchall()
    conn.commit()
    # Build a set-like dict of existing names for O(1) membership tests.
    spn = {}
    for pname in sql_param_names:
        spn[pname[0]] = ''
    for param_key in vals.keys():
        if param_key not in spn:
            try:
                cur.execute("INSERT INTO bsd_stat.parameters (param_name) VALUES (%s);",(param_key,))
                conn.commit()
            # NOTE(review): bare except silently swallows every failure
            # (including connection loss); consider catching psycopg2.Error
            # and rolling back instead.
            except:
                pass
    cur.close()
    conn.close()
def send_hostname(hostname=''):
    """Return the host_id row for ``hostname``, inserting the host if missing.

    Returns the raw fetchone() result (a 1-tuple), not a bare id.
    """
    conn = psycopg2.connect("dbname=sys_db user=postgres host=192.168.1.203")
    cur = conn.cursor()
    cur.execute("SELECT host_id FROM bsd_stat.ansible_hosts where host_name=%s",(hostname,))
    host_id = cur.fetchone()
    conn.commit()
    if host_id == None:
        try:
            cur.execute("INSERT INTO bsd_stat.ansible_hosts(host_name) VALUES (%s);",(hostname,))
        # NOTE(review): bare except hides insert failures; the re-select
        # below then returns None again.
        except:
            pass
        conn.commit()
        # Re-read to pick up the id generated by the insert.
        cur.execute("SELECT host_id FROM bsd_stat.ansible_hosts where host_name=%s",(hostname,))
        host_id = cur.fetchone()
        conn.commit()
    cur.close()
    conn.close()
    return host_id
def send_values(vals = {}, param_dict = [], current_epoch_time = 0, host_id = 0):
    """Insert one bsd_stat.top_values row per entry of ``vals``.

    ``param_dict`` is the (name, id) list from get_params_dict(); failed
    inserts are rolled back and skipped.
    """
    conn = psycopg2.connect("dbname=sys_db user=postgres host=192.168.1.203")
    cur = conn.cursor()
    for key in vals.keys():
        param_id = get_param_id(param_name=key, param_dict = param_dict)
        try:
            cur.execute("INSERT INTO bsd_stat.top_values(param_id, param_value, epochtime, ansible_host_id)VALUES (%s,%s,%s,%s);",(param_id, vals[key], current_epoch_time, host_id))
            conn.commit()
        # NOTE(review): bare except — narrow to psycopg2.Error if possible.
        except:
            conn.rollback()
            pass
    cur.close()
    conn.close()
def get_file_list(top_playbook_file='top.yml'):
    """Map each playbook host group to the local path of its fetched output.

    Reads the ansible playbook YAML; on Windows only the file name part of
    the fetch destination is kept.
    """
    host_to_file = {}
    with open(top_playbook_file) as playbook:
        plays = yaml.safe_load(playbook)
        on_windows = os.name == 'nt'
        for play in plays:
            dest = play['tasks'][1]['local_action']['dest']
            if on_windows:
                dest = dest.split('/')[-1]
            host_to_file[play['hosts']] = dest
    return host_to_file
def get_sys_pid_info_from_json(lines={}):
    """Split raw top(1) output lines into their three logical parts.

    Returns (system_summary_lines, process_table_lines, column_names):
    the summary block starting at the 'last pid:' line, the per-process
    rows following the 'PID ...' header, and that header's column names.
    """
    header_pos = 0
    table_pos = 0
    # 1-based positions of the 'last pid:' summary line and the column
    # header line (the one containing 'PID' past column 0).
    for number, text in enumerate(lines, start=1):
        if text.startswith('last pid:'):
            header_pos = number
        if text.find('PID') > 0:
            table_pos = number
    column_names = [token
                    for token in lines[table_pos - 1:table_pos][0].split(' ')
                    if token]
    summary = lines[header_pos - 1:table_pos - 2]
    processes = lines[table_pos:]
    return summary, processes, column_names
def get_values_from_pid_info(pid_list=[], pid=''):
    """Build {pid: {column_name: value}} from raw process-table lines.

    ``pid_list`` holds the column names; each line in ``pid`` is split on
    whitespace and zipped against those names.  The first field of every
    row (the process id) becomes the outer key.
    """
    table = {}
    for raw_line in pid:
        fields = [token for token in raw_line.split(' ') if token]
        row = {pid_list[index]: fields[index] for index in range(len(pid_list))}
        table[fields[0]] = row
    return table
def get_arc_parameters(sys = []):
    """Extract ZFS ARC statistics from the top(1) summary lines.

    Drops every known non-ARC summary line, joins whatever remains (the
    'ARC: ...' line, possibly wrapped), and parses "<value> <Name>" pairs
    into {'arc_<name>': number}.  'arc_ratio' (an "a:b" string) becomes
    a/b; all other values go through letter_degree().
    """
    counter = 0
    top_dict = {}
    remove_keys = []
    # Index every summary line so non-ARC ones can be removed by key.
    for i in sys:
        top_dict[counter] = i
        counter += 1
    for i in top_dict.keys():
        if top_dict[i].find('pid:') >= 0 or top_dict[i].find('processes:') >= 0 or top_dict[i].find('CPU:') >= 0 or top_dict[i].find('Mem:') >= 0 or top_dict[i].find('Swap:') >= 0 :
            remove_keys.append(i)
    for i in remove_keys:
        top_dict.pop(i)
    # Re-join the remaining (ARC) lines into one comma-separated string.
    ret =''
    for i in top_dict.keys():
        ret += top_dict[i]+','
    arc_dict = {}
    # Everything after the first ':' is "<value> <Name>, <value> <Name>, ...".
    for i in ret.split(':',1)[1].split(','):
        if len(i)>0:
            arc_dict['arc_'+i.strip().split(' ')[1].lower()]=i.strip().split(' ')[0]
    for i in arc_dict.keys():
        if i == 'arc_ratio':
            # Ratio is reported as "hits:misses" — convert to a float quotient.
            arc_dict[i] = float(arc_dict[i].split(':')[0])/float(arc_dict[i].split(':')[1])
        else:
            arc_dict[i] = letter_degree(value = arc_dict[i])
    return arc_dict
def get_cpu_mem_swap_parameters(sys = []):
    """Parse the CPU:, Mem: and Swap: lines of the top(1) summary block.

    Returns a single dict with keys prefixed 'cpu_', 'mem_' and 'swap_'.
    CPU entries ("1.2% user") become floats; Mem/Swap entries ("100M Active")
    are converted to plain numbers via letter_degree().
    """
    counter = 0
    top_dict = {}
    new_vals_cpu = {}
    new_vals_mem = {}
    new_vals_swap = {}
    vals = {}
    # Index the lines so each can be classified by its prefix.
    for i in sys:
        top_dict[counter] = i
        counter += 1
    for i in top_dict.keys():
        if top_dict[i].find('CPU:') >= 0:
            # Only parse lines that actually carry multiple "<pct>% <name>" fields.
            if len(top_dict[i].split(':')[1].split(',')) > 1:
                for j in top_dict[i].split(':')[1].split(','):
                    new_vals_cpu['cpu_'+j.split('%')[1].strip()] = float(j.split('%')[0].strip())
        elif top_dict[i].find('Mem:') >= 0:
            if len(top_dict[i].split(':')[1].split(',')) > 1:
                for j in top_dict[i].split(':')[1].split(','):
                    new_vals_mem['mem_'+j.split(' ')[-1].lower()] = letter_degree(value = j.split(' ')[-2])
        elif top_dict[i].find('Swap:') >= 0 :
            if len(top_dict[i].split(':')[1].split(',')) > 1:
                for j in top_dict[i].split(':')[1].split(','):
                    new_vals_swap['swap_'+j.split(' ')[-1].lower()] = letter_degree(value = j.split(' ')[-2])
    vals.update(new_vals_cpu)
    vals.update(new_vals_mem)
    vals.update(new_vals_swap)
    return vals
def get_load_parameters(sys=[]):
    """Extract the three load averages from the top(1) header line.

    The first summary line looks like
    "last pid: 615;  load averages: 0.28, 0.32, 0.31    up ..."; the three
    numbers between the ':' and 'up' become load_avg_5min/10min/15min.
    """
    segment = sys[0].split(';')[1]
    averages = segment[segment.find(':') + 1:segment.find('up')]
    parts = averages.replace(' ', '').split(',')
    return {
        'load_avg_5min': float(parts[0]),
        'load_avg_10min': float(parts[1]),
        'load_avg_15min': float(parts[2]),
    }
def get_processes_params(sys=[]):
    """Parse the top(1) process-summary line into a dict.

    ``sys[1]`` looks like "31 processes: 1 running, 30 sleeping"; the result
    is e.g. {'processes': 31, 'processes_running': 1, 'processes_sleeping': 30}.

    Bug fix: the original wrote into (and returned) the module-level global
    ``vals`` dict, raising NameError when that global was not yet defined.
    This version builds and returns a local dict; the caller merges the
    returned dict into ``vals`` itself, so the observable result is unchanged.
    """
    info = {}
    head = sys[1].split(':')[0]
    # "31 processes" -> {'processes': 31}
    info[head.split(' ')[1]] = int(head.split(' ')[0])
    # " 1 running, 30 sleeping" -> processes_running=1, processes_sleeping=30
    for part in sys[1].split(':')[1].split(','):
        tokens = part.split(' ')
        info['processes_' + tokens[-1]] = int(tokens[-2])
    return info
# Run the ansible playbook that collects top(1) output from every host.
subprocess.call('ansible-playbook ' + playbook_file, shell=True)
vals={}
fl = get_file_list(top_playbook_file = playbook_file)
# For each host, parse its fetched JSON top output and ship the metrics
# to the PostgreSQL statistics database.
for top_file_keys in fl.keys():
    with open(fl[top_file_keys], "r") as top_file:
        json_top = json.load(top_file)
    # NOTE(review): local name 'sys' shadows the sys module inside this loop.
    sys,pid,pid_list = get_sys_pid_info_from_json(lines = json_top['stdout_lines'])
    # Each parser is best-effort: a malformed section is simply skipped.
    try:
        vals.update(get_load_parameters(sys = sys))
    except:
        pass
    try:
        vals.update(get_processes_params(sys = sys))
    except:
        pass
    try:
        vals.update(get_cpu_mem_swap_parameters(sys = sys))
    except:
        pass
    try:
        vals.update(get_arc_parameters(sys = sys))
    except:
        pass
    # Timestamp the sample with the playbook's own start time (epoch seconds).
    current_epoch_time = int(datetime.strptime(json_top['start'], '%Y-%m-%d %H:%M:%S.%f').timestamp())
    host_id = send_hostname(hostname = top_file_keys)
    send_parameters(vals = vals)
    param_dict = get_params_dict()
    send_values(vals = vals, param_dict = param_dict, current_epoch_time = current_epoch_time, host_id = host_id)
| 2.28125 | 2 |
gql/schema.py | canburaks/djr | 3 | 12773909 | # ~/Blog/djr/gql/schema.py
import graphene
from items.models import Movie
from graphene_django.types import DjangoObjectType
# api-movie-model
class MovieType(DjangoObjectType):
id = graphene.Int()
name = graphene.String()
year = graphene.Int()
summary = graphene.String()
poster_url = graphene.String()
slug = graphene.String()
class Meta:
model = Movie
def resolve_id(self, info):
return self.id
def resolve_name(self, info):
return self.name
def resolve_year(self, info):
return self.year
def resolve_summary(self, info):
return self.summary
def resolve_poster_url(self, info):
# Note: in client side app snake_case fields
# will be resolved as camelCase
# Eg: poster_url ==> posterUrl
return self.poster_url
def resolve_slug(self, info):
return self.slug
class Query(graphene.ObjectType):
movie_list = graphene.List(MovieType)
movie = graphene.Field(MovieType, slug=graphene.String())
def resolve_movie_list(self, info, *_):
# for large lists only query what you need
return Movie.objects.all().only("name", "poster_url", "slug")
def resolve_movie(self, info, slug):
movie_queryset = Movie.objects.filter(slug=slug)
if movie_queryset.exists():
return movie_queryset.first()
schema = graphene.Schema(query=Query) | 2.328125 | 2 |
verkkokauppa/payment/exceptions.py | SuviVappula/tilavarauspalvelu-core | 0 | 12773910 | <filename>verkkokauppa/payment/exceptions.py
from ..exceptions import VerkkokauppaError
class PaymentError(VerkkokauppaError):
    """Base error for payment API operations."""
    pass
class ParsePaymentError(PaymentError):
    """Raised when a payment API response cannot be parsed."""
    pass
class GetPaymentError(PaymentError):
    """Raised when fetching a payment from the API fails."""
    pass
| 1.6875 | 2 |
maui/api.py | kblicharski/py-maui | 0 | 12773911 | from functools import reduce
from pprint import pprint
from typing import Sequence, Tuple
import requests
from graph import Graph
# MAUI session id for the spring term (and its legacy counterpart).
spring_id = 71
spring_id_legacy = 20178
def modify_string(p: str, repl: Sequence[Tuple[str, str]]) -> str:
    """Apply each (old, new) replacement pair to ``p``, in order."""
    result = p
    for old, new in repl:
        result = result.replace(old, new)
    return result
# Query the MAUI public API for every section of one subject in one session.
url = 'https://api.maui.uiowa.edu/maui/api/pub/registrar/sections'
subject = 'ece'
payload = "json={{sessionId: {}, courseSubject: '{}'}}".format(str(spring_id), subject)
response = requests.get(url=url, params=payload)
if response.status_code != 200:
    # NOTE(review): execution continues after this error, so response.json()
    # below may then fail — consider aborting here.
    print('Error: HTTP {}'.format(response.status_code))
data = response.json()
raw_courses = data['payload']
# Keep only the courses that actually declare a prerequisite string.
courses_with_prereqs = list(filter(lambda x: x['prerequisite'] is not None, raw_courses))
# Normalise prerequisite strings: drop spaces, 'and' -> '+', 'or' -> '?'.
replacements = (' ', ''), ('and', '+'), ('or', '?')
for d in courses_with_prereqs:
    d.update((k, modify_string(v, replacements)) for k, v in d.items() if k ==
             'prerequisite')
# Bug fix: the original wrote {map(...)}, which builds a set containing one
# map *object* instead of a set of prerequisite strings.
prereqs = set(map(lambda x: x['prerequisite'], courses_with_prereqs))
pairings = {(x['subjectCourse'], x['prerequisite']) for x in courses_with_prereqs}
# Collect every distinct course id seen either as a course or a prerequisite.
courses = set()
replacements = ('(', ''), (')', ''), ('+', ','), ('?', ',')
for pair in pairings:
    courses.add(pair[0])
    for prereq in modify_string(pair[1], replacements).split(','):
        courses.add(prereq)
pprint(courses)
print(len(courses))
connections = []
for pair in pairings:
    pprint('{} => {}'.format(pair[0], pair[1]))
# g = Graph(directed=True)
# 'ECE:5220 => (BIOS:4120?STAT:3510)+BME:5320+(CS:5110?ENGR:1300)'
# ('BIOS:4120?', 'ECE:5220',
exercicios/Lista4/Q28.py | AlexandrePeBrito/CursoUdemyPython | 0 | 12773912 | #Leia 10 números inteiros e armazene em um vetor v. Crie dois
#novos vetores v1 e v2. Copie os valores ímpares de v para
#v1, e os valores pares de v para v2. Note que cada um dos
#vetores v1 e v2 têm no máximo 10 elementos, mas nem todos
#os elementos são utilizados. No final escreva os elementos
#UTILIZADOS de v1 e v2.
import random
v=[]
v1=[0,0,0,0,0,0,0,0,0,0]
v2=[0,0,0,0,0,0,0,0,0,0]
for c in range(0,10):
n=random.randint(1,50)
v.append(n)
if(n%2==0):
v2[c]=n
else:
v1[c]=n
print(v)
for c in range(0,10):
if(v1[c]!=0):
print(f"V1 = {v1[c]}")
elif(v2[c]!=0):
print(f"V2 = {v2[c]}") | 3.515625 | 4 |
funcs/cycles/gldas_to_cycles.py | mintproject/MINT-Transformation | 1 | 12773913 | import argparse
import subprocess
from dtran.dcat.api import DCatAPI
from funcs.readers.dcat_read_func import DATA_CATALOG_DOWNLOAD_DIR
import os
import csv
import json
import shutil
from datetime import datetime
from datetime import timedelta
from pathlib import Path
from typing import Optional, Dict
import re

import numpy as np
import xarray as xr
from netCDF4 import Dataset
from dtran import IFunc, ArgType
from dtran.ifunc import IFuncType
from dtran.metadata import Metadata
from zipfile import ZipFile
import logging, sys
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class GldasToCyclesBatched(IFunc):
    """Pipeline adapter that converts GLDAS NetCDF climate data plus soil
    files into Cycles model input, processing the date range in batches of
    ``batch_numdays`` days (see gldas_to_cycles below)."""
    id = "gldas_to_cycles_batched_func"
    description = """ A reader-transformation-writer multi-adapter.
    Creates Cycles input (weather and soil file zip) from GLDAS NetCDF (climate) files & Soil files.
    """
    # Declared pipeline input ports (all strings except the batch size/path).
    inputs = {
        "gldas_dataset_id": ArgType.String,
        "gldas_soil_map_file": ArgType.String,
        "start_date": ArgType.String,
        "end_date": ArgType.String,
        "batch_numdays": ArgType.Number,
        "output_path": ArgType.FilePath
    }
    outputs = {"output_files": ArgType.FilePath}
    friendly_name: str = "GldasToCyclesBatched"
    func_type = IFuncType.MODEL_TRANS
    # Example invocation values, used for documentation/UI purposes.
    example = {
        "gldas_dataset_id": "5babae3f-c468-4e01-862e-8b201468e3b5",
        "gldas_soil_map_file": "/tmp/gldas_soil_43.E_8.4N.json",
        "start_date": "2000-01-01",
        "end_date": "2018-01-31",
        "batch_numdays": 14,
        "output_path": "/tmp/output"
    }
    def __init__(
        self,
        gldas_dataset_id,
        gldas_soil_map_file,
        start_date,
        end_date,
        batch_numdays,
        output_path
    ):
        # Plain attribute storage; dates stay as "YYYY-MM-DD" strings here
        # and are parsed inside gldas_to_cycles().
        self.gldas_dataset_id = gldas_dataset_id
        self.gldas_soil_map_file = gldas_soil_map_file
        self.output_path = output_path
        self.end_date = end_date
        self.start_date = start_date
        self.batch_numdays = batch_numdays
    def validate(self) -> bool:
        """No input validation is performed; always reports valid."""
        return True
    def exec(self) -> dict:
        """Run the conversion and return the output file path(s)."""
        output_file = gldas_to_cycles(
            self.gldas_dataset_id,
            self.gldas_soil_map_file,
            self.start_date,
            self.end_date,
            self.batch_numdays,
            self.output_path
        )
        return {"output_files": output_file}
    def change_metadata(
        self, metadata: Optional[Dict[str, Metadata]]
    ) -> Dict[str, Metadata]:
        """Pass pipeline metadata through unchanged."""
        return metadata
def convert_to_cycles_input(ds):
    """
    Resample GLDAS data for a location by 24 hours (1 day) and convert it to
    the Cycles weather variables (PP, TX, TN, SOLAR, RHX, RHN, WIND plus
    YEAR/DOY), returning them merged into a single dataset.
    """
    logging.debug("Reading variables from dataset..")
    # Also computes the derived RH variable (via read_variables_from_dataset).
    (_prcp, _temp, _wind, _solar, _rh) = read_variables_from_dataset(ds)
    logging.debug("Finished reading variables from dataset..")
    logging.debug("Start resampling...")
    # Resample/Group by 1 day — some variables are aggregated by mean,
    # others by daily max/min.
    prcp_daily = _prcp.resample(time="1D")
    temp_daily = _temp.resample(time="1D")
    solar_daily = _solar.resample(time="1D")
    rh_daily = _rh.resample(time="1D")
    wind_daily = _wind.resample(time="1D")
    prcp = prcp_daily.mean().rename("PP")
    tx = temp_daily.max().rename("TX")
    tn = temp_daily.min().rename("TN")
    solar = solar_daily.mean().rename("SOLAR")
    rhx = rh_daily.max().rename("RHX")
    rhn = rh_daily.min().rename("RHN")
    wind = wind_daily.mean().rename("WIND")
    logging.debug("Finished resampling...")
    logging.debug("Doing unit conversions...")
    # Unit conversions — presumably kg/m^2/s -> mm/day for precipitation and
    # W/m^2 -> MJ/m^2/day for solar (TODO confirm against GLDAS variable units).
    prcp *= 86400.0
    solar *= 86400.0 / 1.0e6
    # RH fraction -> percent; temperatures Kelvin -> Celsius.
    rhx *= 100.0
    rhn *= 100.0
    tx -= 273.15
    tn -= 273.15
    logging.debug("Finished unit conversions...")
    # Get the year and day-of-year columns required by the Cycles format.
    year = prcp.time.dt.year.rename("YEAR")
    doy = prcp.time.dt.dayofyear.rename("DOY")
    logging.debug("Merge variables...")
    # Create a dataset with all the required variables; lat/lon coords are
    # dropped since the weather file header carries the location.
    cycles_weather_ds = xr.merge([year, doy, prcp, tx, tn, solar, rhx, rhn, wind])
    cycles_weather_ds = cycles_weather_ds.reset_coords(names=["lat", "lon"], drop=True)
    logging.debug("Finished merging variables...")
    return cycles_weather_ds
def create_rh(nc):
    """
    Calculate relative humidity ("rh") from GLDAS variables and store it
    back into ``nc``.

    Inputs read from ``nc``: "Tair_f_inst" (air temperature, K),
    "Psurf_f_inst" (surface pressure, Pa), "Qair_f_inst" (specific
    humidity, kg/kg).  The result is a fraction capped at 1.0.
    Returns ``nc`` with "rh" added.
    """
    _temp = nc["Tair_f_inst"]
    _pres = nc["Psurf_f_inst"]
    _spfh = nc["Qair_f_inst"]
    # Saturation vapour pressure (Pa), Magnus/Bolton approximation.
    # np.exp dispatches correctly on xarray DataArrays (xr.ufuncs was
    # deprecated and removed in newer xarray releases).
    es = 611.2 * np.exp(17.67 * (_temp - 273.15) / (_temp - 273.15 + 243.5))
    # Saturation mixing ratio.
    ws = 0.622 * es / (_pres - es)
    # Actual mixing ratio from specific humidity.
    w = _spfh / (1.0 - _spfh)
    nc["rh"] = w / ws
    # Bug fix: clip() returns a new array; the original discarded the
    # result, so supersaturated values were never actually capped at 1.0.
    nc["rh"] = nc["rh"].clip(max=1.0)
    return nc
def read_variables_from_dataset(nc):
    """Pull the Cycles-relevant variables out of a GLDAS dataset.

    Adds the derived "rh" variable to ``nc`` (in place, via create_rh) and
    returns (precipitation, temperature, wind, shortwave radiation, rh).
    """
    # create_rh only adds "rh"; it does not modify the other variables.
    create_rh(nc)
    return (
        nc["Rainf_f_tavg"],
        nc["Tair_f_inst"],
        nc["Wind_f_inst"],
        nc["SWdown_f_tavg"],
        nc["rh"],
    )
def load_gldas_dataset(gldas_files):
    """
    Load GLDAS NetCDF files as one combined xarray dataset.

    Only the six variables needed downstream are kept; everything else is
    dropped at open time to speed up loading.  Returns None implicitly when
    ``gldas_files`` is None or empty.
    """
    if gldas_files is not None and len(gldas_files) > 0:
        # Open a sample gldas file and get all variables to remove from the
        # load (to make the loading faster)
        first_file = gldas_files[0]
        d1 = xr.open_dataset(first_file)
        varnames = list(d1.data_vars.keys())
        # Keep the rain, temperature, wind, radiation, pressure and
        # specific-humidity variables; drop the rest.
        varnames.remove('Rainf_f_tavg')
        varnames.remove('Tair_f_inst')
        varnames.remove('Wind_f_inst')
        varnames.remove('SWdown_f_tavg')
        varnames.remove('Psurf_f_inst')
        varnames.remove('Qair_f_inst')
        d1.close()
        # Lazy, chunked multi-file open (dask-backed).
        ds=xr.open_mfdataset(gldas_files, drop_variables=varnames, chunks='auto')
        return ds
def gldas_to_cycles(
        gldas_dataset_id,
        gldas_soil_map_file,
        start_date,
        end_date,
        batch_numdays,
        output_path
    ):
    """Convert GLDAS NetCDF data into Cycles weather/soil input files.

    Downloads any missing GLDAS resources for the requested period (in
    batches of ``batch_numdays`` days), appends per-gridpoint daily weather
    rows to Cycles ``.weather`` files in *output_path*, then zips each soil
    file together with its matching weather file.

    Args:
        gldas_dataset_id: Data-catalog id of the GLDAS dataset.
        gldas_soil_map_file: JSON file mapping weather grid points to soils.
        start_date: Period start as a ``YYYY-MM-DD`` string.
        end_date: Period end as a ``YYYY-MM-DD`` string.
        batch_numdays: Number of days of GLDAS data to load per batch.
        output_path: Directory receiving the generated files.

    Returns:
        List of generated ``<name>.soil_weather.zip`` file names.
    """
    gldas_directory = DATA_CATALOG_DOWNLOAD_DIR + "/gldas"
    if not os.path.exists(gldas_directory):
        Path(gldas_directory).mkdir(exist_ok=True, parents=True)
    if not os.path.exists(output_path):
        Path(output_path).mkdir(exist_ok=True, parents=True)
    # Load soil and weather information from input soil-weather map file
    soil_grid_points = {}
    weather_grid_points = []
    with open(gldas_soil_map_file) as mapf:
        weather_grid_points = json.load(mapf)
        for weather_point in weather_grid_points:
            soils = weather_point["soils"]
            for soil in soils:
                soil_grid_points[soil["name"]] = {
                    "weather": weather_point["weather"],
                    "soil_path": soil["path"]
                }
    num_weather_points = len(weather_grid_points)
    num_soil_points = len(soil_grid_points.keys())
    logging.info(f"Processing {num_weather_points} GLDAS grid points for {num_soil_points} Soil points")
    # Get latest dates for existing weather files
    point_latest_dates = {}
    for grid_point in weather_grid_points:
        weather_point = grid_point["weather"]
        common_weather_fname = weather_point["filename"]
        lat = weather_point["lat"]
        lon = weather_point["lon"]
        elevation = weather_point["elevation"]
        common_weather_file = os.path.join(output_path, common_weather_fname)
        # Check if the weather file already exists
        if os.path.exists(common_weather_file):
            # If yes, then get latest start date for this weather file.
            # Rows are appended in date order, so the last numeric row wins.
            with open(common_weather_file) as weatherf:
                for line in weatherf:
                    items = re.split(r"\s+", line)
                    if items[0].isnumeric():
                        point_latest_dates[common_weather_fname] = datetime.strptime("{} {}".format(items[0], items[1]), "%Y %j")
        else:
            # If not, then create the weather file headers
            with open(common_weather_file, "w") as outfp:
                outfp.write("LATITUDE %.2f\n" % (lat))
                #outfp.write("LONGITUDE %.2f\n" % (lon))
                outfp.write("ALTITUDE %.2f\n" % (elevation))
                outfp.write("SCREENING_HEIGHT 2\n")
                outfp.write("%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s\n" % (
                    'YEAR', 'DOY', 'PP', 'TX', 'TN', 'SOLAR', 'RHX', 'RHN', 'WIND'
                ))
    # Do the GLDAS to cycles conversion in batches of N number of days
    # - For each batch of start-date/end-date, load GLDAS and create cycles inputs
    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.strptime(end_date, "%Y-%m-%d")
    cur_start_date = start_date
    while cur_start_date < end_date:
        cur_end_date = cur_start_date + timedelta(days = batch_numdays)
        if cur_end_date > end_date:
            cur_end_date = end_date
        logging.info(f"Fetching GLDAS files list for dates from {cur_start_date} to {cur_end_date}")
        logging.info("Downloading missing GLDAS files..")
        # Download GLDAS Datasets for the time period
        gldas_resources = DCatAPI.get_instance().find_resources_by_dataset_id(gldas_dataset_id, cur_start_date, cur_end_date)
        gldas_files = []
        for resource in gldas_resources:
            temporal_metadata = resource['resource_metadata']['temporal_coverage']
            gldas_date_str = temporal_metadata['start_time'].split("T")[0]
            gldas_date = datetime.strptime(gldas_date_str, "%Y-%m-%d")
            # Files are cached locally under <gldas_dir>/<year>/<day-of-year>/
            nc_path = "%s/%4.4d/%3.3d/" % (gldas_directory, gldas_date.timetuple().tm_year, gldas_date.timetuple().tm_yday)
            ofile = os.path.join(nc_path, resource['resource_name'])
            if not os.path.exists(nc_path):
                Path(nc_path).mkdir(parents=True, exist_ok=True)
            if not os.path.exists(ofile):
                logging.debug(ofile)
                # NOTE(review): the catalog URL is interpolated into a shell
                # command; a trusted data catalog is assumed here.
                subprocess.check_call(f"wget -q \"{resource['resource_data_url']}\" -O {ofile}", shell=True, close_fds=False)
            if os.path.exists(ofile):
                gldas_files.append(ofile)
        num_weather_files = len(gldas_files)
        logging.info(f"Loading GLDAS data from {num_weather_files} files..")
        gldas_ds = load_gldas_dataset(gldas_files)
        logging.info("Loaded GLDAS data")
        # Do the cycles conversion for all weather points
        for grid_point in weather_grid_points:
            weather_point = grid_point["weather"]
            common_weather_fname = weather_point["filename"]
            lat = weather_point["lat"]
            lon = weather_point["lon"]
            elevation = weather_point["elevation"]
            common_weather_file = os.path.join(output_path, common_weather_fname)
            point_start_date = cur_start_date
            if common_weather_fname in point_latest_dates:
                point_start_date = point_latest_dates[common_weather_fname] + timedelta(days=1)
            # If we've already processed this time period for this point, then don't go further
            if point_start_date > cur_end_date:
                continue
            # Load GLDAS data for the exact gridpoint location
            logging.debug(f"Loading GLDAS data for grid point {lat}, {lon}")
            loc_ds = gldas_ds.sel(lat=lat, lon=lon, time=slice(point_start_date, cur_end_date)).load()
            logging.debug("Loaded gldas data for location")
            logging.debug("Converting to Cycles input data")
            # Convert to Cycles Input
            loc_by_day_ds = convert_to_cycles_input(loc_ds)
            logging.debug("Finished conversion to cycles input data")
            logging.debug("Converting weather input data to Pandas Dataframe...")
            loc_by_day_df = loc_by_day_ds.to_dataframe()
            # BUG FIX: sort_values returns a new DataFrame; the original call
            # discarded the result, leaving the rows unsorted.
            loc_by_day_df = loc_by_day_df.sort_values(by=['YEAR', 'DOY'])
            logging.debug("Finished converting to Dataframe")
            logging.debug ("Writing the cycles weather file..")
            # Append to the weather file
            with open(common_weather_file, "a") as outfp:
                for index, row in loc_by_day_df.iterrows():
                    if index < cur_end_date: # Sometimes an extra day is returned (for midnight file of next day. Do a check here to ignore that)
                        outfp.write("%-8.0f%-8.0f%-8.4f%-8.2f%-8.2f%-8.4f%-8.2f%-8.2f%-8.2f\n" % (
                            row['YEAR'], row['DOY'],
                            row['PP'], row['TX'], row['TN'],
                            row['SOLAR'], row['RHX'], row['RHN'],
                            row['WIND'])
                        )
        gldas_ds.close()
        cur_start_date = cur_end_date
    logging.info(f"Done converting GLDAS data to cycles input weather file for {num_weather_points} points")
    logging.info(f"Creating {num_soil_points} cycles input zip files, each containing a weather and a soil file...")
    fnames = []
    # Create the Zip file for all soil points containing the soil file and the generated weather file
    for fname in soil_grid_points.keys():
        point = soil_grid_points[fname]
        soil_path = point["soil_path"]
        weather_point = point["weather"]
        if not os.path.exists(soil_path):
            continue
        common_weather_fname = weather_point["filename"]
        common_weather_file = os.path.join(output_path, common_weather_fname)
        logging.debug (f"Creating Cycles zip file for {fname}")
        weather_fname = fname + ".weather"
        soil_fname = fname + ".soil"
        zip_fname = fname + ".soil_weather.zip"
        tmp_soil_file = os.path.join(output_path, soil_fname)
        tmp_weather_file = os.path.join(output_path, weather_fname)
        common_weather_file = os.path.join(output_path, common_weather_fname)
        soil_weather_file = os.path.join(output_path, zip_fname)
        # The shared weather file is copied under a per-soil name so each zip
        # is self-contained.
        shutil.copyfile(soil_path, Path(tmp_soil_file))
        shutil.copyfile(common_weather_file, Path(tmp_weather_file))
        zipObj = ZipFile(soil_weather_file, 'w')
        zipObj.write(tmp_soil_file, soil_fname)
        zipObj.write(tmp_weather_file, weather_fname)
        zipObj.close()
        logging.debug ("Done writing cycles zip file")
        fnames.append(zip_fname)
    logging.info(f"Done Creating {num_soil_points} cycles input zip files")
    return fnames
| 2.046875 | 2 |
Workshop/Workshop/main_app/validators.py | petel3/Softuni_education | 2 | 12773914 | from django.core.exceptions import ValidationError
def only_letters_validator(value):
    """Raise ValidationError unless every character of *value* is alphabetic."""
    if not all(ch.isalpha() for ch in value):
        raise ValidationError("Value must contains only letters")
def file_max_size_in_mb_validator(max_size):
    """Return a validator rejecting files larger than *max_size* megabytes."""
    limit_bytes = max_size * 1024 * 1024

    def validate(value):
        # ``value.file.size`` is the uploaded file's size in bytes.
        if value.file.size > limit_bytes:
            raise ValidationError("Max file size is %sMB" % str(max_size))
    return validate
| 2.75 | 3 |
rlpyt/models/dqn/cartpole_dqn_model.py | ElisevanderPol/mdp-homomorphic-networks | 17 | 12773915 |
import torch
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
from rlpyt.models.conv2d import Conv2dModel
from rlpyt.models.mlp import MlpModel
from rlpyt.models.dqn.dueling import DuelingHeadModel
class CartpoleDqnModel(torch.nn.Module):
    """DQN Q-value model for CartPole: an MLP (optionally dueling) head over
    the flat observation vector; no convolutional front-end is used."""

    def __init__(
            self,
            image_shape,
            output_size,
            fc_sizes=[64, 64],
            dueling=False,
            use_maxpool=False,
            channels=None,  # None uses default.
            kernel_sizes=None,
            strides=None,
            paddings=None,
            ):
        """Build the (optionally dueling) fully-connected head.

        Only ``image_shape[0]`` (observation length) is used; the conv-related
        arguments are accepted for interface compatibility but ignored.
        """
        super().__init__()
        self.dueling = dueling
        n_inputs = image_shape[0]
        head_cls = DuelingHeadModel if dueling else MlpModel
        self.head = head_cls(n_inputs, fc_sizes, output_size)

    def forward(self, observation, prev_action, prev_reward):
        """Feedforward layers process as [T*B,H]. Return same leading dims as
        input, can be [T,B], [B], or []."""
        obs = observation.type(torch.float)  # Expect torch.uint8 inputs
        # Infer (presence of) leading dimensions: [T,B], [B], or [].
        lead_dim, T, B, _ = infer_leading_dims(obs, 1)
        q = self.head(obs.view(T * B, -1))
        # Restore leading dimensions: [T,B], [B], or [], as input.
        return restore_leading_dims(q, lead_dim, T, B)
| 2.15625 | 2 |
diplomacy_research/scripts/build_dataset.py | wwongkamjan/dipnet_press | 39 | 12773916 | <reponame>wwongkamjan/dipnet_press<filename>diplomacy_research/scripts/build_dataset.py
#!/usr/bin/env python3
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Dataset Builder
- Builds the various datasets required from the zip dataset
"""
import argparse
import glob
import importlib
import inspect
import logging
import os
# Constants
LOGGER = logging.getLogger('diplomacy_research.scripts.build_dataset')
def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    add_arg = parser.add_argument
    add_arg('--log', type=str,
            help='Output logging file to collect processing info. If not specified, no logging info about '
                 'games processing will be printed.')
    add_arg('--input-path', type=str, default=os.getcwd(),
            help='Folder of raw games data. Should contain sub-folders curl_p, curl_w, and/ord curl_f.')
    add_arg('--hostname', type=str, default='localhost',
            help='Hostname to connect to sql_w MySQL database.')
    add_arg('--port', type=int, default=3306,
            help='port to connect to sql_w MySQL database.')
    add_arg('--username', type=str, default='root',
            help='username to connect to sql_w MySQL database.')
    add_arg('--password', type=str, default='',
            help='password to connect to sql_w MySQL database.')
    add_arg('--database', type=str, default='webdiplomacy',
            help='database name to connect to sql_w MySQL database.')
    add_arg('--filter', type=str, default='',
            help='A comma-separated list of proto dataset to generate (e.g. "order_based/no_press_all")')
    return parser.parse_args()
if __name__ == '__main__':
    # Imported lazily so merely importing this module stays side-effect free.
    from diplomacy_research.models.datasets.base_builder import BaseBuilder
    # Parsing arguments
    ARGS = parse_args()
    # General dataset builders
    # Loading all files in diplomacy_research/scripts/dataset
    # and running the run() function in each of them
    SCRIPTS_DATASET_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dataset')
    # Scripts match '*_???_*.py'; sorting runs them in a deterministic order.
    for dataset_path in sorted(glob.glob(os.path.join(SCRIPTS_DATASET_DIR, '*_???_*.py'))):
        module_path = 'diplomacy_research.scripts.dataset.%s' % dataset_path.split('/')[-1].replace('.py', '')
        imported_path = importlib.import_module(module_path)
        # Each dataset script module is expected to expose a 'run' callable,
        # which receives all command-line arguments as keywords.
        for run_fn in [obj for name, obj in inspect.getmembers(imported_path) if name == 'run']:
            LOGGER.info('========== %s ==========', module_path)
            run_fn(**vars(ARGS))
    # We only want to generate the common dataset scripts - Exiting here
    if ARGS.filter and ARGS.filter == 'common':
        exit(0)
    # Policy dataset builders
    # Loading all files in diplomacy_research/models/policy/xxxx/dataset
    # and calling the generate_proto_files() of each builder class
    for dataset_path in ['diplomacy_research.models.policy.order_based.dataset',
                         'diplomacy_research.models.policy.token_based.dataset']:
        imported_path = importlib.import_module(dataset_path)
        # Collect concrete BaseBuilder subclasses defined in the module.
        for obj_name, obj_class in [(name, obj) for name, obj in inspect.getmembers(imported_path)
                                    if inspect.isclass(obj) and issubclass(obj, BaseBuilder) and obj != BaseBuilder]:
            # Filter key format: '<order|token>_based/<builder module name>'.
            obj_filtered_class = '%s/%s' % (dataset_path.split('.')[-2], obj_class.__module__.split('.')[-1])
            if ARGS.filter and obj_filtered_class not in ARGS.filter.split(','):
                continue
            LOGGER.info('Checking if dataset builder "%s" in path "%s" exists...', obj_filtered_class, dataset_path)
            # Only (re)generate proto files when the training dataset is missing.
            if not os.path.exists(obj_class.training_dataset_path):
                obj_class().generate_proto_files()
            else:
                LOGGER.info('Dataset for "%s" in path "%s" exist. Skipping...', obj_filtered_class, dataset_path)
| 2.171875 | 2 |
qchem.py | EmmanuelG0ldstein/pyGSM_AMS_3 | 0 | 12773917 | from .base_lot import *
import numpy as np
import os
from .units import *
#TODO get rid of get_energy
class QChem(Lot):
    """Q-Chem level-of-theory interface.

    Writes a temporary Q-Chem input file, invokes the external ``qchem``
    executable, and parses energies and gradients from the scratch ``GRAD``
    file under ``$QCSCRATCH``.
    """
    def run(self, geom, multiplicity):
        """Run one Q-Chem force job for *geom* at spin *multiplicity*.

        Appends ``(multiplicity, energy)`` to ``self.E`` and
        ``(multiplicity, gradient)`` to ``self.grada`` (atomic units).
        """
        tempfilename = 'tempQCinp'
        with open(tempfilename, 'w') as inpfile:
            if self.lot_inp_file == False:
                # Default $rem section for a DFT gradient ("FORCE") job.
                inpfile.write(' $rem\n')
                inpfile.write(' JOBTYPE FORCE\n')
                inpfile.write(' EXCHANGE {}\n'.format(self.functional))
                inpfile.write(' SCF_ALGORITHM rca_diis\n')
                inpfile.write(' SCF_MAX_CYCLES 300\n')
                inpfile.write(' BASIS {}\n'.format(self.basis))
                #inpfile.write(' ECP LANL2DZ \n')
                inpfile.write(' WAVEFUNCTION_ANALYSIS FALSE\n')
                inpfile.write(' GEOM_OPT_MAX_CYCLES 300\n')
                inpfile.write('scf_convergence 6\n')
                inpfile.write(' SYM_IGNORE TRUE\n')
                inpfile.write(' SYMMETRY FALSE\n')
                inpfile.write('molden_format true\n')
                inpfile.write(' $end\n')
                inpfile.write('\n')
                inpfile.write('$molecule\n')
            else:
                # User-supplied input preamble replaces the default $rem block.
                with open(self.lot_inp_file) as lot_inp:
                    lot_inp_lines = lot_inp.readlines()
                for line in lot_inp_lines:
                    inpfile.write(line)
            inpfile.write('{} {}\n'.format(self.charge, multiplicity))
            if os.path.isfile("link.txt"):
                # Optional per-atom link annotations appended to each geometry line.
                with open("link.txt") as link:
                    link_lines = link.readlines()
                tmp_geom = [list(i) for i in geom]
                for i, coord in enumerate(tmp_geom):
                    coord.append(link_lines[i].rstrip('\n'))
                    for item in coord:
                        inpfile.write(str(item) + ' ')
                    inpfile.write('\n')
            else:
                for coord in geom:
                    for item in coord:
                        inpfile.write(str(item) + ' ')
                    inpfile.write('\n')
            inpfile.write('$end')
        cmd = "qchem -nt {} -save {} {}.qchem.out {}.{}".format(self.nproc, tempfilename, tempfilename, self.node_id, multiplicity)
        os.system(cmd)
        # Both the energy and the gradient are parsed from the scratch GRAD
        # file: the $energy section comes first, then $gradient.
        efilepath = os.environ['QCSCRATCH']
        efilepath += '/{}.{}/GRAD'.format(self.node_id, multiplicity)
        with open(efilepath) as efile:
            elines = efile.readlines()
        temp = 0
        for lines in elines:
            if temp == 1:
                # First line after the opening '$' marker is the energy.
                self.E.append((multiplicity, float(lines.split()[0])))
                break
            if "$" in lines:
                temp += 1
        gradfilepath = os.environ['QCSCRATCH']
        gradfilepath += '/{}.{}/GRAD'.format(self.node_id, multiplicity)
        with open(gradfilepath) as gradfile:
            gradlines = gradfile.readlines()
        temp = 0
        tmp = []
        for lines in gradlines:
            if '$' in lines:
                temp += 1
            elif temp == 2:
                # Lines between the 2nd and 3rd '$' markers hold the gradient.
                tmpline = lines.split()
                tmp.append([float(i) for i in tmpline])
            elif temp == 3:
                break
        self.grada.append((multiplicity, tmp))
        return
    def get_energy(self, coords, multiplicity, state):
        """Return the energy (kcal/mol) of *state* at *coords*.

        Re-runs Q-Chem only when *coords* changed since the last run.
        """
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).any():
            self.currentCoords = coords.copy()
            geom = manage_xyz.np_to_xyz(self.geom, self.currentCoords)
            self.runall(geom)
            self.hasRanForCurrentCoords = True
        tmp = self.search_tuple(self.E, multiplicity)
        return np.asarray(tmp[state][1]) * KCAL_MOL_PER_AU
    def get_gradient(self, coords, multiplicity, state):
        """Return the gradient (Hartree/Angstrom) of *state* at *coords*.

        Re-runs Q-Chem only when *coords* changed since the last run.
        NOTE(review): unlike get_energy, this does not set
        hasRanForCurrentCoords after running — confirm whether intentional.
        """
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).any():
            self.currentCoords = coords.copy()
            geom = manage_xyz.np_to_xyz(self.geom, self.currentCoords)
            self.runall(geom)
        tmp = self.search_tuple(self.grada, multiplicity)
        return np.asarray(tmp[state][1]) * ANGSTROM_TO_AU
    @classmethod
    def copy(cls, lot, **kwargs):
        """Create a new QChem object from *lot*, duplicating its Q-Chem
        scratch directories so the copy can restart from the same guess.

        BUG FIX: the original body referenced the undefined names ``self``,
        ``node_id`` and ``options`` (NameError at runtime); they are now
        derived from *lot* and ``**kwargs``.
        """
        node_id = kwargs.get('node_id', lot.node_id)
        base = os.environ['QCSCRATCH']
        for state in lot.states:
            multiplicity = state[0]
            efilepath_old = base + '/{}.{}'.format(lot.node_id, multiplicity)
            efilepath_new = base + '/{}.{}'.format(node_id, multiplicity)
            os.system('cp -r ' + efilepath_old + ' ' + efilepath_new)
        return cls(lot.options.copy().set_values(kwargs))
| 2.3125 | 2 |
mtda/usb/rpi_gpio.py | LevyForchh/mtda | 0 | 12773918 | # System imports
import abc
import RPi.GPIO as GPIO
# Local imports
from mtda.usb.switch import UsbSwitch
class RPiGpioUsbSwitch(UsbSwitch):
    """USB power switch driven by a single Raspberry Pi GPIO pin."""

    def __init__(self):
        self.dev = None
        self.pin = 0
        # Logic levels used to enable/disable power; polarity may be
        # inverted via the 'enable' configuration key.
        self.enable = GPIO.HIGH
        self.disable = GPIO.LOW
        GPIO.setwarnings(False)

    def configure(self, conf):
        """ Configure this USB switch from the provided configuration"""
        if 'pin' in conf:
            self.pin = int(conf['pin'], 10)
        if 'enable' in conf:
            polarity = conf['enable']
            if polarity == 'high':
                self.enable, self.disable = GPIO.HIGH, GPIO.LOW
            elif polarity == 'low':
                self.enable, self.disable = GPIO.LOW, GPIO.HIGH
        if self.pin > 0:
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(self.pin, GPIO.OUT)
        return

    def probe(self):
        # Nothing to probe for a plain GPIO pin.
        return

    def on(self):
        """ Power on the target USB port"""
        GPIO.output(self.pin, self.enable)
        return self.status() == self.POWERED_ON

    def off(self):
        """ Power off the target USB port"""
        GPIO.output(self.pin, self.disable)
        return self.status() == self.POWERED_OFF

    def status(self):
        """ Determine the current power state of the USB port"""
        is_enabled = GPIO.input(self.pin) == self.enable
        return self.POWERED_ON if is_enabled else self.POWERED_OFF

    def toggle(self):
        """Flip the power state; return the new state."""
        if self.status() == self.POWERED_ON:
            self.off()
            return self.POWERED_OFF
        self.on()
        return self.POWERED_ON
def instantiate():
    """Factory entry point used to create the USB switch driver."""
    switch = RPiGpioUsbSwitch()
    return switch
| 3.25 | 3 |
glamod-parser/glamod/parser/processors.py | GLAMOD-test/glamod-dm | 0 | 12773919 | <reponame>GLAMOD-test/glamod-dm
import os
import logging
import stringcase
from importlib import import_module
from cdmapp.models import SourceConfiguration, StationConfiguration, \
StationConfigurationOptional, HeaderTable, ObservationsTable
from .settings import CHUNK_CACHE_DIR, CHUNK_CACHE_DIR_DEPTH
from .chunk_manager import ChunkManager
from .record_manager import RecordManager
from .utils import get_path_sub_dirs, timeit
from .db_writer import DBWriter
from .rules import (SourceConfigurationParserRules,
StationConfigurationParserRules, StationConfigurationOptionalParserRules,
HeaderTableParserRules, ObservationsTableParserRules)
logger = logging.getLogger(__name__)
class _DeliveryProcessorBase:
    """Template for processing one delivery table: run structure, content
    and logic checks over the delivery files, then bulk-write the cached
    chunks to the database.
    Subclasses provide ``_app_model`` (Django model), ``_table_name`` and
    ``_rules_class`` (parser rules) as class attributes.
    """
    # Database schema that all delivery tables are written into.
    _schema = 'cdm_v1'
    def __init__(self, location):
        self.location = location
        self.model_name = self._app_model.__name__
        # Chunk pickles are cached under a path mirroring the delivery location.
        data_dirs = get_path_sub_dirs(location, depth=CHUNK_CACHE_DIR_DEPTH)
        pickle_directory = os.path.join(CHUNK_CACHE_DIR, data_dirs)
        # Check classes are resolved dynamically from the model name, e.g.
        # 'HeaderTableStructureCheck' in module 'structure_check'.
        structure_check_class = self._get_check_by_name('StructureCheck')
        self._structure_check = structure_check_class(location)
        self._content_check_class = self._get_check_by_name('ContentCheck')
        self._logic_check_class = self._get_check_by_name('LogicCheck')
        self._chunk_manager = ChunkManager(pickle_directory)
        rules = self._rules_class()
        self._record_manager = RecordManager(self._app_model, rules)
        self._db_writer = DBWriter(
            self._record_manager, self._table_name, self._schema)
    def run(self):
        """Run structure, content and logic checks, in that order."""
        self._run_structure_checks()
        self._run_content_checks()
        self._run_logic_checks()
    @timeit
    def _run_structure_checks(self):
        logger.info(f'Structure checks for files of type: {self.model_name}')
        self._structure_check.run()
        # Side effect: records the delivery file list used by content checks.
        self._data_files = self._structure_check.get_files()
    @timeit
    def _run_content_checks(self):
        logger.info(f'Content checks for files of type: {self.model_name}')
        for file_path in self._data_files:
            content_check = self._content_check_class(
                file_path, self._chunk_manager, self._record_manager)
            content_check.run()
    @timeit
    def _run_logic_checks(self):
        logger.info(f'Logic checks for files of type: {self.model_name}')
        logic_check = self._logic_check_class(self._chunk_manager)
        logic_check.run()
    @timeit
    def write_to_db(self):
        """Write all cached chunks to the database (call after ``run``)."""
        logger.info(f'Writing data to DB for files of type: {self.model_name}')
        chunks = self._chunk_manager.read_cached_chunks()
        self._db_writer.write_to_db(chunks)
    def _get_check_by_name(self, check_name):
        # e.g. check_name='StructureCheck' -> class '<Model>StructureCheck'
        # in module 'glamod.parser.structure_check'.
        class_name = self.model_name + check_name
        module_name = stringcase.snakecase(check_name)
        module = import_module('.' + module_name, package='glamod.parser')
        return getattr(module, class_name)
class HeaderTableProcessor(_DeliveryProcessorBase):
    """Delivery processor for ``header_table`` files."""
    _app_model = HeaderTable
    _table_name = 'header_table'
    _rules_class = HeaderTableParserRules
class ObservationsTableProcessor(_DeliveryProcessorBase):
    """Delivery processor for ``observations_table`` files."""
    _app_model = ObservationsTable
    _table_name = 'observations_table'
    _rules_class = ObservationsTableParserRules
class SourceConfigurationProcessor(_DeliveryProcessorBase):
    """Delivery processor for ``source_configuration`` files."""
    _app_model = SourceConfiguration
    _table_name = 'source_configuration'
    _rules_class = SourceConfigurationParserRules
class StationConfigurationProcessor(_DeliveryProcessorBase):
    """Delivery processor for ``station_configuration`` files."""
    _app_model = StationConfiguration
    _table_name = 'station_configuration'
    _rules_class = StationConfigurationParserRules
class StationConfigurationOptionalProcessor(_DeliveryProcessorBase):
    """Delivery processor for ``station_configuration_optional`` files."""
    _app_model = StationConfigurationOptional
    _table_name = 'station_configuration_optional'
    _rules_class = StationConfigurationOptionalParserRules
| 1.875 | 2 |
past_archive/swexpert/2027(makeDiagonal).py | DongHyunByun/algorithm_practice | 0 | 12773920 | <reponame>DongHyunByun/algorithm_practice
# Print a 5x5 grid with '#' on the main diagonal and '+' everywhere else.
for row in range(5):
    line = "".join('#' if row == col else '+' for col in range(5))
    print(line)
| 3.953125 | 4 |
clif/testing/python/return_value_policy_test.py | rwgk/clif | 0 | 12773921 | <reponame>rwgk/clif
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
from clif.testing.python import return_value_policy
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.python import return_value_policy_pybind11
except ImportError:
return_value_policy_pybind11 = None
# pylint: enable=g-import-not-at-top
# Pairs of (wrapper function name, regex for the expected constructor-call
# trace in the returned object's ``mtxt``). Suffixes such as ``_CpCtor`` /
# ``_MvCtor`` record the C++ copy/move constructions performed on return.
_TEST_CASES = (
    ('return_value', '^return_value_MvCtor(_MvCtor)*$'),
    ('return_reference', r'^return_reference(_CpCtor)*(_MvCtor)*$'),
    ('return_const_reference', '^return_const_reference_CpCtor(_MvCtor)*$'),
    ('return_pointer', '^return_pointer$'),
    ('return_const_pointer', '^return_const_pointer_CpCtor$'),
    ('return_shared_pointer', '^return_shared_pointer$'),
    ('return_unique_pointer', '^return_unique_pointer$'),
    ('return_value_nocopy', '^return_value_nocopy_MvCtor(_MvCtor)*$'),
    ('return_reference_nocopy', '^return_reference_nocopy_MvCtor$'),
    ('return_pointer_nocopy', '^return_pointer_nocopy$'),
    ('return_shared_pointer_nocopy', '^return_shared_pointer_nocopy$'),
    ('return_unique_pointer_nocopy', '^return_unique_pointer_nocopy$'),
    ('return_value_nomove', '^return_value_nomove_CpCtor(_CpCtor)*$'),
    ('return_reference_nomove', '^return_reference_nomove_CpCtor(_CpCtor)*$'),
    ('return_pointer_nomove', '^return_pointer_nomove$'),
    ('return_const_reference_nomove',
     '^return_const_reference_nomove_CpCtor(_CpCtor)*$'),
    ('return_const_pointer_nomove', '^return_const_pointer_nomove_CpCtor$'),
    ('return_shared_pointer_nomove', '^return_shared_pointer_nomove$'),
    ('return_unique_pointer_nomove', '^return_unique_pointer_nomove$'),
    ('return_pointer_nocopy_nomove', '^return_pointer_nocopy_nomove$'),
    ('return_shared_pointer_nocopy_nomove',
     '^return_shared_pointer_nocopy_nomove$'),
    ('return_unique_pointer_nocopy_nomove',
     '^return_unique_pointer_nocopy_nomove$'),
)
def MakeNamedParameters():
  """Returns (test_name, wrapped_function, expected_regex) triples.

  One triple is produced per (code generator, test case) combination; the
  pybind11 generator is skipped when its extension failed to import.
  """
  params = []
  generators = (('c_api', return_value_policy),
                ('pybind11', return_value_policy_pybind11))
  for code_gen, wrapper_lib in generators:
    if wrapper_lib is None:
      continue
    for return_function, expected_regex in _TEST_CASES:
      test_name = '_'.join((return_function, code_gen))
      params.append(
          (test_name, getattr(wrapper_lib, return_function), expected_regex))
  return params
@parameterized.named_parameters(MakeNamedParameters())
class ReturnValuePolicyTestCase(parameterized.TestCase):
  """Checks the constructor-call trace recorded for each return-value policy."""

  def testReturnValue(self, return_function, expected):
    # ``mtxt`` records which C++ copy/move constructors ran during return.
    ret = return_function()
    self.assertRegex(ret.mtxt, expected)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 1.773438 | 2 |
experitur/core/trial.py | moi90/experitur | 3 | 12773922 | <filename>experitur/core/trial.py
import collections.abc
import inspect
import itertools
from collections import OrderedDict, defaultdict
from collections.abc import Collection
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
List,
Mapping,
Tuple,
TypeVar,
Union,
)
from experitur.core.logger import YAMLLogger
if TYPE_CHECKING: # pragma: no cover
from experitur.core.experiment import Experiment
from experitur.core.trial_store import TrialStore
T = TypeVar("T")
def _get_object_name(obj):
try:
return obj.__name__
except AttributeError:
pass
try:
return obj.__class__.__name__
except AttributeError:
pass
raise ValueError(f"Unable to determine the name of {obj}")
class Trial(collections.abc.MutableMapping):
"""
Data related to a trial.
Args:
store: TrialStore
data (optional): Trial data dictionary.
func (optional): Experiment function.
This is automatically instanciated by experitur and provided to the experiment function:
.. code-block:: python
@Experiment(parameters={"a": [1,2,3], "prefix_a": [10]})
def exp1(trial: Trial):
# Access current value of parameter `a` (item access)
trial["a"]
# Access extra data (attribute access)
trial.id # Trial ID
trial.wdir # Trial working directory
def func(a=1, b=2):
...
# Record default trial of `func`
trial.record_defaults(func)
# Call `func` with current value of parameter `a` and `b`=5
trial.call(func, b=5)
# Access only trial starting with a certain prefix
trial_prefix = trial.prefix("prefix_")
# All the above works as expected:
# trial_prefix.<attr>, trial_prefix[<key>], trial_prefix.record_defaults, trial_prefix.call, ...
# In our case, trial_prefix["a"] == 10.
"""
def __init__(self, data: Mapping, store: "TrialStore", prefix: str = ""):
self._store = store
self._data = data
self._prefix = prefix
self._validate_data()
self._logger = YAMLLogger(self)
def _validate_data(self):
if "wdir" not in self._data:
raise ValueError("data has to contain 'wdir'")
if "id" not in self._data:
raise ValueError("data has to contain 'id'")
# MutableMapping provides concrete generic implementations of all
# methods except for __getitem__, __setitem__, __delitem__,
# __iter__, and __len__.
def __getitem__(self, name):
"""Get the value of a parameter."""
return self._data["resolved_parameters"][f"{self._prefix}{name}"]
def __setitem__(self, name, value):
"""Set the value of a parameter."""
self._data["resolved_parameters"][f"{self._prefix}{name}"] = value
def __delitem__(self, name):
"""Delete a parameter."""
del self._data["resolved_parameters"][f"{self._prefix}{name}"]
def __iter__(self):
start = len(self._prefix)
return (
k[start:]
for k in self._data["resolved_parameters"]
if k.startswith(self._prefix)
)
def __len__(self):
return sum(1 for k in self)
def __repr__(self):
return f"<Trial({dict(self)})>"
def get(self, key, default=None):
"""Get a parameter value.
If key is not present, it is initialized with the provided default, just like :py:meth:`Trial.setdefault`.
"""
return self.setdefault(key, default)
def save(self):
self._store[self.id] = self._data
@property
def is_failed(self):
return self._data.get("error", None) is not None
def remove(self):
"""Remove this trial from the store."""
del self._store[self.id]
def get_result(self, name):
result = self._data["result"]
if result is None:
return None
return result.get(name, None)
def __getattr__(self, name: str):
"""Access extra attributes."""
__tracebackhide__ = True # pylint: disable=unused-variable
try:
return self._data[name]
except KeyError:
raise AttributeError(name) from None
def __setattr__(self, name: str, value):
if name.startswith("_"):
object.__setattr__(self, name, value)
else:
self._data[name] = value
def record_defaults(self, func: Callable, **defaults):
"""
Record default parameters from a function and additional parameters.
Args:
func (callable): The keyword arguments of this function will be recorded if not already present.
**kwargs: Additional arguments that will be recorded if not already present.
Use :py:class:`functools.partial` to pass keyword parameters to `func` that should not be recorded.
"""
__tracebackhide__ = True # pylint: disable=unused-variable
if not callable(func):
raise ValueError("Only callables may be passed as first argument.")
# Ignore partial keyword arguments
try:
partial_keywords = func.keywords # type: ignore
except AttributeError:
partial_keywords = set()
func_defaults = {
param.name: param.default
for param in inspect.signature(func).parameters.values()
if param.name not in partial_keywords
}
# First set explicit defaults
for name, value in defaults.items():
if func_defaults is not None and name not in func_defaults:
raise TypeError(f"{func} got an unexpected keyword argument '{name}'")
self.setdefault(name, value)
# Second, set remaining func defaults
if func_defaults is not None:
self.setdefaults(
{
k: v
for k, v in func_defaults.items()
if v is not inspect.Parameter.empty
}
)
def call(self, func: Callable[..., T], *args, **kwargs) -> T:
"""
Call the function applying the configured parameters.
Args:
func (callable): Function to be called.
*args: Positional arguments to the function.
**kwargs: Named defaults for the function.
Returns:
The return value of the function.
The default values of the function are determined using :py:func:`inspect.signature`.
Additional defaults can be given using ``**kwargs``.
These defaults are recorded into the trial.
As all default values are recorded, make sure that these have simple
YAML-serializable types.
If the called function throws an exception, an exception of the same type
is thrown with additional information about the parameters.
Use :py:class:`functools.partial` to pass hidden keyword parameters that should not be recorded.
"""
# Record default parameters
self.record_defaults(func, **kwargs)
# Ignore partial keyword arguments
try:
partial_keywords = func.keywords # type: ignore
except AttributeError:
partial_keywords = set()
signature = inspect.signature(func)
# Apply
# Parameter names that can be given to the callable
callable_names = set(
param.name
for param in signature.parameters.values()
if param.kind in (param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY)
and param.name not in partial_keywords
and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
)
# Parameter names that have to be given to the callable
required_names = set(
param.name
for param in signature.parameters.values()
if param.default == param.empty
and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
)
# Does the callable accept kwargs?
kwargs_present = any(
param.kind == param.VAR_KEYWORD for param in signature.parameters.values()
)
if kwargs_present:
parameters = dict(self)
else:
parameters = {k: v for k, v in self.items() if k in callable_names}
# Bind known arguments and calculate missing
bound_arguments = signature.bind_partial(*args, **parameters)
missing_names = required_names - bound_arguments.arguments.keys()
if missing_names:
missing_names_str = ", ".join(f"'{n}'" for n in sorted(missing_names))
missing_names_prefixed = ", ".join(
f"'{self._prefix}{n}'" for n in sorted(missing_names)
)
raise TypeError(
f"Missing required parameter(s) {missing_names_str} for {func}.\n"
f"Supply {missing_names_prefixed} in your configuration."
)
try:
return func(*args, **parameters)
except Exception as exc:
raise type(exc)(
f"Error calling {func} (args={args}, kwargs={kwargs}) with {self}"
) from exc
def prefixed(self, prefix: str) -> "Trial":
"""
Return new :py:class:`Trial` instance with prefix applied.
Prefixes allow you to organize parameters and save keystrokes.
Example:
.. code-block:: python
trial_prefix = trial.prefix("prefix_")
trial_prefix["a"] == trial["prefix_a"] # True
"""
return Trial(self._data, self._store, f"{self._prefix}{prefix}")
def setdefaults(
self,
defaults: Union["Trial", Mapping, Iterable[Tuple[str, Any]], None] = None,
**kwargs,
):
"""
Set multiple default values for parameters that do not yet exist.
Existing keys are not overwritten.
If keyword arguments are given, the keyword arguments and their values are added.
Parameters
----------
defaults (Mapping or Iterable, optional): Default values.
**kwargs: Additional default values.
"""
itemiters = []
if isinstance(defaults, Trial):
itemiters.append(dict(defaults).items())
elif isinstance(defaults, collections.abc.Mapping):
itemiters.append(defaults.items())
elif isinstance(defaults, collections.abc.Iterable):
itemiters.append(defaults) # type: ignore
elif defaults is None:
pass
else:
raise ValueError(f"Unexpected type for defaults: {type(defaults)}")
itemiters.append(kwargs.items())
for key, value in itertools.chain(*itemiters):
self.setdefault(key, value)
return self
def choice(
    self,
    parameter_name: str,
    choices: Union[Mapping, Iterable],
    default=None,
):
    """
    Choose a value from `choices` whose name matches the value stored
    under `parameter_name`.

    If `default` is given and `parameter_name` is not yet configured,
    `default` is recorded as its value first. If `parameter_name` remains
    unconfigured, a KeyError is raised.

    Args:
        parameter_name (str): Name of the parameter.
        choices (Mapping or Iterable): Mapping of names -> values, or an
            iterable of named values (e.g. classes or functions).
        default: Default key in choices.

    Returns:
        The configured value from `choices`.

    Raises:
        ValueError: If `choices` has an unexpected type, or if two entries
            of an iterable derive the same name.
    """
    if isinstance(choices, collections.abc.Mapping):
        mapping = choices
    elif isinstance(choices, collections.abc.Iterable):
        names_values = [(_get_object_name(v), v) for v in choices]
        mapping = OrderedDict(names_values)
        if len(mapping) != len(names_values):
            # Colliding names would silently shadow each other; fail loudly.
            # (Bug fix: this message was missing its f-string prefix and
            # printed the literal text "{choices}".)
            raise ValueError(f"Duplicate names in {choices}")
    else:
        raise ValueError(f"Unexpected type of choices: {choices!r}")
    if default is not None:
        self.setdefault(parameter_name, default)
    entry_name = self[parameter_name]
    return mapping[entry_name]
def log(self, values, **kwargs):
    """
    Record metric values for this trial.

    Args:
        values (Mapping): Metric name -> value pairs to record.
        **kwargs: Additional metrics; these take precedence over `values`
            on key collisions.
    """
    merged = dict(values)
    merged.update(kwargs)
    self._logger.log(merged)
class TrialCollection(Collection):
    """A list-backed collection of :class:`Trial` objects with query helpers."""

    # Sentinel recorded when a trial has no value for a parameter.
    _missing = object()

    def __init__(self, trials: List[Trial]):
        self.trials = trials

    def __len__(self):
        return len(self.trials)

    def __iter__(self):
        return iter(self.trials)

    def __contains__(self, trial: Trial):
        return trial in self.trials

    def pop(self, index=-1):
        """Remove and return the trial at `index` (last by default)."""
        return self.trials.pop(index)

    @property
    def independent_parameters(self):
        """Union of the independent parameters declared by any trial."""
        names = set()
        for trial in self.trials:
            experiment = getattr(trial, "experiment", {})
            names.update(experiment.get("independent_parameters", []))
        return names

    @property
    def varying_parameters(self):
        """Independent parameters that vary in this trial collection."""
        independent = self.independent_parameters
        values_seen = defaultdict(set)
        for trial in self.trials:
            for name in independent:
                try:
                    value = trial[name]
                except KeyError:
                    # A missing value still counts as "different".
                    value = self._missing
                values_seen[name].add(value)
        return {name for name in independent if len(values_seen[name]) > 1}

    def to_pandas(self):
        """Return the trials as a pandas DataFrame indexed by trial id."""
        import pandas as pd

        records = [trial.data for trial in self.trials]
        return pd.json_normalize(records, max_level=1).set_index("id")

    def one(self):
        """Return the single trial in this collection, or raise ValueError."""
        if len(self.trials) == 1:
            return self.trials[0]
        raise ValueError("No individual trial.")

    def filter(self, fn: Callable[[Trial], bool]) -> "TrialCollection":
        """
        Return a filtered version of this trial collection.

        Args:
            fn (callable): Receives a Trial and returns True to keep it.

        Returns:
            A new trial collection.
        """
        kept = [trial for trial in self.trials if fn(trial)]
        return TrialCollection(kept)
| 2.5625 | 3 |
sources/experiments/data_generation/results_data.py | JohannOberleitner/pdesolver | 0 | 12773923 | <gh_stars>0
import numpy as np
import datetime
import json
def encode_ndarray(array):
    """Convert a numpy array into a JSON-serializable nested list."""
    return array.tolist()
def as_ndarray(array):
    """Convert nested lists (e.g. decoded JSON) back into a float numpy array."""
    return np.asarray(array, dtype=float)
def as_ResultsSet(json_data):
    """json.load object hook: decode dicts tagged as serialized ResultsSet.

    Untagged objects are passed through unchanged.
    """
    if '__ResultsSet__' not in json_data:
        return json_data
    return ResultsSetDecoder().decode(json_data)
class ResultsSetEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize ResultsSet instances."""

    def default(self, data):
        """Return a JSON-serializable form of `data`.

        ResultsSet instances delegate to their own encode(); anything else
        falls back to the base class, which raises TypeError.
        """
        if isinstance(data, ResultsSet):
            return data.encode()
        # Bug fix: super().default is already bound, so `self` must not be
        # passed again (doing so raised TypeError for every non-ResultsSet
        # value), and its result must be returned to the caller.
        return super().default(data)
class ResultsSetDecoder:
    """Rebuild a ResultsSet from the dict produced by ResultsSet.encode()."""

    def decode(self, json_data):
        """Return a ResultsSet populated from `json_data`.

        Raises KeyError if any of the expected fields is missing.
        """
        # "count" is read so malformed payloads fail early; the value itself
        # is redundant with len(items) and is otherwise unused.
        _count = json_data["count"]
        items = json_data["items"]
        label = json_data["label"]
        created_at = json_data["createdAt"]
        restored = ResultsSet(label, created_at)
        restored.decode(items)
        return restored
class ResultsElement:
    """Lightweight view onto one entry of a ResultsSet, addressed by index."""

    def __init__(self, resultsSet, index):
        self.resultsSet = resultsSet
        self.index = index

    def get_values(self):
        """Return the stored values for this element (read live from the set)."""
        return self.resultsSet.resultValues[self.index]

    def set_values(self, values):
        """Overwrite this element's values in the owning set, in place."""
        self.resultsSet.resultValues[self.index] = values
class ResultsSet:
    """Ordered collection of result vectors with JSON (de)serialization.

    NOTE(review): iteration state is stored on the instance (`self.index`),
    so nested or concurrent iterations over the same set interfere.
    """

    def __init__(self, label=None, timestamp=None):
        self.label = label
        # Default to "now" when no timestamp is supplied.
        self.timestamp = timestamp or datetime.datetime.utcnow()
        self.resultValues = []

    def count(self):
        """Number of stored result entries."""
        return len(self.resultValues)

    def add(self, results):
        """Append a numpy array of results, stored as a plain nested list."""
        self.resultValues.append(results.tolist())

    def __iter__(self):
        self.index = 0
        return self

    def __next__(self):
        if self.index >= len(self.resultValues):
            raise StopIteration
        element = ResultsElement(self, self.index)
        self.index += 1
        return element

    def encode(self):
        """Serialize to a plain dict (item indices are 1-based)."""
        items = [
            {'index': element.index + 1, 'values': element.get_values()}
            for element in self
        ]
        return {
            '__ResultsSet__': True,
            'label': self.label,
            'createdAt': str(self.timestamp),
            'count': self.count(),
            'items': items,
        }

    def decode(self, itemsArray):
        """Append entries from a list of {'values': ...} dicts as numpy arrays."""
        for item in itemsArray:
            self.resultValues.append(as_ndarray(item["values"]))
constants.py | Ahmed4221/CICD-Test | 0 | 12773924 | <reponame>Ahmed4221/CICD-Test<filename>constants.py
# Source URL of the UCI auto-mpg dataset used for training.
DATA_URL = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
# Column names for the raw whitespace-separated data file, in file order.
DATA_COLUMNS = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']
# Whether to normalize all features before training.
NORMALIZE = False
# Column the model predicts.
TARGET_VARIABLE = 'MPG'
# Full feature list, kept for reference:
# FEATURES_TO_USE = [ 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year', 'Europe', 'Japan', 'USA']
# Columns actually loaded (includes the target itself).
FEATURES_TO_USE = [ 'MPG', 'Horsepower','Displacement']
NORMALIZE_HORSEPOWER = False
VMBackup/main/patch/__init__.py | harvek/azure-linux-extensions | 0 | 12773925 | <filename>VMBackup/main/patch/__init__.py<gh_stars>0
#!/usr/bin/python
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import re
import platform
import traceback
from patch.UbuntuPatching import UbuntuPatching
from patch.debianPatching import debianPatching
from patch.redhatPatching import redhatPatching
from patch.centosPatching import centosPatching
from patch.SuSEPatching import SuSEPatching
from patch.oraclePatching import oraclePatching
from patch.KaliPatching import KaliPatching
from patch.DefaultPatching import DefaultPatching
from patch.FreeBSDPatching import FreeBSDPatching
from patch.NSBSDPatching import NSBSDPatching
# Define the function in case waagent(<2.0.4) doesn't have DistInfo()
def DistInfo():
    """Return [distro_name, version(, id)] for the current platform.

    Falls back to ['Default', '1.0'] if detection fails. Kept compatible with
    very old Python (2.4+), hence the dated platform APIs; note that
    platform.linux_distribution() and platform.dist() were removed in
    Python 3.8, in which case this returns the default.
    """
    try:
        system = platform.system()
        if 'FreeBSD' in system:
            # Strip release suffixes such as "-RELEASE" from e.g. "12.1-RELEASE".
            release = re.sub(r'\-.*\Z', '', str(platform.release()))
            return ['FreeBSD', release]
        if 'NS-BSD' in system:
            release = re.sub(r'\-.*\Z', '', str(platform.release()))
            return ['NS-BSD', release]
        if 'linux_distribution' in dir(platform):
            distinfo = list(platform.linux_distribution(full_distribution_name=0))
            # Some distros report an empty name; fall back to /etc/os-release.
            if distinfo[0] == '':
                with open("/etc/os-release", "r") as osfile:
                    for line in osfile:
                        fields = line.split("=")
                        if fields[0] == "NAME":
                            # NAME="Ubuntu" -> take the quoted value.
                            distinfo[0] = fields[1].split("\"")[1]
                        if distinfo[0].lower() == "sles":
                            distinfo[0] = "SuSE"
            # Remove trailing whitespace in the distro name.
            distinfo[0] = distinfo[0].strip()
            return distinfo
        else:
            # Removed in Python 3.8; the AttributeError raised there is
            # turned into the default answer by the handler below.
            return platform.dist()
    except Exception:
        # Bug fix: this module defines no `logger`, so the previous logging
        # call raised NameError and masked the real error. Print the
        # traceback instead and fall back to the default.
        traceback.print_exc()
        return ['Default', '1.0']
def GetMyPatching(logger):
    """
    Return (patching_instance, patching_class_name, original_distro).

    NOTE: Logging is not initialized at this point.
    """
    dist_info = DistInfo()
    if 'Linux' in platform.system():
        distro = dist_info[0]
    else:  # Not Linux: BSD-derived platforms report via platform.system().
        if 'FreeBSD' in platform.system():
            distro = platform.system()
        if 'NS-BSD' in platform.system():
            distro = platform.system()
    distro = distro.replace("-", "").strip('"').strip(' ')
    orig_distro = distro
    patching_class_name = distro + 'Patching'
    if patching_class_name not in globals():
        # No exact match: map substrings of the reported name onto known
        # patching families, in the original priority order.
        lowered = distro.lower()
        keyword_families = [
            (('suse',), 'SuSE'),
            (('ubuntu',), 'Ubuntu'),
            (('centos', 'big-ip'), 'centos'),
            (('debian',), 'debian'),
            (('oracle',), 'oracle'),
            (('redhat',), 'redhat'),
            (('kali',), 'Kali'),
            (('freebsd', 'gaia', 'panos'), 'FreeBSD'),
        ]
        distro = 'Default'
        for keywords, family in keyword_families:
            if any(keyword in lowered for keyword in keywords):
                distro = family
                break
        patching_class_name = distro + 'Patching'
    patching_instance = globals()[patching_class_name](logger, dist_info)
    return patching_instance, patching_class_name, orig_distro
| 1.898438 | 2 |
src/transformers/models/roformer/tokenization_utils.py | liminghao1630/transformers | 50,404 | 12773926 | <filename>src/transformers/models/roformer/tokenization_utils.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""
from typing import List
from tokenizers import NormalizedString, PreTokenizedString, normalizers
class JiebaPreTokenizer:
    """Custom HuggingFace `tokenizers` pre-tokenizer that segments text with rjieba.

    Words found in `vocab` are kept whole; unknown words are normalized with a
    BERT normalizer and re-split on whitespace into smaller pieces.
    """

    def __init__(self, vocab) -> None:
        # vocab: collection of known tokens; membership decides whether a
        # jieba-segmented word is kept intact in jieba_split().
        self.vocab = vocab
        # Normalizer used only for words *not* in the vocab; configured to
        # split around CJK characters without otherwise altering the text.
        self.normalizers = normalizers.BertNormalizer(
            clean_text=False,
            handle_chinese_chars=True,
            strip_accents=False,
            lowercase=False,
        )
        try:
            import rjieba
        except ImportError:
            raise ImportError(
                "You need to install rjieba to use RoFormerTokenizer. "
                "See https://pypi.org/project/rjieba/ for installation."
            )
        self.jieba = rjieba

    def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
        """Split `normalized_string` into NormalizedString pieces via rjieba."""
        splits = []
        # Slicing normalized_string preserves offset alignment (so
        # test_alignement_methods passes) but is slow (~6s); see the faster,
        # alignment-losing variant commented out below.
        for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
            if token in self.vocab:
                splits.append(normalized_string[start:end])
            else:
                token_list = self.normalizers.normalize_str(token).split()
                for token in token_list:
                    if token:
                        # Lay sub-tokens out consecutively from `start`;
                        # assumes normalization preserves character counts
                        # within the word — TODO confirm.
                        end = start + len(token)
                        splits.append(normalized_string[start:end])
                        start = end
        # this code test_alignement_methods can't pass but fast (300ms)
        # for token in self.jieba.cut(str(normalized_string), False):
        #     if token in self.vocab:
        #         splits.append(NormalizedString(token))
        #     else:
        #         token_list = self.normalizers.normalize_str(token).split()
        #         for token in token_list:
        #             if token:
        #                 splits.append(NormalizedString(token))
        return splits

    def pre_tokenize(self, pretok: PreTokenizedString):
        """Entry point called by `tokenizers`; delegates to jieba_split."""
        pretok.split(self.jieba_split)
| 2.109375 | 2 |
train.py | rozim/ChessAtAGlance | 0 | 12773927 | import sys
import tensorflow as tf
import leveldb
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
import warnings
import glob
import toml
import re
from contextlib import redirect_stdout
import collections
import datetime
import functools
import itertools
import math
import numpy as np
import os
import random
import sys
import time
from zipfile import ZipFile
from datetime import datetime
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import TerminateOnNaN, EarlyStopping, ModelCheckpoint, LambdaCallback, Callback
from tensorflow.keras.layers import BatchNormalization, LayerNormalization, Flatten, Add, Conv2D, Permute
from tensorflow.keras.layers import Dense, Dropout, Input, Embedding, Concatenate, Activation
from tensorflow.keras.layers import GaussianNoise, LeakyReLU, Softmax
from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, Discretization
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import AUC
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Ftrl, SGD
from tensorflow.python.keras import backend
import pandas as pd
from data import create_input_generator
from plan import load_plan
from model import create_model
from lr import create_lr_schedule
from tf_utils_callbacks.callbacks import BestNModelCheckpoint
from timing_callback import TimingCallback
# Command-line interface: --plan points at the experiment TOML file and
# repeated --d flags override individual plan settings.
FLAGS = flags.FLAGS
flags.DEFINE_string('plan', None, 'toml file')
flags.DEFINE_multi_string('d', None, 'override plan settings')
def df_to_csv(df, fn, float_format='%6.4f'):
    """Write `df` to `fn` (path or file object) as CSV, no index, fixed float format."""
    df.to_csv(fn, float_format=float_format, index=False)
class LogLrCallback(Callback):
    """Records the current learning rate into the epoch logs (and thus history.csv)."""

    def on_epoch_end(self, epoch, logs):
        try:
            # Learning-rate schedules are callables taking the epoch/step.
            logs['lr'] = float(backend.get_value(self.model.optimizer.lr(epoch)))
        except TypeError:
            # Plain (constant) learning rates are not callable.
            logs['lr'] = float(backend.get_value(self.model.optimizer.lr))
def main(_argv):
    """Train a move-prediction model per the TOML plan; write all artifacts
    (plan copy, model summary, checkpoints, history, report) to a fresh
    timestamped directory under results/."""
    flags.mark_flags_as_required(['plan'])
    # Silence TF/absl chatter and generic warnings.
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    logging.set_verbosity('error')
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    warnings.filterwarnings('ignore', category=Warning)
    t1 = time.time()
    timing = TimingCallback()
    timing.record('overall_begin')
    # Create a unique, timestamped output directory.
    out_dir = datetime.today().strftime('%Y-%m-%d_%H:%M:%S')
    out_dir = os.path.join('results', out_dir)
    print('mkdir', out_dir)
    os.mkdir(out_dir)
    # Snapshot the plan into the output directory (read-only) for reproducibility.
    plan = load_plan(FLAGS.plan)
    fn = os.path.join(out_dir, os.path.basename(FLAGS.plan))
    print(f'Write {fn}')
    with open(fn, 'w') as f:
        toml.dump(plan, f)
    os.chmod(fn, 0o444)
    mplan = plan.model
    return_legal_moves = mplan.mask_legal_moves
    dplan = plan.data
    # Build train/validate/test input pipelines; test is optional.
    ds1 = create_input_generator(dplan, dplan.train, is_train=True, return_legal_moves=return_legal_moves)
    ds2 = create_input_generator(dplan, dplan.validate, is_train=False, return_legal_moves=return_legal_moves)
    ds3 = create_input_generator(dplan, dplan.test, is_train=False, do_repeat=False,
                                 return_legal_moves=return_legal_moves) if 'test' in dplan else None
    if dplan.prefetch_to_device:
        bs = dplan.get('prefetch_to_device_buffer', None)
        ds1 = ds1.apply(tf.data.experimental.prefetch_to_device('/gpu:0', bs))
        ds2 = ds2.apply(tf.data.experimental.prefetch_to_device('/gpu:0', bs))
        ds3 = ds3.apply(tf.data.experimental.prefetch_to_device('/gpu:0', bs))
    m = create_model(mplan)
    # Save the model summary alongside the other artifacts.
    fn = os.path.join(out_dir, 'model-summary.txt')
    print(f'Write {fn}')
    with open(fn, 'w') as f:
        with redirect_stdout(f):
            m.summary()
    os.chmod(fn, 0o444)
    callbacks = [TerminateOnNaN(),
                 LogLrCallback()]
    tplan = plan.train
    (lr_callback, lr) = create_lr_schedule(tplan)
    if lr_callback:
        callbacks.append(lr_callback)
    # lr = CosineDecayRestarts(initial_learning_rate=tplan.lr,
    # first_decay_steps=tplan.first_decay_steps,
    # t_mul=1,
    # m_mul=1,
    # alpha=tplan.alpha)
    if tplan.optimizer == 'SGD':
        optimizer = SGD(learning_rate=lr)
    elif tplan.optimizer == 'Adam':
        optimizer = Adam(learning_rate=lr)
    else:
        assert False, tplan.optimizer
    m.compile(optimizer=optimizer,
              loss=SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
    #tf.keras.metrics.Precision(top_k=3, name='p_3'),
    #tf.keras.metrics.Recall(top_k=3, name='r_3')])
    # Keep the single best checkpoint by validation accuracy.
    best_path = os.path.join(out_dir, 'best.model')
    # NOTE(review): 'model=' below looks like a typo for 'mode=' (the keras
    # ModelCheckpoint convention); verify BestNModelCheckpoint's signature —
    # as written this kwarg may be silently ignored.
    callbacks.append(BestNModelCheckpoint(
        filepath=best_path,
        monitor='val_accuracy',
        model='max',
        max_to_keep=1,
        save_weights_only=False,
        verbose=0))
    callbacks.append(timing)
    timing.record('on_fit_begin')
    history = m.fit(x=ds1,
                    epochs=tplan.epochs,
                    steps_per_epoch=tplan.steps_per_epoch,
                    validation_data=ds2,
                    validation_steps=tplan.validation_steps,
                    callbacks=callbacks)
    timing.record('on_fit_end')
    df = pd.DataFrame(history.history)
    # Always save the final model in addition to the best checkpoint.
    fn = os.path.join(out_dir, 'last.model')
    print(f'Write {fn}')
    m.save(fn)
    os.chmod(fn, 0o755)
    timing.record('after_fit_begin')
    # Evaluate both the last and the best model on the held-out test set.
    test_ev, test_ev2 = None, None
    if tplan.test_steps > 0 and ds3:
        tt0 = time.time()
        print('Test (last)')
        test_ev = m.evaluate(x=ds3, return_dict=True, steps=tplan.test_steps)
        dt = time.time() - tt0
        print('Test:', test_ev, int(dt))
        print('Test (best)')
        tt0 = time.time()
        ds3 = create_input_generator(dplan, dplan.test, is_train=False, return_legal_moves=return_legal_moves) # rewind
        if dplan.prefetch_to_device:
            ds3 = ds3.apply(tf.data.experimental.prefetch_to_device('/gpu:0', 32))
        test_ev2 = tf.keras.models.load_model(best_path).evaluate(x=ds3, return_dict=True, steps=tplan.test_steps)
        dt = time.time() - tt0
        print('Test/2:', test_ev2, int(dt))
    timing.record('after_fit_end')
    # Persist the training history.
    fn = os.path.join(out_dir, 'history.csv')
    print(f'Write {fn}')
    with open(fn, 'w') as f:
        df_to_csv(df, f)
    os.chmod(fn, 0o444)
    # Write a short human-readable summary report.
    v1 = df['val_accuracy'].max()
    v2 = df['val_accuracy'].values[-1]
    fn = os.path.join(out_dir, 'report.txt')
    print(f'Write {fn}')
    with open(fn, 'w') as f:
        print(f'val_accuracy {v1:6.4f} (best)')
        print(f' {v2:6.4f} (last)')
        if test_ev:
            print(f'test_accuracy {test_ev2["accuracy"]:6.4f} (best)')
            print(f' {test_ev["accuracy"]:6.4f} (last)')
        f.write(f'val_accuracy : {v1:6.4f} (best)\n')
        f.write(f'val_accuracy : {v2:6.4f} (last)\n')
        if test_ev:
            f.write(f'test_accuracy : {test_ev2["accuracy"]:6.4f} (best)\n')
            f.write(f'test_accuracy : {test_ev["accuracy"]:6.4f} (last)\n')
        f.write(f'time : {int(time.time() - t1)}\n')
    os.chmod(fn, 0o444)
    timing.record('overall_end')
    # Dump per-phase timing statistics gathered by TimingCallback.
    print('Timing')
    for k in timing.tot:
        print(f'{k:16s} | {timing.num[k]:8d} | {timing.tot[k]:.2f}')
# Script entry point: absl parses flags and invokes main().
if __name__ == '__main__':
    app.run(main)
| 1.75 | 2 |
chb/models/DllSummaries.py | psifertex/CodeHawk-Binary | 0 | 12773928 | <reponame>psifertex/CodeHawk-Binary
# ------------------------------------------------------------------------------
# Access to the CodeHawk Binary Analyzer Analysis Results
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import os
import zipfile
import xml.etree.ElementTree as ET
import chb.util.fileutil as UF
from chb.models.DllEnumDefinition import DllEnumDefinition
from chb.models.DllSummary import DllSummary
from chb.models.DllSummaryRef import DllSummaryRef
class DllSummaries(object):
    """Access to DLL function summaries stored in a jar (zip) file.

    Summaries are xml files stored as <dll-dir>/<function>.xml inside the jar,
    where the dll directory name is the dll name lowercased with '.' replaced
    by '_' (optionally with a '_dll' suffix).
    """

    def __init__(self, models, jarfilename):
        self.models = models
        self.jarfile = zipfile.ZipFile(jarfilename, 'r')
        # All entry names present in the jar, for membership checks.
        self.filenames = [info.filename for info in self.jarfile.infolist()]
        # (dll, fname) -> DllSummary / DllSummaryRef (lazy cache).
        self.dllsummaries = {}

    def _get_filename(self, dll, fname):
        """Return the jar entry name for (dll, fname), or None if absent."""
        # Bug fix: zip entry names always use '/' separators, so os.sep
        # (which is '\\' on Windows) must not be used here.
        base = dll.lower().replace('.', '_')
        name = base + '/' + fname + '.xml'
        if name in self.filenames:
            return name
        name = base + '_dll/' + fname + '.xml'
        if name in self.filenames:
            return name
        return None

    def has_summary(self, dll, fname):
        """True if the jar contains a summary for dll:fname."""
        return self._get_filename(dll, fname) is not None

    def get_summary(self, dll, fname):
        """Return the (cached) summary object for dll:fname.

        Raises UF.CHBError if no summary exists.
        """
        if self.has_summary(dll, fname):
            if not (dll, fname) in self.dllsummaries:
                self._read_summary(dll, fname)
            return self.dllsummaries[(dll, fname)]
        else:
            raise UF.CHBError('Summary for ' + dll + ':' + fname + ' not found')

    def _read_summary(self, dll, fname):
        """Parse the summary xml and cache it, resolving refer-to indirections."""
        def isref(xnode): return (not (xnode.find('refer-to') is None))
        xnode = self.get_summary_xnode(dll, fname)
        if xnode is None:
            raise UF.CHBError('Summary for ' + dll + ':' + fname + ' may be corrupted')
        if isref(xnode):
            refnode = xnode.find('refer-to')
            self.dllsummaries[(dll, fname)] = DllSummaryRef(self.models, refnode, dll, fname)
        else:
            self.dllsummaries[(dll, fname)] = DllSummary(self.models, xnode)

    def get_summary_xnode(self, dll, fname):
        """Return the <libfun> xml node of the summary for dll:fname."""
        filename = self._get_filename(dll, fname)
        if filename is None:
            raise UF.CHBError('Error in obtaining summary for ' + dll + ':' + fname)
        zfile = self.jarfile.read(filename)
        # Bug fix: jarfile.read returns bytes; str(bytes) yields "b'...'"
        # which is not parseable XML. ElementTree accepts bytes directly.
        xnode = ET.fromstring(zfile).find('libfun')
        if xnode is None:
            raise UF.CHBError('Unable to load summary for ' + dll + ':' + fname
                              + ': libfun node not found')
        return xnode
| 1.195313 | 1 |
pyckaxe/utils/logging/preview.py | Arcensoth/pyckaxe | 3 | 12773929 | import asyncio
import random
from pyckaxe.utils.logging import get_logger
def preview_logging():
    """Emit one record at every level, plus an exception, to preview formatting."""
    log = get_logger("preview_logging")
    log.debug("debug")
    log.info("info")
    log.warning("warning")
    log.error("error")
    log.critical("critical")
    try:
        raise ValueError("don't worry this is a fake error")
    except ValueError:
        # Bug fix: catch the specific exception instead of a bare `except:`,
        # which would also swallow KeyboardInterrupt/SystemExit.
        log.exception("exception")
async def preview_async_logging():
    """Concurrently emit one record per level after random short delays."""
    log = get_logger("preview_async_logging")

    async def emit(write, message):
        # Sleep 0.5-2.0s so the records interleave non-deterministically.
        await asyncio.sleep(random.randint(500, 2000) / 1000)
        write(message)

    await asyncio.gather(
        emit(log.debug, "debug"),
        emit(log.info, "info"),
        emit(log.warning, "warning"),
        emit(log.error, "error"),
        emit(log.critical, "critical"),
    )
| 2.515625 | 3 |
zfs/posix/__init__.py | mcclung/zfsp | 600 | 12773930 | <gh_stars>100-1000
import logging
from .. import ondisk
from .. import datasets
from zfs.posix.attributes import POSIXAttrs_for
logger = logging.getLogger(__name__)
class PosixObject(object):
    """Base for POSIX filesystem objects (files, directories) backed by a dnode."""

    def __init__(self, dnode: ondisk.DNode, dataset: datasets.Dataset) -> None:
        self.dnode = dnode
        self.dataset = dataset
        # POSIX attributes are parsed from the dnode's bonus buffer using the
        # attribute layout registered for this dataset.
        self.attrs = POSIXAttrs_for(dataset)(dnode.bonus)
from .posix_file import File
from .directory import Directory
| 1.84375 | 2 |
alpha_blending.py | michelecos/py_imagecompose | 0 | 12773931 | <filename>alpha_blending.py<gh_stars>0
# Alpha blending demo: composite a foreground onto a background with a
# per-pixel alpha matte (out = alpha * fg + (1 - alpha) * bg).
import cv2
# Read the foreground, background and alpha-matte images
foreground = cv2.imread("puppets.png")
background = cv2.imread("ocean.png")
alpha = cv2.imread("puppets_alpha.png")
# Convert uint8 to float so the multiplications below do not overflow/truncate
foreground = foreground.astype(float)
background = background.astype(float)
# Normalize the alpha mask to keep intensity between 0 and 1
alpha = alpha.astype(float)/255
# Multiply the foreground with the alpha matte
foreground = cv2.multiply(alpha, foreground)
# Multiply the background with ( 1 - alpha )
background = cv2.multiply(1.0 - alpha, background)
# Add the masked foreground and background.
outImage = cv2.add(foreground, background)
# Display image (divide by 255: imshow expects floats in [0, 1])
cv2.imshow("outImg", outImage/255)
cv2.waitKey(0)
| 3.15625 | 3 |
protocols/abstract.py | lvh/async-pep | 2 | 12773932 | <gh_stars>1-10
"""
The interfaces for implementing asynchronous IO.
"""
import abc
class Protocol(metaclass=abc.ABCMeta):
    """Event-driven handler for a single connection.

    Subclasses implement data_received(); connected()/disconnected() manage
    the `transport` attribute (None while disconnected).
    """

    def connected(self, transport):
        """
        Called when the connection is established.

        Stores `transport` on the instance so the protocol can write to it.
        """
        self.transport = transport

    @abc.abstractmethod
    def data_received(self, data):
        """
        Called when some data is received.
        """

    def disconnected(self, reason):
        """
        Called when the connection is closed.

        The transport reference is dropped; `reason` describes the cause.
        """
        self.transport = None
class Transport(metaclass=abc.ABCMeta):
    """Write side of a connection: accepts outgoing data and controls shutdown."""

    @abc.abstractmethod
    def write(self, data):
        """
        Write some data into the transport.

        The data must be buffer of bytes.
        """

    @abc.abstractmethod
    def write_sequence(self, sequence):
        """
        Write a sequence of data.

        The sequence must be a sequence of buffers of bytes.
        """

    @abc.abstractmethod
    def close(self):
        """
        Close the connection after sending queued data.
        """

    @abc.abstractmethod
    def abort(self):
        """
        Immediately close the connection without sending queued data.
        """

    @abc.abstractmethod
    def half_close(self):
        """
        Close the connection after sending queued data.

        Incoming data will still be accepted.
        """
| 3.25 | 3 |
src/integrated_klqp.py | pmh47/textured-mesh-gen | 30 | 12773933 |
from enum import Enum
import numpy as np
import tensorflow as tf
from edward1_utils import get_ancestors, get_descendants
class GenerativeMode(Enum):
    """How the generative model is being evaluated/sampled."""
    UNCONDITIONED = 1 # i.e. sampling the learnt prior
    CONDITIONED = 2 # i.e. sampling the posterior, with variational samples substituted
    RECONSTRUCTION = 3 # i.e. mode of the posterior
def noncopying_integrated_reparam_klqp(generative_builder, variational_builder, name_to_data_map, discrete_name_to_states_map, sample_count=1, beta=1., alpha=5.e6, grad_clip_magnitude=None):
# Every variable in the generative and variational should have a leading dimension that is 'IID', corresponding to
# an index-into-batch or otherwise sampled independently -- when we make substitutions, this dimension may be
# expanded to incorporate more samples. Thus, all RVs are indexed by iid-index, *
# Generative RVs are created by lambdas, taking zero or one parameters. There should be zero parameters when
# dim0 (i.e. the iid-dimension) has size fixed by ancestral variables; there should be one parameter when it's a 'root'
# variable (i.e. doesn't have any ancestor-RVs) and its base dim0 should be multiplied by that parameter
# Variational RVs are created similarly; the name given to the lambda should match that of the corresponding
# generative variable. Sample/discrete-expanded observations are retrived with a second lambda
# generative_builder is free to return any type or None; for example, it may choose to return an object containing
# some of its random variables; the unconditioned and mode-reconstruction versions of this result are returned to
# the caller
# ** note that we do not allow (but do not check for!) non-leaf non-RV tensors that are iib-indexed and have an RV as a sibling to
# ** be included as parents of RVs in the graph, as these cannot easily be expanded to the correct dimensionality -- their iid-index
# ** will always be of 'base' size, and will not broadcast correctly against 'upsampled' iid-indices of the sibling RV
# ** this could be fixed by handling such things essentially the same as expansion-like-discrete
# ** note that having RVs created with a non-default (i.e. not scalar) sample_shape will not work in general, as we call rv.sample()
# ** directly without passing this in -- so the shape will not be what the caller expects
assert len(discrete_name_to_states_map) < 2
if len(discrete_name_to_states_map) == 1:
assert False # ...as this is broken for now -- discrete states get 'blurred together'
discrete_name, discrete_states = discrete_name_to_states_map.items()[0] # discrete_states is a numpy array indexed by discrete-index, *
assert discrete_name not in name_to_data_map
else:
discrete_name = None
discrete_states = np.zeros([1])
# Build the 'prior', i.e. the generative without variational substitutions, so we can evaluate the prior probability of the variational samples later
name_to_unconditioned_generative_variable = {}
generative_root_variable_names = set()
def make_unconditioned_rv(name, builder):
with tf.name_scope(name):
assert name not in name_to_unconditioned_generative_variable
is_root_variable = builder.__code__.co_argcount == 1 # ideally the below assert would *define* root-ness (indeed, it does, conceptually), but can't evaluate it before the variable is created!
variable = builder(1) if is_root_variable else builder()
assert is_root_variable == (len(get_ancestors(variable, name_to_unconditioned_generative_variable.values())) == 0) # ** could be made more efficient by caching, so quickly know chunks of the graph do/don't have ancestor-RVs
if is_root_variable:
generative_root_variable_names.add(name)
name_to_unconditioned_generative_variable[name] = variable
return variable.value
with tf.variable_scope('generative'), tf.name_scope('unconditioned'):
unconditioned_generative = generative_builder(make_unconditioned_rv, GenerativeMode.UNCONDITIONED)
def expand_like_discrete(substituted_value):
# This will be applied to all variables that aren't indexed by discrete-state
substituted_value = tf.reshape(substituted_value, [sample_count, -1] + list(map(int, substituted_value.get_shape()[1:]))) # indexed by sample-index, iid-index, *
substituted_value = tf.tile(substituted_value, [1, discrete_states.shape[0]] + [1] * (substituted_value.get_shape().ndims - 2)) # indexed by sample-index, discrete-index * iid-index, *
return tf.reshape(substituted_value, [-1] + list(map(int, substituted_value.get_shape()[2:]))) # indexed by sample-index * discrete-index * iid-index, *
name_to_substituted_value = {} # each value is indexed by sample-index * discrete-index * iid-index, *
# Construct expanded copies of the observations (tiled over sample and discrete indices); these are made available
# to the variational so it can reason over them, and are used as substitutions in the generative
for name in name_to_data_map:
assert name != discrete_name # ** need to think about this case!
# ** should also probably assert that the observed variable is not a variational-descendant of the discrete (or any other variable!)
substituted_value = tf.tile(
name_to_data_map[name],
[sample_count] + [1] * (name_to_data_map[name].get_shape().ndims - 1)
) # indexed by sample-index * iid-index, *
# ** is calling expand_like_discrete not strictly less efficient that just adding the discrete-state-count into the above tile?
name_to_substituted_value[name] = expand_like_discrete(substituted_value) # always expand, as an observed variable cannot be variational-descendant of the discrete
def is_variable_discrete_indexed(variable):
# Substituted values are always discrete-indexed, hence having one of them as an ancestor is a sufficient
# condition for being discrete-indexed. In practice we check the reverse, as the substitution is not an RV
# hence won't be returned as an ancestor. It is also a necessary condition, as there is no other route through
# which discrete-indexing can be added
return any(
len(get_descendants(substituted_value, [variable])) > 0
for substituted_value in name_to_substituted_value.values()
)
# Build the variational, substituting samples and expanding all variables to be indexed by sample and discrete indices
name_to_conditioned_variational_variable = {}
def make_variational_rv(name, builder):
with tf.name_scope('q_' + name):
assert name in name_to_unconditioned_generative_variable
assert name not in name_to_data_map
assert name not in name_to_conditioned_variational_variable
is_root_variable = builder.__code__.co_argcount == 1 # ideally the below assert would *define* root-ness (indeed, it does, conceptually), but can't evaluate it before the variable is created!
variable = builder(sample_count) if is_root_variable else builder()
assert is_root_variable == (
len(get_ancestors(variable, name_to_conditioned_variational_variable.values())) == 0 # it's a root variable if it doesn't have any variational RV as an ancestor...
and
all(
len(get_descendants(name_to_substituted_value[observation_name], [variable])) == 0 # ...and no observation has it as a descendant -- i.e. it doesn't have any observation as an ancestor either
for observation_name in name_to_data_map
)
) # ** could be made more efficient by caching, so quickly know chunks of the graph do/don't have ancestor-RVs
substituted_value = variable.value # indexed by sample-index * [discrete-index *] iid-index, *
if discrete_name is not None: # if there's a discrete to be integrated, then *all* substituted values must be discrete-indexed
if name == discrete_name:
assert map(int, substituted_value.get_shape()[1:]) == list(discrete_states.shape[1:]) # check the discrete values have the same shape as samples from the distribution
substituted_value = tf.tile(
discrete_states[np.newaxis, :, np.newaxis, ...],
[sample_count, 1, int(substituted_value.get_shape()[0]) / sample_count / (discrete_states.shape[0] if is_variable_discrete_indexed(variable) else 1)] + [1] * (len(discrete_states.shape) - 1)
) # indexed by sample-index, discrete-index, iid-index, *
substituted_value = tf.reshape(substituted_value, [-1] + list(discrete_states.shape[1:])) # indexed by sample-index * discrete-index * iid-index, *
else:
if not is_variable_discrete_indexed(variable):
substituted_value = expand_like_discrete(substituted_value)
name_to_conditioned_variational_variable[name] = variable # this is used to evaluate the variational density of the variational sample; for both this and next, uses ancestral substitutions in case of non-MF variational
name_to_substituted_value[name] = substituted_value # this is substituted into the generative
return substituted_value
with tf.variable_scope('variational'), tf.name_scope('conditioned'):
variational_builder(make_variational_rv, lambda observation_name: name_to_substituted_value[observation_name])
if discrete_name is not None:
assert discrete_name in name_to_conditioned_variational_variable
assert discrete_name in name_to_substituted_value
# Build the 'conditioned generative', with values substituted from the variational and observations
name_to_conditioned_generative_variable = {}
    def make_conditioned_rv(name, builder):
        """Build generative RV `name` under substitution, and return its substituted value.

        If neither a variational sample nor an observation exists for `name`, the
        variable is marginalised by ancestral sampling from the generative itself.
        """
        with tf.name_scope(name):
            is_root_variable = name in generative_root_variable_names  # i.e. whether this is an RV with no ancestor-RVs, meaning that it should be replicated according to sample_count (otherwise, replication of some ancestor should 'bubble down' to us)
            variable = builder(sample_count) if is_root_variable else builder()
            name_to_conditioned_generative_variable[name] = variable  # used to evaluate the generative density of the variational sample (and the observed data), with ancestral substitutions
            if name not in name_to_substituted_value:
                # Marginalise by sampling from the generative (with ancestral conditioning), as there's no corresponding variational or observation
                # ** could condition the warning on whether it actually has descendants!
                print('warning: {} has neither variational distribution nor observed value, hence will be marginalised by sampling'.format(name))
                substituted_value = variable.value
                if discrete_name is not None:
                    # all substituted values must be discrete-indexed when integrating a discrete
                    if not is_variable_discrete_indexed(variable):
                        substituted_value = expand_like_discrete(substituted_value)
                name_to_substituted_value[name] = substituted_value
            return name_to_substituted_value[name]
with tf.variable_scope('generative', reuse=True), tf.name_scope('conditioned'):
conditioned_generative = generative_builder(make_conditioned_rv, GenerativeMode.CONDITIONED)
if discrete_name is not None:
assert discrete_name in name_to_conditioned_generative_variable
def get_mode_or_mean(variable):
try:
return variable.distribution.mode()
except NotImplementedError:
print('warning: using mean instead of mode for {} in reconstruction'.format(variable.distribution.name))
return variable.distribution.mean() # fall back to mean, e.g. for uniform random variables
# Build a second copy of the variational, with the (variational) mode of each variable substituted, in order to do
# a full 'ancestrally modal' reconstruction in the non-MF case
name_to_variational_mode = {}
    def make_variational_reconstruction_rv(name, builder):
        """Rebuild variational RV `name` (reusing its weights) and return its mode (or mean)."""
        with tf.name_scope('q_' + name):
            assert name in name_to_unconditioned_generative_variable
            is_root_variable = builder.__code__.co_argcount == 1  # ** cache from first variational model creation above?
            # a single sample suffices here, as only the (deterministic) mode/mean is used
            variable = builder(1) if is_root_variable else builder()
            name_to_variational_mode[name] = get_mode_or_mean(variable)
            return name_to_variational_mode[name]
with tf.variable_scope('variational', reuse=True), tf.name_scope('modes'):
variational_builder(make_variational_reconstruction_rv, lambda observation_name: name_to_data_map[observation_name])
# This third copy of the generative is not used by inference, but is returned to the caller to use for reconstructions
# It does not perform any sample/discrete expansion, but substitutes variational modes for ancestral latents
    def make_reconstruction_rv(name, builder):
        """Build generative RV `name` for reconstruction, substituting variational modes for latents.

        Latents with a variational distribution are replaced by their (ancestrally
        modal) variational mode; anything else is rebuilt and reduced to its mode/mean.
        """
        with tf.name_scope(name):
            if name in name_to_variational_mode:
                return name_to_variational_mode[name]
            else:
                # ** non-use of name_to_data_map here may not be desirable if the variable is not a leaf
                variable = builder(1) if name in generative_root_variable_names else builder()
                return get_mode_or_mean(variable)
with tf.variable_scope('generative', reuse=True), tf.name_scope('reconstruction'):
reconstruction_modes = generative_builder(make_reconstruction_rv, GenerativeMode.RECONSTRUCTION)
with tf.name_scope('integrated_klqp'):
def lifted_log_prob(variable, value, name): # ** would be nice if we could rely on variable.name == name!
# variable is a random variable, indexed by sample-index * [discrete-index *] iid-index, *
# value is a tensor, indexed by sample-index * discrete-index * iid-index, *
# This function evaluates variable.log_prob on slices of value taken over discrete-index, summing away non-iid dimensions
discrete_state_count = discrete_states.shape[0]
if discrete_name is None:
log_prob = variable.distribution.log_prob(value)
return tf.reduce_sum(log_prob, axis=list(range(1, log_prob.get_shape().ndims)))[np.newaxis, ...]
elif is_variable_discrete_indexed(variable):
log_prob = variable.distribution.log_prob(value) # indexed by sample-index * discrete-index * iid-index, *
log_prob = tf.reduce_sum(log_prob, axis=list(range(1, log_prob.get_shape().ndims))) # indexed by sample-index * discrete-index * iid-index
log_prob = tf.reshape(log_prob, [sample_count, discrete_state_count, -1]) # indexed by sample-index, discrete-index, iid-index
return tf.reshape(tf.transpose(log_prob, [1, 0, 2]), [discrete_state_count, -1]) # indexed by discrete-index, sample-index * iid-index
else:
value = tf.reshape(value, [sample_count, discrete_state_count, -1] + list(map(int, value.get_shape()[1:]))) # indexed by sample-index, discrete-index, iid-index, *
value = tf.transpose(value, [1, 0, 2] + list(range(3, value.get_shape().ndims))) # indexed by discrete-index, sample-index, iid-index, *
value = tf.reshape(value, [discrete_state_count, -1] + list(map(int, value.get_shape()[3:]))) # indexed by discrete-index, sample-index * iid-index, *
log_prob = tf.stack([
variable.distribution.log_prob(value[state_index])
for state_index in range(discrete_state_count)
]) # indexed by discrete-index, sample-index * iid-index, *
return tf.reduce_sum(log_prob, axis=range(2, log_prob.get_shape().ndims)) # indexed by discrete-index, sample-index * iid-index
if discrete_name is not None:
discrete_qz_probs = tf.exp(lifted_log_prob(
name_to_conditioned_variational_variable[discrete_name],
name_to_substituted_value[discrete_name], # this is the discrete states tiled over sample-index and iid-index
discrete_name
)) # indexed by discrete-index, sample-index * iid-index; this is the probability under the variational, of each discrete state
        def E_log_prob_wrt_discrete(variable, value, name):  # ** again, would be nice if could rely on variable.name == name!
            """Scalar expected log-probability of `value` under `variable`.

            log_prob is indexed by sample-index * [discrete-index *] iid-index, *
            The result is a mean over samples and minibatch-elements, an expectation
            over discrete-states, and a sum over remaining dimensions.
            """
            maybe_weighted_log_prob = lifted_log_prob(variable, value, name)  # indexed by discrete-index, sample-index * iid-index
            if discrete_name is not None:
                # weight each discrete state's contribution by its variational probability,
                # yielding the expectation over the discrete under q
                maybe_weighted_log_prob *= discrete_qz_probs
            return tf.reduce_mean(maybe_weighted_log_prob)  # that we do a mean over iid-index means we treat the minibatch-indexing as independent sampling, not a joint rv
log_Px = sum(
E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name)
for name in name_to_data_map
)
log_Pz = sum(
E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name)
# for name in name_to_conditioned_generative_variable
for name in name_to_conditioned_variational_variable # variational not generative so we only include things with variational (not prior) substitutions
if name != discrete_name # ...as we use L1 divergence for this instead
if name not in name_to_data_map # ...as it's in P(x) instead
)
log_Qz = sum(
E_log_prob_wrt_discrete(name_to_conditioned_variational_variable[name], name_to_substituted_value[name], name)
for name in name_to_conditioned_variational_variable
if name != discrete_name # ...as we use L1 divergence for this instead
)
for name in name_to_conditioned_variational_variable:
if name != discrete_name:
if name not in name_to_data_map:
tf.summary.scalar('P(z_' + name + ')', E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name))
for name in name_to_data_map:
tf.summary.scalar('P(x_' + name + ')', E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name))
for name in name_to_conditioned_variational_variable:
if name != discrete_name:
value = E_log_prob_wrt_discrete(name_to_conditioned_variational_variable[name], name_to_substituted_value[name], name)
tf.summary.scalar('Q(z_' + name + ')', value)
if discrete_name is not None:
discrete_z_probs = tf.exp(lifted_log_prob(
name_to_unconditioned_generative_variable[discrete_name],
name_to_substituted_value[discrete_name], # this is the discrete states tiled over sample-index and iid-index
discrete_name
)) # indexed by discrete-index, sample-index * iid-index; this is the prior (unconditioned gen.) probability of the discrete states; it will be constant over sample-index and iid-index iff the discrete has no gen. ancestors
discrete_z_probs = tf.reduce_mean(discrete_z_probs, axis=1) # indexed by discrete-index
discrete_qz_probs = tf.reduce_mean(discrete_qz_probs, axis=1) # ditto; the mean here is calculating the aggregated posterior over the batch and samples
discrete_divergence_loss = tf.reduce_mean(tf.abs(discrete_z_probs - discrete_qz_probs)) * alpha # L1 loss
else:
discrete_divergence_loss = 0.
tf.losses.add_loss(0.) # this is needed because get_total_loss throws instead of returning zero if no losses have been registered
additional_losses = tf.losses.get_total_loss()
loss = -(log_Px + (log_Pz - log_Qz) * beta) + discrete_divergence_loss + additional_losses
tf.summary.scalar('inference/loss', loss)
tf.summary.scalar('inference/log_Px', log_Px)
tf.summary.scalar('inference/log_Pz', log_Pz)
tf.summary.scalar('inference/log_Qz', log_Qz)
tf.summary.scalar('inference/Ldd', discrete_divergence_loss)
tf.summary.scalar('inference/L*', additional_losses)
var_list = tf.trainable_variables()
grads = tf.gradients(loss, [v._ref() for v in var_list])
abs_grads = tf.abs(tf.concat([tf.reshape(grad, [-1]) for grad in grads if grad is not None], axis=0))
loss = tf.Print(loss, [log_Px, log_Pz * beta, log_Qz * beta, discrete_divergence_loss, additional_losses, tf.reduce_mean(abs_grads), tf.reduce_max(abs_grads)], 'p(x), p(z), q(z), Ldd, L*, <|g|>, max |g| = ')
if grad_clip_magnitude is not None:
grads, _ = tf.clip_by_global_norm(grads, grad_clip_magnitude)
return loss, list(zip(grads, var_list)), unconditioned_generative, reconstruction_modes, conditioned_generative
| 2.421875 | 2 |
models/RegistrationToken.py | lavalamp-/RootTheBox | 3 | 12773934 | <reponame>lavalamp-/RootTheBox<filename>models/RegistrationToken.py
# -*- coding: utf-8 -*-
'''
Created on Sep 22, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from os import urandom
from sqlalchemy import Column
from sqlalchemy.types import Unicode, Boolean
from models.BaseGameObject import BaseObject
from models import dbsession
class RegistrationToken(BaseObject):
    ''' One-time registration token; value is 6 random hex characters (Python 2 code) '''

    # 3 random bytes hex-encoded -> 6 chars; uniqueness enforced at the DB level
    value = Column(Unicode(6), unique=True, nullable=False,
        default=lambda: unicode(urandom(3).encode('hex'))
    )
    # set to True once the token has been redeemed
    used = Column(Boolean, nullable=False, default=False)

    @classmethod
    def all(cls):
        ''' Returns a list of all registration tokens in the database '''
        return dbsession.query(cls).all()

    @classmethod
    def by_id(cls, ident):
        ''' Returns the token with primary key `ident`, or None '''
        return dbsession.query(cls).filter_by(id=ident).first()

    @classmethod
    def by_value(cls, value):
        ''' Returns the token with the given value, or None '''
        return dbsession.query(cls).filter_by(value=unicode(value)).first()
index.py | agusawa/mie-gacoan-simulation | 0 | 12773935 | <gh_stars>0
import csv
import simpy
from tabulate import tabulate
from gacoan import config
from gacoan.app import Gacoan
if __name__ == "__main__":
    # Show the simulation configuration to the user before running
    # (labels are in Indonesian: duration, arrival rate, capacities, service times)
    print(
        tabulate(
            [
                ["Durasi Simulasi", config.SIMULATION_TIME, "menit"],
                ["Arrival Rate", config.ARRIVAL_RATE, "menit"],
                ["Max Kedatangan", config.MAX_ARRIVALS, "pelanggan"],
                ["Min Order", config.MIN_ORDER_QUANTITY, "qty"],
                ["Max Order", config.MAX_ORDER_QUANTITY, "qty"],
                ["", ""],
                ["Server Kasir", config.CASHIER_CAPACITY, "server"],
                ["Mesin Perebus Mie", config.BOILER_CAPACITY, "kapasitas satu kali rebus"],
                ["Wajan Penggoreng Mie", config.FRYER_CAPACITY, "wajan"],
                ["Pengaduk", config.MIXER_CAPACITY, "pegawai"],
                ["Pemberi Topping", config.TOPPING_CAPACITY, "pegawai"],
                ["Assembler", config.ASSEMBLER_CAPACITY, "pegawai"],
                ["", ""],
                ["Waktu Pelayanan Kasir", config.CASHIER_TIME, "menit"],
                ["Waktu Perebusan Mie", config.BOILER_TIME, "menit"],
                ["Waktu Penggorengan Mie", config.FRYER_TIME, "menit"],
                ["Waktu Mengaduk Mie", config.MIXER_TIME, "menit"],
                ["Waktu Memberi Topping", config.TOPPING_TIME, "menit"],
                ["Waktu Assemble", config.ASSEMBLER_TIME, "menit"],
            ],
            headers=["Nama Variable", "Nilai", "Satuan"],
        )
    )

    # Wait for user confirmation before starting the run
    input("\n\n[Press ENTER to run]")

    # Realtime mode scales simulation time to wall-clock time by SIMULATION_FACTOR;
    # otherwise the simulation runs as fast as possible
    if config.REALTIME_MODE:
        env = simpy.RealtimeEnvironment(factor=config.SIMULATION_FACTOR)
    else:
        env = simpy.Environment()

    # Column layouts for the two CSV output files
    CSV_PER_MINUTE_FIELDS = [
        "cashier",
        "boiler",
        "fryer",
        "mixer",
        "topping",
        "assembler",
        "num_arrivals",
    ]
    CSV_CUSTOMER_FIELDS = [
        "name",
        "quantity",
        "arrival_time",
        "being_served_time",
        "served_time",
    ]

    # Insert field names (open in "w" mode to truncate any previous output and write headers)
    with open(
        f"./output/{config.CSV_FILE_PER_MINUTE_RESULT}", "w", newline="", encoding="utf-8"
    ) as csv_file_per_minute, open(
        f"./output/{config.CSV_FILE_CUSTOMER_RESULT}", "w", newline="", encoding="utf-8"
    ) as csv_file_customer:
        csv.DictWriter(csv_file_per_minute, fieldnames=CSV_PER_MINUTE_FIELDS).writeheader()
        csv.DictWriter(csv_file_customer, fieldnames=CSV_CUSTOMER_FIELDS).writeheader()

    # Open csv files (append mode) and run the simulation, streaming rows as it progresses
    with open(
        f"./output/{config.CSV_FILE_PER_MINUTE_RESULT}", "a", newline="", encoding="utf-8"
    ) as csv_file_per_minute, open(
        f"./output/{config.CSV_FILE_CUSTOMER_RESULT}", "a", newline="", encoding="utf-8"
    ) as csv_file_customer:
        simulation = Gacoan(
            env,
            csv_writer_per_minute=csv.DictWriter(
                csv_file_per_minute,
                fieldnames=CSV_PER_MINUTE_FIELDS,
            ),
            csv_writer_customer=csv.DictWriter(
                csv_file_customer,
                fieldnames=CSV_CUSTOMER_FIELDS,
            ),
        )

        # Drive the simulation until the configured end time
        env.process(simulation.run())
        env.run(until=config.SIMULATION_TIME)
| 2.453125 | 2 |
torchocr/models/backbones/det_resnet.py | hua1024/OpenOCR | 3 | 12773936 | # coding=utf-8
# @Time : 2020/10/24 12:13
# @Auto : zzf-jeff
import torch
import torch.nn as nn
import math
from ..builder import BACKBONES
from .base import BaseBackbone
import torch.utils.model_zoo as model_zoo
from torchocr.utils.checkpoints import load_checkpoint
__all__ = [
"DetResNet"
]
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding 1 and no bias (shape-preserving at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs), optionally using a deformable conv2.

    Attribute names (conv1, bn1, conv2, ...) are part of the checkpoint format and
    must not be renamed.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
        super(BasicBlock, self).__init__()
        self.with_dcn = dcn is not None
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.with_modulated_dcn = False
        if self.with_dcn:
            # deformable conv: a small regular conv predicts sampling offsets
            from torchvision.ops import DeformConv2d
            deformable_groups = dcn.get('deformable_groups', 1)
            offset_channels = 18  # 2 * 3 * 3 offsets per deformable group
            self.conv2_offset = nn.Conv2d(planes, deformable_groups * offset_channels, kernel_size=3, padding=1)
            self.conv2 = DeformConv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        else:
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        if self.with_dcn:
            out = self.conv2(out, self.conv2_offset(out))
        else:
            out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand x4), optionally deformable.

    Attribute names (conv1..conv3, bn1..bn3, ...) are part of the checkpoint format
    and must not be renamed.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
        super(Bottleneck, self).__init__()
        self.with_dcn = dcn is not None
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.with_modulated_dcn = False
        if self.with_dcn:
            # deformable conv: a small regular conv predicts sampling offsets
            from torchvision.ops import DeformConv2d
            deformable_groups = dcn.get('deformable_groups', 1)
            offset_channels = 18  # 2 * 3 * 3 offsets per deformable group
            self.conv2_offset = nn.Conv2d(planes, deformable_groups * offset_channels, stride=stride, kernel_size=3,
                                          padding=1)
            self.conv2 = DeformConv2d(planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
        else:
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dcn = dcn

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        if self.with_dcn:
            out = self.conv2(out, self.conv2_offset(out))
        else:
            out = self.conv2(out)
        out = self.relu(self.bn2(out))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)
@BACKBONES.register_module()
class DetResNet(BaseBackbone):
    """ResNet backbone for detection; forward returns the four stage feature maps (c2..c5)."""

    # depth -> (residual block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }
    def __init__(self, depth, in_channels, num_classes=1000):
        """
        :param depth: One of 18/34/50/101/152 (keys of ``arch_settings``).
        :param in_channels: Number of channels of the input image.
        :param num_classes: Unused here; kept for interface compatibility.
        """
        super(DetResNet, self).__init__()
        self.inplanes = 64
        self.out_channels = []  # populated by _make_layer: one entry per stage
        self.block = self.arch_settings[depth][0]
        self.num_block = self.arch_settings[depth][1]
        # stem: 7x7/2 conv + maxpool, reducing spatial resolution by 4x overall
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block=self.block, planes=64, blocks=self.num_block[0], stride=1)
        self.layer2 = self._make_layer(block=self.block, planes=128, blocks=self.num_block[1], stride=2)
        self.layer3 = self._make_layer(block=self.block, planes=256, blocks=self.num_block[2], stride=2)
        self.layer4 = self._make_layer(block=self.block, planes=512, blocks=self.num_block[3], stride=2)
    def _make_layer(self, block, planes, blocks, stride=1, dcn=None):
        """Build one residual stage; also records the stage's output channel count.

        The first block may downsample (stride/channel change) via a projection shortcut.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # projection shortcut to match shape of the residual branch
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dcn=dcn))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dcn=dcn))
        self.out_channels.append(planes * block.expansion)
        return nn.Sequential(*layers)
    def init_weights(self, pretrained=None):
        """Kaiming-init conv layers and unit-init BN, or load weights from a checkpoint path."""
        if pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    # He initialisation (fan-out)
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
        elif isinstance(pretrained, str):
            load_checkpoint(self,pretrained)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Return the tuple of stage outputs (c2, c3, c4, c5) at strides 4/8/16/32."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pool1(x)
        c2 = self.layer1(x)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        return (c2, c3, c4, c5)
| 1.9375 | 2 |
examples/gevent_http.py | tetsuo-dance/poyonga | 13 | 12773937 | from poyonga import Groonga
import gevent
from gevent import monkey
monkey.patch_all()
def fetch(cmd, **kwargs):
    """Run a single Groonga command, echo its status/body/separator, and return the body."""
    client = Groonga()
    result = client.call(cmd, **kwargs)
    for line in (result.status, result.body, "*" * 40):
        print(line)
    return result.body
# (command, keyword-arguments) pairs to execute against Groonga
cmds = [
    ("status", {}),
    ("log_level", {"level": "warning"}),
    # ("table_create", {"name": "Site", "flags": "TABLE_HASH_KEY"}),
    ("select", {"table": "Site"}),
]
# run all commands concurrently on gevent greenlets and wait for them to finish
jobs = [gevent.spawn(fetch, cmd, **kwargs) for cmd, kwargs in cmds]
gevent.joinall(jobs)
# collect the returned response bodies in submission order
results = [job.value for job in jobs]
print(results)
| 2.3125 | 2 |
rest_api/serializers.py | knaveenkumar3576/django-rest-example | 0 | 12773938 | from rest_framework import serializers
# from .models import Player, Point
from .models import Point
# class PlayerSerializer(serializers.ModelSerializer):
# class Meta:
# fields = (
# 'user_name',
# 'first_name',
# 'last_name',
# )
# model = Player
class PointSerializer(serializers.ModelSerializer):
    """Serializes Point instances for the REST API."""
    class Meta:
        # only these model fields are exposed through the API
        fields = (
            'user_name',
            'score',
            'date_created'
        )
        model = Point
main_clsa.py | maple-research-lab/CLSA | 35 | 12773939 | <gh_stars>10-100
#Copyright (C) 2020 <NAME>
#License: MIT for academic use.
#Contact: <NAME> (<EMAIL>, <EMAIL>)
#Some codes adopted from https://github.com/facebookresearch/moco
from ops.argparser import argparser
from ops.Config_Envrionment import Config_Environment
import torch.multiprocessing as mp
from training.main_worker import main_worker
def main(args):
    """Configure the runtime environment and launch training.

    In multiprocessing-distributed mode one worker process is spawned per GPU;
    otherwise main_worker is invoked directly in this process.
    """
    # environment configuration; returns the number of GPUs on this node
    ngpus_per_node = Config_Environment(args)

    if args.multiprocessing_distributed == 1:
        # One process per GPU: the total world_size becomes processes, not nodes,
        # so scale it by the per-node GPU count before spawning the workers.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # single-process path
        main_worker(args.gpu, ngpus_per_node, args)
if __name__ == '__main__':
    # parse command-line arguments (`params` holds the raw parameter namespace) and start training
    #use_cuda = torch.cuda.is_available()
    #print("starting check cuda status",use_cuda)
    #if use_cuda:
    args,params=argparser()
    main(args)
python_poc/adapters/postgres_generic_adapter.py | pervcomp/Procem | 1 | 12773940 | <filename>python_poc/adapters/postgres_generic_adapter.py
# -*- coding: utf-8 -*-
"""This module includes the adapter for reading periodically updated values from a PostgreSQL database
and sending the values to the Procem RTL worker for further handling."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): <NAME>, <NAME>, <NAME>,
# <NAME> ja <NAME>
import psycopg2.sql as sql
import datetime
import queue
import random
import sys
import threading
import time
try:
import adapters.common_utils as common_utils
import adapters.postgres_utils as postgres_utils
import adapters.postgres_weatherstation_model as model
except:
# used when running the module directly
import common_utils
import postgres_utils
import postgres_weatherstation_model as model
PROCEM_SERVER_IP = common_utils.PROCEM_SERVER_IP
PROCEM_SERVER_PORT = common_utils.PROCEM_SERVER_PORT
# maximum size for UDP payload. Current value based on a quick experiment where it was 8192
UDP_MAX_SIZE = common_utils.UDP_MAX_SIZE
# The names of the configuration files from where the data model information is read
CONFIG_SCHEME_FILE_NAME = "postgres_weatherstation_config.json"
MEASUREMENT_ID_FILE_NAME = "Wheather_Station_measurement_IDs.csv"
init_query_string = postgres_utils.init_query_string
normal_query_string = postgres_utils.normal_query_string
def PostgresTableWorker(connection, table_name, table_type, database, data_queue):
    """Reads values periodically from the given table and sends to Procem RTL worker.

    :param connection: open psycopg2 connection to the database
    :param table_name: name of the table to poll
    :param table_type: configuration object with time_field, fields, delay (ms) and interval (ms)
    :param database: database configuration (time offset, credentials)
    :param data_queue: queue of encoded packets consumed by the UDP send worker

    Runs forever; on any error it opens a new connection and restarts itself
    in a fresh daemon thread.
    """
    try:
        cursor = connection.cursor()
        table_sql = sql.Identifier(table_name)
        time_field_sql = sql.Identifier(table_type.time_field)

        # Gather the field names to variable field_names_sql
        field_names = []
        for field_name in table_type.fields:
            field_names.append(sql.Identifier(field_name))
        if len(field_names) == 0:
            print(common_utils.getTimeString(), "No fields specified for table", table_name, "in database",
                  postgres_utils.getDatabaseAddress(database))
            quit()
        field_names_sql = sql.SQL(", ").join([time_field_sql] + field_names)

        # get the latest database entry time by using an initial query
        # (poll until the table contains at least one row)
        latest_time = None
        while latest_time is None:
            cursor.execute(sql.SQL(init_query_string).format(field=time_field_sql, table=table_sql))
            result = cursor.fetchone()
            if result is not None:
                latest_time = result[0]
            else:
                time.sleep(table_type.delay / 1000)

        # Construct the query that will be used to get the data
        query = sql.SQL(normal_query_string).format(
            fields=field_names_sql,
            table=sql.Identifier(table_name),
            time_field=sql.Identifier(table_type.time_field))

        time_offset = database.time_offset
        loop_start_time = time.time()
        last_save_time = latest_time
        day = datetime.date.today().day

        packet_count = 0
        print_interval = 3600  # print the number of sent packages once in an hour
        next_print_count = print_interval

        while True:
            # Run the time offset checking if it is a new day.
            current_day = datetime.date.today().day
            if current_day != day:
                time_offset = database.updateTimeoffset()
                day = current_day

            # sleep so that consecutive queries are at least `delay` ms apart
            time_until_next_query = max(table_type.delay / 1000 - (time.time() - loop_start_time), 0)
            time.sleep(time_until_next_query)

            # fetch every row newer than the last seen timestamp
            cursor.execute(query, [latest_time])
            results = cursor.fetchall()
            loop_start_time = time.time()

            for result in results:
                latest_time = result[0]
                # milliseconds since the last forwarded row; rows closer together
                # than table_type.interval are skipped (downsampling)
                time_interval = (latest_time - last_save_time) / datetime.timedelta(milliseconds=1)
                if time_interval > table_type.interval:
                    last_save_time = latest_time
                    # timestamp in epoch milliseconds, corrected by the database clock offset
                    ts = int(round((latest_time + time_offset).timestamp() * 1000))

                    # result[0] is the time column; the remaining values pair up with the configured fields
                    for field_name, value in zip(table_type.fields, result[1:]):
                        field_info = table_type.fields[field_name]
                        new_pkt = bytes(common_utils.getProcemRTLpkt(
                            name=field_info.name,
                            path=field_info.path,
                            value=value,
                            timestamp=ts,
                            unit=field_info.unit,
                            datatype=field_info.data_type,
                            variableNumber=field_info.rtl_id,
                            confidential=not field_info.ticket), "utf-8")
                        data_queue.put(new_pkt)
                        packet_count += 1

            # put empty item into the data queue as a mark that the buffer should be emptied
            data_queue.put(bytes())

            if packet_count >= next_print_count:
                print(common_utils.getTimeString(), packet_count, "packages sent from", table_name)
                next_print_count += print_interval

    except Exception as error:
        print(common_utils.getTimeString(), " ERROR: Table: ", table_name, ", Message: ", error, sep="")

        # try to create a new connection and start the table worker again
        time.sleep(2 * table_type.delay / 1000)
        connection = postgres_utils.getConnection(database)

        print(common_utils.getTimeString(), " (", postgres_utils.getDatabaseAddress(database),
              ") starting thread for table: ", table_name, sep="")
        table_thread = threading.Thread(
            target=PostgresTableWorker,
            kwargs={
                "connection": connection,
                "table_name": table_name,
                "table_type": table_type,
                "database": database,
                "data_queue": data_queue},
            daemon=True)
        table_thread.start()
def PostgresDatabaseWorker(database, data_queue):
    """Finds the time offset for the database and starts a new thread for each table in the database.

    :param database: database configuration object (contains the table configurations)
    :param data_queue: queue of encoded packets consumed by the UDP send worker
    """
    # determine the clock offset between this host and the database server
    postgres_utils.setTimeOffset(database)
    for table_name, table in database.tables.items():
        try:
            # each table polls over its own connection in its own daemon thread
            connection = postgres_utils.getConnection(database)
            print(common_utils.getTimeString(), " (", postgres_utils.getDatabaseAddress(database),
                  ") starting thread for table: ", table_name, sep="")
            table_thread = threading.Thread(
                target=PostgresTableWorker,
                kwargs={
                    "connection": connection,
                    "table_name": table_name,
                    "table_type": table,
                    "database": database,
                    "data_queue": data_queue},
                daemon=True)
            table_thread.start()
        except Exception as error:
            # a failure for one table should not prevent starting the others
            print(common_utils.getTimeString(), "ERROR:", error)
def startPostgresAdapter(data_model, data_queue):
    """Spawn one daemon worker thread per database in the data model."""
    for database_id, database in data_model.databases.items():
        print(common_utils.getTimeString(), " Starting thread for database ", database_id, ": ",
              postgres_utils.getDatabaseAddress(database), sep="")
        worker = threading.Thread(
            target=PostgresDatabaseWorker,
            kwargs={"database": database, "data_queue": data_queue},
            daemon=True)
        worker.start()
if __name__ == "__main__":
    # Optional command line: override the configuration scheme and measurement id files
    if len(sys.argv) == 3:
        CONFIG_SCHEME_FILE_NAME = sys.argv[1]
        MEASUREMENT_ID_FILE_NAME = sys.argv[2]
    elif len(sys.argv) != 1:
        print("Start this adapter with 'python3", sys.argv[0], "config_scheme.json measurement_ids.csv' command")
        print("or use 'python3 ", sys.argv[0], "' to use the default configuration.", sep="")
        quit()

    # read configuration information from the configuration files
    print(common_utils.getTimeString(), "Reading configurations from",
          CONFIG_SCHEME_FILE_NAME, "and", MEASUREMENT_ID_FILE_NAME)
    data_model = model.loadModel(
        config_filename=CONFIG_SCHEME_FILE_NAME,
        csv_filename=MEASUREMENT_ID_FILE_NAME)

    # initialize the data queue and start the udp send thread
    data_queue = queue.Queue()
    threading.Thread(target=common_utils.procemSendWorker, kwargs={"data_queue": data_queue}).start()

    # start one worker per configured database (each spawns per-table pollers)
    startPostgresAdapter(data_model, data_queue)

    # keep the main thread alive until the user presses enter
    while True:
        txt = input("Press enter key to end:\n\r")
        if not txt:
            break
| 2.34375 | 2 |
zasim/gui/elementary.py | timo/zasim | 2 | 12773941 | """This module offers GUI tools for manipulating table-like step functions
of "elementary" cellular automatons.
Ideas for further utilities:
* Display conflicting rules for horizontal or vertical symmetry, rotational
symmetry, ...
* An editing mode, that handles simple binary logic, like::
c == 1 then result = 1
c == 0 then result = 0
l == 0 and r == 1 then result = 0
* A graphical editing mode that allows adding "pattern matching" for rules with
"dontcare fields" or something of that sort.
* A graphical editing mode with zooming UI.
* ...
"""
from __future__ import absolute_import
import numpy as np
from ..elementarytools import *
from ..cagen import elementary_digits_and_values, rule_nr_to_rule_arr
from ..external.qt import *
from ..display.qt import PALETTE_QC
from itertools import product
import random
# Sentinel object: unique, compares only by identity.
GAP = object()
"""The value passed to create_subwidget when a position is not held by a
field."""
# NOTE: this is an *alias* of PALETTE_QC, not a copy -- the GAP entry added
# below is visible through PALETTE_QC as well.
CELL_COL = PALETTE_QC
"""What colors to use for what field values."""
CELL_COL[GAP] = QColor("gray")
class CellDisplayWidget(QLabel):
    """A little Widget that displays a cell in a neighbourhood."""

    def __init__(self, value, position=None, size=16, palette=None, **kwargs):
        """Create the DisplayWidget.

        :param value: The cell value to show.
        :param position: Alternatively, the position of the cell in the result
                         list, to be used for communication to the outside.
        :param size: The size of the cell, used for both width and height.
        :param palette: The palette of colors to use; defaults to CELL_COL.
        """
        super(CellDisplayWidget, self).__init__(**kwargs)
        self.setFixedSize(size, size)
        # BUG FIX: the palette must be stored *before* the pixmap is rendered.
        # The original assigned it afterwards and always painted from the
        # global CELL_COL, silently ignoring a caller-supplied palette.
        self._palette = palette or CELL_COL
        self.setPixmap(self.__pixmap_for_value(value))
        self.position = position

    def __pixmap_for_value(self, value):
        """Create a widget-sized pixmap filled with the palette color for
        *value*."""
        pixmap = QPixmap(QSize(self.width(), self.height()))
        pixmap.fill(self._palette[value])
        return pixmap
class EditableCellDisplayWidget(QPushButton):
    """A clickable and keyboard-operatable display widget for cells."""

    value_changed = Signal([int, int])
    """This signal will be emitted when the user changed the value of the
    cell. It will emit the position and the new value."""

    def __init__(self, value, position, base=2, size=16, palette=None, **kwargs):
        """Create the editable display widget.

        :param value: The start value.
        :param position: The position in the result list, used in the
                         :attr:`value_changed` signal.
        :param base: The numerical base for values.
        :param size: The size for the display, used for both width and height.
        :param palette: The palette of qcolors to use; defaults to CELL_COL.
        """
        super(EditableCellDisplayWidget, self).__init__(**kwargs)
        self.value = value
        self.base = base
        # BUG FIX: fall back to the module palette like the sibling widgets do.
        # The original stored ``palette`` unchecked, so omitting the argument
        # crashed on the ``self._palette[self.value]`` subscript below.
        self._palette = palette or CELL_COL
        self.setFixedSize(size, size)
        self.setFlat(True)
        self.setAutoFillBackground(True)
        self.bg_color = self._palette[self.value]
        self.position = position
        self.clicked.connect(self._change_value)

    def _change_value(self):
        """Called by the clicked signal of the underlying QPushButton.

        Cycles the value through 0 .. base-1 and notifies listeners."""
        self.value = (self.value + 1) % self.base
        self.bg_color = self._palette[self.value]
        self.update()
        self.value_changed.emit(self.position, self.value)

    def set_value(self, value, emit=False):
        """Set the value programmatically; emit :attr:`value_changed` only if
        *emit* is True."""
        self.value = value
        self.bg_color = self._palette[self.value]
        self.update()
        if emit:
            self.value_changed.emit(self.position, self.value)

    def paintEvent(self, event):
        """Redraw the button, add a rectangle inside the button if it has the
        focus."""
        paint = QPainter(self)
        paint.fillRect(event.rect(), self.bg_color)
        if self.hasFocus():
            # pick a focus-rect color that contrasts with the background
            paint.setPen(QColor("red") if self.bg_color != QColor("red")
                         else QColor("black"))
            paint.drawRect(QRect(1, 1, self.width() - 3, self.height() - 3))

    def set_position(self, position):
        """Update the position reported through :attr:`value_changed`."""
        self.position = position
# TODO implement CellDisplayWidgets for images.
class BaseNeighbourhoodDisplay(QWidget):
    """The BaseNeighbourhoodDisplay offers a skeleton for different ways of
    displaying neighbourhoods.

    Subclass this and implement create_subwidget, which will be fed an offset
    and the corresponding entry from the values dictionary, or :data:`GAP` if
    there is no spot in the neighbourhood at that position, and will then be
    put into a QGridLayout.

    This class itself displays colored blocks in the shape of the
    neighbourhood."""

    def __init__(self, neighbourhood, values=None, base=2, palette=None, **kwargs):
        """:param neighbourhood: The neighbourhood object to lay out.
        :param values: Mapping of offset (or cell name) to cell value;
                       defaults to all zeros.
        :param base: The numerical base for cell values.
        :param palette: The palette of colors; defaults to CELL_COL."""
        super(BaseNeighbourhoodDisplay, self).__init__(**kwargs)
        self.neighbourhood = neighbourhood
        self.offsets = neighbourhood.offsets
        self.names = neighbourhood.names
        self.bbox = self.neighbourhood.bounding_box()
        self.base = base
        self.palette = palette or CELL_COL

        dims = len(self.bbox)
        assert dims in (1, 2), "Only 1d or 2d neighbourhoods are supported"
        if dims == 1:
            # for making the code easier, we will only handle 2d neighbourhoods
            # by trivially turning a 1d neighbourhood into a 2d neighbourhood.
            self.offsets = tuple((x[0], 0) for x in self.offsets)
            self.bbox = self.bbox[0], (0, 0)

        if values is None:
            values = dict((offs, 0) for offs in self.offsets)
        # NOTE: this file uses Python 2 idioms (``keys()[0]``, ``iteritems``);
        # kept as-is for consistency with the rest of the module.
        if values.keys()[0] not in self.offsets:
            # the values dict is keyed by cell *names*; translate to offsets
            values = dict((self.offsets[self.names.index(name)],
                          value) for name, value in values.iteritems())
        self.values = values.copy()

        self.subwidgets = {}
        self.layout = QGridLayout(self)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        (grid_w, grid_h), (offs_x, offs_y) = self.determine_size()

        # per-row widths / per-column heights, used to size this widget
        widths = [[] for _ in range(grid_h)]
        heights = [[] for _ in range(grid_w)]

        positions = product(range(grid_w),
                            range(grid_h))
        for (col, row) in positions:
            offset = (col + offs_x, row + offs_y)
            subwidget = self.create_subwidget(offset, self.values.get(offset, GAP))
            self.subwidgets[offset] = subwidget
            if subwidget is not None:
                # BUG FIX: only touch the widget once we know the subclass
                # actually created one (the original called setObjectName
                # before its own None check).
                subwidget.setObjectName("cell_%d_%d" % offset)
                self.layout.addWidget(subwidget, row, col)
                w, h = subwidget.width(), subwidget.height()
            else:
                w, h = 0, 0
            widths[row].append(w)
            heights[col].append(h)

        width = max([sum(part) for part in widths])
        height = max([sum(part) for part in heights])
        self.setFixedSize(width, height)

    def determine_size(self):
        """Determine how many fields to allocate in the grid.

        Subclass this, if you want more gaps around the edges.

        Return a tuple of width, height and a tuple of x-offset and y-offset."""
        return ((self.bbox[0][1] - self.bbox[0][0] + 1,
                 self.bbox[1][1] - self.bbox[1][0] + 1),
                (self.bbox[0][0], self.bbox[1][0]))

    def create_subwidget(self, offset, value):
        """Create a widget for a cell in the neighbourhood.

        :param offset: A tuple of (x, y) for the position of the cell
        :param value: The value of the cell, as per the values dictionary, or
                      if the widget is to be created for an empty space,
                      :data:`GAP`.
        :returns: a QWidget initialised for the cell. Alternatively, None."""
        # BUG FIX: the palette used to be passed positionally into the
        # ``position`` parameter of CellDisplayWidget; pass it by keyword.
        return CellDisplayWidget(value, palette=self.palette)

    def update_value(self, widget, offset, new_value):
        """Manipulate the given widget for the new value.

        :returns: None, if the widget was manipulated, alternatively a new
                  QWidget to take its place."""
        # BUG FIX: the original referenced ``self.__pixmap_for_value``, which
        # (being name-mangled and defined only on CellDisplayWidget) raised an
        # AttributeError; build the pixmap locally instead.
        pixmap = QPixmap(QSize(widget.width(), widget.height()))
        pixmap.fill(self.palette[new_value])
        widget.setPixmap(pixmap)
class NextToResult(QWidget):
    """A simple utility class to display a neighbourhood widget and a result
    widget next to each other in different relations."""

    def __init__(self, neighbourhood_widget, result_widget, direction="l", **kwargs):
        """Create the widget.

        :param neighbourhood_widget: The neighbourhood widget to put in.
        :param result_widget: The result widget to put in.
        :param direction: The direction the neighbourhood widget to put at.
            Valid directions are l, u, d and r for left, up, down and right
            respectively."""
        super(NextToResult, self).__init__(**kwargs)
        assert direction in "udlr"

        self.result_widget = result_widget
        self.neighbourhood_widget = neighbourhood_widget
        self.result_widget.setObjectName("result")
        self.neighbourhood_widget.setObjectName("neighbourhood")

        # horizontal box for left/right, vertical box for up/down; the gap
        # between the two widgets is as big as the result widget itself
        if direction in "lr":
            layout = QHBoxLayout()
            spacing = self.result_widget.width()
        else:
            layout = QVBoxLayout()
            spacing = self.result_widget.height()

        if direction in "lu":
            layout.addWidget(self.result_widget)
            layout.addSpacing(spacing)
            layout.addWidget(self.neighbourhood_widget)
        else:
            # BUG FIX: for "r"/"d" the original added only the spacer and the
            # result widget -- the neighbourhood widget was never put into the
            # layout. Mirror the "l"/"u" arrangement instead.
            layout.addWidget(self.neighbourhood_widget)
            layout.addSpacing(spacing)
            layout.addWidget(self.result_widget)

        self.setLayout(layout)
class ElementaryRuleWindow(QWidget):
    """A window usable to modify the table of an elementary step function."""

    def __init__(self, neighbourhood, rule=None, base=2, palette_info=None, **kwargs):
        """:param neighbourhood: The `Neighbourhood` instance to get the
              data from.
           :param rule: The rule to set at the beginning; a random rule is
              chosen when None.
           :param base: The numerical base for the cells.
           :param palette_info: The palette (value -> QColor mapping) for the
              cell widgets; defaults to CELL_COL.
        """
        super(ElementaryRuleWindow, self).__init__(**kwargs)

        self.neighbourhood = neighbourhood
        self.base = base
        # BUG FIX: ``palette_info`` used to be ignored and the cell widgets
        # were handed ``self.palette`` -- which resolves to the inherited
        # QWidget.palette *method*, not a color mapping. Store a real mapping
        # under a name that does not shadow the Qt method.
        self._cell_palette = palette_info or CELL_COL
        self.entries = len(self.neighbourhood.offsets)
        if rule is None:
            # pick any of the base ** (base ** entries) possible rules
            rule = random.randrange(0, base ** (base ** self.entries))
        self.rule_nr = rule
        self.rule = np.array(rule_nr_to_rule_arr(self.rule_nr, self.entries, self.base))

        # one NextToResult block per table entry, kept in display order
        self.n_r_widgets = []
        self.display_widget = QWidget(self)
        self.display_widget.setObjectName("display_widget")
        self.display_layout = QGridLayout(self.display_widget)
        self.display_layout.setSizeConstraint(QLayout.SetFixedSize)

        digits_and_values = elementary_digits_and_values(self.neighbourhood,
                self.base, self.rule)
        for pos, data in enumerate(digits_and_values):
            data = data.copy()
            result = data["result_value"]
            del data["result_value"]
            n_w = BaseNeighbourhoodDisplay(neighbourhood, data, parent=self)
            r_w = EditableCellDisplayWidget(result, pos, base=base, parent=self, palette=self._cell_palette)
            n_r_w = NextToResult(n_w, r_w, parent=self, direction="r")
            n_r_w.setObjectName("block_%d" % pos)
            r_w.value_changed.connect(self._result_changed)
            self.n_r_widgets.append(n_r_w)
        self.digits_and_values = digits_and_values
        self._rewrap_grid()

        self.display_widget.setLayout(self.display_layout)
        self.scroll_area = QScrollArea(self)
        self.scroll_area.setWidget(self.display_widget)
        self.scroll_area.setObjectName("scroll_area")

        layout = QVBoxLayout(self)
        self.rule_nr_display = QLabel("Editing rule %s" % hex(self.rule_nr), self)
        self.rule_nr_display.setObjectName("rule_nr_display")
        # make text selectable and links (if any) clickable
        self.rule_nr_display.setTextInteractionFlags(Qt.TextBrowserInteraction)
        layout.addWidget(self.rule_nr_display)
        layout.addWidget(self.scroll_area)

        action_buttons = QHBoxLayout(self)
        minimize_button = QPushButton("Minimize rule number", self)
        minimize_button.clicked.connect(self.minimize_rule_number)
        minimize_button.setObjectName("minimize")
        action_buttons.addWidget(minimize_button)
        action_buttons.addSpacing(11)
        # one button per symmetry/transformation action from elementarytools
        for name, action in neighbourhood_actions.iteritems():
            act_btn = QPushButton(name, self)
            # default argument binds the current action for the closure
            def do_action(act=action):
                self.do_neighbourhood_action(act)
            act_btn.clicked.connect(do_action)
            act_btn.setObjectName("action_%s" % name)
            action_buttons.addWidget(act_btn)
        layout.addLayout(action_buttons)
        self.setLayout(layout)

    def _result_changed(self, position, value):
        """React to a change in the results."""
        self.digits_and_values[position]["result_value"] = value
        self.recalculate_rule_number()

    def recalculate_rule_number(self):
        """Recalculate what number corresponds to the result values saved in
        :attr:`digits_and_values` and refresh the header label.

        :returns: the new rule number."""
        num = 0
        for digit, values in enumerate(self.digits_and_values):
            num += values["result_value"] * (self.base ** digit)
        self.rule_nr = num
        self.rule_nr_display.setText("Editing rule %s" % (hex(self.rule_nr)))
        return self.rule_nr

    def minimize_rule_number(self):
        """Ask elementarytools for the lowest equivalent rule number and,
        after confirmation, apply the transformation route it found."""
        best_num, (best_route, result), _ = minimize_rule_values(self.neighbourhood, self.digits_and_values)
        if best_num == self.rule_nr:
            QMessageBox.information(self, "No optimization found",
                    """This rule set is already the lowest I can make out of it.""")
        else:
            okay = QMessageBox.question(self, "Apply optimization?",
                    """With these actions, the rule number %d can be reached:
%s""" % (best_num, ", ".join(best_route)),
                    buttons=QMessageBox.Ok | QMessageBox.Cancel,
                    defaultButton=QMessageBox.Ok)
            if okay == QMessageBox.Ok:
                for num, data in enumerate(result):
                    self.n_r_widgets[num].result_widget.set_value(data)
                self.digits_and_values = elementary_digits_and_values(self.neighbourhood, self.base, result)
                self.recalculate_rule_number()

    def do_neighbourhood_action(self, action):
        """Run one elementarytools action over the table and refresh the
        result widgets and the rule number."""
        result = action(self.neighbourhood, self.digits_and_values, base=self.base)
        for num, data in enumerate(result):
            val = data["result_value"]
            self.n_r_widgets[num].result_widget.set_value(val)
        self.digits_and_values = result
        self.recalculate_rule_number()

    def _rewrap_grid(self, old_width=None):
        """Put all the widgets into a grid, so that they fill just enough of
        the width, so that there is no horizontal scroll bar.

        NOTE(review): the divisions below rely on Python 2 integer division
        (consistent with the ``iteritems`` usage in this module)."""
        count = len(self.n_r_widgets)
        # all items should have the same size actually
        width_per_bit = self.n_r_widgets[0].sizeHint().width() + \
                        self.display_layout.spacing()
        spacing = self.display_layout.horizontalSpacing()
        if spacing == -1:
            spacing = 11
        available_width = self.contentsRect().width()
        columns = available_width / (width_per_bit) - 1
        if old_width is not None:
            # skip the re-layout when the column count would not change
            old_columns = old_width / (width_per_bit) - 1
            if old_columns == columns:
                return
        if columns <= 0:
            columns = 1
        items_per_column = int(count / columns) + 1

        for widget in self.n_r_widgets:
            self.display_layout.removeWidget(widget)
        for num, widget in enumerate(self.n_r_widgets):
            col = num / items_per_column
            row = num % items_per_column
            self.display_layout.addWidget(widget, row, col)

        height_per_bit = self.n_r_widgets[0].sizeHint().height()
        v_spacing = self.display_layout.verticalSpacing()
        if v_spacing == -1:
            v_spacing = 11
        height = (height_per_bit + v_spacing) * items_per_column
        self.display_widget.setFixedSize(available_width, height)

    def resizeEvent(self, event):
        """React to a size change of the widget."""
        self._rewrap_grid(old_width = event.oldSize().width())
def main():
    """Demo entry point: show an ElementaryRuleWindow for a von Neumann
    neighbourhood with base 3."""
    from ..cagen import VonNeumannNeighbourhood
    import sys
    # BUG FIX: a QApplication must exist before widgets are created; the
    # original referenced an undefined name ``app`` below.
    app = QApplication(sys.argv)
    vn = VonNeumannNeighbourhood()
    dvw = ElementaryRuleWindow(vn, base=3)
    dvw.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 3.328125 | 3 |
app/test.py | geekrohit/celery-sqs-spot | 5 | 12773942 | import tasks
from time import sleep

# Fire an asynchronous add(3, 5) task at the celery worker, then poll once
# for its status after a short wait.
print("add 3+5")
result = tasks.add.delay(3, 5)
print("Task ID:")
print(result)
sleep(10)
print(result.status)
| 2.671875 | 3 |
dfman/__init__.py | jniedrauer/dfman | 0 | 12773943 | <reponame>jniedrauer/dfman<filename>dfman/__init__.py
"""Initial imports"""
import logging
from dfman.config import Config
logging.getLogger(__name__).addHandler(logging.NullHandler())
| 1.492188 | 1 |
evgp_rcs/gui.py | RoboJackets/evgp-rcs | 0 | 12773944 | <filename>evgp_rcs/gui.py
import sys
import os
import logging
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QThread, QItemSelection
from PyQt5.Qt import QSortFilterProxyModel
from PyQt5.QtWidgets import QWidget, QGridLayout, QGroupBox, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QMessageBox, QSizePolicy, QFileDialog
from rcsmodel import RCSModel, RCSSortFilterProxyModel
from race import RaceState
from tcpserver import TCPServer
from rcsstatemanager import RCSStateManager
from buttonstatecontroller import ButtonStateController
# Process-wide logger configuration for the GUI.
logging.basicConfig(format='%(levelname)s::%(filename)s: %(message)s', level=logging.DEBUG) #TODO: lower level
class MainWindow(QtWidgets.QMainWindow):
    """Top-level RCS window: wires the RCSModel, the two team tables, the
    admin/team/race button sidebar and the status box to the TCP server."""
    def __init__(self):
        """Build the full window and start the TCP server thread.

        The central widget is a "please wait" label until the server reports
        ready (see :meth:`server_ready_handler`)."""
        super().__init__()
        self.showMaximized()
        self.setWindowTitle("EVGP Race Control System")
        self.is_server_started = False
        # locate the racer list; fall back to a file-open dialog if the
        # default file is missing next to the working directory
        teams_list_file_path = "racers_list.yaml"
        if not os.path.exists(teams_list_file_path):
            logging.warning("No racers_list.yaml found!")
            msgBox = QMessageBox()
            msgBox.setIcon(QMessageBox.Warning)
            msgBox.setText("No racers_list.yaml found.\nPress Open and use the file explorer to select the racer list YAML file")
            msgBox.setWindowTitle("No racers_list.yaml found!")
            msgBox.setStandardButtons(QMessageBox.Open)
            returnValue = msgBox.exec()
            if returnValue == QMessageBox.Open:
                teams_list_file_path = QFileDialog.getOpenFileName(self, 'Open Racer List File', os.getcwd(),"Yaml files (*.yaml)")[0]
        self.model = RCSModel(teams_list_file_path)
        layout = QGridLayout()
        layout.setColumnStretch(0, 10)
        layout.setColumnStretch(1, 10)
        #FILTERED ACTIVE RACE TABLE
        # both tables share self.model through sort/filter proxies; the bool
        # selects active-race vs. other-teams filtering
        self.activeRaceProxyModel = RCSSortFilterProxyModel(True)
        self.activeRaceProxyModel.setDynamicSortFilter(True)
        self.activeRaceProxyModel.setSourceModel(self.model)
        self.activeRaceTable = QtWidgets.QTableView()
        self.activeRaceTable.setModel(self.activeRaceProxyModel)
        self.activeRaceTable.setSizeAdjustPolicy(
            QtWidgets.QAbstractScrollArea.AdjustToContents)
        self.activeRaceTable.horizontalHeader().setStretchLastSection(True);
        self.activeRaceTable.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.activeRaceTable.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        self.activeRaceTable.setSelectionMode(QtWidgets.QTableView.SingleSelection)
        layout.addWidget(self.activeRaceTable, 1, 0)
        self.standbyRaceProxyModel = RCSSortFilterProxyModel(False)
        self.standbyRaceProxyModel.setDynamicSortFilter(True)
        self.standbyRaceProxyModel.setSourceModel(self.model)
        self.standbyRaceTable = QtWidgets.QTableView()
        self.standbyRaceTable.setModel(self.standbyRaceProxyModel)
        self.standbyRaceTable.setSizeAdjustPolicy(
            QtWidgets.QAbstractScrollArea.AdjustToContents)
        self.standbyRaceTable.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.standbyRaceTable.horizontalHeader().setStretchLastSection(True);
        self.standbyRaceTable.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        self.standbyRaceTable.setSelectionMode(QtWidgets.QTableView.SingleSelection)
        layout.addWidget(self.standbyRaceTable, 1, 1)
        self.activeRaceTableLabel = QLabel("Active Race Table")
        self.activeRaceTableLabel.setAlignment(Qt.AlignCenter)
        self.standbyRaceTableLabel = QLabel("Other Teams Table")
        self.standbyRaceTableLabel.setAlignment(Qt.AlignCenter)
        layout.addWidget(self.activeRaceTableLabel, 0, 0)
        layout.addWidget(self.standbyRaceTableLabel, 0, 1)
        self.standbyRaceTable.setSortingEnabled(True)
        self.standbyRaceTable.sortByColumn(1, Qt.AscendingOrder)
        self.activeRaceTable.setSortingEnabled(True)
        self.activeRaceTable.sortByColumn(1, Qt.AscendingOrder)
        # source-model row of the single selected team (across both tables)
        self.selectedIndex = None
        self.standbyRaceTable.selectionModel().selectionChanged.connect(self.standby_race_table_selection_handler)
        self.activeRaceTable.selectionModel().selectionChanged.connect(self.active_race_table_selection_handler)
        self.horizontalGroupBox = QGroupBox("")
        self.horizontalGroupBox.setLayout(layout)
        # All relevant buttons in sidebar
        self.button_sidebar_vBox = QVBoxLayout()
        layout.addLayout(self.button_sidebar_vBox, 1, 3)
        self.button_sidebar_vBox.setAlignment(Qt.AlignTop)
        # Move Racers Buttons
        self.button_container_stylesheet = "QWidget#ButtonContainer{background-color: rgb(200, 200, 200);\n border-radius: 5;\n}"
        move_racers_button_container = QWidget()
        move_racers_button_container.setObjectName("ButtonContainer")
        move_racers_button_container.setStyleSheet(self.button_container_stylesheet)
        move_racers_button_container.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
        move_racers_layout = QVBoxLayout(move_racers_button_container)
        move_racers_layout.addWidget(QLabel("ADMIN CONTROLS"))
        move_to_active_race_button = QPushButton("Move to Active Race")
        move_to_active_race_button.clicked.connect(self.move_to_active_race)
        move_racers_layout.addWidget(move_to_active_race_button)
        remove_from_active_race_button = QPushButton("Remove from Active Race")
        remove_from_active_race_button.clicked.connect(self.remove_from_active_race)
        move_racers_layout.addWidget(remove_from_active_race_button)
        self.button_sidebar_vBox.addWidget(move_racers_button_container)
        # Team State Control Buttons
        team_state_button_container = QWidget()
        team_state_button_container.setObjectName("ButtonContainer")
        team_state_button_container.setStyleSheet(self.button_container_stylesheet)
        team_state_button_container.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
        team_state_btns = self.create_team_state_buttons()
        team_state_layout = QVBoxLayout(team_state_button_container)
        team_state_layout.addWidget(QLabel("TEAM CONTROLS"))
        for btn in team_state_btns:
            team_state_layout.addWidget(btn)
        self.button_sidebar_vBox.addWidget(team_state_button_container)
        # Race State Control Buttons
        race_state_button_container = QWidget()
        race_state_button_container.setObjectName("ButtonContainer")
        race_state_button_container.setStyleSheet(self.button_container_stylesheet)
        race_state_button_container.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
        race_state_btns = self.create_race_state_buttons()
        race_state_layout = QVBoxLayout(race_state_button_container)
        race_state_layout.addWidget(QLabel("RACE CONTROLS"))
        for btn in race_state_btns:
            race_state_layout.addWidget(btn)
        self.button_sidebar_vBox.addWidget(race_state_button_container)
        self.info_group_box = QGroupBox("Race Status Information")
        self.race_state_label = QLabel("Race State: IN_GARAGE")
        self.info_label = QLabel("Race Status: No Race running.")
        info_layout = QVBoxLayout()
        info_layout.addWidget(self.race_state_label)
        info_layout.addWidget(self.info_label)
        self.info_group_box.setLayout(info_layout)
        layout.addWidget(self.info_group_box, 3, 0)
        # NOTE(review): only team_state_btns[0] is handed to the controller;
        # the red-flag/e-stop team buttons [1] and [2] are not managed by it
        # -- confirm this matches ButtonStateController's signature/intent.
        self.buttonController = ButtonStateController(
            race_state_btns[0],
            race_state_btns[1],
            race_state_btns[2],
            race_state_btns[3],
            race_state_btns[4],
            team_state_btns[0],
            move_to_active_race_button,
            remove_from_active_race_button,
            self.race_state_label,
            self.info_label
        )
        self.model.race_state_change_signal.connect(self.buttonController.race_state_updated)
        verticalSpacer = QtWidgets.QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        layout.addItem(verticalSpacer, 4, 0, rowSpan=1, columnSpan=3)
        self.create_menu_bar()
        # wait for start of server
        self.server_wait_label = QLabel("Waiting for TCP Server to start. Please hold on.")
        self.server_wait_label.setAlignment(Qt.AlignCenter)
        self.setCentralWidget(self.server_wait_label)
        self.start_server()
    # Make sure we stop the server on window close
    def closeEvent(self, event):
        """Qt close hook: shut the TCP server down before the window dies."""
        self.stop_server()
        event.accept()
    def create_menu_bar(self):
        """Build the Help menu (reset buttons + about dialog)."""
        menuBar = QtWidgets.QMenuBar(self)
        self.setMenuBar(menuBar)
        helpMenu = menuBar.addMenu("&Help")
        # actions are kept on self so they are not garbage-collected
        self.reset_gui_buttons_actions = QtWidgets.QAction("Reset GUI buttons")
        self.reset_gui_buttons_actions.triggered.connect(self.buttonController.enable_all_buttons)
        helpMenu.addAction(self.reset_gui_buttons_actions)
        self.about_action = QtWidgets.QAction("About")
        self.about_action.triggered.connect(self.show_about_message)
        helpMenu.addAction(self.about_action)
    def show_about_message(self):
        """Show the About dialog with a link to the project repository."""
        text = "The Electric Vehicle GrandPrix Autonomous Race Control System " \
            "is brought to you by the RoboJackets at Georgia Tech.<br>" \
            "Contribute to the RCS at <a href='https://github.com/RoboJackets/evgp-rcs'>https://github.com/RoboJackets/evgp-rcs</a>"
        QMessageBox.question(self, 'About the EVGP Race Control System',
                             text,
                             QMessageBox.Ok)
    def create_race_state_buttons(self):
        """Create the five race-wide control buttons.

        :returns: [grid, start, red-flag, e-stop, finish] in that order; the
            ButtonStateController relies on this ordering."""
        grid_active_race_button = QPushButton("GRID ACTIVE RACE")
        grid_active_race_button.clicked.connect(lambda: self.race_state_change_callback(RaceState.GRID_ACTIVE))
        start_race_button = QPushButton("START RACE")
        # start only becomes clickable once the race is gridded
        start_race_button.setEnabled(False)
        start_race_button.clicked.connect(lambda: self.race_state_change_callback(RaceState.GREEN_GREEN))
        red_flag_race_button = QPushButton("RED FLAG RACE")
        red_flag_race_button.clicked.connect(lambda: self.race_state_change_callback(RaceState.RED_FLAG))
        e_stop_race_button = QPushButton("E-STOP RACE")
        e_stop_race_button.clicked.connect(lambda: self.race_state_change_callback(RaceState.RED_RED))
        finish_race_button = QPushButton("FINISH RACE")
        finish_race_button.clicked.connect(lambda: self.race_state_change_callback(RaceState.IN_GARAGE))
        return [grid_active_race_button, start_race_button, red_flag_race_button, e_stop_race_button, finish_race_button]
    def create_team_state_buttons(self):
        """Create the three per-team control buttons.

        :returns: [in-garage, red-flag, e-stop] in that order."""
        in_garage_team_button = QPushButton("IN GARAGE TEAM")
        in_garage_team_button.clicked.connect(lambda: self.team_state_change_callback(RaceState.IN_GARAGE))
        red_flag_team_button = QPushButton("RED FLAG TEAM")
        red_flag_team_button.clicked.connect(lambda: self.team_state_change_callback(RaceState.RED_FLAG))
        e_stop_team_button = QPushButton("E-STOP TEAM")
        e_stop_team_button.clicked.connect(lambda: self.team_state_change_callback(RaceState.RED_RED))
        return [in_garage_team_button, red_flag_team_button, e_stop_team_button]
    def team_state_change_callback(self, state):
        """Apply *state* to the currently selected team, if any."""
        if self.selectedIndex is not None:
            self.model.team_state_change(self.selectedIndex, state)
    def race_state_change_callback(self, state):
        """Apply *state* to the whole race via the model."""
        self.model.race_state_change(state)
    def move_to_active_race(self):
        """Move the selected team into the active race table."""
        if self.selectedIndex is not None:
            changed = self.model.move_to_active_race(self.selectedIndex)
            if changed:
                # the row moved between tables, so the old selection is stale
                self.clearAllSelections()
    def remove_from_active_race(self):
        """Move the selected team back to the standby (other teams) table."""
        if self.selectedIndex is not None:
            changed = self.model.move_to_standby_race(self.selectedIndex)
            if changed:
                self.clearAllSelections()
    def start_server(self):
        """Create the TCPServer, wire its signals to the model, and run it on
        a dedicated QThread. Idempotent via ``is_server_started``."""
        if not self.is_server_started:
            self.is_server_started = True
            port = 12017
            server_backlog = 10
            send_hz = 10
            # only IPs from the racer list may connect
            ip_list = self.model.teams_list.keys()
            self.server = TCPServer(port, server_backlog, whitelist=ip_list, hz=send_hz)
            self.server.new_connection.connect(self.model.new_connection_handler)
            self.server.lost_connection.connect(self.model.lost_connection_handler)
            self.server.new_response.connect(self.model.new_response_handler)
            self.server.server_ready.connect(self.server_ready_handler)
            self.model.team_state_change_signal.connect(self.server.on_race_state_change)
            self.server_thread = QThread()
            self.server.moveToThread(self.server_thread)
            self.server_thread.started.connect(self.server.run_server)
            self.server_thread.start()
    def stop_server(self):
        """Stop the TCP server and its thread if they are running."""
        if self.is_server_started:
            self.server.stop()
            self.is_server_started = False
            self.server_thread.quit()
    @QtCore.pyqtSlot(bool)
    def server_ready_handler(self, isReady):
        """Swap in the real UI once the server is up; on failure, tell the
        operator to restart and close the window."""
        if isReady:
            self.setCentralWidget(self.horizontalGroupBox)
        if not isReady:
            self.server_wait_label.setText("Server Error: Please restart program.")
            QMessageBox.question(self, 'Server Error',
                "Server failed to start.\nPress \"Close\" to quit program, then fix your network issues and restart this program.",
                QMessageBox.Close)
            self.close()
    @QtCore.pyqtSlot(QItemSelection, QItemSelection)
    def standby_race_table_selection_handler(self, filterTableSelection, filterTableDeselected):
        """A pick in the standby table clears the active table's selection
        and records the *source-model* row of the chosen team."""
        if filterTableSelection.indexes():
            self.activeRaceTable.selectionModel().clearSelection()
            self.selectedIndex = self.standbyRaceProxyModel.mapToSource(filterTableSelection.indexes()[0]).row()
    @QtCore.pyqtSlot(QItemSelection, QItemSelection)
    def active_race_table_selection_handler(self, tableSelection, tableDeselected):
        """Mirror of the standby handler for the active-race table."""
        if tableSelection.indexes():
            self.standbyRaceTable.selectionModel().clearSelection()
            self.selectedIndex = self.activeRaceProxyModel.mapToSource(tableSelection.indexes()[0]).row()
    def clearAllSelections(self):
        """Deselect both tables and forget the remembered row."""
        self.activeRaceTable.selectionModel().clearSelection()
        self.standbyRaceTable.selectionModel().clearSelection()
        self.selectedIndex = None
self.selectedIndex = None
if __name__ == "__main__":
    # Launch the GUI only when executed as a script; importing this module
    # (e.g. from tests) must not open a window or start the TCP server.
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    app.exec_()
| 2.296875 | 2 |
5day/gui01.py | jsjang93/joony | 0 | 12773945 | <gh_stars>0
# gui01.py
# Python GUI --> tkinter, wxPython,PyQt
# Widgets (Button, Label, Entry, ...)
from tkinter import *
def btn1Click():
    # Prepend "<current text> + greeting" at position 0 of the entry.
    # NOTE(review): insert(0, get()+...) leaves the old text in place as well,
    # so the entry content gets duplicated -- probably intended delete+insert.
    text1.insert(0,text1.get()+"님 어서오세요! ")
window = Tk()
#################
# "name" label (Korean caption kept as-is: it is user-visible text)
label1 = Label(window,text="이 름")
label1.grid(row=0,column=0)
#label1.pack()
text1 = Entry(window)
text1.grid(row=0,column=1)
#text1.pack()
# "input" button wired to btn1Click above
button1 = Button(window, bg="yellow",text="입 력",command=btn1Click)
#button1["fg"] = "yellow"
#button1["bg"] = "red"
button1.grid(row=1,column=1)
#button1.pack()
#################
window.mainloop() # blocking event loop: keeps the window on screen
| 3.421875 | 3 |
04_datacamp/solutions/21_solutions.py | HirahTang/datascience_starter_course | 3 | 12773946 | sns.violinplot(data=df, y='Fare', x='Survived', hue='Sex', split=True)
| 1.992188 | 2 |
libptmalloc/frontend/commands/gdb/ptparam.py | nccgroup/libptmalloc | 36 | 12773947 | # -*- coding: future_fstrings -*-
from __future__ import print_function
import argparse
import binascii
import struct
import sys
import logging
from libptmalloc.frontend import printutils as pu
from libptmalloc.ptmalloc import ptmalloc as pt
from libptmalloc.frontend import helpers as h
from libptmalloc.frontend.commands.gdb import ptcmd
log = logging.getLogger("libptmalloc")
log.trace("ptparam.py")
try:
import gdb
except ImportError:
print("Not running inside of GDB, exiting...")
raise Exception("sys.exit()")
class ptparam(ptcmd.ptcmd):
    """Command to print information about malloc parameters represented by the malloc_par structure
    """
    def __init__(self, ptm):
        log.debug("ptparam.__init__()")
        # register under the gdb command name "ptparam"
        super(ptparam, self).__init__(ptm, "ptparam")
        self.parser = argparse.ArgumentParser(
            description="""Print malloc parameter(s) information
Analyze the malloc_par structure's fields.""",
            add_help=False,
            formatter_class=argparse.RawTextHelpFormatter,
            epilog='NOTE: Last defined mp_ will be cached for future use')
        # self.parser.add_argument(
        #     "-v", "--verbose", dest="verbose", action="count", default=0,
        #     help="Use verbose output (multiple for more verbosity)"
        # )
        # -h is declared manually because add_help=False above
        self.parser.add_argument(
            "-h", "--help", dest="help", action="store_true", default=False,
            help="Show this help"
        )
        self.parser.add_argument(
            "-l", dest="list", action="store_true", default=False,
            help="List malloc parameter(s)' address only"
        )
        self.parser.add_argument(
            "--use-cache", dest="use_cache", action="store_true", default=False,
            help="Do not fetch parameters data if you know they haven't changed since last time they were cached"
        )
        self.parser.add_argument(
            "address", default=None, nargs="?", type=h.string_to_int,
            help="A malloc_par struct address. Optional with cached malloc parameters"
        )
        # allows to enable a different log level during development/debugging
        self.parser.add_argument(
            "--loglevel", dest="loglevel", default=None,
            help=argparse.SUPPRESS
        )
    @h.catch_exceptions
    @ptcmd.ptcmd.init_and_cleanup
    def invoke(self, arg, from_tty):
        """Inherited from gdb.Command
        See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html
        """
        log.debug("ptparam.invoke()")
        # refresh (or reuse) the cached malloc_par, then either list the
        # address or pretty-print the whole structure
        self.cache.update_param(self.args.address, show_status=True, use_cache=self.args.use_cache)
        if self.args.list:
            self.list_parameters()
            return
        print(self.cache.par)
    def list_parameters(self):
        """List malloc parameter(s)' address only"""
        par = self.cache.par
        print("Parameter(s) found:", end="\n")
        print("  parameter @ ", end="")
        pu.print_header("{:#x}".format(int(par.address)), end="\n")
tests/base/env/spaces/test_discrete.py | pocokhc/simple_rl | 1 | 12773948 | <gh_stars>1-10
import unittest
import numpy as np
from srl.base.env.spaces import DiscreteSpace
from tests.base.env.space_test import SpaceTest
class Test(unittest.TestCase):
    """Unit tests for DiscreteSpace(5), exercised via the shared SpaceTest
    helper."""
    def setUp(self) -> None:
        self.space = DiscreteSpace(5)
        self.assertTrue(self.space.n == 5)
        self.tester = SpaceTest(self, self.space)
    def _check_action(self, decode_action, true_action):
        # decoded actions must come back as plain python ints
        self.assertTrue(isinstance(decode_action, int))
        self.assertTrue(decode_action == true_action)
    def test_space(self):
        # sample
        # sampling with [3] (presumably an invalid-action list) must never
        # yield 3; over 100 draws all other values 0,1,2,4 appear
        actions = [self.space.sample([3]) for _ in range(100)]
        actions = sorted(list(set(actions)))
        np.testing.assert_array_equal(actions, [0, 1, 2, 4])
        # action discrete
        decode_action = self.tester.check_action_discrete(5, action=2)
        self._check_action(decode_action, 2)
        self.tester.check_action_encode(3, 3)
        # action_continuous
        # continuous actions map onto [0, 4]; 3.3 rounds to discrete 3
        decode_action = self.tester.check_action_continuous(
            true_n=1,
            true_low=[0],
            true_high=[4],
            action=[3.3],
        )
        self._check_action(decode_action, 3)
        # observation discrete
        self.tester.check_observation_discrete(
            true_shape=(1,),
            true_low=[0],
            true_high=[4],
            state=2,
            encode_state=[2],
        )
        # observation continuous
        self.tester.check_observation_continuous(
            true_shape=(1,),
            true_low=[0],
            true_high=[4],
            state=2,
            encode_state=[2.0],
        )
if __name__ == "__main__":
    unittest.main(module=__name__, defaultTest="Test.test_space", verbosity=2)
| 2.578125 | 3 |
py/zk/zkjson.py | acidburn0zzz/vitess | 1 | 12773949 | <reponame>acidburn0zzz/vitess<filename>py/zk/zkjson.py<gh_stars>1-10
# Implement a sensible wrapper that treats python objects as dictionaries
# with sensible restrictions on serialization.
import json
def _default(o):
if hasattr(o, '_serializable_attributes'):
return dict([(k, v)
for k, v in o.__dict__.iteritems()
if k in o._serializable_attributes])
return o.__dict__
# Shared keyword defaults applied by dump()/dumps(): the attribute-filtering
# serializer above, stable key order, and 2-space indentation.
_default_kargs = {'default': _default,
                  'sort_keys': True,
                  'indent': 2,
                  }
def dump(*pargs, **kargs):
    """json.dump with the module-wide defaults; caller kwargs take priority."""
    merged = dict(_default_kargs)
    merged.update(kargs)
    return json.dump(*pargs, **merged)
def dumps(*pargs, **kargs):
    """json.dumps with the module defaults; caller kwargs take precedence."""
    merged = dict(_default_kargs)
    merged.update(kargs)
    return json.dumps(*pargs, **merged)
# Deserialization needs no special hooks; re-export the stdlib functions.
load = json.load
loads = json.loads
class ZkJsonObject(object):
    """Base class for objects (de)serialized via this module's JSON helpers."""

    # Whitelist of attribute names the encoder may emit (see _default).
    _serializable_attributes = ()

    def to_json(self):
        """Serialize this object using the module's restricted encoder."""
        return dumps(self)

    @classmethod
    def from_json(cls, data):
        """Build an instance from a JSON string; empty/None data yields a blank object."""
        obj = cls()
        if data:
            obj.__dict__.update(loads(data))
        return obj
| 2.46875 | 2 |
1_joint_alignment/STN/atn_helpers/matrix_exp.py | BGU-CS-VIL/JA-POLS | 16 | 12773950 | <gh_stars>10-100
import tensorflow as tf
#import tensorflow.compat.v1 as tf
#tf.disable_v2_behavior()
def expm(params_matrix):
    """Matrix-exponentiate a batch of flattened 2x3 affine parameter matrices.

    Reshapes the input to [-1, 2, 3], appends a homogeneous [0, 0, 0] row to
    each matrix, and delegates to matrix_expnential, which returns the result
    flattened to shape [-1, 6].
    """
    # Take the matrix exponential of the affine map, in order to get an affine-diffeomorphism map.
    exp_params = tf.reshape(params_matrix,[-1,2,3])
    # Append a row of 0,0,0 (cast to float32) before computing the exponent:
    initial = tf.zeros_like(tf.slice(exp_params,[0,0,0], [-1,1,3]))
    initial = tf.cast(initial,tf.float32)
    exp_params = tf.concat([exp_params,initial], 1)
    return matrix_expnential(exp_params) # To skip the exponential entirely, return -params_matrix instead.
def matrix_expnential(matrices):
    """Per-sample matrix exponential of a batch of 3x3 matrices.

    Args:
        matrices: [-1, 3, 3] batch (expm above appends the homogeneous
            [0, 0, 0] row before calling this).

    Returns:
        A [-1, 6] float64 tensor holding the top 2x3 block of the matrix
        exponential of each input matrix.
    """
    # Compute in float64 for tf.linalg.expm; casting the whole batch once
    # here makes the previous per-element tf.cast inside the lambda
    # redundant, so it has been removed.
    matrices = tf.cast(matrices, tf.float64)
    results = tf.map_fn(lambda x: tf.slice(tf.linalg.expm(x), [0, 0], [2, 3]), matrices)
    # results = tf.map_fn(lambda x: tf.cast([[1,0,0],[0,1,0]], tf.float64), matrices)  # DEBUG: identity transform
    return tf.reshape(results, [-1, 6])
# ---------------- OLD CODE: -----------------------
# import tensorflow as tf
#
# def expm(params_matrix,batch_size):
# # Take the matrix exponentioal of the affine map, inorder to get an affine-defiomorphism map.
# exp_params = tf.reshape(params_matrix,[-1,2,3])
# # append a row of 0,0,0 before computing the exponent:
# initial = tf.zeros_like(tf.slice(exp_params,[0,0,0],[-1,1,3]))
# initial = tf.cast(initial,tf.float32)
# exp_params = tf.concat([exp_params,initial],1)
# return matrix_expnential(exp_params,batch_size) #if we want to remove the exp, return this: -params_matrix
#
#
# def matrix_expnential(matrices,batch_size):
# matrices = tf.cast(matrices,tf.float32)
# x_unpacked = tf.unstack(matrices,num=batch_size) # defaults to axis 0, returns a list of tensors
# processed = [] # this will be the list of processed tensors
# for t in x_unpacked:
# t = tf.cast(t,tf.float64)
# result_tensor = tf.linalg.expm(t)
# result_tensor = tf.slice(result_tensor,[0,0],[2,3])
# processed.append(result_tensor)
#
# #output = tf.concat(tf.cast(processed, tf.float32), 0)
# return tf.reshape(processed,[-1,6]) | 2.125 | 2 |