| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
min-blockchain/app/views/blockchain.py
|
JoMingyu/Blockchain-py
| 12
|
12774751
|
<reponame>JoMingyu/Blockchain-py
from uuid import uuid4
from flask import Response
from flask_restful import Resource, request
from blockchain.blockchain import Blockchain
blockchain = Blockchain()
class Node(Resource):
def post(self):
"""
Add new node to blockchain
"""
node_id = blockchain.register_node(request.host)
return {
'message': 'New node has been added.',
'node_id': node_id,
'nodes': list(blockchain.nodes)
}, 201
class Chain(Resource):
def get(self):
"""
Returns blockchain
"""
chains = blockchain.chain
return {
'chains': chains,
'length': len(chains)
}, 200
class Mine(Resource):
def post(self):
if not request.is_json:
return Response('', 400)
req = request.get_json()
node_id = req.get('node_id')
if not all([node_id]):
return Response('', 400)
if node_id not in blockchain.nodes:
return Response('Invalid node id', 400)
last_block = blockchain.last_block
nonce = blockchain.proof_of_work(last_block['nonce'])
# Mine
blockchain.new_transaction(
sender='0',
recipient=node_id,
amount=1
)
previous_hash = blockchain.hash_block(last_block)
new_block = blockchain.new_block(nonce, previous_hash)
# Generates new block
return {
'message': 'New Block Forged',
'block': {
'version': new_block['version'],
'transactions': new_block['transactions'],
'timestamp': new_block['timestamp'],
'nonce': new_block['nonce']
}
}, 200
class Transaction(Resource):
def post(self):
if not request.is_json:
return Response('', 400)
req = request.get_json()
sender = req.get('sender')
recipient = req.get('recipient')
amount = req.get('amount')
if not all([sender, recipient, amount]):
return Response('', 400)
if sender not in blockchain.nodes or recipient not in blockchain.nodes:
return Response('Invalid sender id or recipient id', 400)
blockchain.new_transaction(sender, recipient, amount)
return Response('', 201)
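# Illustrative wiring sketch (not from the original repository): one way these
# Resource classes could be mounted with flask_restful. The route paths below are
# hypothetical placeholders, not the project's actual URL map.
if __name__ == '__main__':
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    api.add_resource(Node, '/node')                 # POST registers this host as a node
    api.add_resource(Chain, '/chain')               # GET returns the full chain
    api.add_resource(Mine, '/mine')                 # POST mines a new block for a node_id
    api.add_resource(Transaction, '/transaction')   # POST adds a pending transaction
    app.run(debug=True)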
| 2.90625
| 3
|
backend/sshwrapper.py
|
Teknologforeningen/svaksvat
| 0
|
12774752
|
"""
Platform independent ssh port forwarding
Much code stolen from the paramiko example
"""
import select
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
import paramiko
SSH_PORT = 22
DEFAULT_PORT = 5432
class ForwardServer (SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
class Handler (SocketServer.BaseRequestHandler):
def handle(self):
try:
chan = self.ssh_transport.open_channel('direct-tcpip',
(self.chain_host,
self.chain_port),
self.request.getpeername())
except Exception as e:
print('Incoming request to %s:%d failed: %s' % (self.chain_host,
self.chain_port,
repr(e)))
return
if chan is None:
print('Incoming request to %s:%d was rejected by the SSH server.'
% (self.chain_host, self.chain_port))
return
print('Connected! Tunnel open %r -> %r -> %r' %
(self.request.getpeername(),
chan.getpeername(), (self.chain_host, self.chain_port)))
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
peername = self.request.getpeername()
chan.close()
self.request.close()
print('Tunnel closed from %r' % (peername,))
def forward_tunnel(local_port, remote_host, remote_port, transport):
# this is a little convoluted, but lets me configure things for the Handler
# object. (SocketServer doesn't give Handlers any way to access the outer
# server normally.)
class SubHandler(Handler):
chain_host = remote_host
chain_port = remote_port
ssh_transport = transport
ForwardServer(('', local_port), SubHandler).serve_forever()
def connect_ssh(server, login, password, port=SSH_PORT):
"""Return a paramiko.SSHClient on successfull connection, otherwise returns
None
"""
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
print('Connecting to ssh host %s:%d ...' % (server, port))
try:
client.connect(server, port, login, password=password)
print("Connection successful")
return client
except Exception as e:
print('*** Failed to connect to %s:%d: %r' % (server, port, e))
return None
def portforward(client, threadfinishedmutex,
remote_host,
local_port=DEFAULT_PORT,
remote_port=DEFAULT_PORT):
"""Neverending portforwarding thread. Locks threadfinishedmutex
on failure.
client has to be a connected paramiko.SSHClient."""
print('Now forwarding port %d to %s:%d ...' % (local_port, remote_host,
remote_port))
try:
forward_tunnel(local_port, remote_host, remote_port,
client.get_transport())
threadfinishedmutex.acquire()
except Exception as e:
threadfinishedmutex.acquire()
raise e
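# A minimal usage sketch added for illustration (not part of the original module).
# The host name, credentials, and remote host below are hypothetical placeholders;
# threading.Lock() is assumed as the "threadfinishedmutex" expected by portforward().
if __name__ == '__main__':
    import threading
    client = connect_ssh('ssh.example.com', 'user', 'secret')
    if client is not None:
        mutex = threading.Lock()
        worker = threading.Thread(
            target=portforward,
            args=(client, mutex, 'db.internal'),  # forwards localhost:5432 -> db.internal:5432
        )
        worker.daemon = True
        worker.start()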
| 2.796875
| 3
|
rl/algorithms/qlearning.py
|
cbschaff/nlimb
| 12
|
12774753
|
import numpy as np
import tensorflow as tf
from rl.losses import QLearningLoss
from rl.algorithms import OnlineRLAlgorithm
from rl.runner import *
from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from rl import util
from deeplearning.layers import Adam, RunningNorm
from deeplearning.schedules import LinearSchedule
from deeplearning import logger
from collections import deque
import time
class QLearning(OnlineRLAlgorithm):
def defaults(self):
return {
'lr': 1e-4,
'momentum': 0.9,
'beta2': 0.999,
'clip_norm': 10.,
'gamma': 0.99,
'learning_starts': int(1e5),
'exploration_timesteps': int(1e6),
'final_eps': 0.02,
'target_update_freq': int(1e4),
'prioritized_replay': True,
'huber_loss': True,
'buffer_size': int(1e6),
'replay_alpha': 0.6,
'replay_beta': 0.4,
't_beta_max': int(1e7)
}
def __init__(self,
logdir,
env_fn,
model_fn,
nenv,
rollout_length=1,
batch_size=32,
callback=None,
**kwargs
):
defaults = self.defaults()
for k in kwargs:
assert k in defaults, "Unknown argument: {}".format(k)
defaults.update(kwargs)
super().__init__(logdir, env_fn, model_fn, nenv, rollout_length, batch_size, callback, runner_flags=[], **defaults)
self.target_sync = tf.group([tf.assign(v1,v2) for v1,v2 in zip(self.loss.qtarg.variables(), self.loss.qvals.variables())])
if self.args.prioritized_replay:
self.buffer = PrioritizedReplayBuffer(self.args.buffer_size, alpha=self.args.replay_alpha)
else:
self.buffer = ReplayBuffer(self.args.buffer_size)
# determine if the network has a RunningNorm submodule that needs to be updated.
submods = self.opt.find_submodules_by_instance(RunningNorm)
self.rn = submods[0] if len(submods) > 0 else None
self.losses = deque(maxlen=100)
self.nsteps = 0
self.last_target_sync = (self.t // self.args.target_update_freq) * self.args.target_update_freq
self.beta_schedule = LinearSchedule(self.args.t_beta_max, 1.0, self.args.replay_beta)
self.eps_schedule = LinearSchedule(int(self.args.exploration_timesteps), self.args.final_eps, 1.0)
self._time_start = time.time()
self._t_start = self.t
def _def_loss(self, model_fn, env):
target_network = model_fn(env)
target_network.build('target', self.nenv, self.batch_size, trainable=False)
# extra network for double dqn. Tie variables with network
return QLearningLoss('loss', model_fn(env), model_fn(env), target_network, gamma=self.args.gamma, use_huber_loss=self.args.huber_loss)
def _def_opt(self, loss):
return Adam(
'opt',
loss,
lr=self.args.lr,
beta1=self.args.momentum,
beta2=self.args.beta2,
clip_norm=self.args.clip_norm
)
def _before_step(self):
if self.t == 0 or self.t - self.last_target_sync > self.args.target_update_freq:
self.target_sync.run()
self.last_target_sync = self.t
self.actor.update_eps(self.eps_schedule.value(self.t))
def _process_rollout(self, rollout):
self._update_buffer(rollout)
while len(self.buffer) < self.args.learning_starts and len(self.buffer) != self.args.buffer_size:
self._update_buffer(self.runner.rollout())
self.t += self.timesteps_per_step
if self.args.prioritized_replay:
obs, acs, rews, next_obs, dones, weights, self._inds = self.buffer.sample(self.nenv * self.batch_size, self.beta_schedule.value(self.t))
inputs=[obs, next_obs, next_obs, rews, acs, dones, weights[...,None]]
else:
obs, acs, rews, next_obs, dones = self.buffer.sample(self.nenv * self.batch_size)
inputs=[obs, next_obs, next_obs, rews, acs, dones]
return inputs
def _update_buffer(self, rollout):
if self.rn is not None:
x = np.asarray(rollout.obs)
self._update_running_norm(x.reshape([-1] + list(x.shape[2:])))
for i,obs in enumerate(rollout.obs):
next_obs = rollout.end_ob if i == len(rollout.obs) - 1 else rollout.obs[i+1]
for j in range(self.nenv):
ob = obs[j]
next_ob = next_obs[j]
ac = rollout.actions[i][j]
r = rollout.rewards[i][j]
done = rollout.dones[i][j]
self.buffer.add(ob, ac, r, next_ob, done)
def _update_model(self, data):
outs = self.opt.run(inputs=data, state=[], state_out=False, update=True, td=True)
if self.args.prioritized_replay:
self.buffer.update_priorities(self._inds, priorities=np.abs(outs['td'][:,0]) + 1e-6)
self.losses.append(outs['out'])
return outs
def _after_step(self, rollout, data, outs):
self.nsteps += 1
if self.nsteps % 100 == 0:
logger.log("========================| Timestep: {} |========================".format(self.t))
meanloss = np.mean(np.array(self.losses), axis=0)
# Logging stats...
logger.logkv('Loss', meanloss)
logger.logkv('timesteps', self.t)
logger.logkv('serial timesteps', self.t / self.nenv)
logger.logkv('mean episode length', np.mean(self.runner.get_episode_lengths()))
logger.logkv('mean episode reward', np.mean(self.runner.get_episode_rewards()))
logger.logkv('fps', int((self.t - self._t_start) / (time.time() - self._time_start)))
logger.logkv('time_elapsed', time.time() - self._time_start)
logger.logkv('time spent exploring', self.actor.eps)
logger.dumpkvs()
def _update_running_norm(self, x):
mean = x.mean(axis=0)
var = x.var(axis=0)
count = x.shape[0]
self.rn.update(mean, var, count)
def update_lr(self, new_lr):
self.opt.update_lr(new_lr)
| 1.992188
| 2
|
examples/hello_world.py
|
MartialMad/py-dimensional-analysis
| 2
|
12774754
|
<gh_stars>1-10
import logging
def main():
import danalysis as da
si = da.standard_systems.SI # predefined standard units
s = da.Solver(
{
'a' : si.M, # [a] is mass
'b' : si.L*si.M*si.T**-2, # [b] is force (alt. si.F)
'c' : si.T, # [c] is time
'd' : si.Pressure # [d] is pressure
},
si.L*si.T # target dimension
)
print(s.solve())
# Found 2 variable products of variables
# {
# a:Q(M),
# b:Q(L*M*T**-2),
# c:Q(T),
# d:Q(L**-1*M*T**-2)
# }, each of dimension L*T:
# 1: [a*c**-1*d**-1] = L*T
# 2: [b**0.5*c*d**-0.5] = L*T
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| 2.59375
| 3
|
src/core/tasking/llnms-register-task.py
|
marvins/LLNMS
| 0
|
12774755
|
<reponame>marvins/LLNMS<gh_stars>0
#!/usr/bin/env python
#
# File: llnms-register-task.py
# Author: <NAME>
# Date: 6/21/2015
#
# Purpose: Register a Task with LLNMS
#
__author__ = '<NAME>'
# Python Libraries
import os, sys, argparse
# LLNMS Libraries
if os.environ.get('LLNMS_HOME') is not None:
sys.path.append(os.environ['LLNMS_HOME'] + '/lib')
import llnms
# ------------------------------------ #
# - Parse the Command-Line - #
# ------------------------------------ #
def Parse_Command_Line():
# Create an Argument Parser
parser = argparse.ArgumentParser(description='Register an LLNMS Task.')
# Version Info
parser.add_argument('-v', '--version',
action='version',
version='%(prog)s ' + llnms.info.Get_Version_String(),
help='Print the version information.')
# Verbose Mode
parser.add_argument('--verbose',
dest='verbose_flag',
required=False,
default=False,
action='store_true',
help='Print with verbose output.')
# Task Filename
parser.add_argument('-t','--task-file',
dest='task_path',
required=True,
help='LLNMS Task XML file to register.')
# Return the parser
return parser.parse_args()
# ------------------------------ #
# - Process Task Args - #
# ------------------------------ #
def Process_Input( options ):
# Load the new task
task = llnms.Task.Task(filename=options.task_path)
# Return
return task
# ---------------------------- #
# - Main Function - #
# ---------------------------- #
def Main():
# Retrieve LLNMS_HOME
llnms_home=os.environ['LLNMS_HOME']
# Parse Command-Line Arguments
options = Parse_Command_Line()
# Validate Arguments
new_task = Process_Input( options )
# Add to the task list
task_list = llnms.Task.llnms_load_tasks(llnms_home)
task_list.append(new_task)
# Write the task list
llnms.Task.llnms_write_registered_task_list(llnms_home, task_list)
if __name__ == '__main__':
Main()
| 2.234375
| 2
|
model.py
|
karth295/hacks-on-hacks
| 0
|
12774756
|
<reponame>karth295/hacks-on-hacks
import csv
def delta_growth_by_zipcode(file):
growth = {}
with open(file, 'rb') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
growth[float(line[0])] = float(line[2]) - float(line[1]) # delta in growth by zip code
return growth
def main():
growth = delta_growth_by_zipcode("growth.csv")
print growth
if __name__ == '__main__':
main()
| 3.40625
| 3
|
python/treelas/idx.py
|
EQt/treelas
| 3
|
12774757
|
<gh_stars>1-10
from graphidx.idx import ( # noqa
BiAdjacent,
ChildrenIndex,
PartitionIndex,
cluster,
)
| 0.964844
| 1
|
checks/load_favicons_test.py
|
thegreenwebfoundation/green-spider
| 19
|
12774758
|
from pprint import pprint
import httpretty
from httpretty import httprettified
import unittest
from checks import load_favicons
from checks.config import Config
@httprettified
class TestFavicons(unittest.TestCase):
def test_favicons(self):
# This site has a favicon
url1 = 'http://example1.com/favicon.ico'
httpretty.register_uri(httpretty.HEAD, url1,
body='',
adding_headers={
"Content-type": "image/x-ico",
})
# This site has no favicon
url2 = 'http://example2.com/favicon.ico'
httpretty.register_uri(httpretty.HEAD, url2,
status=404,
body='Not found',
adding_headers={
"Content-type": "text/plain",
})
config = Config(urls=['http://example1.com/path/', 'http://example2.com/'])
checker = load_favicons.Checker(config=config)
result = checker.run()
pprint(result)
self.assertEqual(result, {
'http://example1.com/path/': {
'url': 'http://example1.com/favicon.ico'
}
})
| 2.765625
| 3
|
hkl/tests/test_diffract.py
|
bluesky/hklpy
| 1
|
12774759
|
import gi
import numpy.testing
import pint
import pyRestTable
import pytest
gi.require_version("Hkl", "5.0")
# NOTE: MUST call gi.require_version() BEFORE import hkl
from hkl.calc import A_KEV
from hkl.diffract import Constraint
from hkl import SimulatedE4CV
class Fourc(SimulatedE4CV):
...
@pytest.fixture(scope="function")
def fourc():
fourc = Fourc("", name="fourc")
fourc.wait_for_connection()
fourc._update_calc_energy()
return fourc
def test_calc_energy_permit(fourc):
assert fourc._calc_energy_update_permitted
fourc.energy_update_calc_flag.put(False)
assert not fourc._calc_energy_update_permitted
nrg = fourc.calc.energy
fourc.energy.put(5.989) # BTW: Cr K absorption edge
numpy.testing.assert_almost_equal(fourc.energy.get(), 5.989)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed()
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed(fourc.energy.get())
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed(5.989)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._update_calc_energy()
numpy.testing.assert_almost_equal(fourc.calc.energy, 5.989)
# test that value argument is ignored
fourc._update_calc_energy(A_KEV / 1)
numpy.testing.assert_almost_equal(fourc.calc.energy, 5.989)
def test_energy(fourc):
numpy.testing.assert_almost_equal(fourc.energy.get(), fourc.calc.energy)
for nrg in (8.0, 8.04, 9.0, 0.931):
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
numpy.testing.assert_almost_equal(fourc.calc.wavelength, A_KEV / nrg)
def test_energy_offset(fourc):
assert fourc.energy_offset.get() == 0
nrg = 8.0
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), fourc.calc.energy)
for offset in (0.05, -0.1):
fourc.energy_offset.put(offset)
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get() + offset, fourc.calc.energy)
def test_energy_offset_units(fourc):
assert fourc.energy_offset.get() == 0
assert fourc.energy_units.get() == "keV"
fourc.energy_units.put("eV")
assert fourc.energy_units.get() == "eV"
nrg = 931
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get() / 1000, fourc.calc.energy)
for offset in (5, -6):
fourc.energy_offset.put(offset)
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal((fourc.energy.get() + offset) / 1000, fourc.calc.energy)
def test_energy_units_931eV(fourc):
assert fourc.energy_units.get() == "keV"
fourc.energy_units.put("eV")
assert fourc.energy_units.get() == "eV"
eV = 931
fourc.energy.put(eV)
numpy.testing.assert_almost_equal(fourc.energy.get(), eV)
numpy.testing.assert_almost_equal(fourc.calc.energy, eV / 1000)
def test_energy_units_issue79(fourc):
# issue #79
fourc.energy_units.put("eV")
fourc.energy_offset.put(0)
eV = 1746
fourc.energy.put(eV)
numpy.testing.assert_almost_equal(fourc.calc.energy, eV / 1000)
numpy.testing.assert_almost_equal(
# fmt: off
pint.Quantity(fourc.calc.energy, "keV").to(fourc.energy_units.get()).magnitude,
fourc.energy.get(),
# fmt: on
)
def test_energy_units_offset(fourc):
fourc.energy_units.put("keV")
fourc.energy.put(7.985)
fourc.energy_offset.put(0.015)
assert fourc.calc.energy == 8.0
assert round(fourc.energy.get(), 6) == 7.985
fourc.energy.put(8)
assert fourc.calc.energy == 8.015
assert round(fourc.energy.get(), 6) == 8
fourc.energy_offset.put(0.0)
assert fourc.calc.energy == 8.0
def test_energy_units_issue86(fourc):
# issue #86
# changing units or offset changes .energy, not .calc.energy
fourc.energy.put(8)
fourc.energy_offset.put(0.015)
fourc.energy_units.put("eV")
# test interim state when fourc.energy value has not changed but units have
assert round(fourc.calc.energy, 6) == 8.015e-3
assert round(fourc.energy.get(), 1) == 8
fourc.energy.put(8000)
assert round(fourc.calc.energy, 8) == 8.000015
assert round(fourc.energy.get(), 1) == 8000
fourc.energy_offset.put(15)
assert round(fourc.calc.energy, 8) == 8.015
assert round(fourc.energy.get(), 1) == 8000
fourc.energy.put(8000)
assert round(fourc.calc.energy, 8) == 8.015
assert round(fourc.energy.get(), 1) == 8000
def test_names(fourc):
assert fourc.geometry_name.get() == "E4CV"
assert fourc.class_name.get() == "Fourc"
def test_forward_solutions_table(fourc):
fourc.energy.put(A_KEV / 1.54)
# (100) has chi ~ 0 which poses occasional roundoff errors
# (sometimes -0.00000, sometimes 0.00000)
sol = fourc.forward(1, 0, 0)
assert pytest.approx(sol.omega, 1e-5) == -30
assert pytest.approx(sol.chi, 1e-5) == 0
assert pytest.approx(sol.phi, 1e-5) == -90
assert pytest.approx(sol.tth, 1e-5) == -60
fourc.apply_constraints({"tth": Constraint(0, 180, 0, True)})
tbl = fourc.forward_solutions_table(
# fmt: off
[
[1, 1, 0],
[1, 1, 1],
[100, 1, 1], # no solutions
]
# fmt: on
)
received = str(tbl).splitlines()
expected = [
"=========== ======== ===== ======== ==== =====",
"(hkl) solution omega chi phi tth ",
"=========== ======== ===== ======== ==== =====",
"[1, 1, 0] 0 45.0 45.0 90.0 90.0 ",
"[1, 1, 1] 0 60.0 35.26439 45.0 120.0",
"[100, 1, 1] none ",
"=========== ======== ===== ======== ==== =====",
]
for r, e in zip(received, expected):
assert r == e
def test_pa(fourc, capsys):
tbl = fourc.pa()
assert isinstance(tbl, pyRestTable.Table)
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===================== ====================================================================",
"term value",
"===================== ====================================================================",
"diffractometer fourc",
"geometry E4CV",
"class Fourc",
"energy (keV) 8.00000",
"wavelength (angstrom) 1.54980",
"calc engine hkl",
"mode bissector",
"positions ===== =======",
" name value",
" ===== =======",
" omega 0.00000",
" chi 0.00000",
" phi 0.00000",
" tth 0.00000",
" ===== =======",
"constraints ===== ========= ========== ===== ====",
" axis low_limit high_limit value fit",
" ===== ========= ========== ===== ====",
" omega -180.0 180.0 0.0 True",
" chi -180.0 180.0 0.0 True",
" phi -180.0 180.0 0.0 True",
" tth -180.0 180.0 0.0 True",
" ===== ========= ========== ===== ====",
"sample: main ================ ===================================================",
" term value",
" ================ ===================================================",
" unit cell edges a=1.54, b=1.54, c=1.54",
" unit cell angles alpha=90.0, beta=90.0, gamma=90.0",
" [U] [[1. 0. 0.]",
" [0. 1. 0.]",
" [0. 0. 1.]]",
" [UB] [[ 4.07999046e+00 -2.49827363e-16 -2.49827363e-16]",
" [ 0.00000000e+00 4.07999046e+00 -2.49827363e-16]",
" [ 0.00000000e+00 0.00000000e+00 4.07999046e+00]]",
" ================ ===================================================",
"===================== ====================================================================",
]
assert len(out) == len(expected)
assert out == expected
def test_wh(fourc, capsys):
tbl = fourc.wh()
assert isinstance(tbl, pyRestTable.Table)
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===================== ========= =========",
"term value axis_type",
"===================== ========= =========",
"diffractometer fourc",
"sample name main",
"energy (keV) 8.00000",
"wavelength (angstrom) 1.54980",
"calc engine hkl",
"mode bissector",
"h 0.0 pseudo",
"k 0.0 pseudo",
"l 0.0 pseudo",
"omega 0 real",
"chi 0 real",
"phi 0 real",
"tth 0 real",
"===================== ========= =========",
]
assert len(out) == len(expected)
assert out == expected
def test_show_constraints(fourc, capsys):
fourc.show_constraints()
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===== ========= ========== ===== ====",
"axis low_limit high_limit value fit",
"===== ========= ========== ===== ====",
"omega -180.0 180.0 0.0 True",
"chi -180.0 180.0 0.0 True",
"phi -180.0 180.0 0.0 True",
"tth -180.0 180.0 0.0 True",
"===== ========= ========== ===== ====",
]
for r, e in zip(out, expected):
assert r.rstrip() == e.rstrip()
def test_apply_constraints(fourc):
fourc.energy.put(A_KEV / 1.54)
# fmt: off
fourc.apply_constraints(
{
"tth": Constraint(0, 180, 0, True),
"chi": Constraint(0, 180, 0, True),
}
)
# fmt: on
sol = fourc.forward(1, 0, 0)
assert pytest.approx(sol.omega, 1e-5) == 30
assert pytest.approx(sol.chi, 1e-5) == 0
assert pytest.approx(sol.phi, 1e-5) == 90
assert pytest.approx(sol.tth, 1e-5) == 60
def test_specify_engine():
import hkl
import numpy as np
from ophyd import Component as Cpt
from ophyd import PseudoSingle
from ophyd import SoftPositioner
class Q4C(hkl.E4CV):
q = Cpt(PseudoSingle, "")
omega = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
chi = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
phi = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
tth = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
q4c = Q4C("", name="q4c")
assert q4c.calc.engine.name == "hkl"
q4c = Q4C("", name="q4c", engine="q")
assert q4c.calc.engine.name == "q"
q = 1.0
angle = 2 * np.arcsin(q * q4c.calc.wavelength / 4 / np.pi) * 180 / np.pi
value = q4c.forward(q)
assert round(value.tth, 5) == round(angle, 5)
assert value.omega == 0.0
assert value.chi == 0.0
assert value.phi == 0.0
| 1.976563
| 2
|
probability_basic/discrete_distributions/discrete_distributions.py
|
OnlyBelter/MachineLearning_examples
| 14
|
12774760
|
<filename>probability_basic/discrete_distributions/discrete_distributions.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 16 18:47:10 2017
@author: xin
"""
# an example
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def example1():
# initialize the distribution parameters
myDF = stats.norm(5, 3) # Create the frozen distribution
# take 101 evenly spaced x values
X = np.linspace(-5, 15, 101)
# cdf: cumulative distribution function
y = myDF.cdf(X) # Calculate the corresponding CDF
plt.plot(X, y)
def bernoulli_distribution():
# Bernoulli distribution
# has a single parameter: p, the probability that a trial succeeds
p = 0.6
bernoulli_dist = stats.bernoulli(p)
# probability mass function (pmf) of the Bernoulli distribution
p_heads = bernoulli_dist.pmf(1) # probability of outcome 1 (defined as heads): 0.6
p_tails = bernoulli_dist.pmf(0) # probability of outcome 0 (defined as tails): 0.4
# draw 100 random variates from a Bernoulli distribution with p = 0.6
trials = bernoulli_dist.rvs(100)
print(np.sum(trials)) # e.g. 63, the number of ones in the sample
# histogram of the 100 draws, i.e. the empirical probability mass of the sample
plt.hist(trials/len(trials))
# plt.show()
plt.savefig('bernoulli_pmf.png', dpi=200)
plt.close()
# take 100 evenly spaced points in [0, 2]
x = np.linspace(0, 2, 100)
cdf = bernoulli_dist.cdf # cumulative distribution function (cdf) of the sampled distribution
plt.plot(x, cdf(x)) # cdf of the Bernoulli distribution above on the interval [0, 2]
# plt.show()
plt.savefig('bernoulli_cdf.png', dpi=200)
plt.close()
def binom_dis(n=1, p=0.1):
"""
Binomial distribution, simulating a coin-tossing experiment.
:param n: total number of trials
:param p: probability of success in a single trial
:return: number of successful trials
"""
binom_dis = stats.binom(n, p)
simulation_result = binom_dis.rvs(size=5) # draw 5 random variates from this distribution
print(simulation_result) # e.g. [ 7 11 13 8 13] for n=20, p=0.5; results differ between runs
prob_10 = binom_dis.pmf(10)
print(prob_10) # 0.117 for n=20, p=0.5
def poisson_dis(mu=3.0):
"""
Poisson distribution.
:param mu: average rate at which the random event occurs per unit time (or unit area)
:return:
"""
mu = 2
poisson_dist = stats.poisson(mu)
X2 = np.arange(5)
x_prob2 = poisson_dist.pmf(X2)
plt.plot(X2, x_prob2)
poisson_dist.pmf(3) # 0.18, the probability of exactly 3 occurrences
def compare_binom_poisson(mu=4, n1=8, n2=50):
"""
二项分布与泊松分布的比较
:param mu: 泊松分布的参数,保持mu不变
:param n1: 第一个二项分布中的实验次数,n比较小
:param n2: 第二个二项分布中的实验次数,n比较大
:return:
"""
# 为了具有可比性, 利用mu = n * p, 计算p
p1 = mu/n1 # 二项分布中的参数,单次实验成功的概率
p2 = mu/n2
poisson_dist = stats.poisson(mu) # 初始化泊松分布
binom_dist1 = stats.binom(n1, p1) # 初始化第一个二项分布
binom_dist2 = stats.binom(n2, p2) # 初始化第二个二项分布
# 计算pmf
X = np.arange(poisson_dist.ppf(0.0001), poisson_dist.ppf(0.9999))
y_po = poisson_dist.pmf(X)
print(X)
print(y_po)
y_bi1 = binom_dist1.pmf(X)
y_bi2 = binom_dist2.pmf(X)
# plotting
# First group
# when n is small and p is large, the two distributions differ noticeably
plt.figure(1)
plt.subplot(211)
plt.plot(X, y_bi1, 'b-', label='binom1 (n={}, p={})'.format(n1, p1))
plt.plot(X, y_po, 'r--', label='poisson (mu={})'.format(mu))
plt.ylabel('Probability')
plt.title('Comparing PMF of Poisson Dist. and Binomial Dist.')
plt.legend(loc='best', frameon=False)
# second group
# when n is large and p is small, the two are very similar
plt.subplot(212)
plt.plot(X, y_bi2, 'b-', label='binom1 (n={}, p={})'.format(n2, p2))
plt.plot(X, y_po, 'r--', label='poisson (mu={})'.format(mu))
plt.ylabel('Probability')
plt.legend(loc='best', frameon=False)
plt.show()
if __name__ == '__main__':
# bernoulli_distribution()
# binom_dis(20, 0.5)
compare_binom_poisson(mu=4, n1=8, n2=50)
| 3.359375
| 3
|
Apr_13.py
|
keiraaaaa/Leetcode
| 0
|
12774761
|
<gh_stars>0
'''
################
# 55. Jump Game
################
class Solution:
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums or (nums[0]==0 and len(nums)>1):
return False
if len(nums)==1:
return True
l = len(nums)
max_ = 0
for i in range (l-1):
if nums[i]+i>max_:
max_ = nums[i] + i
if max_>=l-1:
return True
if max_==i and nums[i]==0:
return False
return False
# nums = [2,3,1,1,4]
nums = [1,2,0,3,0]
solu = Solution()
print (solu.canJump(nums))
'''
########################
# 56. Merge Intervals
########################
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
if not intervals:
return intervals
out = []
for interval in sorted(intervals, key=lambda i: i.start):
if out and interval.start<=out[-1].end:
out[-1].end = max(interval.end, out[-1].end)
else:
out.append(interval)
return out
intervals = [Interval(2,3), Interval(8,10),Interval(1,6),Interval(15,18)]
# intervals = [[2,3],[8,10],[1,6],[15,18]]
solu = Solution()
t = solu.merge(intervals)
print (t[1].end)
# print (sorted(intervals, key=lambda i: i.start))
| 3.671875
| 4
|
dictionary/1_retrieve.py
|
fossabot/hotpot
| 1
|
12774762
|
import tarfile
import zipfile
from utils import download_from_url
# =================================
# Script purpose:
# Download and unzip all raw files
# =================================
# Word frequency calculations from Beijing Language and Culture University
download_from_url(
"http://bcc.blcu.edu.cn/downloads/resources/BCC_LEX_Zh.zip",
"./data/raw/BCC_LEX_Zh.zip",
overwrite=False,
)
# Word frequency calculations for blogs, converted to UTF-8
download_from_url(
"https://www.plecoforums.com/download/blogs_wordfreq-release_utf-8-txt.2602/",
"./data/raw/blogs_wordfreq-release_utf-8.txt",
overwrite=False,
)
# CEDICT dictionary
download_from_url(
"https://www.mdbg.net/chinese/export/cedict/cedict_1_0_ts_utf-8_mdbg.zip",
"./data/raw/cedict_1_0_ts_utf-8_mdbg.zip",
overwrite=True,
)
# CJKVI character decompositions
download_from_url(
"https://raw.githubusercontent.com/cjkvi/cjkvi-ids/master/ids.txt",
"./data/raw/cjkvi_ids.txt",
overwrite=True,
)
# Word segmentation index for jieba
download_from_url(
"https://github.com/fxsjy/jieba/raw/master/extra_dict/dict.txt.big",
"./data/raw/dict.txt.big.txt",
overwrite=True,
)
# FastText CommonCrawl word vectors
download_from_url(
"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.zh.300.bin.gz",
"./data/raw/cc.zh.300.bin.gz",
overwrite=True,
)
# Tencent word vectors
download_from_url(
"https://ai.tencent.com/ailab/nlp/en/data/Tencent_AILab_ChineseEmbedding.tar.gz",
"./data/raw/Tencent_AILab_ChineseEmbedding.tar.gz",
overwrite=True,
)
print("Unzipping BCC_LEX_Zh.zip... ", end="")
with zipfile.ZipFile("./data/raw/BCC_LEX_Zh.zip", "r") as zip_ref:
zip_ref.extractall("./data/raw/BCC_LEX_Zh")
print("ok")
print("Unzipping cedict_1_0_ts_utf-8_mdbg.zip... ", end="")
with zipfile.ZipFile("./data/raw/cedict_1_0_ts_utf-8_mdbg.zip", "r") as zip_ref:
zip_ref.extractall("./data/raw/cedict_1_0_ts_utf-8_mdbg")
print("ok")
print("Unzipping Tencent_AILab_ChineseEmbedding.zip... ", end="")
with zipfile.ZipFile("./data/raw/Tencent_AILab_ChineseEmbedding.zip", "r") as zip_ref:
zip_ref.extractall("./data/raw/Tencent_AILab_ChineseEmbedding")
print("ok")
| 2.609375
| 3
|
capreolus/benchmark/__init__.py
|
nimasadri11/capreolus
| 77
|
12774763
|
import os
import json
from copy import deepcopy
from collections import defaultdict
import ir_datasets
from capreolus import ModuleBase
from capreolus.utils.caching import cached_file, TargetFileExists
from capreolus.utils.trec import write_qrels, load_qrels, load_trec_topics
from capreolus.utils.loginit import get_logger
logger = get_logger(__name__)
def validate(build_f):
def validate_folds_file(self):
if not hasattr(self, "fold_file"):
logger.warning(f"Folds file is not found for Module {self.module_name}")
return
if self.fold_file.suffix != ".json":
raise ValueError(f"Expect folds file to be in .json format.")
raw_folds = json.load(open(self.fold_file))
# we actually don't need to verify the name of folds right?
for fold_name, fold_sets in raw_folds.items():
if set(fold_sets) != {"train_qids", "predict"}:
raise ValueError(f"Expect each fold to contain ['train_qids', 'predict'] fields.")
if set(fold_sets["predict"]) != {"dev", "test"}:
raise ValueError(f"Expect each fold to contain ['dev', 'test'] fields under 'predict'.")
logger.info("Folds file validation finishes.")
def validate_qrels_file(self):
if not hasattr(self, "qrel_file"):
logger.warning(f"Qrel file is not found for Module {self.module_name}")
return
n_dup, qrels = 0, defaultdict(dict)
with open(self.qrel_file) as f:
for line in f:
qid, _, docid, label = line.strip().split()
if docid in qrels[qid]:
n_dup += 1
if int(label) != qrels[qid][docid]:
raise ValueError(f"Found conflicting label in {self.qrel_file} for query {qid} and document {docid}.")
qrels[qid][docid] = int(label)
if n_dup > 0:
qrel_file_no_ext, ext = os.path.splitext(self.qrel_file)
dup_qrel_file = qrel_file_no_ext + "-contain-dup-entries" + ext
os.rename(self.qrel_file, dup_qrel_file)
write_qrels(qrels, self.qrel_file)
logger.warning(
f"Removed {n_dup} entries from the file {self.qrel_file}. The original version could be found in {dup_qrel_file}."
)
logger.info("Qrel file validation finishes.")
def validate_query_alignment(self):
topic_qids = set(self.topics[self.query_type])
qrels_qids = set(self.qrels)
for fold_name, fold_sets in self.folds.items():
# check if there are overlap between training, dev, and test set
train_qids, dev_qids, test_qids = (
set(fold_sets["train_qids"]),
set(fold_sets["predict"]["dev"]),
set(fold_sets["predict"]["test"]),
)
if len(train_qids & dev_qids) > 0:
logger.warning(
f"Found {len(train_qids & dev_qids)} overlap queries between training and dev set in fold {fold_name}."
)
if len(train_qids & test_qids) > 0:
logger.warning(
f"Found {len(train_qids & test_qids)} overlap queries between training and test set in fold {fold_name}."
)
if len(dev_qids & test_qids) > 0:
logger.warning(
f"Found {len(dev_qids & test_qids)} overlap queries between dev and test set in fold {fold_name}."
)
# check if the topics, qrels, and folds file share a reasonable set (if not all) of queries
folds_qids = train_qids | dev_qids | test_qids
n_overlap = len(set(topic_qids) & set(qrels_qids) & set(folds_qids))
if not len(topic_qids) == len(qrels_qids) == len(folds_qids) == n_overlap:
logger.warning(
f"Number of queries are not aligned across topics, qrels and folds in fold {fold_name}: {len(topic_qids)} queries in topics file, {len(qrels_qids)} queries in qrels file, {len(folds_qids)} queries in folds file; {n_overlap} overlap queries found among the three."
)
# check if any topic in folds cannot be found in topics file
for set_name, set_qids in zip(["training", "dev", "test"], [train_qids, dev_qids, test_qids]):
if len(set_qids - topic_qids) > 0:
raise ValueError(
f"{len(set_qids - topic_qids)} queries in {set_name} set of fold {fold_name} cannot be found in topic file."
)
logger.info("Query Alignment validation finishes.")
def _validate(self):
"""Rewrite the files that contain invalid (duplicate) entries, and remove the currently loaded variables"""
build_f(self)
validate_folds_file(self)
validate_qrels_file(self)
validate_query_alignment(self)
return _validate
class Benchmark(ModuleBase):
"""Base class for Benchmark modules. The purpose of a Benchmark is to provide the data needed to run an experiment, such as queries, folds, and relevance judgments.
Modules should provide:
- a ``topics`` dict mapping query ids (*qids*) to *queries*
- a ``qrels`` dict mapping *qids* to *docids* and *relevance labels*
- a ``folds`` dict mapping a fold name to *training*, *dev* (validation), and *testing* qids
- if these can be loaded from files in standard formats, they can be specified by setting the ``topic_file``, ``qrel_file``, and ``fold_file``, respectively, rather than by setting the above attributes directly
"""
module_type = "benchmark"
qrel_file = None
topic_file = None
fold_file = None
query_type = None
relevance_level = 1
""" Documents with a relevance label >= relevance_level will be considered relevant.
This corresponds to trec_eval's --level_for_rel (and is passed to pytrec_eval as relevance_level). """
use_train_as_dev = True
""" Whether to use training set as validate set when there is no training needed,
e.g. for traditional IR algorithms like BM25 """
@property
def qrels(self):
if not hasattr(self, "_qrels"):
self._qrels = load_qrels(self.qrel_file)
return self._qrels
@property
def topics(self):
if not hasattr(self, "_topics"):
self._topics = load_trec_topics(self.topic_file)
return self._topics
@property
def folds(self):
if not hasattr(self, "_folds"):
self._folds = json.load(open(self.fold_file, "rt"), parse_int=str)
return self._folds
@property
def non_nn_dev(self):
dev_per_fold = {fold_name: deepcopy(folds["predict"]["dev"]) for fold_name, folds in self.folds.items()}
if self.use_train_as_dev:
for fold_name, folds in self.folds.items():
dev_per_fold[fold_name].extend(folds["train_qids"])
return dev_per_fold
def get_topics_file(self, query_sets=None):
"""Returns path to a topics file in TSV format containing queries from query_sets.
query_sets may contain any combination of 'train', 'dev', and 'test'.
All are returned if query_sets is None."""
if query_sets:
query_sets = set(query_sets)
invalid = query_sets - {"train", "test", "dev"}
if invalid:
raise ValueError(f"query_sets contains invalid fold names: {invalid}")
query_sets = "_".join(sorted(query_sets))
valid_qids = set()
if "train" in query_sets:
valid_qids.update(self.folds["train_qids"])
if "dev" in query_sets:
valid_qids.update(self.folds["predict"]["dev"])
if "test" in query_sets:
valid_qids.update(self.folds["predict"]["test"])
else:
query_sets = "all"
valid_qids = None
fn = self.get_cache_path() / f"topics-{query_sets}.tsv"
try:
with cached_file(fn) as tmp_fn:
with open(tmp_fn, "wt") as outf:
for qid, query in self.topics[self.query_type].items():
if query_sets == "all" or qid in valid_qids:
print(f"{qid}\t{query}", file=outf)
except TargetFileExists as e:
pass
return fn
@validate
def build(self):
return
class IRDBenchmark(Benchmark):
ird_dataset_names = []
@property
def qrels(self):
if not hasattr(self, "_qrels"):
self._qrels = self.ird_load_qrels()
return self._qrels
@property
def topics(self):
if not hasattr(self, "_topics"):
self._topics = self.ird_load_topics()
return self._topics
def ird_load_qrels(self):
qrels = {}
for name in self.ird_dataset_names:
dataset = ir_datasets.load(name)
for qrel in dataset.qrels_iter():
qrels.setdefault(qrel.query_id, {})
qrels[qrel.query_id][qrel.doc_id] = max(qrel.relevance, qrels[qrel.query_id].get(qrel.doc_id, -1))
return qrels
def ird_load_topics(self):
topics = {}
field = "description" if self.query_type == "desc" else self.query_type
for name in self.ird_dataset_names:
dataset = ir_datasets.load(name)
for query in dataset.queries_iter():
topics[query.query_id] = getattr(query, field).replace("\n", " ")
return {self.query_type: topics}
from profane import import_all_modules
from .dummy import DummyBenchmark
import_all_modules(__file__, __package__)
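# Illustrative example (not from the original package) of the structures the
# Benchmark docstring above describes; the fold name, qids, docids, and query
# text are hypothetical placeholders.
EXAMPLE_FOLDS = {
    "s1": {
        "train_qids": ["101", "102"],
        "predict": {"dev": ["103"], "test": ["104"]},
    }
}
EXAMPLE_QRELS = {"101": {"doc1": 1, "doc7": 0}}
EXAMPLE_TOPICS = {"title": {"101": "example query text"}}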
| 2.21875
| 2
|
src/logexception/exceptionhandler.py
|
nabeelraja/mip-python-training
| 0
|
12774764
|
<filename>src/logexception/exceptionhandler.py
'''
Create exceptions based on your inputs. Please follow the tasks below.
- Capture and handle system exceptions
- Create custom user-based exceptions
'''
class CustomInputError(Exception):
def __init__(self, *args, **kwargs):
print("Going through my own CustomInputError")
# Exception.__init__(self, *args, **kwargs)
class MyZeroDivisionException(ZeroDivisionError):
def __init__(self):
print("The data is not valid")
class DataNotValidException(TypeError):
def __init__(self):
print("The data contains Strings. Only numbers are expected in the input data")
| 3.578125
| 4
|
Module/CBAM.py
|
YuHe0108/cvmodule
| 0
|
12774765
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
The paper applies channel attention (CA) first, then spatial attention (SA).
Defined below:
channel attention output.shape: [b, 1, 1, filters]
spatial attention output.shape: [b, h, w, 1]
"""
def regularized_padded_conv(*args, **kwargs):
""" 定义一个3x3卷积!kernel_initializer='he_normal','glorot_normal'"""
return layers.Conv2D(
*args, **kwargs,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4)
)
def channel_attention_dense(inputs, filters, ratio=16):
avg_out = layers.GlobalAveragePooling2D()(inputs)
max_out = layers.GlobalMaxPool2D()(inputs)
out = tf.stack([avg_out, max_out], axis=1)
out = layers.Dense(filters // ratio,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros',
activation='relu'
)(out)
out = layers.Dense(filters,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros'
)(out)
out = tf.reduce_sum(out, axis=1)
out = layers.Activation('sigmoid')(out)
out = layers.Reshape((1, 1, out.shape[1]))(out)
return out
def channel_attention_conv(inputs, filters, ratio=16):
"""将全连接层替换为卷积层: channel attention 输出: [B, 1, 1, filters]"""
avg_out = layers.GlobalAveragePooling2D()(inputs)
max_out = layers.GlobalMaxPool2D()(inputs)
avg_out = layers.Reshape((1, 1, avg_out.shape[1]))(avg_out)
max_out = layers.Reshape((1, 1, max_out.shape[1]))(max_out)
out = layers.Concatenate(axis=3)([avg_out, max_out]) # [batch_size, 1, 1, dims+dims]
pool_out = [avg_out, max_out]
conv_out = []
for i in range(2):
out = layers.Conv2D(filters // ratio,
kernel_size=1, strides=1,
padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu
)(pool_out[i])
out = layers.Conv2D(filters,
kernel_size=1, strides=1, padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True
)(out)
conv_out.append(out)
conv_out = conv_out[0] + conv_out[1]
out = layers.Reshape((1, 1, filters))(conv_out)
out = layers.Activation('sigmoid')(out)
return out
class ChannelAttentionConv(layers.Layer):
def __init__(self, out_filters, ratio=16):
super(ChannelAttentionConv, self).__init__()
self.avg = layers.GlobalAveragePooling2D()
self.max = layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(
out_filters // ratio, kernel_size=1, strides=1, padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu)
self.conv2 = layers.Conv2D(
out_filters, kernel_size=1, strides=1, padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True)
def build(self, input_shape):
filter_size = input_shape[1]
input_filters = input_shape[-1]
self.conv_filter_size = layers.Conv2D(
input_filters, kernel_size=filter_size, strides=1, padding='valid',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True)
return
def call(self, inputs):
avg = self.avg(inputs)
max = self.max(inputs)
avg = layers.Reshape((1, 1, avg.shape[1]))(avg) # shape (None, 1, 1 feature)
max = layers.Reshape((1, 1, max.shape[1]))(max) # shape (None, 1, 1 feature)
avg_out = self.conv2(self.conv1(avg))
max_out = self.conv2(self.conv1(max))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
return out
class ChannelAttentionDense(layers.Layer):
"""channel attention 自定义类"""
def __init__(self, in_planes, ratio=16):
super(ChannelAttentionDense, self).__init__()
self.avg = layers.GlobalAveragePooling2D()
self.max = layers.GlobalMaxPooling2D()
self.fc1 = layers.Dense(in_planes // ratio,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros',
activation='relu')
self.fc2 = layers.Dense(in_planes,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros')
def build(self, input_shape):
pass
def call(self, inputs):
avg_out = self.fc2(self.fc1(self.avg(inputs)))
max_out = self.fc2(self.fc1(self.max(inputs)))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
out = layers.Reshape((1, 1, out.shape[1]))(out)
return out
class SpatialAttention(layers.Layer):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = layers.Conv2D(
filters=1, kernel_size=kernel_size, strides=1, activation='sigmoid',
padding='same', use_bias=False, kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4)
)
def call(self, inputs):
avg_out = tf.reduce_mean(inputs, axis=3) # [b, h, w]
max_out = tf.reduce_max(inputs, axis=3) # [b, h, w]
out = tf.stack([avg_out, max_out], axis=-1) # stack along a new last axis: [b, h, w, 2]
out = self.conv1(out) # [b, h, w, 1]
return out
def test_model(input_shape):
inputs = layers.Input(input_shape)
out = SpatialAttention()(inputs)
return tf.keras.Model(inputs, out)
if __name__ == '__main__':
model_ = test_model((32, 32, 64))
model_.summary()
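# Illustrative sketch (not part of the original module): a CBAM block that applies
# channel attention first and spatial attention second, as the module docstring
# states. The function name and default arguments are assumptions for this example.
def cbam_block(inputs, filters, ratio=16, kernel_size=7):
    x = inputs * ChannelAttentionConv(filters, ratio)(inputs)  # [b, h, w, c] * [b, 1, 1, c]
    x = x * SpatialAttention(kernel_size)(x)                   # [b, h, w, c] * [b, h, w, 1]
    return x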
| 2.828125
| 3
|
src/selfie_intersection/src/intersection_mock_client.py
|
KNR-Selfie/selfie_carolocup2020
| 10
|
12774766
|
#! /usr/bin/env python
from __future__ import print_function
import sys
import rospy
import actionlib
import time
from std_msgs.msg import Float32
from selfie_msgs.msg import PolygonArray
import selfie_msgs.msg
def intersection_client():
client = actionlib.SimpleActionClient('intersection', selfie_msgs.msg.intersectionAction)
client.wait_for_server()
goal = selfie_msgs.msg.intersectionGoal()
print("Sending goal")
client.send_goal(goal)
distance_pub=rospy.Publisher('/intersection_distance', Float32, queue_size=10)
distance=Float32(data=5)
time.sleep(0.5)
print("Sending mock (far) distance to intersection.")
distance_pub.publish(distance)
polygons = PolygonArray()
pub = rospy.Publisher('/obstacles', PolygonArray, queue_size=10)
time.sleep(0.5)
print("."),
pub.publish(polygons)
time.sleep(0.8)
print("."),
pub.publish(polygons)
distance.data=0.05
distance_pub.publish(distance)
time.sleep(0.8)
print("."),
pub.publish(polygons)
time.sleep(1)
print("."),
pub.publish(polygons)
time.sleep(1)
print("."),
pub.publish(polygons)
time.sleep(1)
print("."),
pub.publish(polygons)
print('mock obstacles sent')
client.wait_for_result()
print("Result achieved")
return client.get_result()
if __name__ == '__main__':
try:
rospy.init_node('intersection_mock_client_py')
result = intersection_client()
except rospy.ROSInterruptException:
print("program interrupted before completion", file=sys.stderr)
| 2.25
| 2
|
notebooks/icos_jupyter_notebooks/tools/visualization/bokeh_help_funcs/__init__.py
|
ICOS-Carbon-Portal/jupyter
| 6
|
12774767
|
"""
This folder contains help-functions to Bokeh visualizations
in Python.
There are functions that align 2nd-ary y-axis to primary
y-axis as well as functions that align 3 y-axes.
"""
__credits__ = "ICOS Carbon Portal"
__license__ = "GPL-3.0"
__version__ = "0.1.0"
__maintainer__ = "ICOS Carbon Portal, elaborated products team"
__email__ = ['<EMAIL>', '<EMAIL>']
__date__ = "2020-10-15"
| 1.617188
| 2
|
factory/tools/manual_glidein_submit.py
|
bbockelm/glideinWMS
| 0
|
12774768
|
#!/usr/bin/env python
import os
import sys
import ConfigParser
STARTUP_DIR = sys.path[0]
sys.path.append(os.path.join(STARTUP_DIR,".."))
sys.path.append(os.path.join(STARTUP_DIR,"../../lib"))
from glideinwms.factory.glideFactoryCredentials import SubmitCredentials
from glideinwms.factory.glideFactoryLib import submitGlideins
from glideinwms.factory.glideFactoryLib import ClientWeb
from glideinwms.lib.iniSupport import IniError
from glideinwms.lib.iniSupport import load_ini
from glideinwms.lib.iniSupport import cp_get
class ArgumentError(Exception): pass
def usage():
msg = """
Usage: manual_glidein_submit <ini_file>
ini_file: (REQUIRED) This file contains all the required information for a
glidein to be submitted and run on a remote site.
"""
print >> sys.stderr, msg
def check_args():
if len(sys.argv) > 2:
raise ArgumentError, "Too many arguments!"
if len(sys.argv) < 2:
raise ArgumentError, "You must specify an ini file!"
def main():
try:
check_args()
except ArgumentError, ae:
print >> sys.stderr, ae
usage()
try:
ini_path = sys.argv[1]
cp = load_ini(ini_path)
# get all the required elements and create the required objects
entry_name = cp_get(cp, "entry", "entry_name", "", throw_exception=True)
client_name = cp_get(cp, "entry", "client_name", "", throw_exception=True)
nr_glideins = cp_get(cp, "entry", "nr_glideins", "", throw_exception=True)
frontend_name = cp_get(cp, "entry", "frontend_name", "", throw_exception=True)
user_name = cp_get(cp, "submit_credentials", "UserName", "", throw_exception=True)
security_class = cp_get(cp, "submit_credentials", "SecurityClass", "", throw_exception=True)
# create the params object
params = {}
for option in cp.options("params"):
params[option] = cp_get(cp, "params", option, "", throw_exception=True)
# create the client_web object
client_web_url = cp_get(cp, "client_web", "clientweb", "", throw_exception=True)
client_signtype = cp_get(cp, "client_web", "clientsigntype", "", throw_exception=True)
client_descript = cp_get(cp, "client_web", "clientdescript", "", throw_exception=True)
client_sign = cp_get(cp, "client_web", "clientsign", "", throw_exception=True)
client_group = cp_get(cp, "client_web", "clientgroup", "", throw_exception=True)
client_group_web_url = cp_get(cp, "client_web", "clientwebgroup", "", throw_exception=True)
client_group_descript = cp_get(cp, "client_web", "clientdescriptgroup", "", throw_exception=True)
client_group_sign = cp_get(cp, "client_web", "clientsigngroup", "", throw_exception=True)
client_web = ClientWeb(client_web_url, client_signtype, client_descript, client_sign,
client_group, client_group_web_url, client_group_descript, client_group_sign)
# create the submit_credentials object
credentials = SubmitCredentials(user_name, security_class)
for option in cp.options("security_credentials"):
credentials.add_security_credential(option, cp_get(cp, "security_credentials", option, "", throw_exception=True))
for option in cp.options("identity_credentials"):
credentials.add_identity_credential(option, cp_get(cp, "identity_credentials", option, "", throw_exception=True))
# call the submit
submitGlideins(entry_name, client_name, nr_glideins, frontend_name, credentials, client_web, params)
except IniError, ie:
print sys.stderr, "ini file error make this message better"
except Exception, ex:
print sys.stderr, "general error make this message better"
if __name__ == "__main__":
sys.exit(main())
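# Illustrative ini skeleton added for reference (not shipped with the original
# script). Section and option names follow the cp_get calls above; every value,
# and the option names inside [params], [security_credentials], and
# [identity_credentials], are hypothetical placeholders.
EXAMPLE_INI = """
[entry]
entry_name = EXAMPLE_ENTRY
client_name = example_client
nr_glideins = 1
frontend_name = example_frontend

[submit_credentials]
UserName = exampleuser
SecurityClass = frontend

[params]
example_param = example_value

[client_web]
clientweb = http://example.org/vofrontend/stage
clientsigntype = sha1
clientdescript = description.abc.cfg
clientsign = 0123abc
clientgroup = main
clientwebgroup = http://example.org/vofrontend/stage/group_main
clientdescriptgroup = description.def.cfg
clientsigngroup = 0456def

[security_credentials]
example_proxy = /path/to/proxy

[identity_credentials]
example_identity = example-id
"""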
| 2.296875
| 2
|
ctc_decoder/best_path.py
|
a-sneddon/CTCDecoder
| 0
|
12774769
|
from itertools import groupby
import numpy as np
def best_path(mat: np.ndarray, labels: str) -> str:
"""Best path (greedy) decoder.
Take best-scoring character per time-step, then remove repeated characters and CTC blank characters.
See dissertation of Graves, p63.
Args:
mat: Output of neural network of shape TxC.
labels: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
# get char indices along best path
best_path_indices = np.argmax(mat, axis=1)
# collapse best path (using itertools.groupby), map to chars, join char list to string
blank_idx = len(labels)
best_chars_collapsed = [labels[k] for k, _ in groupby(best_path_indices) if k != blank_idx]
res = ''.join(best_chars_collapsed)
return res
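# Tiny usage example added for illustration (not from the original module); the
# matrix values are made up. With labels "ab", the CTC blank has index 2.
if __name__ == '__main__':
    mat = np.array([
        [0.8, 0.1, 0.1],  # argmax -> 'a'
        [0.8, 0.1, 0.1],  # repeated 'a' collapses
        [0.1, 0.1, 0.8],  # blank separates characters
        [0.1, 0.8, 0.1],  # argmax -> 'b'
    ])
    print(best_path(mat, 'ab'))  # prints: ab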
| 3.015625
| 3
|
marketplace/vm-solution/cluster.py
|
isabella232/datashare-toolkit
| 0
|
12774770
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create configuration to deploy GKE cluster."""
import six
def GenerateConfig(context):
"""Generate YAML resource configuration."""
name_prefix = context.env['deployment'] + '-' + context.env['name']
cluster_name = 'datashare-cluster-resource'
actual_cluster_name = 'datashare'
type_name = name_prefix + '-type'
cluster_version = '1.16'
workload_pool = context.env['project'] + '.svc.id.goog'
machine_type = 'e2-standard-2'
resources = [
{
'name': cluster_name,
'type': 'container.v1.cluster',
#'metadata': {
# 'dependsOn': ['delete-api']
#},
'properties': {
'zone': context.properties['zone'],
'cluster': {
'name': actual_cluster_name,
'initialClusterVersion': cluster_version,
'initialNodeCount': 3,
'ipAllocationPolicy': {
'useIpAliases': True,
},
'workloadIdentityConfig': {
'workloadPool': workload_pool,
},
'addonsConfig': {
'horizontalPodAutoscaling': {
'disabled': False,
},
'httpLoadBalancing': {
'disabled': False,
},
'cloudRunConfig': {
'disabled': False,
}
},
'nodeConfig': {
'machineType': machine_type,
'oauthScopes': [
'https://www.googleapis.com/auth/' + s
for s in [
'compute',
'devstorage.read_only',
'logging.write',
'monitoring'
]
]
}
}
}
}
]
outputs = []
return {'resources': resources, 'outputs': outputs}
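# Illustrative sketch (not part of the original template): exercising GenerateConfig
# locally with a stand-in for the Deployment Manager context object. All values
# below are hypothetical placeholders.
if __name__ == '__main__':
    class _FakeContext(object):
        env = {'deployment': 'demo', 'name': 'gke', 'project': 'my-project'}
        properties = {'zone': 'us-central1-a'}
    config = GenerateConfig(_FakeContext())
    print(config['resources'][0]['name'])  # prints: datashare-cluster-resource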
| 1.601563
| 2
|
matrix.py
|
sumnerevans/math-utils
| 0
|
12774771
|
<reponame>sumnerevans/math-utils<filename>matrix.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
from fractions import Fraction
class Matrix:
def __init__(self, data=None):
self.data = data
def __getitem__(self, arg):
return self.data[arg]
def __len__(self):
return len(self.data)
def __iter__(self):
for row in self.data:
yield row
def print(self):
for row in self.data:
print('|' + ' '.join([str(round(x, 5)) for x in row]) + '|')
def validate(self):
if len(self.data) == 0: return False
n = len(self.data[0])
for row in self.data:
if len(row) != n: return False
return True
def is_square(self):
return len(self.data) == len(self.data[0])
def interchange(self, r1, r2):
self.data[r1], self.data[r2] = self.data[r2], self.data[r1]
def scale(self, r1, scale):
self.data[r1] = [Fraction(scale * c) for c in self.data[r1]]
# (scale)R1 + R2 -> R2
def replace(self, scale, r1, r2):
self.data[r2] = [Fraction((scale * c) + self.data[r2][i]) for i, c in enumerate(self.data[r1])]
def prompt_for_matrix(self, prompt_text, requires_square=False):
requires_entry = True
while requires_entry:
print('\n%s' % prompt_text)
self.data = []
i = 0
while True:
row = input().strip()
if row.lower() == 'done': break
if len(row) == 0: continue
# Add and populate the row
self.data.append([Fraction(x) for x in row.split()])
i += 1
requires_entry = not self.validate() or (requires_square and not self.is_square())
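# A short usage sketch added for illustration (not from the original file): row
# reduction of a 2x2 system with the operations defined above.
if __name__ == '__main__':
    m = Matrix([[Fraction(2), Fraction(4)], [Fraction(1), Fraction(3)]])
    m.scale(0, Fraction(1, 2))     # (1/2)R1 -> R1 gives [1, 2]
    m.replace(Fraction(-1), 0, 1)  # (-1)R1 + R2 -> R2 gives [0, 1]
    m.print()
    # |1 2|
    # |0 1|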
| 4.0625
| 4
|
tests/test_data/test_datasets/__init__.py
|
rlleshi/mmaction2
| 1,870
|
12774772
|
<filename>tests/test_data/test_datasets/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseTestDataset
__all__ = ['BaseTestDataset']
| 1.007813
| 1
|
Curso de Python USP Part1/Exercicios/ProgramaCompleto_Jogo_NIM.py
|
JorgeTranin/Cursos_Coursera
| 0
|
12774773
|
def computador_escolhe_jogada(n, m):
pc_remove = 1
while pc_remove != m:
if (n - pc_remove) % (m+1) == 0:
return pc_remove
else:
pc_remove += 1
return pc_remove
def usuario_escolhe_jogada(n, m):
while True:
usuario_removeu = int(input('Quantas peças você vai tirar? '))
if usuario_removeu > m or usuario_removeu <= 0:
print('Oops! Jogada inválida! Tente de novo.')
else:
break
return usuario_removeu
def campeonato():
for i in range(0, 3):
print()
print(f'**** Rodada {i+1} ****')
print()
partida()
print()
print('**** Final do campeonato! ****')
print()
print('Placar: Você 0 X 3 Computador')
def partida():
n = int(input('Quantas peças? '))
m = int(input('Limite de peças por jogada? '))
while n < m:
print('As peças tem que conter um valor maior que as jogadas. Tente de novo!')
n = int(input('Quantas peças? '))
m = int(input('Limite de peças por jogada? '))
print()
usuario = False
if n % (m+1) == 0:
print('Você começa')
usuario = True
else:
print('Computador começa')
while n > 0:
if usuario:
escolha_do_usuario = usuario_escolhe_jogada(n, m)
print()
if escolha_do_usuario == 1:
print('Você tirou uma peça.')
else:
print(f'Voce tirou {escolha_do_usuario} peças.')
if n == 1:
print('Agora resta apenas uma peça no tabuleiro.')
elif n != 0:
print(
f'Agora resta {n - escolha_do_usuario} peça no tabuleiro.')
n -= escolha_do_usuario
usuario = False
else:
escolha_do_pc = computador_escolhe_jogada(n, m)
print()
if escolha_do_pc == 1:
print('O computador tirou uma peça.')
else:
print(f'O computador tirou {escolha_do_pc} peças.')
if n == 1:
print('Agora resta apenas uma peça no tabuleiro.')
elif n != 0:
print(f'Agora resta {n - escolha_do_pc} peça no tabuleiro.')
print()
n -= escolha_do_pc
usuario = True
print('Fim do jogo! O computador ganhou!')
# Main program
print()
print('Bem-vindo ao jogo do NIM! Escolha:')
print()
while True:
print('1 - para jogar uma partida isolada')
partida_ou_campeonato = int(input('2 - para jogar um campeonato '))
if partida_ou_campeonato == 2:
print()
print('Voce escolheu um campeonato!')
print()
campeonato()
break
elif partida_ou_campeonato == 1:
print()
print('Voce escolheu partida isolada')
print()
partida()
break
else:
print('Numero invalido tente de novo! ')
| 3.875
| 4
|
setup.py
|
ukitinu/event-reminder
| 1
|
12774774
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.md') as f:
readme = f.read()
setup(
name="event-reminder",
version="1.0.0",
description="Show messages at a specific date with crontab-like scheduling expressions.",
author="ukitinu",
author_email="<EMAIL>",
url="https://github.com/ukitinu/event-reminder",
packages=['eventreminder', 'eventreminder.tests'],
license="MIT",
long_description=readme,
long_description_content_type='text/markdown',
keywords='crontab birthday',
include_package_data=True,
)
| 1.445313
| 1
|
bin/email_sender.py
|
vconstellation/steam-forum-scraper
| 0
|
12774775
|
import smtplib
import json
import keyring
from datetime import date
from email.message import EmailMessage
def send_emails(posts):
# get login and service from cfg
# then get pass from keyring
with open('config.json', 'r') as f:
config = json.load(f)
service = config["MAIL"]["service"]
login = config["MAIL"]["login"]
password = keyring.get_password(service, login)
# format mail body
mail_body = ""
curr_date = date.today()
for i in posts:
mail_body += i['date'] + "\n" + i['thread'] + "\n" + i['link'] + "\n\n"
# init EmailMessage
msg = EmailMessage()
msg.set_content(
f"There are {len(posts)} threads with new posts! \n They are as follows:\n {mail_body}"
)
msg['From'] = login
msg['To'] = config['MAIL']['recipients']
msg['Subject'] = f'Scraper\'s new mail - {curr_date}'
try:
smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_server.ehlo()
smtp_server.login(login, password)
smtp_server.send_message(msg)
smtp_server.close()
except Exception as e:
print(e)
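# Illustrative config.json layout assumed by send_emails (all values are placeholders);
# the SMTP password itself is expected in the OS keyring under the same service/login
# pair, e.g. keyring.set_password("gmail", "scraper@example.com", "<app password>"):
#
# {
#     "MAIL": {
#         "service": "gmail",
#         "login": "scraper@example.com",
#         "recipients": "me@example.com"
#     }
# }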
| 2.84375
| 3
|
stream_alert/rule_processor/main.py
|
serhatcan/streamalert
| 1
|
12774776
|
<reponame>serhatcan/streamalert
'''
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import importlib
import os
from stream_alert.rule_processor.handler import StreamAlert
from rules import (
sample_matchers
)
modules_to_import = set()
# walk the rules directory to dynamically import rule modules
for root, dirs, files in os.walk('rules/'):
# ignore old rule files and helpers
if root in ['rules/helpers', 'rules/']:
continue
# ignore __init__.py files
filtered_files = filter(lambda x: not x.startswith('.') and
not x.endswith('.pyc') and
not x.startswith('__init__'), files)
for import_file in filtered_files:
package_path = root.replace('/', '.')
import_module = os.path.splitext(import_file)[0]
modules_to_import.add('{}.{}'.format(package_path, import_module))
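        # e.g. a hypothetical rules/my_rules/suspicious_logins.py is added here as
        # 'rules.my_rules.suspicious_logins' and then imported in the loop below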
for module_name in modules_to_import:
importlib.import_module(module_name)
def handler(event, context):
"""Main Lambda handler function"""
StreamAlert(context).run(event)
| 1.679688
| 2
|
removing_nonconserved.py
|
tipputa/Reversals_identification
| 0
|
12774777
|
<gh_stars>0
#!/usr/bin/env python
"ordering as well as rotation of the genomes is done for almost conserved genes"
"missing genes are stored in sorted order"
from xlrd import open_workbook
import xlsxwriter
wb = open_workbook("FILE.xlsx")
workbookfinal = xlsxwriter.Workbook("removed_not_conserved"+'.xlsx')
worksheetfinal = workbookfinal.add_worksheet()
values = []
for s in wb.sheets():
    for row in range(s.nrows):
        counter = 0  # number of missing ('-') entries in this row
        row_value = []
        for col in range(s.ncols):
            value = s.cell(row, col).value
            if value == '-':
                counter += 1
            try:
                value = int(value)
            except (TypeError, ValueError):
                pass
            row_value.append(value)
        # keep the row only if at most one gene is missing
        if counter <= 1:
            values.append(row_value)
# write the kept rows to the new workbook
for i, val in enumerate(values):
    for j, cell_value in enumerate(val):
        worksheetfinal.write(i, j, cell_value)
workbookfinal.close()
| 3.265625
| 3
|
fluent.pygments/fluent/pygments/cli.py
|
shlomyb-di/python-fluent
| 155
|
12774778
|
import argparse
import sys
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from fluent.pygments.lexer import FluentLexer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path')
args = parser.parse_args()
with open(args.path) as fh:
code = fh.read()
highlight(code, FluentLexer(), Terminal256Formatter(), sys.stdout)
if __name__ == '__main__':
main()
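# Illustrative invocation (the .ftl path is a placeholder); the module can be run directly, e.g.:
#   python -m fluent.pygments.cli messages.ftl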
| 2.390625
| 2
|
steam/ext/dota2/protobufs/dota_match_metadata.py
|
Gobot1234/steam-ext-dota2
| 0
|
12774779
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: dota_match_metadata.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import List
import betterproto
from .base_gcmessages import CsoEconItem
from .dota_gcmessages_common import CMsgDotaMatch, CMsgMatchTips
from .dota_gcmessages_common_match_management import CLobbyTimedRewardDetails, CMsgMatchMatchmakingStats, CMvpData
from .dota_shared_enums import EdotammrBoostType, EEvent
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataFile(betterproto.Message):
version: int = betterproto.int32_field(1)
match_id: int = betterproto.uint64_field(2)
metadata: "CdotaMatchMetadata" = betterproto.message_field(3)
private_metadata: bytes = betterproto.bytes_field(5)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadata(betterproto.Message):
teams: List["CdotaMatchMetadataTeam"] = betterproto.message_field(1)
item_rewards: List["CLobbyTimedRewardDetails"] = betterproto.message_field(2)
lobby_id: int = betterproto.fixed64_field(3)
report_until_time: int = betterproto.fixed64_field(4)
event_game_custom_table: bytes = betterproto.bytes_field(5)
primary_event_id: int = betterproto.uint32_field(6)
match_tips: List["CMsgMatchTips"] = betterproto.message_field(7)
matchmaking_stats: "CMsgMatchMatchmakingStats" = betterproto.message_field(8)
mvp_data: "CMvpData" = betterproto.message_field(9)
guild_challenge_progress: List["CdotaMatchMetadataGuildChallengeProgress"] = betterproto.message_field(10)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeam(betterproto.Message):
dota_team: int = betterproto.uint32_field(1)
players: List["CdotaMatchMetadataTeamPlayer"] = betterproto.message_field(2)
graph_experience: List[float] = betterproto.float_field(3)
graph_gold_earned: List[float] = betterproto.float_field(4)
graph_net_worth: List[float] = betterproto.float_field(5)
cm_first_pick: bool = betterproto.bool_field(6)
cm_captain_player_id: int = betterproto.uint32_field(7)
cm_bans: List[int] = betterproto.uint32_field(8)
cm_picks: List[int] = betterproto.uint32_field(9)
cm_penalty: int = betterproto.uint32_field(10)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamPlayerKill(betterproto.Message):
victim_slot: int = betterproto.uint32_field(1)
count: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamItemPurchase(betterproto.Message):
item_id: int = betterproto.uint32_field(1)
purchase_time: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamInventorySnapshot(betterproto.Message):
item_id: List[int] = betterproto.uint32_field(1)
game_time: int = betterproto.int32_field(2)
kills: int = betterproto.uint32_field(3)
deaths: int = betterproto.uint32_field(4)
assists: int = betterproto.uint32_field(5)
level: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamAutoStyleCriteria(betterproto.Message):
name_token: int = betterproto.uint32_field(1)
value: float = betterproto.float_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamStrangeGemProgress(betterproto.Message):
kill_eater_type: int = betterproto.uint32_field(1)
gem_item_def_index: int = betterproto.uint32_field(2)
required_hero_id: int = betterproto.uint32_field(3)
starting_value: int = betterproto.uint32_field(4)
ending_value: int = betterproto.uint32_field(5)
owner_item_def_index: int = betterproto.uint32_field(6)
owner_item_id: int = betterproto.uint64_field(7)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamVictoryPrediction(betterproto.Message):
item_id: int = betterproto.uint64_field(1)
item_def_index: int = betterproto.uint32_field(2)
starting_value: int = betterproto.uint32_field(3)
is_victory: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamSubChallenge(betterproto.Message):
slot_id: int = betterproto.uint32_field(1)
start_value: int = betterproto.uint32_field(2)
end_value: int = betterproto.uint32_field(3)
completed: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamCavernChallengeResult(betterproto.Message):
completed_path_id: int = betterproto.uint32_field(1)
claimed_room_id: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamActionGrant(betterproto.Message):
action_id: int = betterproto.uint32_field(1)
quantity: int = betterproto.uint32_field(2)
audit: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamCandyGrant(betterproto.Message):
points: int = betterproto.uint32_field(1)
reason: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamEventData(betterproto.Message):
event_id: int = betterproto.uint32_field(1)
event_points: int = betterproto.uint32_field(2)
challenge_instance_id: int = betterproto.uint32_field(3)
challenge_quest_id: int = betterproto.uint32_field(4)
challenge_quest_challenge_id: int = betterproto.uint32_field(5)
challenge_completed: bool = betterproto.bool_field(6)
challenge_rank_completed: int = betterproto.uint32_field(7)
challenge_rank_previously_completed: int = betterproto.uint32_field(8)
event_owned: bool = betterproto.bool_field(9)
sub_challenges_with_progress: List["CdotaMatchMetadataTeamSubChallenge"] = betterproto.message_field(10)
wager_winnings: int = betterproto.uint32_field(11)
cavern_challenge_active: bool = betterproto.bool_field(12)
cavern_challenge_winnings: int = betterproto.uint32_field(13)
amount_wagered: int = betterproto.uint32_field(14)
periodic_point_adjustments: int = betterproto.uint32_field(16)
cavern_challenge_map_results: List["CdotaMatchMetadataTeamCavernChallengeResult"] = betterproto.message_field(17)
cavern_challenge_plus_shard_winnings: int = betterproto.uint32_field(18)
actions_granted: List["CdotaMatchMetadataTeamActionGrant"] = betterproto.message_field(19)
cavern_crawl_map_variant: int = betterproto.uint32_field(20)
team_wager_bonus_pct: int = betterproto.uint32_field(21)
wager_streak_pct: int = betterproto.uint32_field(22)
candy_points_granted: List["CdotaMatchMetadataTeamCandyGrant"] = betterproto.message_field(23)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamGauntletProgress(betterproto.Message):
gauntlet_tier: int = betterproto.uint32_field(2)
gauntlet_wins: int = betterproto.uint32_field(3)
gauntlet_losses: int = betterproto.uint32_field(4)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamPlayer(betterproto.Message):
account_id: int = betterproto.uint32_field(1)
ability_upgrades: List[int] = betterproto.uint32_field(2)
player_slot: int = betterproto.uint32_field(3)
equipped_econ_items: List["CsoEconItem"] = betterproto.message_field(4)
kills: List["CdotaMatchMetadataTeamPlayerKill"] = betterproto.message_field(5)
items: List["CdotaMatchMetadataTeamItemPurchase"] = betterproto.message_field(6)
avg_kills_x16: int = betterproto.uint32_field(7)
avg_deaths_x16: int = betterproto.uint32_field(8)
avg_assists_x16: int = betterproto.uint32_field(9)
avg_gpm_x16: int = betterproto.uint32_field(10)
avg_xpm_x16: int = betterproto.uint32_field(11)
best_kills_x16: int = betterproto.uint32_field(12)
best_assists_x16: int = betterproto.uint32_field(13)
best_gpm_x16: int = betterproto.uint32_field(14)
best_xpm_x16: int = betterproto.uint32_field(15)
win_streak: int = betterproto.uint32_field(16)
best_win_streak: int = betterproto.uint32_field(17)
fight_score: float = betterproto.float_field(18)
farm_score: float = betterproto.float_field(19)
support_score: float = betterproto.float_field(20)
push_score: float = betterproto.float_field(21)
level_up_times: List[int] = betterproto.uint32_field(22)
graph_net_worth: List[float] = betterproto.float_field(23)
inventory_snapshot: List["CdotaMatchMetadataTeamInventorySnapshot"] = betterproto.message_field(24)
avg_stats_calibrated: bool = betterproto.bool_field(25)
auto_style_criteria: List["CdotaMatchMetadataTeamAutoStyleCriteria"] = betterproto.message_field(26)
event_data: List["CdotaMatchMetadataTeamEventData"] = betterproto.message_field(29)
strange_gem_progress: List["CdotaMatchMetadataTeamStrangeGemProgress"] = betterproto.message_field(30)
hero_xp: int = betterproto.uint32_field(31)
camps_stacked: int = betterproto.uint32_field(32)
victory_prediction: List["CdotaMatchMetadataTeamVictoryPrediction"] = betterproto.message_field(33)
lane_selection_flags: int = betterproto.uint32_field(34)
rampages: int = betterproto.uint32_field(35)
triple_kills: int = betterproto.uint32_field(36)
aegis_snatched: int = betterproto.uint32_field(37)
rapiers_purchased: int = betterproto.uint32_field(38)
couriers_killed: int = betterproto.uint32_field(39)
net_worth_rank: int = betterproto.uint32_field(40)
support_gold_spent: int = betterproto.uint32_field(41)
observer_wards_placed: int = betterproto.uint32_field(42)
sentry_wards_placed: int = betterproto.uint32_field(43)
wards_dewarded: int = betterproto.uint32_field(44)
stun_duration: float = betterproto.float_field(45)
rank_mmr_boost_type: "EdotammrBoostType" = betterproto.enum_field(46)
gauntlet_progress: "CdotaMatchMetadataTeamGauntletProgress" = betterproto.message_field(47)
contract_progress: List["CdotaMatchMetadataTeamPlayerContractProgress"] = betterproto.message_field(48)
guild_ids: List[int] = betterproto.uint32_field(49)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamPlayerContractProgress(betterproto.Message):
guild_id: int = betterproto.uint32_field(1)
event_id: int = betterproto.uint32_field(2)
challenge_instance_id: int = betterproto.uint32_field(3)
challenge_parameter: int = betterproto.uint32_field(4)
contract_stars: int = betterproto.uint32_field(5)
contract_slot: int = betterproto.uint32_field(6)
completed: bool = betterproto.bool_field(7)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataGuildChallengeProgress(betterproto.Message):
guild_id: int = betterproto.uint32_field(1)
event_id: "EEvent" = betterproto.enum_field(2)
challenge_instance_id: int = betterproto.uint32_field(3)
challenge_parameter: int = betterproto.uint32_field(4)
challenge_timestamp: int = betterproto.uint32_field(5)
challenge_progress_at_start: int = betterproto.uint32_field(6)
challenge_progress_accumulated: int = betterproto.uint32_field(7)
individual_progress: List["CdotaMatchMetadataGuildChallengeProgressIndividualProgress"] = betterproto.message_field(
8
)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataGuildChallengeProgressIndividualProgress(betterproto.Message):
account_id: int = betterproto.uint32_field(1)
progress: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadata(betterproto.Message):
teams: List["CdotaMatchPrivateMetadataTeam"] = betterproto.message_field(1)
graph_win_probability: List[float] = betterproto.float_field(2)
string_names: List["CdotaMatchPrivateMetadataStringName"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataStringName(betterproto.Message):
id: int = betterproto.uint32_field(1)
name: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeam(betterproto.Message):
dota_team: int = betterproto.uint32_field(1)
players: List["CdotaMatchPrivateMetadataTeamPlayer"] = betterproto.message_field(2)
buildings: List["CdotaMatchPrivateMetadataTeamBuilding"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayer(betterproto.Message):
account_id: int = betterproto.uint32_field(1)
player_slot: int = betterproto.uint32_field(2)
position_stream: bytes = betterproto.bytes_field(3)
combat_segments: List["CdotaMatchPrivateMetadataTeamPlayerCombatSegment"] = betterproto.message_field(4)
damage_unit_names: List[str] = betterproto.string_field(5)
buff_records: List["CdotaMatchPrivateMetadataTeamPlayerBuffRecord"] = betterproto.message_field(6)
graph_kills: List[float] = betterproto.float_field(7)
graph_deaths: List[float] = betterproto.float_field(8)
graph_assists: List[float] = betterproto.float_field(9)
graph_lasthits: List[float] = betterproto.float_field(10)
graph_denies: List[float] = betterproto.float_field(11)
gold_received: "CdotaMatchPrivateMetadataTeamPlayerGoldReceived" = betterproto.message_field(12)
xp_received: "CdotaMatchPrivateMetadataTeamPlayerXpReceived" = betterproto.message_field(13)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegment(betterproto.Message):
game_time: int = betterproto.int32_field(1)
damage_by_ability: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbility"
] = betterproto.message_field(2)
healing_by_ability: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbility"
] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbility(betterproto.Message):
source_unit_index: int = betterproto.uint32_field(3)
ability_id: int = betterproto.uint32_field(1)
by_hero_targets: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbilityByHeroTarget"
] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbilityByHeroTarget(betterproto.Message):
hero_id: int = betterproto.uint32_field(1)
damage: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbility(betterproto.Message):
source_unit_index: int = betterproto.uint32_field(3)
ability_id: int = betterproto.uint32_field(1)
by_hero_targets: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbilityByHeroTarget"
] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbilityByHeroTarget(betterproto.Message):
hero_id: int = betterproto.uint32_field(1)
healing: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerBuffRecord(betterproto.Message):
buff_ability_id: int = betterproto.uint32_field(1)
buff_modifier_name: str = betterproto.string_field(3)
by_hero_targets: List["CdotaMatchPrivateMetadataTeamPlayerBuffRecordByHeroTarget"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerBuffRecordByHeroTarget(betterproto.Message):
hero_id: int = betterproto.uint32_field(1)
elapsed_duration: float = betterproto.float_field(2)
is_hidden: bool = betterproto.bool_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerGoldReceived(betterproto.Message):
creep: int = betterproto.uint32_field(1)
heroes: int = betterproto.uint32_field(2)
bounty_runes: int = betterproto.uint32_field(3)
passive: int = betterproto.uint32_field(4)
buildings: int = betterproto.uint32_field(5)
abilities: int = betterproto.uint32_field(6)
wards: int = betterproto.uint32_field(7)
other: int = betterproto.uint32_field(8)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerXpReceived(betterproto.Message):
creep: int = betterproto.uint32_field(1)
heroes: int = betterproto.uint32_field(2)
roshan: int = betterproto.uint32_field(3)
tome_of_knowledge: int = betterproto.uint32_field(4)
outpost: int = betterproto.uint32_field(5)
other: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamBuilding(betterproto.Message):
unit_name: str = betterproto.string_field(1)
position_quant_x: int = betterproto.uint32_field(2)
position_quant_y: int = betterproto.uint32_field(3)
death_time: float = betterproto.float_field(4)
@dataclass(eq=False, repr=False)
class CMsgDotadpcMatch(betterproto.Message):
match: "CMsgDotaMatch" = betterproto.message_field(1)
metadata: "CdotaMatchMetadata" = betterproto.message_field(2)
| 1.398438
| 1
|
arrow/users/migrations/0003_application_hierarchy.py
|
AkhilGKrishnan/arrow
| 0
|
12774780
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-05 16:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20171105_1034'),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB', 'Embassy Attestation'), ('BNK1', 'Bank Loan - 1 Year'), ('BNK4', 'Bank Loan - 4 Years'), ('CHAR', 'Character Certificate'), ('NRSD', 'Non Receipt of Stipend'), ('NRLP', 'Non Receipt of Laptop'), ('NRSP', 'Non Receipt of Scholarship'), ('OTH', 'Other')], max_length=4)),
('other', models.CharField(blank=True, max_length=100)),
('applicant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hierarchy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('application_type', models.CharField(choices=[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB', 'Embassy Attestation'), ('BNK1', 'Bank Loan - 1 Year'), ('BNK4', 'Bank Loan - 4 Years'), ('CHAR', 'Character Certificate'), ('NRSD', 'Non Receipt of Stipend'), ('NRLP', 'Non Receipt of Laptop'), ('NRSP', 'Non Receipt of Scholarship'), ('OTH', 'Other')], max_length=4)),
('sl_no', models.IntegerField()),
('user', models.CharField(choices=[('st', 'STUDENT'), ('tu', 'TUTOR'), ('ho', 'HOD'), ('of', 'OFFICE STAFF')], max_length=2)),
],
),
]
| 1.625
| 2
|
tests/adapters/shell/mock_terminal_commands.py
|
FrancoisLopez/netman
| 38
|
12774781
|
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import time
from MockSSH import SSHCommand
class HangingCommand(SSHCommand):
def __init__(self, name, hang_time, *args):
self.name = name
self.hang_time = hang_time
self.protocol = None # set in __call__
def __call__(self, protocol, *args):
SSHCommand.__init__(self, protocol, self.name, *args)
return self
def start(self):
time.sleep(self.hang_time)
self.write("Done!\n")
self.exit()
class AmbiguousCommand(SSHCommand):
def __init__(self, name, *args):
self.name = name
self.protocol = None # set in __call__
def __call__(self, protocol, *args):
SSHCommand.__init__(self, protocol, self.name, *args)
return self
def start(self):
self.write("working -> done!\n")
self.exit()
class MultiAsyncWriteCommand(SSHCommand):
def __init__(self, name, count, interval, *args):
self.name = name
self.count = count
self.interval = interval
self.protocol = None # set in __call__
def __call__(self, protocol, *args):
SSHCommand.__init__(self, protocol, self.name, *args)
return self
def start(self):
for i in range(self.count):
self.write("Line %d\n" % (i + 1))
time.sleep(self.interval)
self.exit()
class SkippingLineCommand(SSHCommand):
def __init__(self, name, lines, *args):
self.name = name
self.lines = lines
self.protocol = None # set in __call__
def __call__(self, protocol, *args):
SSHCommand.__init__(self, protocol, self.name, *args)
return self
def start(self):
for _ in range(self.lines):
self.write("\r\n")
self.write("%s lines skipped!\n" % self.lines)
self.exit()
def exit_command_success(instance):
instance.protocol.call_command(instance.protocol.commands['_exit'])
def passwd_change_protocol_prompt(instance):
instance.protocol.prompt = "hostname#"
instance.protocol.password_input = False
def passwd_write_password_to_transport(instance):
instance.writeln("MockSSH: password is %s" % instance.valid_password)
class KeystrokeAnsweredCommand(SSHCommand):
def __init__(self, name):
self.name = name
self.protocol = None # set in __call__
def __call__(self, protocol, *args):
SSHCommand.__init__(self, protocol, self.name, *args)
return self
def start(self):
self.write("whatup?")
this = self
def finish():
this.writeln("k")
this.writeln("K pressed")
this.exit()
this.protocol.keyHandlers.pop("k")
self.protocol.keyHandlers.update({"k": finish})
| 2.390625
| 2
|
source/appModules/searchui.py
|
siddhartha-iitd/NVDA-Enhancements
| 0
|
12774782
|
<reponame>siddhartha-iitd/NVDA-Enhancements
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2015 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
import controlTypes
import api
import speech
from NVDAObjects.UIA import UIA
from NVDAObjects.UIA.edge import EdgeList
from NVDAObjects.IAccessible import IAccessible, ContentGenericClient
# Windows 10 Search UI suggestion list item
class SuggestionListItem(UIA):
role=controlTypes.ROLE_LISTITEM
def event_UIA_elementSelected(self):
focusControllerFor=api.getFocusObject().controllerFor
if len(focusControllerFor)>0 and focusControllerFor[0].appModule is self.appModule and self.name:
speech.cancelSpeech()
api.setNavigatorObject(self)
self.reportFocus()
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self,obj,clsList):
if isinstance(obj,UIA) and obj.role==controlTypes.ROLE_LISTITEM and isinstance(obj.parent,EdgeList):
clsList.insert(0,SuggestionListItem)
elif isinstance(obj,IAccessible):
try:
# #5288: Never use ContentGenericClient, as this uses displayModel
# which will freeze if the process is suspended.
clsList.remove(ContentGenericClient)
except ValueError:
pass
| 1.84375
| 2
|
biobb_wf_md_setup_mutations/python/workflow.py
|
bioexcel/biobb_workflows
| 2
|
12774783
|
#!/usr/bin/env python3
import time
import argparse
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_chemistry.ambertools.reduce_remove_hydrogens import reduce_remove_hydrogens
from biobb_structure_utils.utils.extract_molecule import extract_molecule
from biobb_structure_utils.utils.cat_pdb import cat_pdb
from biobb_model.model.fix_side_chain import fix_side_chain
from biobb_model.model.mutate import mutate
from biobb_md.gromacs.pdb2gmx import pdb2gmx
from biobb_md.gromacs.editconf import editconf
from biobb_md.gromacs.solvate import solvate
from biobb_md.gromacs.grompp import grompp
from biobb_md.gromacs.genion import genion
from biobb_md.gromacs.mdrun import mdrun
from biobb_md.gromacs.make_ndx import make_ndx
from biobb_analysis.gromacs.gmx_energy import gmx_energy
from biobb_analysis.gromacs.gmx_rgyr import gmx_rgyr
from biobb_analysis.gromacs.gmx_trjconv_str import gmx_trjconv_str
from biobb_analysis.gromacs.gmx_image import gmx_image
from biobb_analysis.gromacs.gmx_rms import gmx_rms
def main(config, system=None):
start_time = time.time()
conf = settings.ConfReader(config, system)
global_log, _ = fu.get_logs(path=conf.get_working_dir_path(), light_format=True)
global_prop = conf.get_prop_dic(global_log=global_log)
global_paths = conf.get_paths_dic()
global_log.info("step0_reduce_remove_hydrogens: Removing Hydrogens")
reduce_remove_hydrogens(**global_paths["step0_reduce_remove_hydrogens"], properties=global_prop["step0_reduce_remove_hydrogens"])
global_log.info("step1_extract_molecule: Extracting Protein")
extract_molecule(**global_paths["step1_extract_molecule"], properties=global_prop["step1_extract_molecule"])
global_log.info("step00_cat_pdb: Concatenating protein with included ions")
cat_pdb(**global_paths["step00_cat_pdb"], properties=global_prop["step00_cat_pdb"])
global_log.info("step2_fix_side_chain: Modeling the missing heavy atoms in the structure side chains")
fix_side_chain(**global_paths["step2_fix_side_chain"], properties=global_prop["step2_fix_side_chain"])
for mutation_number, mutation in enumerate(conf.properties['mutations']):
global_log.info('')
global_log.info("Mutation: %s %d/%d" % (mutation, mutation_number+1, len(conf.properties['mutations'])))
global_log.info('')
prop = conf.get_prop_dic(prefix=mutation, global_log=global_log)
paths = conf.get_paths_dic(prefix=mutation)
global_log.info("step3_mutate: Modeling mutation")
prop['step3_mutate']['mutation_list'] = mutation
paths['step3_mutate']['input_pdb_path'] = global_paths['step2_fix_side_chain']['output_pdb_path']
mutate(**paths["step3_mutate"], properties=prop["step3_mutate"])
global_log.info("step4_pdb2gmx: Generate the topology")
pdb2gmx(**paths["step4_pdb2gmx"], properties=prop["step4_pdb2gmx"])
global_log.info("step5_editconf: Create the solvent box")
editconf(**paths["step5_editconf"], properties=prop["step5_editconf"])
global_log.info("step6_solvate: Fill the solvent box with water molecules")
solvate(**paths["step6_solvate"], properties=prop["step6_solvate"])
global_log.info("step7_grompp_genion: Preprocess ion generation")
grompp(**paths["step7_grompp_genion"], properties=prop["step7_grompp_genion"])
global_log.info("step8_genion: Ion generation")
genion(**paths["step8_genion"], properties=prop["step8_genion"])
global_log.info("step9_grompp_min: Preprocess energy minimization")
grompp(**paths["step9_grompp_min"], properties=prop["step9_grompp_min"])
global_log.info("step10_mdrun_min: Execute energy minimization")
mdrun(**paths["step10_mdrun_min"], properties=prop["step10_mdrun_min"])
global_log.info("step100_make_ndx: Creating an index file for the whole system")
make_ndx(**paths["step100_make_ndx"], properties=prop["step100_make_ndx"])
global_log.info("step11_grompp_nvt: Preprocess system temperature equilibration")
grompp(**paths["step11_grompp_nvt"], properties=prop["step11_grompp_nvt"])
global_log.info("step12_mdrun_nvt: Execute system temperature equilibration")
mdrun(**paths["step12_mdrun_nvt"], properties=prop["step12_mdrun_nvt"])
global_log.info("step13_grompp_npt: Preprocess system pressure equilibration")
grompp(**paths["step13_grompp_npt"], properties=prop["step13_grompp_npt"])
global_log.info("step14_mdrun_npt: Execute system pressure equilibration")
mdrun(**paths["step14_mdrun_npt"], properties=prop["step14_mdrun_npt"])
global_log.info("step15_grompp_md: Preprocess free dynamics")
grompp(**paths["step15_grompp_md"], properties=prop["step15_grompp_md"])
global_log.info("step16_mdrun_md: Execute free molecular dynamics simulation")
mdrun(**paths["step16_mdrun_md"], properties=prop["step16_mdrun_md"])
global_log.info("step17_gmx_image1: Image Trajectory, step1, moving ligand to center of the water box")
gmx_image(**paths["step17_gmx_image1"], properties=prop["step17_gmx_image1"])
global_log.info("step18_gmx_image2: Image Trajectory, step2, removing rotation")
gmx_image(**paths["step18_gmx_image2"], properties=prop["step18_gmx_image2"])
global_log.info("step19_gmx_trjconv_str: Convert final structure from GRO to PDB")
gmx_trjconv_str(**paths["step19_gmx_trjconv_str"], properties=prop["step19_gmx_trjconv_str"])
global_log.info("step20_gmx_energy: Generate energy plot from minimization/equilibration")
gmx_energy(**paths["step20_gmx_energy"], properties=prop["step20_gmx_energy"])
global_log.info("step21_gmx_rgyr: Generate Radius of Gyration plot for the resulting setup trajectory from the free md step")
gmx_rgyr(**paths["step21_gmx_rgyr"], properties=prop["step21_gmx_rgyr"])
global_log.info("step22_rmsd_first: Generate RMSd (against 1st snp.) plot for the resulting setup trajectory from the free md step")
gmx_rms(**paths["step22_rmsd_first"], properties=prop["step22_rmsd_first"])
global_log.info("step23_rmsd_exp: Generate RMSd (against exp.) plot for the resulting setup trajectory from the free md step")
gmx_rms(**paths["step23_rmsd_exp"], properties=prop["step23_rmsd_exp"])
if conf.properties['run_md']:
global_log.info("step24_grompp_md: Preprocess long MD simulation after setup")
grompp(**paths["step24_grompp_md"], properties=prop["step24_grompp_md"])
elapsed_time = time.time() - start_time
global_log.info('')
global_log.info('')
global_log.info('Execution successful: ')
global_log.info(' Workflow_path: %s' % conf.get_working_dir_path())
global_log.info(' Config File: %s' % config)
if system:
global_log.info(' System: %s' % system)
global_log.info('')
global_log.info('Elapsed time: %.1f minutes' % (elapsed_time/60))
global_log.info('')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Based on the official Gromacs tutorial")
parser.add_argument('--config', required=True)
parser.add_argument('--system', required=False)
args = parser.parse_args()
main(args.config, args.system)
| 1.617188
| 2
|
src/ocd/utilities.py
|
ofirr/OpenCommunity
| 0
|
12774784
|
<filename>src/ocd/utilities.py
import uuid
def create_uuid():
return uuid.uuid4().hex
| 1.929688
| 2
|
ads/adsconstants.py
|
rako233/TC2ADSProtocol
| 0
|
12774785
|
<gh_stars>0
"""Collection of all documented ADS constants. Only a small subset of these
are used by code in this library.
Source: http://infosys.beckhoff.com/english.php?content=../content/1033/tcplclibsystem/html/tcplclibsys_constants.htm&id= # nopep8
"""
"""Port numbers"""
# Port number of the standard loggers.
AMSPORT_LOGGER = 100
# Port number of the TwinCAT Eventloggers.
AMSPORT_EVENTLOG = 110
# Port number of the TwinCAT Realtime Servers.
AMSPORT_R0_RTIME = 200
# Port number of the TwinCAT I/O Servers.
AMSPORT_R0_IO = 300
# Port number of the TwinCAT NC Servers.
AMSPORT_R0_NC = 500
# Port number of the TwinCAT NC Servers (Task SAF).
AMSPORT_R0_NCSAF = 501
# Port number of the TwinCAT NC Servers (Task SVB).
AMSPORT_R0_NCSVB = 511
# internal
AMSPORT_R0_ISG = 550
# Port number of the TwinCAT NC I Servers.
AMSPORT_R0_CNC = 600
# internal
AMSPORT_R0_LINE = 700
# Port number of the TwinCAT PLC Servers (only at the Buscontroller).
AMSPORT_R0_PLC = 800
# Port number of the TwinCAT PLC Servers in the runtime 1.
AMSPORT_R0_PLC_RTS1 = 801
# Port number of the TwinCAT PLC Servers in the runtime 2.
AMSPORT_R0_PLC_RTS2 = 811
# Port number of the TwinCAT PLC Servers in the runtime 3.
AMSPORT_R0_PLC_RTS3 = 821
# Port number of the TwinCAT PLC Servers in the runtime 4.
AMSPORT_R0_PLC_RTS4 = 831
# Port number of the TwinCAT CAM Server.
AMSPORT_R0_CAM = 900
# Port number of the TwinCAT CAMTOOL Server.
AMSPORT_R0_CAMTOOL = 950
# Port number of the TwinCAT System Service.
AMSPORT_R3_SYSSERV = 10000
# Port number of the TwinCAT Scope Servers (since Lib. V2.0.12)
AMSPORT_R3_SCOPESERVER = 27110
"""ADS States"""
ADSSTATE_INVALID = 0 # ADS Status: invalid
ADSSTATE_IDLE = 1 # ADS Status: idle
ADSSTATE_RESET = 2 # ADS Status: reset.
ADSSTATE_INIT = 3 # ADS Status: init
ADSSTATE_START = 4 # ADS Status: start
ADSSTATE_RUN = 5 # ADS Status: run
ADSSTATE_STOP = 6 # ADS Status: stop
ADSSTATE_SAVECFG = 7 # ADS Status: save configuration
ADSSTATE_LOADCFG = 8 # ADS Status: load configuration
ADSSTATE_POWERFAILURE = 9 # ADS Status: Power failure
ADSSTATE_POWERGOOD = 10 # ADS Status: Power good
ADSSTATE_ERROR = 11 # ADS Status: Error
ADSSTATE_SHUTDOWN = 12 # ADS Status: Shutdown
ADSSTATE_SUSPEND = 13 # ADS Status: Suspend
ADSSTATE_RESUME = 14 # ADS Status: Resume
ADSSTATE_CONFIG = 15 # ADS Status: Configuration
ADSSTATE_RECONFIG = 16 # ADS Status: Reconfiguration
ADSSTATE_MAXSTATES = 17
"""Reserved Index Groups"""
ADSIGRP_SYMTAB = 0xF000
ADSIGRP_SYMNAME = 0xF001
ADSIGRP_SYMVAL = 0xF002
ADSIGRP_SYM_HNDBYNAME = 0xF003
ADSIGRP_SYM_VALBYNAME = 0xF004
ADSIGRP_SYM_VALBYHND = 0xF005
ADSIGRP_SYM_RELEASEHND = 0xF006
ADSIGRP_SYM_INFOBYNAME = 0xF007
ADSIGRP_SYM_VERSION = 0xF008
ADSIGRP_SYM_INFOBYNAMEEX = 0xF009
ADSIGRP_SYM_DOWNLOAD = 0xF00A
ADSIGRP_SYM_UPLOAD = 0xF00B
ADSIGRP_SYM_UPLOADINFO = 0xF00C
ADSIGRP_SYM_SUMREAD = 0xF080
ADSIGRP_SYM_SUMWRITE = 0xF081
ADSIGRP_SYM_SUMREADWRITE = 0xF082
ADSIGRP_SYMNOTE = 0xF010
ADSIGRP_IOIMAGE_RWIB = 0xF020
ADSIGRP_IOIMAGE_RWIX = 0xF021
ADSIGRP_IOIMAGE_RISIZE = 0xF025
ADSIGRP_IOIMAGE_RWOB = 0xF030
ADSIGRP_IOIMAGE_RWOX = 0xF031
ADSIGRP_IOIMAGE_RWOSIZE = 0xF035
ADSIGRP_IOIMAGE_CLEARI = 0xF040
ADSIGRP_IOIMAGE_CLEARO = 0xF050
ADSIGRP_IOIMAGE_RWIOB = 0xF060
ADSIGRP_DEVICE_DATA = 0xF100
ADSIOFFS_DEVDATA_ADSSTATE = 0x0000
ADSIOFFS_DEVDATA_DEVSTATE = 0x0002
"""System Service Index Groups"""
SYSTEMSERVICE_OPENCREATE = 100
SYSTEMSERVICE_OPENREAD = 101
SYSTEMSERVICE_OPENWRITE = 102
SYSTEMSERVICE_CREATEFILE = 110
SYSTEMSERVICE_CLOSEHANDLE = 111
SYSTEMSERVICE_FOPEN = 120
SYSTEMSERVICE_FCLOSE = 121
SYSTEMSERVICE_FREAD = 122
SYSTEMSERVICE_FWRITE = 123
SYSTEMSERVICE_FSEEK = 124
SYSTEMSERVICE_FTELL = 125
SYSTEMSERVICE_FGETS = 126
SYSTEMSERVICE_FPUTS = 127
SYSTEMSERVICE_FSCANF = 128
SYSTEMSERVICE_FPRINTF = 129
SYSTEMSERVICE_FEOF = 130
SYSTEMSERVICE_FDELETE = 131
SYSTEMSERVICE_FRENAME = 132
SYSTEMSERVICE_REG_HKEYLOCALMACHINE = 200
SYSTEMSERVICE_SENDEMAIL = 300
SYSTEMSERVICE_TIMESERVICES = 400
SYSTEMSERVICE_STARTPROCESS = 500
SYSTEMSERVICE_CHANGENETID = 600
"""System Service Index Offsets (Timeservices)"""
TIMESERVICE_DATEANDTIME = 1
TIMESERVICE_SYSTEMTIMES = 2
TIMESERVICE_RTCTIMEDIFF = 3
TIMESERVICE_ADJUSTTIMETORTC = 4
"""Masks for Log output"""
ADSLOG_MSGTYPE_HINT = 0x01
ADSLOG_MSGTYPE_WARN = 0x02
ADSLOG_MSGTYPE_ERROR = 0x04
ADSLOG_MSGTYPE_LOG = 0x10
ADSLOG_MSGTYPE_MSGBOX = 0x20
ADSLOG_MSGTYPE_RESOURCE = 0x40
ADSLOG_MSGTYPE_STRING = 0x80
"""Masks for Bootdata-Flagsx"""
BOOTDATAFLAGS_RETAIN_LOADED = 0x01
BOOTDATAFLAGS_RETAIN_INVALID = 0x02
BOOTDATAFLAGS_RETAIN_REQUESTED = 0x04
BOOTDATAFLAGS_PERSISTENT_LOADED = 0x10
BOOTDATAFLAGS_PERSISTENT_INVALID = 0x20
"""Masks for BSOD-Flags"""
SYSTEMSTATEFLAGS_BSOD = 0x01 # BSOD: Blue Screen of Death
SYSTEMSTATEFLAGS_RTVIOLATION = 0x02 # Realtime violation, latency time overrun
"""Masks for File output"""
# 'r': Opens file for reading
FOPEN_MODEREAD = 0x0001
# 'w': Opens file for writing; (possibly) existing files are overwritten.
FOPEN_MODEWRITE = 0x0002
# 'a': Opens file for writing, appending to (possibly) existing files. If no
# file exists, it will be created.
FOPEN_MODEAPPEND = 0x0004
# '+': Opens a file for reading and writing.
FOPEN_MODEPLUS = 0x0008
# 'b': Opens a file for binary reading and writing.
FOPEN_MODEBINARY = 0x0010
# 't': Opens a file for textual reading and writing.
FOPEN_MODETEXT = 0x0020
"""Masks for Eventlogger Flags"""
# Class and priority are defined by the formatter.
TCEVENTFLAG_PRIOCLASS = 0x0010
# The formatting information comes with the event
TCEVENTFLAG_FMTSELF = 0x0020
# Log.
TCEVENTFLAG_LOG = 0x0040
# Show message box.
TCEVENTFLAG_MSGBOX = 0x0080
# Use Source-Id instead of Source name.
TCEVENTFLAG_SRCID = 0x0100
"""TwinCAT Eventlogger Status messages"""
# Not valid, occurs also if the event was not reported.
TCEVENTSTATE_INVALID = 0x0000
# Event is reported, but neither signed off nor acknowledged.
TCEVENTSTATE_SIGNALED = 0x0001
# Event is signed off ('gone').
TCEVENTSTATE_RESET = 0x0002
# Event is acknowledged.
TCEVENTSTATE_CONFIRMED = 0x0010
# Event is signed off and acknowledged.
TCEVENTSTATE_RESETCON = 0x0012
"""TwinCAT Eventlogger Status messages"""
TCEVENT_SRCNAMESIZE = 15 # Max. Length for the Source name.
TCEVENT_FMTPRGSIZE = 31 # Max. Length for the name of the formatters.
"""Other"""
PI = 3.1415926535897932384626433832795 # Pi number
DEFAULT_ADS_TIMEOUT = 5 # (seconds) Default ADS timeout
MAX_STRING_LENGTH = 255 # The max. string length of T_MaxString data type
| 1.65625
| 2
|
mysite/classroom/models.py
|
anishmo99/Classrooom-Django-Web-App
| 1
|
12774786
|
from django.utils import timezone
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class User(AbstractBaseUser):
is_student = models.BooleanField(default=False)
is_teacher = models.BooleanField(default=False)
# class Teacher(models.Model):
# teacher_name = models.CharField(max_length=200)
# def __str__(self):
# return self.teacher_name
class TeacherManager(BaseUserManager):
    def create_user(self,email,username,password=None):
if not email:
raise ValueError("Email required")
if not username:
raise ValueError("Username required")
user = self.model(
email=self.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
class Teacher(AbstractBaseUser):
email = models.EmailField(verbose_name="email",max_length=60,unique=True)
username = models.CharField(max_length=30,unique=True)
date_joined = models.DateTimeField(verbose_name="date joined",auto_now_add=True)
last_login = models.DateTimeField(verbose_name="last login",auto_now=True)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
is_teacher = models.BooleanField(default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username',]
objects = TeacherManager()
def __str__(self):
return self.email
def has_perm(self,perm,obj=None):
return self.is_admin
def has_module_perms(self,applabel):
return True
class Class(models.Model):
classNum = models.CharField(max_length=2)
section = models.CharField(max_length=2)
class Meta:
verbose_name_plural = 'Classes'
def __str__(self):
return '{0} {1}'.format(self.classNum,self.section)
class Subject(models.Model):
subject = models.CharField(max_length=100)
def __str__(self):
return self.subject
class Question(models.Model):
question = models.TextField()
subject = models.ForeignKey(Subject,on_delete=models.CASCADE)
question_for_class = models.ForeignKey(Class,on_delete=models.CASCADE,default='')
# question_for_section = models.ForeignKey(Section,on_delete=models.CASCADE,default='')
question_by_teacher = models.ForeignKey(Teacher,on_delete=models.CASCADE,default='')
    question_published = models.DateTimeField('date published',default=timezone.now)
def __str__(self):
return self.question
class Answer(models.Model):
answer = models.TextField()
answer_for_question = models.ForeignKey(Question,on_delete=models.CASCADE,default='')
    answer_published = models.DateTimeField('date published',default=timezone.now)
def __str__(self):
return self.answer
class StudentManager(BaseUserManager):
def create_user(self,email,username,password=None):
if not email:
raise ValueError("Email required")
if not username:
raise ValueError("Username required")
user = self.model(
email=self.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
class Student(AbstractBaseUser):
email = models.EmailField(verbose_name="email",max_length=60,unique=True)
username = models.CharField(max_length=30,unique=True)
student_class = models.ForeignKey(Class,on_delete=models.CASCADE)
date_joined = models.DateTimeField(verbose_name="date joined",auto_now_add=True)
last_login = models.DateTimeField(verbose_name="last login",auto_now=True)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
is_student = models.BooleanField(default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username',]
objects = StudentManager()
def __str__(self):
return self.email
def has_perm(self,perm,obj=None):
return self.is_admin
def has_module_perms(self,applabel):
return True
# class Student(models.Model):
# student_name = models.CharField(max_length=200)
# # student_section = models.ForeignKey(Section,on_delete=models.CASCADE)
# student_class = models.ForeignKey(Class,on_delete=models.CASCADE)
# def __str__(self):
# return self.student_name
| 2.53125
| 3
|
images/models.py
|
cebanauskes/ida_images
| 0
|
12774787
|
<reponame>cebanauskes/ida_images
import os
from urllib.request import urlretrieve
from django.db import models
from django.core.files import File
class Image(models.Model):
"""Модель Изображения
pub_date - поле с датой публикации изображения
url - поле с ссылкой на изображение, если оно загружено со стороннего ресурса
image - поле с изображением оригинального размера
resized_image - поле с измененным изображением
"""
pub_date = models.DateTimeField('Дата публикации', auto_now_add=True)
url = models.URLField('Ссылка', blank=True, null=True)
image = models.ImageField(upload_to='images/',
blank=True, null=True)
resized_image = models.ImageField(upload_to='resized_images/',
blank=True, null=True)
def get_remote_image(self):
"""Метод для загрузки изображения через url"""
if self.url and not self.image:
response = urlretrieve(self.url)
self.image.save(os.path.basename(self.url),
File(open(response[0], 'rb')))
self.save()
def save(self, *args, **kwargs):
self.get_remote_image()
super().save(*args, **kwargs)
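    # Illustrative use (URL is a placeholder): creating Image(url="https://example.com/cat.jpg")
    # and calling save() downloads the file via get_remote_image() and stores it under images/.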
@property
def get_name(self):
"""Метод получения имени файла"""
return os.path.basename(self.image.url)
class Meta:
ordering = ('-pub_date',)
| 2.328125
| 2
|
tests/conftest.py
|
aspose-email-cloud/aspose-email-cloud-python
| 1
|
12774788
|
<reponame>aspose-email-cloud/aspose-email-cloud-python
import json
import os
import sys
import uuid
sys.path.append(os.path.join(os.path.dirname(__file__), "../sdk"))
from AsposeEmailCloudSdk import api, models
import pytest
class EmailApiData:
def __init__(self, email_cloud: api.EmailCloud, folder, storage):
self.api = email_cloud
self.folder = folder
self.storage = storage
def storage_folder(self):
return models.StorageFolderLocation(self.storage, self.folder)
def pytest_addoption(parser):
parser.addoption("--test_configuration", action="store",
help="config file in json format", default=None)
def pytest_configure(config):
config.addinivalue_line("markers", "pipeline")
config.addinivalue_line("markers", "ai")
@pytest.fixture(scope="class")
def td(request):
config = _get_config(request)
app_sid = config["clientid"]
app_key = config["clientsecret"]
api_base_url = config.get("apibaseurl", "https://api-qa.aspose.cloud")
email_cloud = api.EmailCloud(app_key, app_sid, api_base_url)
auth_url = config.get("authurl")
if auth_url:
email_cloud.email.api_client.configuration.auth_url = auth_url
folder = str(uuid.uuid4())
storage = 'First Storage'
email_cloud.cloud_storage.folder.create_folder(models.CreateFolderRequest(folder, storage))
yield EmailApiData(email_cloud, folder, storage)
email_cloud.cloud_storage.folder.delete_folder(models.DeleteFolderRequest(folder, storage, True))
def _get_config(request):
data = _get_lower_keys(os.environ)
file = request.config.getoption("--test_configuration", None)
if file is not None:
with open(file) as json_file:
data.update(_get_lower_keys(json.load(json_file)))
return data
def _get_lower_keys(dictionary):
data = {}
for k, v in dictionary.items():
data[str(k).lower()] = v
return data
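# Illustrative test-run configuration (keys are matched case-insensitively above; values are placeholders):
#   pytest --test_configuration=config.json
# where config.json contains at least {"ClientId": "...", "ClientSecret": "..."}
# and optionally "ApiBaseUrl" / "AuthUrl"; the same settings can also come from environment variables.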
| 2.15625
| 2
|
src/wai/annotations/core/plugin/_get_all_plugins_by_type.py
|
waikato-ufdl/wai-annotations-core
| 0
|
12774789
|
<reponame>waikato-ufdl/wai-annotations-core<gh_stars>0
from ..specifier.util import specifier_type
from ._cache import *
from ._get_all_plugins import get_all_plugins
def get_all_plugins_by_type() -> Dict[Type[StageSpecifier], Dict[str, Type[StageSpecifier]]]:
"""
Gets a dictionary from plugin base-type to the plugins
of that type registered with the system.
"""
# Create the empty result object
all_plugins_by_type: Dict[Type[StageSpecifier], Dict[str, Type[StageSpecifier]]] = {}
# Add each plugin to a set under its base-type
for name, plugin_specifier in get_all_plugins().items():
# Get the base-type of the plugin
base_type = specifier_type(plugin_specifier)
# Create a new group for this base-type if none exists already
if base_type not in all_plugins_by_type:
all_plugins_by_type[base_type] = {}
# Add this plugin to its base-type group
all_plugins_by_type[base_type][name] = plugin_specifier
return all_plugins_by_type
| 1.984375
| 2
|
case_cleaner.py
|
fcoclavero/text-preprocess
| 2
|
12774790
|
__author__ = ["<NAME>"]
__description__ = "Text cleaner functions that deal with casing."
__email__ = ["<EMAIL>"]
__status__ = "Prototype"
import re
def clean_cases(text: str) -> str:
"""Makes text all lowercase.
Arguments:
text:
The text to be converted to all lowercase.
Returns:
The lowercase text.
"""
return text.lower()
def kebab_to_snake_case(text: str) -> str:
"""Convert a kebab-cased-text to snake_case.
Arguments:
text:
The text to be converted to snake case. Must be valid kebab case.
Returns:
        The text in snake_case form.
"""
return text.replace("-", "_")
def split_camel_cased(text: str) -> str:
"""Split camelCased elements with a space.
Arguments:
text:
The text to be processed.
Returns:
The text with all camelCased elements split into different elements.
"""
return re.sub("(?!^)([A-Z][a-z]+)", r" \1", text)
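if __name__ == "__main__":
    # Quick illustrative checks of the three helpers (not part of the original module).
    assert clean_cases("HeLLo World") == "hello world"
    assert kebab_to_snake_case("my-variable-name") == "my_variable_name"
    assert split_camel_cased("camelCasedText") == "camel Cased Text"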
| 3.296875
| 3
|
dados_cnpj_lista_url.py
|
rictom/cnpj-mysql
| 3
|
12774791
|
<filename>dados_cnpj_lista_url.py
# -*- coding: utf-8 -*-
"""
Spyder Editor
Lists the files available on the Receita Federal public CNPJ data page.
"""
url = 'https://www.gov.br/receitafederal/pt-br/assuntos/orientacao-tributaria/cadastros/consultas/dados-publicos-cnpj'
url = 'http://172.16.58.3/CNPJ/'
from bs4 import BeautifulSoup, SoupStrainer
import requests
page = requests.get(url)
data = page.text
soup = BeautifulSoup(data, 'html.parser')
for link in soup.find_all('a'):
if str(link.get('href')).endswith('.zip'):
cam = link.get('href')
# if cam.startswith('http://http'):
# cam = 'http://' + cam[len('http://http//'):]
if not cam.startswith('http'):
print(url+cam)
else:
print(cam)
'''
http://172.16.58.3/CNPJ/F.K03200$W.SIMPLES.CSV.D10911.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10911.CNAECSV.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10911.MOTICSV.zip
http://172.16.58.3/CNPJ/F.K03200$Z.D10911.MUNICCSV.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10911.NATJUCSV.zip
http://172.16.58.3/CNPJ/F.K03200$Z.D10911.PAISCSV.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10911.QUALSCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y0.D10911.EMPRECSV.zip
http://172.16.58.3/CNPJ/K3241.K03200Y0.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y0.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y1.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y1.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y1.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y2.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y2.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y2.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y3.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y3.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y3.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y4.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y4.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y4.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y5.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y5.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y5.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y6.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y6.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y6.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y7.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y7.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y7.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y8.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y8.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y8.D10911.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y9.D10911.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y9.D10911.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y9.D10911.SOCIOCSV.zip
'''
'''
http://200.152.38.155/CNPJ/F.K03200$W.SIMPLES.CSV.D10814.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10814.CNAECSV.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10814.MOTICSV.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10814.MUNICCSV.zip
http://172.16.58.3/CNPJ/F.K03200$Z.D10814.NATJUCSV.zip
http://200.152.38.155/CNPJ/F.K03200$Z.D10814.PAISCSV.zip
http://172.16.58.3/CNPJ/F.K03200$Z.D10814.QUALSCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y0.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y0.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y0.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y1.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y1.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y1.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y2.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y2.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y2.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y3.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y3.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y3.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y4.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y4.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y4.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y5.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y5.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y5.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y6.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y6.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y6.D10814.SOCIOCSV.zip
http://172.16.58.3/CNPJ/K3241.K03200Y7.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y7.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y7.D10814.SOCIOCSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y8.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y8.D10814.ESTABELE.zip
http://200.152.38.155/CNPJ/K3241.K03200Y8.D10814.SOCIOCSV.zip
http://172.16.58.3/CNPJ/K3241.K03200Y9.D10814.EMPRECSV.zip
http://200.152.38.155/CNPJ/K3241.K03200Y9.D10814.ESTABELE.zip
http://172.16.58.3/CNPJ/K3241.K03200Y9.D10814.SOCIOCSV.zip
'''
| 2.96875
| 3
|
pycozmo/tests/test_image_encoder.py
|
gimait/pycozmo
| 123
|
12774792
|
import unittest
from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str
from pycozmo.util import hex_dump, hex_load
from pycozmo.tests.image_encoder_fixtures import FIXTURES
class TestImageEncoder(unittest.TestCase):
@staticmethod
def _encode(sim: str) -> str:
im = str_to_image(sim)
encoder = ImageEncoder(im)
buf = encoder.encode()
res = hex_dump(buf)
return res
def assertSameImage(self, sim: str, seq: str) -> None:
buffer = hex_load(seq)
decoder = ImageDecoder(buffer)
decoder.decode()
actual = image_to_str(decoder.image)
self.assertEqual(sim.strip(), actual.strip())
def test_blank(self):
fixture = FIXTURES["blank"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_fill_screen(self):
fixture = FIXTURES["fill_screen"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_fill_screen2(self):
fixture = FIXTURES["fill_screen2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left(self):
fixture = FIXTURES["top_left"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_5(self):
fixture = FIXTURES["top_left_5"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_1_8(self):
fixture = FIXTURES["top_left_1_8"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_line(self):
fixture = FIXTURES["top_left_line"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_line(self):
fixture = FIXTURES["top_line"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_bottom_line(self):
fixture = FIXTURES["bottom_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_left_line(self):
fixture = FIXTURES["left_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_right_line(self):
fixture = FIXTURES["right_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_columns(self):
fixture = FIXTURES["columns"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect(self):
fixture = FIXTURES["rect"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect2(self):
fixture = FIXTURES["rect2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect3(self):
fixture = FIXTURES["rect3"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect4(self):
fixture = FIXTURES["rect4"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_diagonal(self):
fixture = FIXTURES["diagonal"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_diagonal2(self):
fixture = FIXTURES["diagonal2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_blocks(self):
fixture = FIXTURES["blocks"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_pycozmo(self):
fixture = FIXTURES["pycozmo"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_tl(self):
fixture = FIXTURES["chessboard_tl"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_bl(self):
fixture = FIXTURES["chessboard_bl"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_tr(self):
fixture = FIXTURES["chessboard_tr"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_br(self):
fixture = FIXTURES["chessboard_br"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_tl(self):
fixture = FIXTURES["chessboard2_tl"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_bl(self):
fixture = FIXTURES["chessboard2_bl"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_tr(self):
fixture = FIXTURES["chessboard2_tr"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_br(self):
fixture = FIXTURES["chessboard2_br"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
| 2.53125
| 3
|
traditional_methods.py
|
hpi-sam/GNN-TiborMaxTiago
| 11
|
12774793
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# use all cores
#import os
#os.system("taskset -p 0xff %d" % os.getpid())
pd.options.mode.chained_assignment = None  # deactivate chained-assignment warnings
def load_seattle_speed_matrix():
""" Loads the whole Seattle `speed_matrix_2015` into memory.
Caution ~ 200 mb of data
:param:
:return df (pandas.DataFrame): speed matrix as DataFrame. Columns are sensors, rows are timestamps
"""
speed_matrix = './data/Seattle_Loop_Dataset/speed_matrix_2015'
print('Loading data...')
df = pd.read_pickle(speed_matrix)
df.index = pd.to_datetime(df.index, format='%Y-%m-%d %H:%M')
print('Load completed.')
return df
def best_moving_average(df, col, average_window_in_hours=27, from_date=None, to_date=None, plot=False):
""" Calculates the moving average in a window of `average_window_in_hours` hours and propagates
into the future.
Beware! This code uses data from the future to perform predictions.
Meaning it is meant to be used to generate the "perfect" moving average baseline.
:param df (pandas.DataFrame): dataset being used
:param col (str): column for which the moving average will be applied
:param average_window_in_hours (int): the window (in hours) used to generate predictions
:param from_date (str): initial date to be shown in the plot, format: "YYYY-MM-DD"
:param to_date (str): end date to be shown in the plot
:param plot (bool): plot moving average and original df
:return MAE, RMSE (tuple): Both metrics are calculated for the column `col`
"""
ndf = df[[col]]
window_size = average_window_in_hours*12
ndf['preds'] = ndf.rolling(window=window_size).mean().shift(1)
MAE = ndf.apply((lambda x: np.abs(x[0] - x[1])), axis=1).dropna().mean()
RMSE = np.sqrt(ndf.apply((lambda x: np.power(x[0] - x[1], 2)), axis=1).dropna().mean())
if plot:
if from_date is not None and to_date is not None:
ndf.resample('1h').mean().loc[from_date:to_date].plot(figsize=(12, 7))
else:
ndf.resample('1h').mean()[:500].plot(figsize=(12, 7))
plt.show()
return (MAE, RMSE)
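# Illustrative call of best_moving_average (a sketch only; the dates below are
# made up, and the Seattle pickle loaded by load_seattle_speed_matrix() must be
# available). The data appears to be sampled every 5 minutes, so a 27-hour
# window spans 27 * 12 = 324 rows.
#
#   df = load_seattle_speed_matrix()
#   mae, rmse = best_moving_average(df, df.columns[0], average_window_in_hours=27,
#                                   from_date='2015-06-01', to_date='2015-06-07',
#                                   plot=True)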
def calculate_metrics(df, average_window_in_hours, verbose=5, save=True):
""" Calculates MAE and RMSE for all columns of `df`, taking a sliding window of `average_window_in_hours` hours.
    :param df (pandas.DataFrame): dataset being used
:param average_window_in_hours (int): the window (in hours) used to generate predictions
:param verbose (int): option to display the calculations on-the-fly.
Values are going to be displayed after `verbose` iterations.
    :param save (bool): if True, write the results to a CSV file under ./experiment_results/
:return mae_and_rmse (dict): dictionary containing (MAE, RMSE) for each column of `df`
"""
mae_and_rmse = {}
for (it, col) in enumerate(df.columns):
MAE, RMSE = best_moving_average(df, col, average_window_in_hours)
mae_and_rmse[col] = (MAE, RMSE)
if it%verbose == 0:
print('Column: {}, MAE: {}, RMSE: {}'.format(col, MAE, RMSE))
if save:
# TODO: add param to attribute filename and filedir
        pd.DataFrame(mae_and_rmse, index=['MAE', 'RMSE']).to_csv('./experiment_results/seattle_best_moving_average_mae_rmse.csv')
return mae_and_rmse
def real_moving_average(df, col, sliding_window_in_hours, forecast_window_in_minutes):
""" Calculating the moving average using a sliding window of `sliding_window_in_hours`
on a forecast window of `forecast_window_in_minutes` over the dataset.
Returns a dataframe with the forecast for the given dataframe.
"""
sliding_window = 12*sliding_window_in_hours
forecast_window = ((forecast_window_in_minutes+5)//5)
X = df[col].values
Y = X[:sliding_window]
for i in range(forecast_window):
ypred = np.mean(Y[i: i+sliding_window])
Y = np.append(Y, ypred)
forecast_df = pd.DataFrame(
data=Y[len(Y)-forecast_window:],
index=df.index[sliding_window:sliding_window+forecast_window]
)
return forecast_df
# still need to compute MAE and RMSE for all data
def moving_average_forecast(df, col, sliding_window_in_hours, forecast_window_in_minutes):
""" Applies moving average forecast across all the dataset. Stride can be applied to make forecasting faster,
ie, stride makes the sliding window jump a window of `stride_in_minutes`.
Returns a pandas.DataFrame containing a side-by-side comparison of the real dataframe and its predictions,
for all predicted values.
"""
sliding_window = 12*sliding_window_in_hours
forecast_window = ((forecast_window_in_minutes+5)//5)
stride_in_minutes = 60
stride = (stride_in_minutes//5)
all_predictions = []
if stride_in_minutes == 0:
max_it = len(df)
else:
max_it = len(df)//stride
for i in range(max_it):
try:
smaller_df = df.iloc[i*stride: (sliding_window+forecast_window) + (i+1)*stride]
preds = real_moving_average(smaller_df, col, sliding_window_in_hours, forecast_window_in_minutes)
fdf = pd.concat([smaller_df[[col]].loc[preds.index[0]:preds.index[-1]],preds], axis=1)
fdf = fdf.rename(columns={0:col+'_pred'})
all_predictions.append(fdf)
        except:
            # slices near the end of the series are too short to forecast; skip them
            pass
return pd.concat(all_predictions, axis=0)
def metrics(preds_df):
""" Given a `preds_df` containing two columns, the first with real values and the second being preds,
returns MAE and RMSE
"""
preds = preds_df
MAE = np.mean(np.abs(preds[preds.columns[0]] - preds[preds.columns[1]] ))
RMSE = np.sqrt(np.mean(np.power(preds[preds.columns[0]] - preds[preds.columns[1]], 2)))
return (MAE, RMSE)
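# Quick illustrative check of metrics() on a tiny hand-made frame:
#
#   demo = pd.DataFrame({'real': [1.0, 2.0, 3.0], 'pred': [1.0, 1.0, 5.0]})
#   metrics(demo)  # -> (MAE = 1.0, RMSE = sqrt(5/3) ~= 1.29)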
def main():
# this options should go into an argument parser
SLIDING_WINDOW_IN_HOURS = 4
FORECAST_WINDOW_IN_MINUTES = 15
STRIDE_IN_MINUTES = 60
df = load_seattle_speed_matrix()
metrics_dict = {}
for col in df.columns:
print(col)
preds = moving_average_forecast(df, col, SLIDING_WINDOW_IN_HOURS, FORECAST_WINDOW_IN_MINUTES)
mae_rmse = metrics(preds)
metrics_dict[col] = mae_rmse
pd.DataFrame(metrics_dict, index=['MAE', 'RMSE']).to_csv('./experiment_results/training_window_4_hour_forecast_window_15_min_mae_rmse_seattle.csv')
if __name__ == '__main__':
main()
| 3.421875
| 3
|
app/security.py
|
ruter/otakucal
| 0
|
12774794
|
from itsdangerous import URLSafeTimedSerializer
from . import app
ts = URLSafeTimedSerializer(app.config['SECRET_KEY'])
| 1.375
| 1
|
qbert/goexplore_py/randselectors.py
|
StrangeTcy/Q-BERT
| 57
|
12774795
|
from .import_ai import *
from tqdm import tqdm
# from montezuma_env import *
@dataclass()
class Weight:
weight: float = 1.0
power: float = 1.0
def __repr__(self):
return f'w={self.weight:.2f}=p={self.power:.2f}'
@dataclass()
class DirWeights:
horiz: float = 2.0
vert: float = 0.3
score_low: float = 0.0
score_high: float = 0.0
def __repr__(self):
return f'h={self.horiz:.2f}=v={self.vert:.2f}=l={self.score_low:.2f}=h={self.score_high:.2f}'
def numberOfSetBits(i):
i = i - ((i >> 1) & 0x55555555)
i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
return (((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) & 0xffffffff) >> 24
def convert_score(e):
# TODO: this doesn't work when actual score is used!! Fix?
if isinstance(e, tuple):
return len(e)
return numberOfSetBits(e)
class WeightedSelector:
def __init__(self, game, seen=Weight(0.1), chosen=Weight(), action=Weight(0.1, power=0.5),
room_cells=Weight(0.0, power=0.5), dir_weights=DirWeights(), low_level_weight=0.0,
chosen_since_new_weight=Weight()):
self.seen: Weight = seen
self.chosen: Weight = chosen
self.chosen_since_new_weight: Weight = chosen_since_new_weight
self.room_cells: Weight = room_cells
self.dir_weights: DirWeights = dir_weights
self.action: Weight = action
self.low_level_weight: float = low_level_weight
self.game = game
def reached_state(self, elem):
pass
def update(self):
pass
def compute_weight(self, value, weight):
return weight.weight * 1 / (value + 0.001) ** weight.power + 0.00001
def get_seen_weight(self, cell):
return self.compute_weight(cell.seen_times, self.seen)
def get_chosen_weight(self, cell):
return self.compute_weight(cell.chosen_times, self.chosen)
def get_chosen_since_new_weight(self, cell):
return self.compute_weight(cell.chosen_since_new, self.chosen_since_new_weight)
def get_action_weight(self, cell):
return self.compute_weight(cell.action_times, self.action)
def no_neighbor(self, pos, offset, known_cells):
x = pos.x + offset[0]
y = pos.y + offset[1]
room = pos.room
room_x, room_y = self.game.get_room_xy(room)
if x < self.xrange[0]:
x = self.xrange[1]
room_x -= 1
elif x > self.xrange[1]:
x = self.xrange[0]
room_x += 1
elif y < self.yrange[0]:
y = self.yrange[1]
room_y -= 1
elif y > self.yrange[1]:
y = self.yrange[0]
room_y += 1
if self.game.get_room_out_of_bounds(room_x, room_y):
return True
room = self.game.get_room_from_xy(room_x, room_y)
if room == -1:
return True
new_pos = copy.copy(pos)
        new_pos.room = room
new_pos.x = x
new_pos.y = y
res = self.game.make_pos(pos.score, new_pos) not in known_cells
return res
def get_pos_weight(self, pos, cell, known_cells, possible_scores):
if isinstance(pos, tuple):
# Logic for the score stuff: the highest score will get a weight of 1, second highest a weight of sqrt(1/2), third sqrt(1/3) etc.
return 1 + self.dir_weights.score_high * 1 / np.sqrt(len(possible_scores) - possible_scores.index(cell.score))
no_low = True
if convert_score(pos.score) == convert_score(possible_scores[0]):
pass
else:
for score in possible_scores:
if convert_score(score) >= convert_score(pos.score):
break
if self.game.make_pos(score, pos) in known_cells:
no_low = False
break
no_high = True
if convert_score(pos.score) == convert_score(possible_scores[-1]):
pass
else:
for score in reversed(possible_scores):
if convert_score(score) <= convert_score(pos.score):
break
if self.game.make_pos(score, pos) in known_cells:
no_high = False
break
neigh_horiz = 0.0
if self.dir_weights.horiz:
neigh_horiz = (self.no_neighbor(pos, (-1, 0), known_cells) + self.no_neighbor(pos, (1, 0), known_cells))
neigh_vert = 0.0
if self.dir_weights.vert:
neigh_vert = (self.no_neighbor(pos, (0, -1), known_cells) + self.no_neighbor(pos, (0, 1), known_cells))
res = self.dir_weights.horiz * neigh_horiz + self.dir_weights.vert * neigh_vert + self.dir_weights.score_low * no_low + self.dir_weights.score_high * no_high + 1
return res
def get_weight(self, cell_key, cell, possible_scores, known_cells):
level_weight = 1.0
if not isinstance(cell_key, tuple) and cell_key.level < self.max_level:
level_weight = self.low_level_weight ** (self.max_level - cell_key.level)
if level_weight == 0.0:
return 0.0
res = (self.get_pos_weight(cell_key, cell, known_cells, possible_scores) +
self.get_seen_weight(cell) +
self.get_chosen_weight(cell) +
self.get_action_weight(cell) +
self.get_chosen_since_new_weight(cell)
) * level_weight
return res
def set_ranges(self, to_choose):
if isinstance(to_choose[0], tuple):
return
self.xrange = (min(e.x for e in to_choose), max(e.x for e in to_choose))
self.yrange = (min(e.y for e in to_choose), max(e.y for e in to_choose))
self.max_level = max(e.level for e in to_choose)
def choose_cell(self, known_cells, size=1):
to_choose = list(known_cells.keys())
# scores as is, but 0 => max()/100 so it still has a chance to get chosen
highest_number = max(max([e.score for e in known_cells.values()]), 1)
converted_scores = [max(e.score, highest_number/100) for e in known_cells.values()]
# tqdm.write(f'{converted_scores}')
total = sum(converted_scores)
return np.random.choice(to_choose, size=size, replace=True, p=[w / total for w in converted_scores])
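        # NOTE: the early return above makes the remaining weighted-selection code
        # unreachable; cells are picked purely in proportion to their (floored) scores.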
self.set_ranges(to_choose)
if not isinstance(to_choose[0], tuple):
possible_scores = sorted(set(e.score for e in to_choose), key=convert_score)
else:
possible_scores = sorted(set(e.score for e in known_cells.values()))
if len(to_choose) == 1:
return [to_choose[0]] * size
weights = [
self.get_weight(
k, known_cells[k], possible_scores, known_cells)
for k in to_choose
]
total = np.sum(weights)
idxs = np.random.choice(
list(range(len(to_choose))),
size=size,
p=[w / total for w in weights]
)
return [to_choose[i] for i in idxs]
def __repr__(self):
return f'weight-seen-{self.seen}-chosen-{self.chosen}-chosen-since-new-{self.chosen_since_new_weight}-action-{self.action}-room-{self.room_cells}-dir-{self.dir_weights}'
| 2.328125
| 2
|
timeline/urls.py
|
mikechumba/insta
| 0
|
12774796
|
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path,include
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
from .forms import LoginForm
urlpatterns = [
path('', views.index, name="home"),
path('register', views.register, name='register'),
path('profile', views.profile, name='profile'),
path('timeline/new', views.new_post, name='new_post'),
path('profile/edit', views.edit_profile, name='edit_profile'),
path('<user_name>', views.users, name='user_profile'),
path('post/<int:image_id>', views.image_view, name='image_view'),
path('login/', auth_views.LoginView.as_view(authentication_form=LoginForm), name='login'),
path('logout/', views.logout_view, name='logout'),
path('search/', views.search,name='search'),
# method views
path('follow/<user_name>', views.follow, name='follow'),
path('like/<int:image_id>',views.like,name='like')
]
| 1.898438
| 2
|
old/gridsearchXGboostR.py
|
giorgiopiatti/hgboost
| 21
|
12774797
|
<filename>old/gridsearchXGboostR.py<gh_stars>10-100
# The process of performing random search with cross validation is:
# 1. Set up a grid of hyperparameters to evaluate
# 2. Randomly sample a combination of hyperparameters
# 3. Create a model with the selected combination
# 4. Evaluate the model using cross validation
# 5. Decide which hyperparameters worked the best
#https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html
#https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
#https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
#https://xgboost.readthedocs.io/en/latest/parameter.html
#--------------------------------------------------------------------------
# Name : gridsearchGradientBoosting.py
# Version : 1.0
# Author : E.Taskesen
# Contact : <EMAIL>
# Date : Dec. 2018
#--------------------------------------------------------------------------
#
'''
NOTE:
IF you see something like this: training data did not have the following fields: f73, f40, f66, f147, f62, f39, f2, f83, f127, f84, f54, f97, f114, f102, f49, f7, f8, f56, f23, f107, f138, f28, f71, f152, f80, f57, f46, f58, f139, f121, f140, f20, f45, f113, f5, f60, f135, f101, f68, f76, f65, f41, f99, f131, f109, f117, f13, f100, f128, f52, f15, f50, f95, f124, f19, f12, f43, f137, f33, f22, f32, f72, f142, f151, f74, f90, f48, f122, f133, f26, f79, f94, f18, f10, f51, f0, f53, f92, f29, f115, f143, f14, f116, f47, f69, f82, f34, f89, f35, f6, f132, f16, f118, f31, f96, f59, f75, f1, f110, f61, f108, f25, f21, f11, f17, f85, f150, f3, f98, f24, f77, f103, f112, f91, f144, f70, f86, f119, f55, f130, f106, f44, f36, f64, f67, f4, f145, f37, f126, f88, f93, f104, f81, f149, f27, f136, f146, f30, f38, f42, f141, f134, f120, f105, f129, f9, f148, f87, f125, f123, f111, f78, f63
Then, it may be caused by the incompatibility of sklearn's CalibratedClassifierCV and pandas.DataFrame
Or your data has 0 in it!
Just replace the last element with a very small number, like so:
X=X.replace(0,0.0000001)
https://github.com/dmlc/xgboost/issues/2334
'''
#%% Libraries
import xgboost
#from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split
#%% Gridsearch for GradientBoostingRegressor
def gridsearchXGboostR(X, y, cv=10, n_iter=20, n_jobs=1, verbose=True):
if verbose==True: verbose=2
n_jobs=np.maximum(n_jobs,1)
# print "Checkinf for NaN and Inf"
# print "np.inf=", np.where(np.isnan(X))
# print "is.inf=", np.where(np.isinf(X))
# print "np.max=", np.max(abs(X))
# [X_train, X_test, y_train, y_test] = train_test_split(X.iloc[:-1,:].values, y.iloc[:-1].values, train_size=0.8, test_size=0.2)
min_child_weight = [0.5, 1.0, 3.0, 5.0, 7.0, 10.0]
n_estimators = [100, 250, 300, 500]
gamma = [0, 0.25, 0.5, 1.0]
subsample = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# Maximum depth of each tree
max_depth = [2, 3, 4, 5, 10, 15]
silent = [False]
    learning_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
colsample_bylevel = [0.4, 0.6, 0.8, 1.0]
colsample_bytree = [0.4, 0.6, 0.8, 1.0]
reg_lambda = [0.1, 1.0, 5.0, 10.0, 50.0, 100.0]
num_round=[10,50,100]
# Control the balance of positive and negative weights, useful for unbalanced classes.
scale_pos_weight = [1]
hyperparameter_grid = {
# 'min_child_weight': min_child_weight,
'n_estimators': n_estimators,
'gamma': gamma,
'subsample': subsample,
'max_depth': max_depth,
'silent': silent,
'learning_rate': learning_rate,
'colsample_bylevel': colsample_bylevel,
'colsample_bytree': colsample_bytree,
'reg_lambda': reg_lambda,
'scale_pos_weight': scale_pos_weight,
# 'num_round':num_round,
}
# Create the model to use for hyperparameter tuning
model = xgboost.XGBRegressor()
# Set up the random search with 5-fold cross validation
random_cv = RandomizedSearchCV(model,
hyperparameter_grid,
cv=cv,
n_iter=n_iter,
n_jobs=n_jobs,
verbose=verbose,
scoring='neg_mean_absolute_error', #neg_mean_squared_error
return_train_score = False,
refit=True, #Refit an estimator using the best found parameters on the whole dataset.
)
# Fit on the training data
# random_cv = xgboost.XGBRegressor()
# X.dropna(inplace=True)
# y.dropna(inplace=True)
# X = X.fillna(X.mean())
# np.where(X.values >= np.finfo(np.float64).max)
# np.isnan(X.values.any())
# col_mask=X.isnull().any(axis=0).sum()
# row_mask=X.isnull().any(axis=1).sum()
# X[X==np.inf]=np.nan
# X.fillna(X.mean(), inplace=True)
# IND=X.asmatrix(columns=['ColumnA', 'ColumnB'])
# np.isnan(IND).any()
if 'pandas' in str(type(X)):
X = X.as_matrix().astype(np.float)
if 'pandas' in str(type(y)):
y = y.as_matrix().astype(np.float)
search_time_start = time.time()
random_cv.fit(X, y)
# Show some results:
if verbose:
print("Randomized search time:", time.time() - search_time_start)
report(random_cv.cv_results_)
# Find the best combination of settings
model=random_cv.best_estimator_
# random_cv.best_score_
# random_cv.best_params_
# random_cv.best_index_
# random_cv.cv_results_['params'][search.best_index_]
# random_results = pd.DataFrame(random_cv.cv_results_).sort_values('mean_test_score', ascending = False)
# bestparams=random_cv.cv_results_['params'][random_cv.best_index_]
return(model,random_cv)
#%% Report best scores
def report(results, n_top=5):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(results['mean_test_score'][candidate], results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
#%% END
| 3.453125
| 3
|
tridet/utils/train.py
|
flipson/dd3d
| 227
|
12774798
|
<gh_stars>100-1000
# Copyright 2021 Toyota Research Institute. All rights reserved.
import logging
import os
from tabulate import tabulate
from termcolor import colored
from detectron2.utils.events import get_event_storage
LOG = logging.getLogger(__name__)
def get_inference_output_dir(dataset_name, is_last=False, use_tta=False, root_output_dir=None):
if not root_output_dir:
root_output_dir = os.getcwd() # hydra
step = get_event_storage().iter
if is_last:
result_dirname = "final"
else:
result_dirname = f"step{step:07d}"
if use_tta:
result_dirname += "-tta"
output_dir = os.path.join(root_output_dir, "inference", result_dirname, dataset_name)
return output_dir
def print_test_results(test_results):
metric_table = tabulate(
[(k, v) for k, v in test_results.items()],
headers=["metric", "value"],
tablefmt="pipe",
numalign="left",
stralign="left",
)
LOG.info("Test results:\n" + colored(metric_table, "cyan"))
| 2.046875
| 2
|
P1/task_2.2/task_2pt2.py
|
VitusP/IoT-Analytics
| 0
|
12774799
|
<gh_stars>0
import pandas as pd
import random
import math
import collections
## Global Data
mc = 0
rtcl = 3
nonRTCL = 5
n_rt = 0
n_nonrt = 0
scl = 4
s = 2 #server status
pre_empted_service_time = 0
iat_rt = 10
iat_nonrt = 5
serviceTime_rt = 2
serviceTime_nonrt = 4
iat_rt_mu = 10
iat_nonrt_mu = 5
serviceTime_rt_mu = 2
serviceTime_nonrt_mu = 4
df = pd.DataFrame(columns=['MC', 'RTCL', 'nonRTCL', 'n_RT', 'n_nonRT', 'SCL', 's', 'Pre-empted-service-time'])
## Main method
def main():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt, df
max_mc = int(input("Do the hand simulation until MC: "))
record_global_vars()
mc = rtcl
while mc <= max_mc:
randomized_param()
event = next_event()
if event == 0:
rt_arrived()
elif event == 1:
nonrt_arrived()
elif event == 2:
service_completed()
print(df)
export_to_excel()
## Helper Methods
def rt_arrived():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt
mc = rtcl
n_rt = n_rt + 1
rtcl = mc + iat_rt
if n_rt == 1 and s == 0:
scl = mc + serviceTime_rt
n_rt = n_rt - 1
s = 1
elif s == 2:
# pre-empt nonRT and run RT
if (scl - mc) > 0:
pre_empted_service_time = (scl - mc)
n_nonrt = n_nonrt + 1
scl = mc + serviceTime_rt
n_rt = n_rt - 1
s = 1
record_global_vars()
def nonrt_arrived():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt
mc = nonRTCL
n_nonrt = n_nonrt + 1
nonRTCL = mc + iat_nonrt
if n_nonrt == 1 and s == 0:
scl = mc + serviceTime_nonrt
s = 2
n_nonrt = n_nonrt - 1
record_global_vars()
def service_completed():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt
mc = scl
if n_rt > 0:
# Check RT queue
scl = mc + serviceTime_rt
s = 1
n_rt = n_rt - 1
elif n_nonrt > 0:
# Check nonRT queue
if pre_empted_service_time > 0:
scl = mc + pre_empted_service_time
pre_empted_service_time = 0
else:
scl = mc + serviceTime_nonrt
s = 2
n_nonrt = n_nonrt - 1
else:
s = 0
record_global_vars()
def record_global_vars():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt, df
series_obj = pd.Series( [mc,rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time], index=df.columns )
df = df.append(series_obj, ignore_index=True)
#print(df.iloc[-1].to_frame().T)
def export_to_excel():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt, df
writer = pd.ExcelWriter('vaputra_task2pt2.xlsx')
df.to_excel(writer)
writer.save()
print('Output is written successfully to Excel File.')
def randomized_param():
global mc, rtcl, nonRTCL, n_rt, n_nonrt, scl, s, pre_empted_service_time, iat_rt, iat_nonrt, serviceTime_rt, serviceTime_nonrt, df
iat_rt = -(iat_rt_mu)*math.log(random.random())
iat_nonrt = -(iat_nonrt_mu)*math.log(random.random())
serviceTime_rt = -(serviceTime_rt_mu)*math.log(random.random())
serviceTime_nonrt = -(serviceTime_nonrt_mu)*math.log(random.random())
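# Note: -mu * ln(U) with U ~ Uniform(0, 1) is the inverse-transform method for
# sampling an exponential random variable with mean mu; e.g. the sample mean of
#   [-(10) * math.log(random.random()) for _ in range(100000)]
# is approximately 10 (illustrative check only, not used by the simulation itself).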
def next_event():
# {event(int):value}
clock_dict= {0:rtcl, 1:nonRTCL, 2:scl}
sorted_clock = sorted(clock_dict.items(), key=lambda kv: kv[1])
sorted_clock_dict = collections.OrderedDict(sorted_clock)
if s == 0:
sorted_clock_dict.pop(2)
return next(iter(sorted_clock_dict))
main()
# clock_dict= {0:rtcl, 1:nonRTCL, 2:scl}
# sorted_clock = sorted(clock_dict.items(), key=lambda kv: kv[1])
# sorted_clock_dict = collections.OrderedDict(sorted_clock)
# print(next(iter( sorted_clock_dict.items() )))
# s_event = sched.scheduler(time.time, time.sleep)
# def do_something(sc):
# global mc
# print("Doing stuff...")
# # do your stuff
# mc = mc + 1
# print(mc)
# s_event.enter(1, 1, do_something, (sc,))
# s_event.enter(1, 1, do_something, (s_event,))
# s_event.run()
| 2.796875
| 3
|
models/object.py
|
matheuspb/igs
| 1
|
12774800
|
""" This module contains a class that describes an object in the world. """
import numpy as np
class Object:
"""
Object is a simple wireframe composed of multiple points connected by
lines that can be drawn in the viewport.
"""
TOTAL_OBJECTS = -1
def __init__(self, points=None, name=None, color=None):
self._points = [] if points is None else points
self._name = self.default_name() if name is None else name
self._color = (0, 0, 0) if color is None else color
Object.TOTAL_OBJECTS += 1
@staticmethod
def default_name():
""" Default name for new objects. """
return "object{}".format(Object.TOTAL_OBJECTS + 1)
@property
def points(self):
""" The points in the wireframe. """
return self._points
@property
def name(self):
""" Name of the object. """
return self._name
@property
def color(self):
""" Color of the object. """
return self._color
@property
def center(self):
""" Center of the object. """
points = set()
for face in self._points:
points.update(face)
x_points = [point[0] for point in points]
y_points = [point[1] for point in points]
z_points = [point[2] for point in points]
return \
(np.average(x_points), np.average(y_points), np.average(z_points))
def _transform(self, matrix, center=None, offset=None):
center = self.center if center is None else center
# move object to center
operation_matrix = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[-center[0], -center[1], -center[2], 1],
])
# perform operation
operation_matrix = operation_matrix.dot([
matrix[0] + [0],
matrix[1] + [0],
matrix[2] + [0],
([0, 0, 0] if offset is None else offset) + [1],
])
# move object back to original position
operation_matrix = operation_matrix.dot([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[center[0], center[1], center[2], 1],
])
for fpos, face in enumerate(self._points):
for ppos, point in enumerate(face):
new_point = np.dot(point + (1,), operation_matrix)
self._points[fpos][ppos] = tuple(new_point[:3])
def move(self, offset):
""" Moves the object by an offset = (x, y). """
self._transform(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
], center=None, offset=list(offset))
def zoom(self, factor):
""" Zooms in the object by 'factor' times. """
self._transform(
[
[factor, 0, 0],
[0, factor, 0],
[0, 0, factor],
])
@staticmethod
def generate_rotation_matrix(x_angle, y_angle, z_angle):
""" Generates the matrix that rotates points. """
return np.array([
[1, 0, 0],
[0, np.cos(x_angle), -np.sin(x_angle)],
[0, np.sin(x_angle), np.cos(x_angle)],
]).dot([
[np.cos(y_angle), 0, np.sin(y_angle)],
[0, 1, 0],
[-np.sin(y_angle), 0, np.cos(y_angle)],
]).dot([
[np.cos(z_angle), -np.sin(z_angle), 0],
[np.sin(z_angle), np.cos(z_angle), 0],
[0, 0, 1],
]).tolist()
def rotate(self, x_angle, y_angle, z_angle, center=None):
""" Rotates the object around center, the angle is in radians. """
self._transform(
Object.generate_rotation_matrix(x_angle, y_angle, z_angle),
center)
def project(self):
""" Projects the 3D objects to 2D. Using perspective projection. """
def _project(point):
return (
point[0]/(point[2]/Window.COP_DISTANCE+1),
point[1]/(point[2]/Window.COP_DISTANCE+1))
self._points = [list(map(_project, face)) for face in self._points]
def clip(self, window):
""" Weiler-Atherton polygon clipping algorithm. """
def connect_points(clipped, side1, side2, window):
""" Connects points of the window. """
edge = side1
while edge != side2:
clipped.append(window.points[0][edge])
edge = (edge - 1) % 4
boundaries = window.real_boundaries
clipped = []
for face in self._points:
new_face = []
entered, exited = None, None
for i in range(len(face) - 1):
points, side = Object._clip_line(
face[i], face[i + 1], *boundaries[0], *boundaries[1])
if not points: # clipped line is outside window
continue
if side[0] is not None: # entered
if exited is not None:
connect_points(new_face, exited, side[0], window)
else:
entered = side[0]
if side[1] is not None: # exited
exited = side[1]
new_face.append(points[0])
new_face.append(points[1])
else:
new_face.append(points[0])
if new_face and face[0] == face[-1]:
if entered is not None:
connect_points(new_face, exited, entered, window)
new_face.append(new_face[0])
clipped.append(new_face)
self._points = clipped
@staticmethod
def _clip_line(point1, point2, xmin, ymin, xmax, ymax):
""" Liang-Barsky line clipping algorithm. """
deltax, deltay = point2[0] - point1[0], point2[1] - point1[1]
deltas = [-deltax, -deltay, deltax, deltay] # p
distances = [ # q
point1[0] - xmin, point1[1] - ymin,
xmax - point1[0], ymax - point1[1]]
ratios = np.divide(distances, deltas) # r
pct1, pct2 = 0, 1 # how much of the line is inside the window
side = [None, None]
for i in range(4):
if deltas[i] == 0 and distances[i] < 0:
return (), side
if deltas[i] < 0:
if ratios[i] > pct1: # entered
side[0] = i
pct1 = ratios[i]
if deltas[i] > 0:
if ratios[i] < pct2: # exited
side[1] = i
pct2 = ratios[i]
if pct1 > pct2:
return (), side
clipped = (
tuple(np.add((point1[0], point1[1]), (pct1*deltax, pct1*deltay))),
tuple(np.add((point1[0], point1[1]), (pct2*deltax, pct2*deltay))),
)
return clipped, side
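    # Quick illustrative check of the Liang-Barsky clipper (hand-worked example):
    # clipping the segment (-1, -1) -> (2, 2) against the box [0, 1] x [0, 1] gives
    #   Object._clip_line((-1, -1), (2, 2), 0, 0, 1, 1)
    #   # -> (((0.0, 0.0), (1.0, 1.0)), [0, 2])  (up to floating-point error)
    # i.e. the clipped segment runs corner to corner, entering via edge 0 (left)
    # and exiting via edge 2 (right).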
@staticmethod
def build_from_file(path):
""" Returns objects described in an OBJ file. """
with open(path) as obj:
raw_file = obj.read()
file_lines = [line.split(" ") for line in raw_file.split("\n")]
vertices = {}
faces = []
for number, line in enumerate(file_lines):
if line[0] == "v":
vertices[number + 1] = tuple(map(float, line[1:]))
if line[0] == "f":
face = []
for index in line[1:]:
face.append(vertices[int(index)])
face.append(vertices[int(line[1])])
faces.append(face)
return Object(points=faces)
class Window(Object):
"""
The window object.
This object delimits what should be drawn in the viewport. Moving and
rescaling it has the effect to change which portion of the world is
drawn at the viewport.
"""
BORDER = 0.05
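    # COP_DISTANCE (the center-of-projection distance used by Object.project())
    # is not defined anywhere in this excerpt; the value below is only an
    # assumed placeholder so that projection has something to work with.
    COP_DISTANCE = 500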
def __init__(self, width, height):
points = [
(-width/2, height/2, 0),
(-width/2, -height/2, 0),
(width/2, -height/2, 0),
(width/2, height/2, 0),
]
points.append(points[0])
super().__init__([points], "window", (0, 0, 0))
self._rotation_matrix = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
@property
def expanded_boundaries(self):
""" Boundaries a little bigger than the actual window. """
width = self._points[0][3][0] - self._points[0][1][0]
height = self._points[0][3][1] - self._points[0][1][1]
factor = np.multiply((width, height), Window.BORDER)
return (
np.subtract(self._points[0][1], factor),
np.add(self._points[0][3], factor))
@property
def real_boundaries(self):
""" Returns windows' bottom left and upper right coordinates. """
return (self._points[0][1], self._points[0][3])
@property
def inv_rotation_matrix(self):
""" This matrix rotates the window back to its original position. """
return np.linalg.inv(self._rotation_matrix).tolist()
def move(self, offset):
# rotate offset vector to move window relative to its own directions
offset = np.dot(offset, self._rotation_matrix)
super().move(offset)
def zoom(self, factor):
# save original state
original_points = self._points.copy()
# apply the zoom operation
super().zoom(factor**(-1))
# find new window size
minimum, maximum = self.real_boundaries
width = np.abs(maximum[0] - minimum[0])
height = np.abs(maximum[1] - minimum[1])
# if zoom was exceeded, go back to original state and raise an error
if width < 10 and height < 10:
self._points = original_points
raise RuntimeError("Maximum zoom in exceeded")
def rotate(self, x_angle, y_angle, z_angle, center=None):
# find M = R^-1 * T * R
# R is the rotation matrix, it saves the rotation state of the window
# T is the matrix of the rotation that is being applied
matrix = Object.generate_rotation_matrix(x_angle, y_angle, z_angle)
matrix = np.dot(self.inv_rotation_matrix, matrix)
matrix = np.dot(matrix, self._rotation_matrix)
self._transform(matrix.tolist())
# update rotation matrix
self._rotation_matrix = np.dot(self._rotation_matrix, matrix)
def clip(self, _):
pass
class Curve(Object):
""" A Bezier curve with four control points. """
def __init__(self, points, name=None, color=None):
curve = Curve._generate_curve(points)
curve.append(curve[-1]) # add stub point for clipping
super().__init__(
points=[curve], name=name, color=color)
@staticmethod
def _generate_curve(points):
def f(t, i):
return np.array([t**3, t**2, t, 1]).dot(np.array([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 3, 0, 0],
[1, 0, 0, 0],
])).dot(np.array([p[i] for p in points]))
step = 0.02
x_points = [f(t, 0) for t in np.arange(0, 1+step, step)]
y_points = [f(t, 1) for t in np.arange(0, 1+step, step)]
z_points = [f(t, 2) for t in np.arange(0, 1+step, step)]
return list(zip(x_points, y_points, z_points))
class Spline(Object):
""" A Spline curve with arbitrary amount of control points. """
def __init__(self, points, name=None, color=None):
curves = []
for i in range(len(points) - 3):
# build a curve for every four control points
curve = Spline._generate_curve(points[i:i+4])
curve.append(curve[-1]) # add stub point for clipping
curves.append(curve)
super().__init__(
points=curves, name=name, color=color)
@staticmethod
def _generate_curve(points):
coef = np.multiply(1/6, np.array([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 0, 3, 0],
[1, 4, 1, 0],
])).dot(np.array(points))
number_of_points = 50
delta = 1/number_of_points
deltas = np.array([
[0, 0, 0, 1],
[delta**3, delta**2, delta, 0],
[6*delta**3, 2*delta**2, 0, 0],
[6*delta**3, 0, 0, 0],
]).dot(coef)
points = [tuple(deltas[0])]
for _ in range(number_of_points):
# update coordinates using forward differences
deltas[0] += deltas[1]
deltas[1] += deltas[2]
deltas[2] += deltas[3]
points.append(tuple(deltas[0]))
return points
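# Illustrative usage (a sketch; the control points below are made up): build a
# Bezier curve from four control points and transform it like any other Object.
#
#   bezier = Curve([(0, 0, 0), (10, 20, 0), (30, 20, 0), (40, 0, 0)], name="bezier")
#   bezier.move((5, 5, 0))
#   bezier.rotate(0, 0, np.pi / 4)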
| 3.4375
| 3
|
sited_py/lib/org_noear_siteder_dao_engine_sdVewModel_BookSdViewModel.py
|
wistn/sited_py
| 0
|
12774801
|
<reponame>wistn/sited_py
# -*- coding: UTF-8 -*-
"""
Author:wistn
since:2020-09-23
LastEditors:Do not edit
LastEditTime:2021-03-04
Description:
"""
from .org_noear_siteder_dao_engine_DdSource import DdSource
from .mytool import TextUtils
from .android_util_Log import Log
from .org_noear_siteder_viewModels_ViewModelBase import ViewModelBase
from .org_noear_siteder_models_SectionModel import SectionModel
from .noear_snacks_ONode import ONode
class BookSdViewModel(ViewModelBase):
def __init__(self, url):
super().__init__()
self.sections = []
self.name = None
self.author = None
self.intro = None
self.logo = None
self.updateTime = None
        self.isSectionsAsc = False  # whether the output sections are in ascending order
self.bookUrl = url
# @Override
def loadByConfig(self, config):
pass
# @Override
def loadByJson(self, config, *jsons):
        # Java version: (String... jsons) is a variable-length argument list taking zero or more String objects, or a single String[].
if jsons == None or jsons.__len__() == 0:
return
        # Python version: (*jsons) is a tuple of variadic arguments; check type(jsons[0]) == list to tell a single Java-style String[] apart from multiple String arguments
if jsons.__len__() == 1 and type(jsons[0]) == list:
jsons = jsons[0]
for json in jsons:
self.loadByJsonData(config, json)
def loadByJsonData(self, config, json):
data = ONode.tryLoad(json)
        # Note: the Java version's ViewModels use the custom ONode and JsonReader classes; for text plugins whose output needs escape characters (a niche case) the behaviour differs from Python's json.loads
if DdSource.isBook(config):
if TextUtils.isEmpty(self.name):
self.name = data.get("name").getString()
self.author = data.get("author").getString()
self.intro = data.get("intro").getString()
self.logo = data.get("logo").getString()
self.updateTime = data.get("updateTime").getString()
self.isSectionsAsc = data.get("isSectionsAsc").getInt() > 0
            # sections default to descending order
sl = data.get("sections").asArray()
for n in sl:
sec = SectionModel()
sec.name = n.get("name").getString()
sec.url = n.get("url").getString()
sec.orgIndex = self.total()
self.sections.append(sec)
self.onAddItem(sec)
Log.v("loadByJsonData:", json)
# --------------
def clear(self):
self.sections = []
def total(self):
return self.sections.__len__()
def get(self, idx):
if self.sections == None:
return None
len = self.sections.__len__()
if idx >= len or idx < 0:
return None
else:
return self.sections[idx]
def onAddItem(self, sec):
pass
| 2.0625
| 2
|
Python/tangshi.py
|
jmworsley/TangShi
| 1
|
12774802
|
<filename>Python/tangshi.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import re
import codecs
ping = re.compile(u'.平')
shang = re.compile(u'上聲')
ru = re.compile(u'入')
qu = re.compile(u'去')
mydict = { }
# f = open("../Data/TangRhymesMap.csv")
f = codecs.open("../Data/TangRhymesMap.csv", "r", "utf-8")
for line in f:
line = line.rstrip()
value, key = line.split(",")
#key = char.decode("utf-8")
#value = rhyme.decode("utf-8")
mydict[key] = value
f = codecs.open("../Data/SamplePoem.txt", "r", "utf-8")
for line in f:
line = line.rstrip()
for key in line:
if key not in mydict:
print key
elif ping.match(mydict[key]):
print key + " = " + " Ping"
elif shang.match(mydict[key]):
print key + " = " + " Shang"
elif qu.match(mydict[key]):
print key + " = " + " Qu"
elif ru.match(mydict[key]):
print key + " = " + " Ru"
else:
print key + " = " + " *"
| 3.3125
| 3
|
app/views/handlers/auth_handler.py
|
pwgraham91/Cloud-Contact
| 3
|
12774803
|
<filename>app/views/handlers/auth_handler.py
import flask
from requests_oauthlib import OAuth2Session
from config import Auth
def get_google_auth(state=None, token=None):
if token:
return OAuth2Session(Auth.CLIENT_ID, token=token)
if state:
return OAuth2Session(
Auth.CLIENT_ID,
state=state,
redirect_uri=Auth.REDIRECT_URI,
scope=['email']
)
oauth = OAuth2Session(
Auth.CLIENT_ID,
redirect_uri=Auth.REDIRECT_URI,
scope=['email']
)
return oauth
def get_google_authorization_url():
current_user = flask.g.user
if current_user.is_authenticated:
return
google = get_google_auth()
auth_url, state = google.authorization_url(Auth.AUTH_URI)
flask.session['oauth_state'] = state
return auth_url
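# Illustrative callback sketch (assumes Auth also defines TOKEN_URI and
# CLIENT_SECRET, which are not shown in this module):
#
#   def handle_google_callback():
#       google = get_google_auth(state=flask.session['oauth_state'])
#       token = google.fetch_token(Auth.TOKEN_URI,
#                                  client_secret=Auth.CLIENT_SECRET,
#                                  authorization_response=flask.request.url)
#       return get_google_auth(token=token)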
| 2.625
| 3
|
linehaul/_server.py
|
dstufft/linehaul
| 0
|
12774804
|
#!/usr/bin/env python3.5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
class Server:
def __init__(self, *args, loop=None, **kwargs):
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._args = args
self._kwargs = kwargs
async def __aenter__(self):
self._server = await self._loop.create_server(
*self._args,
**self._kwargs,
)
return self._server
async def __aexit__(self, exc_type, exc, tb):
if self._server.sockets is not None:
self._server.close()
await self._server.wait_closed()
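# Illustrative usage sketch: Server wraps loop.create_server, so it takes the
# same positional arguments, e.g. a protocol factory plus host and port.
# EchoProtocol here is a made-up example, not part of linehaul.
#
#   class EchoProtocol(asyncio.Protocol):
#       def connection_made(self, transport):
#           self.transport = transport
#       def data_received(self, data):
#           self.transport.write(data)
#
#   async def serve_for_a_minute():
#       async with Server(EchoProtocol, "127.0.0.1", 8000) as server:
#           await asyncio.sleep(60)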
| 2.59375
| 3
|
tibber_aws/__init__.py
|
tibber/tibber-pyAws
| 0
|
12774805
|
# flake8: noqa
from .aws_base import get_aiosession
from .aws_lambda import invoke as lambda_invoke
from .aws_queue import Queue
from .aws_metadata import get_instance_id
from .s3 import STATE_NOT_EXISTING, STATE_OK, STATE_PRECONDITION_FAILED, S3Bucket
from .secret_manager import get_secret, get_secret_parser
from .sns import Topic
| 1.234375
| 1
|
Code/Visualizations/visualizer.py
|
jaspertaylor-projects/QuantumLatticeGasAlgorithm
| 1
|
12774806
|
<reponame>jaspertaylor-projects/QuantumLatticeGasAlgorithm
"Importing visualization files..."
import numpy as np
import os
import cv2
os.environ['CUDA_DEVICE'] = str(0) #Set CUDA device, starting at 0
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from importlib import import_module
class visualizer:
def __init__(self, technique, global_vars, **kwargs):
print("here")
self.global_vars = global_vars
self.technique = technique
self.directory_name = global_vars["base_directory_name"]
self.already_visualized = []
self.frames = self.get_frames()
self.image_dir = self.make_image_dir()
self.ani_dir = self.make_ani_dir()
self.frame_maker = None
self.animation_maker = None
self.fig = None
self.set_vis_technique()
def make_image_dir(self):
dn = self.directory_name + "Images/" +self.technique +"/"
if not os.path.exists(dn):
os.makedirs(dn)
return dn
def make_ani_dir(self):
dn = self.directory_name + "Animation/" +self.technique +"/"
if not os.path.exists(dn):
os.makedirs(dn)
return dn
def get_frames(self):
arr = os.listdir(self.directory_name + "Data/")
arr_new = [x for x in arr if x not in self.already_visualized]
arr_new.sort()
return arr_new
def set_vis_technique(self):
vis_file = import_module("Code.Visualizations." + self.technique)
self.frame_maker = vis_file.make_frame
def visualize(self, **kwargs):
for frame in self.frames:
print (frame)
self.frame_maker(self.directory_name + "Data/" + frame, frame, self.image_dir, self.frames, self.global_vars, **kwargs)
def animate(self, fps = 2, **kwargs):
name = ''
dirs = [x[0] for x in os.walk(self.image_dir)]
files = [x[2] for x in os.walk(self.image_dir)]
for idx, d in enumerate(dirs):
if len(files[idx]) > 0:
images = sorted(files[idx])
if (d != self.image_dir):
name = d.split("/")[-1]
frame = cv2.imread(os.path.join(d, images[0]))
height, width, layers = frame.shape
save_name = self.get_animation_name(self.ani_dir, name, **kwargs)
print(save_name)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter(save_name, fourcc, fps, (width,height))
for image in images:
print (os.path.join(d, image))
video.write(cv2.imread(os.path.join(d, image)))
cv2.destroyAllWindows()
video.release()
def get_animation_name(self, ani_dir, name, vid_fmt = "avi" , **kwargs):
if name == '':
return ani_dir + "animation." + vid_fmt
else:
return self.ani_dir + name + "_animation." + vid_fmt
| 2.46875
| 2
|
model.py
|
utting/whiley2boogie
| 1
|
12774807
|
# -*- coding: utf-8 -*-
"""
Python module for recording Boogie models and printing them in Whiley syntax.
Use boogie /printModel:0 wval.bpl prog.bpl
@author: <NAME>
"""
import sys
class Model:
"""Stores the details of one Boogie counter-example.
Provides facilities for simplifying the model to improve readability.
and for printing it in Whiley-like syntax.
"""
def __init__(self):
"""Create an empty model."""
self.globals = {} # the 'name -> WVal' mapping of the model
self.records = {} # stores the 'Select_[WField]WVal' function
self.field_names = {} # maps WField values to field names
        self.toInt = {}     # concrete value maps (toInt/toBool/toRecord) are
        self.toBool = {}    # filled in from the Boogie output by read_models
        self.toRecord = {}
def concretise_dict(self, dictt):
"""Helper function for the main concretise. """
for (name, val) in dictt.items():
for func in (self.toInt, self.toBool, self.toRecord):
if val in func:
# print("replacing", val, "by", func[val])
dictt[name] = func[val]
def concretise(self):
"""Replace WVals by concrete values where possible.
This improves readability, so should be done before printing.
"""
self.concretise_dict(self.globals)
for (name, rec) in self.records.items():
self.concretise_dict(rec)
def whiley_compound(self, val) -> str:
"""Converts records and arrays to a Whiley-like syntax."""
if "[WField]" in val and val in self.records:
rec = self.records[val]
fields = list(rec.keys())
fields.sort()
pairs = [f + ": " + rec[f] for f in fields]
return "{ " + ", ".join(pairs) + " }"
elif "[Int]" in val:
return "[ " + val + " ]"
else:
return val # not a compound?
def __str__(self):
"""Returns a human-readable version of the model.
It will be even more readable if you call concretise first.
Global variables are printed sorted by their names.
"""
result = "Model:\n"
names = list(self.globals.keys())
names.sort()
for name in names:
val = self.globals[name]
if val.startswith("|"):
val = self.whiley_compound(val)
result += " {:18s} = {}\n".format(name, val)
return result
# %% Model Reader
def ignore_pair(name, val):
"""ignores this pair of inputs."""
pass
def store_pair(mapping, name, val):
"""Store the (name,val) pair into the given dictionary."""
mapping[name] = val
def store_globals(model, name, val):
"""Store the global (name,val) pair into the model."""
if name.startswith("$"):
model.field_names[val] = name
elif name.startswith("%lbl%"):
pass # ignore these
else:
# print("global:", name, val)
model.globals[name] = val
def store_records(model, name, val):
"""Store a 'record field -> val' triple into model.records.
Note that 'records' contains a nested dictionary for each record.
"""
lhs = name.split(" ")
if len(lhs) == 2:
if lhs[0] not in model.records:
model.records[lhs[0]] = {}
rec = model.records[lhs[0]]
rec[model.field_names[lhs[1]]] = val
else:
# print("ignored record select:", lhs)
pass
def read_models(filename) -> list:
"""Reads Boogie output and parses the counter-example models.
These are returned in the result list.
Other lines are printed unchanged.
"""
result = []
curr_model = None
curr_func = ignore_pair
infile = open(filename, "r")
lines = (s.strip().split(" -> ") for s in infile.readlines())
for words in lines:
if words == ["*** MODEL"]:
curr_model = Model()
curr_func = lambda n,v: store_globals(curr_model, n, v)
elif words == ["*** END_MODEL"]:
result.append(curr_model)
curr_model = None
curr_func = ignore_pair
elif len(words) == 2 and words[1] == "{":
# this is the start of a mapping function
if words[0].startswith("to"):
curr_dict = {}
# print("==== STARTING", words[0], "====")
setattr(curr_model, words[0], curr_dict)
curr_func = lambda a,b: store_pair(curr_dict, a, b)
elif words[0] == "Select_[WField]WVal":
# print("==== STARTING select WField ====")
curr_func = lambda n,v: store_records(curr_model, n, v)
else:
# print("==== ignoring ", words[0])
curr_func = ignore_pair
elif len(words) == 2:
curr_func(words[0], words[1])
elif words == ["}"]:
curr_func = ignore_pair
else:
print(" -> ".join(words)) # print the original line
return result
def main(filename):
models = read_models(filename)
for m in models:
m.concretise()
print()
print(m)
if __name__ == "__main__":
# execute only if run as a script
if len(sys.argv) == 2:
main(sys.argv[1])
else:
print("Usage: boogie_model.txt")
| 3.28125
| 3
|
pycrostates/utils/utils.py
|
mscheltienne/pycrostates
| 1
|
12774808
|
"""Utils functions."""
from copy import deepcopy
import mne
import numpy as np
from ._logs import logger
# TODO: Add test for this. Also compare speed with latest version of numpy.
# Also compared speed with a numba implementation.
def _corr_vectors(A, B, axis=0):
# based on:
# https://github.com/wmvanvliet/mne_microstates/blob/master/microstates.py
# written by <NAME> <<EMAIL>>
"""Compute pairwise correlation of multiple pairs of vectors.
Fast way to compute correlation of multiple pairs of vectors without
    computing all pairs as one would with corr(A, B). Borrowed from Oli at
StackOverflow. Note the resulting coefficients vary slightly from the ones
obtained from corr due to differences in the order of the calculations.
(Differences are of a magnitude of 1e-9 to 1e-17 depending on the tested
data).
Parameters
----------
A : ndarray, shape (n, m)
The first collection of vectors
B : ndarray, shape (n, m)
The second collection of vectors
axis : int
The axis that contains the elements of each vector. Defaults to 0.
Returns
-------
corr : ndarray, shape (m, )
For each pair of vectors, the correlation between them.
"""
if A.shape != B.shape:
raise ValueError("All input arrays must have the same shape")
    # If maps is null, divide will not throw an error.
np.seterr(divide="ignore", invalid="ignore")
An = A - np.mean(A, axis=axis)
Bn = B - np.mean(B, axis=axis)
An /= np.linalg.norm(An, axis=axis)
Bn /= np.linalg.norm(Bn, axis=axis)
corr = np.sum(An * Bn, axis=axis)
corr = np.nan_to_num(corr, posinf=0, neginf=0)
np.seterr(divide="warn", invalid="warn")
return corr
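# Illustrative check (not used by the package): identical columns correlate
# perfectly, opposite columns anti-correlate.
#   A = np.random.RandomState(0).randn(10, 3)
#   _corr_vectors(A, A)   # -> approximately [1., 1., 1.]
#   _corr_vectors(A, -A)  # -> approximately [-1., -1., -1.]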
# TODO: To be removed when ChInfo is implemented.
def _copy_info(inst, sfreq):
ch_names = inst.info["ch_names"]
ch_types = [
mne.channel_type(inst.info, idx)
for idx in range(0, inst.info["nchan"])
]
new_info = mne.create_info(ch_names, sfreq=sfreq, ch_types=ch_types)
if inst.get_montage():
montage = inst.get_montage()
new_info.set_montage(montage)
return new_info
def _compare_infos(cluster_info, inst_info):
"""Check that channels in cluster_info are all present in inst_info."""
for ch in cluster_info["ch_names"]:
if ch not in inst_info["ch_names"]:
raise ValueError(
"Instance to segment into microstates sequence does not have "
"the same channels as the instance used for fitting."
)
# Extract loc arrays
cluster_loc = list()
for ch in cluster_info["chs"]:
cluster_loc.append((ch["ch_name"], deepcopy(ch["loc"])))
inst_loc = list()
for ch in inst_info["chs"]:
if ch["ch_name"] in cluster_info["ch_names"]:
inst_loc.append((ch["ch_name"], deepcopy(ch["loc"])))
cluster_loc = [loc[1] for loc in sorted(cluster_loc, key=lambda x: x[0])]
inst_loc = [loc[1] for loc in sorted(inst_loc, key=lambda x: x[0])]
# Compare loc
assert len(cluster_loc) == len(inst_loc) # sanity-check
for l1, l2 in zip(cluster_loc, inst_loc):
if not np.allclose(l1, l2, equal_nan=True):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels montage as the instance used for fitting. "
)
break
# Compare attributes in chs
cluster_kinds = list()
cluster_units = list()
cluster_coord_frame = list()
for ch in cluster_info["chs"]:
cluster_kinds.append((ch["ch_name"], ch["kind"]))
cluster_units.append((ch["ch_name"], ch["unit"]))
cluster_coord_frame.append((ch["ch_name"], ch["coord_frame"]))
inst_kinds = list()
inst_units = list()
inst_coord_frames = list()
for ch in inst_info["chs"]:
if ch["ch_name"] in cluster_info["ch_names"]:
inst_kinds.append((ch["ch_name"], ch["kind"]))
inst_units.append((ch["ch_name"], ch["unit"]))
inst_coord_frames.append((ch["ch_name"], ch["coord_frame"]))
cluster_kinds = [
elt[1] for elt in sorted(cluster_kinds, key=lambda x: x[0])
]
cluster_units = [
elt[1] for elt in sorted(cluster_units, key=lambda x: x[0])
]
cluster_coord_frame = [
elt[1] for elt in sorted(cluster_coord_frame, key=lambda x: x[0])
]
inst_kinds = [elt[1] for elt in sorted(inst_kinds, key=lambda x: x[0])]
inst_units = [elt[1] for elt in sorted(inst_units, key=lambda x: x[0])]
inst_coord_frames = [
elt[1] for elt in sorted(inst_coord_frames, key=lambda x: x[0])
]
if not all(
kind1 == kind2 for kind1, kind2 in zip(cluster_kinds, inst_kinds)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels kinds as the instance used for fitting. "
)
if not all(
unit1 == unit2 for unit1, unit2 in zip(cluster_units, inst_units)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels units as the instance used for fitting. "
)
if not all(
f1 == f2 for f1, f2 in zip(cluster_coord_frame, inst_coord_frames)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same coordinate frames as the instance used for fitting. "
)
| 3.234375
| 3
|
web/web/constants.py
|
pbvarga1/docker_opportunity
| 1
|
12774809
|
<reponame>pbvarga1/docker_opportunity
import os
DOCKER_HOST = os.environ.get('DOCKER_IP', '192.168.99.100')
DSN = f'http://9929242db8104494b679b60c94b0f96d@{DOCKER_HOST}:9000/2'
| 1.710938
| 2
|
backend/equipment/models.py
|
Vini1979/Engenharia_Software_IF977
| 0
|
12774810
|
from django.db import models
from django.utils import timezone
STATE_CHOICES = [
("Good", "Good"),
("Needs repair", "Needs repair"),
("In repair", "In repair"),
]
class Equipment(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Item(models.Model):
kind = models.ForeignKey("equipment.Equipment", on_delete=models.CASCADE, related_name="+")
person = models.ForeignKey("users.Person", on_delete=models.CASCADE, related_name="+")
code = models.CharField(max_length=500)
brand = models.CharField(max_length=200)
specifications = models.TextField()
series_number = models.CharField(max_length=200)
state = models.CharField(choices=STATE_CHOICES, default="Good", max_length=100)
registered_date = models.DateTimeField(default=timezone.now)
return_date = models.DateTimeField()
| 2.328125
| 2
|
BaekJoon Online Judge/step/3-For-Loop/[8393] sum.py
|
TyeolRik/CodingProblems
| 0
|
12774811
|
<reponame>TyeolRik/CodingProblems
# https://www.acmicpc.net/problem/8393
a = int(input())
result = 0
for i in range(a + 1):
result = result + i
print(result)
| 3.59375
| 4
|
gssapi/tests/test_raw.py
|
judilsteve/python-gssapi
| 84
|
12774812
|
<filename>gssapi/tests/test_raw.py<gh_stars>10-100
import copy
import ctypes
import ctypes.util
import os
import socket
import sys
import unittest
import gssapi.raw as gb
import gssapi.raw.misc as gbmisc
import k5test.unit as ktu
import k5test as kt
from collections.abc import Set
TARGET_SERVICE_NAME = b'host'
FQDN = (
'localhost' if sys.platform == 'darwin' else socket.getfqdn()
).encode('utf-8')
SERVICE_PRINCIPAL = TARGET_SERVICE_NAME + b'/' + FQDN
if sys.platform == 'darwin':
TARGET_SERVICE_NAME += b"@" + FQDN
class _GSSAPIKerberosTestCase(kt.KerberosTestCase):
@classmethod
def setUpClass(cls):
super(_GSSAPIKerberosTestCase, cls).setUpClass()
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
cls.realm.kinit(svc_princ, flags=['-k'])
cls._init_env()
cls.USER_PRINC = cls.realm.user_princ.split('@')[0].encode("UTF-8")
cls.ADMIN_PRINC = cls.realm.admin_princ.split('@')[0].encode("UTF-8")
cls.KRB5_LIB_PATH = os.environ.get("GSSAPI_KRB5_MAIN_LIB", None)
@classmethod
def _init_env(cls):
cls._saved_env = copy.deepcopy(os.environ)
for k, v in cls.realm.env.items():
os.environ[k] = v
@classmethod
def _restore_env(cls):
for k in copy.deepcopy(os.environ):
if k in cls._saved_env:
os.environ[k] = cls._saved_env[k]
else:
del os.environ[k]
cls._saved_env = None
@classmethod
def tearDownClass(cls):
super(_GSSAPIKerberosTestCase, cls).tearDownClass()
cls._restore_env()
class TestBaseUtilities(_GSSAPIKerberosTestCase):
def setUp(self):
self.realm.kinit(SERVICE_PRINCIPAL.decode("UTF-8"), flags=['-k'])
def test_indicate_mechs(self):
mechs = gb.indicate_mechs()
self.assertIsInstance(mechs, set)
self.assertIn(gb.MechType.kerberos, mechs)
def test_import_name(self):
imported_name = gb.import_name(TARGET_SERVICE_NAME)
self.assertIsInstance(imported_name, gb.Name)
gb.release_name(imported_name)
def test_canonicalize_export_name(self):
imported_name = gb.import_name(self.ADMIN_PRINC,
gb.NameType.kerberos_principal)
canonicalized_name = gb.canonicalize_name(imported_name,
gb.MechType.kerberos)
self.assertIsInstance(canonicalized_name, gb.Name)
exported_name = gb.export_name(canonicalized_name)
self.assertIsInstance(exported_name, bytes)
self.assertGreater(len(exported_name), 0)
def test_duplicate_name(self):
orig_name = gb.import_name(TARGET_SERVICE_NAME)
new_name = gb.duplicate_name(orig_name)
self.assertIsNotNone(new_name)
self.assertTrue(gb.compare_name(orig_name, new_name))
def test_display_name(self):
imported_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
displ_resp = gb.display_name(imported_name)
self.assertIsNotNone(displ_resp)
displayed_name, out_type = displ_resp
self.assertIsInstance(displayed_name, bytes)
self.assertEqual(displayed_name, TARGET_SERVICE_NAME)
self.assertEqual(out_type, gb.NameType.hostbased_service)
# NB(directxman12): we don't test display_name_ext because the krb5 mech
# doesn't actually implement it
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
    @ktu.krb_provider_test(['mit'], 'Heimdal does not implement it for krb5')
def test_inquire_name_not_mech_name(self):
base_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
inquire_res = gb.inquire_name(base_name)
self.assertIsNotNone(inquire_res)
self.assertFalse(inquire_res.is_mech_name)
self.assertIsNone(inquire_res.mech)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
    @ktu.krb_provider_test(['mit'], 'Heimdal does not implement it for krb5')
def test_inquire_name_mech_name(self):
base_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
mech_name = gb.canonicalize_name(base_name, gb.MechType.kerberos)
inquire_res = gb.inquire_name(mech_name)
self.assertIsNotNone(inquire_res)
self.assertTrue(inquire_res.is_mech_name)
self.assertIsInstance(inquire_res.mech, gb.OID)
self.assertEqual(inquire_res.mech, gb.MechType.kerberos)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.gssapi_extension_test('rfc6680_comp_oid',
'RFC 6680 (COMPOSITE_EXPORT OID)')
def test_import_export_name_composite_no_attrs(self):
base_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = gb.canonicalize_name(base_name,
gb.MechType.kerberos)
exported_name = gb.export_name_composite(canon_name)
self.assertIsInstance(exported_name, bytes)
imported_name = gb.import_name(exported_name,
gb.NameType.composite_export)
self.assertIsInstance(imported_name, gb.Name)
# NB(directxman12): the greet_client plugin only allows for one value
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.krb_plugin_test('authdata', 'greet_client')
def test_inquire_name_with_attrs(self):
base_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = gb.canonicalize_name(base_name, gb.MechType.kerberos)
gb.set_name_attribute(canon_name, b'urn:greet:greeting',
[b'some greeting'])
inquire_res = gb.inquire_name(canon_name)
self.assertIsInstance(inquire_res.attrs, list)
self.assertEqual(inquire_res.attrs, [b"urn:greet:greeting"])
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.krb_plugin_test('authdata', 'greet_client')
def test_basic_get_set_delete_name_attributes_no_auth(self):
base_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = gb.canonicalize_name(base_name, gb.MechType.kerberos)
gb.set_name_attribute(canon_name, b'urn:greet:greeting',
[b'some other val'], complete=True)
get_res = gb.get_name_attribute(canon_name, b'urn:greet:greeting')
self.assertIsNotNone(get_res)
self.assertIsInstance(get_res.values, list)
self.assertEqual(get_res.values, [b"some other val"])
self.assertIsInstance(get_res.display_values, list)
self.assertEqual(get_res.display_values, get_res.values)
self.assertTrue(get_res.complete)
self.assertFalse(get_res.authenticated)
gb.delete_name_attribute(canon_name, b'urn:greet:greeting')
# NB(directxman12): the code below currently segfaults due to the way
# that krb5 and the krb5 greet plugin is written
# gb.get_name_attribute.should_raise(
# gb.exceptions.OperationUnavailableError, canon_name,
# 'urn:greet:greeting')
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.krb_plugin_test('authdata', 'greet_client')
def test_import_export_name_composite(self):
base_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = gb.canonicalize_name(base_name, gb.MechType.kerberos)
gb.set_name_attribute(canon_name, b'urn:greet:greeting', [b'some val'])
exported_name = gb.export_name_composite(canon_name)
self.assertIsInstance(exported_name, bytes)
        # TODO(directxman12): when you just import a token as composite, it
        # appears as a name whose text is all garbled, since it contains
        # all of the attributes, etc., but doesn't properly expose them.
        # Once it's canonicalized, the attributes reappear.  However, if you
        # just import it as a normal export, the attributes appear directly.
        # It is thus unclear what is going on.
# imported_name_raw = gb.import_name(exported_name,
# gb.NameType.composite_export)
# imported_name = gb.canonicalize_name(imported_name_r,
# gb.MechType.kerberos)
imported_name = gb.import_name(exported_name, gb.NameType.export)
self.assertIsInstance(imported_name, gb.Name)
get_res = gb.get_name_attribute(imported_name, b'urn:greet:greeting')
self.assertEqual(get_res.values, [b"some val"])
def test_compare_name(self):
service_name1 = gb.import_name(TARGET_SERVICE_NAME)
service_name2 = gb.import_name(TARGET_SERVICE_NAME)
init_name = gb.import_name(self.ADMIN_PRINC,
gb.NameType.kerberos_principal)
self.assertTrue(gb.compare_name(service_name1, service_name2))
self.assertTrue(gb.compare_name(service_name2, service_name1))
self.assertFalse(gb.compare_name(service_name1, init_name))
gb.release_name(service_name1)
gb.release_name(service_name2)
gb.release_name(init_name)
def test_display_status(self):
status_resp = gbmisc._display_status(0, False)
self.assertIsNotNone(status_resp)
status, ctx, cont = status_resp
self.assertIsInstance(status, bytes)
self.assertGreater(len(status), 0)
self.assertIsInstance(ctx, int)
self.assertIsInstance(cont, bool)
self.assertFalse(cont)
def test_acquire_creds(self):
name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
cred_resp = gb.acquire_cred(name)
self.assertIsNotNone(cred_resp)
creds, actual_mechs, ttl = cred_resp
self.assertIsInstance(creds, gb.Creds)
self.assertIn(gb.MechType.kerberos, actual_mechs)
if sys.platform != 'darwin':
self.assertIsInstance(ttl, int)
gb.release_name(name)
gb.release_cred(creds)
@ktu.gssapi_extension_test('cred_imp_exp', 'credentials import-export')
def test_cred_import_export(self):
creds = gb.acquire_cred(None).creds
token = gb.export_cred(creds)
imported_creds = gb.import_cred(token)
inquire_orig = gb.inquire_cred(creds, name=True)
inquire_imp = gb.inquire_cred(imported_creds, name=True)
self.assertTrue(gb.compare_name(inquire_orig.name, inquire_imp.name))
def test_context_time(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(target_name)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token1,
acceptor_creds=server_creds)
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=client_ctx,
input_token=server_tok)
ctx = client_resp2[0]
ttl = gb.context_time(ctx)
self.assertIsInstance(ttl, int)
self.assertGreater(ttl, 0)
def test_inquire_context(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(target_name)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token1,
acceptor_creds=server_creds)
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=client_ctx,
input_token=server_tok)
ctx = client_resp2[0]
inq_resp = gb.inquire_context(ctx)
self.assertIsNotNone(inq_resp)
src_name, target_name, ttl, mech_type, flags, local_est, is_open = \
inq_resp
self.assertIsInstance(src_name, gb.Name)
self.assertIsInstance(target_name, gb.Name)
self.assertIsInstance(ttl, int)
self.assertEqual(mech_type, gb.MechType.kerberos)
self.assertIsInstance(flags, Set)
self.assertGreater(len(flags), 0)
self.assertIsInstance(local_est, bool)
self.assertTrue(local_est)
self.assertIsInstance(is_open, bool)
self.assertTrue(is_open)
# NB(directxman12): We don't test `process_context_token` because
# there is no clear non-deprecated way to test it
@ktu.gssapi_extension_test('s4u', 'S4U')
def test_add_cred_impersonate_name(self):
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
password = self.realm.password('<PASSWORD>')
self.realm.kinit(self.realm.user_princ, password=password,
flags=["-f"])
name = gb.import_name(b"user", gb.NameType.kerberos_principal)
client_creds = gb.acquire_cred(name, usage="initiate").creds
cctx_res = gb.init_sec_context(
server_name, creds=client_creds,
flags=gb.RequirementFlag.delegate_to_peer)
self.realm.kinit(SERVICE_PRINCIPAL.decode("utf-8"), flags=["-k"])
server_creds = gb.acquire_cred(server_name, usage="both").creds
sctx_res = gb.accept_sec_context(cctx_res.token, server_creds)
self.assertTrue(gb.inquire_context(sctx_res.context).complete)
input_creds = gb.Creds()
imp_resp = gb.add_cred_impersonate_name(input_creds,
sctx_res.delegated_creds,
server_name,
gb.MechType.kerberos)
self.assertIsNotNone(imp_resp)
self.assertIsInstance(imp_resp, gb.AddCredResult)
self.assertIsInstance(imp_resp.creds, gb.Creds)
self.assertIn(gb.MechType.kerberos, imp_resp.mechs)
self.assertIsInstance(imp_resp.init_lifetime, int)
self.assertGreater(imp_resp.init_lifetime, 0)
self.assertIsInstance(imp_resp.accept_lifetime, int)
self.assertEqual(imp_resp.accept_lifetime, 0)
@ktu.gssapi_extension_test('s4u', 'S4U')
def test_acquire_creds_impersonate_name(self):
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
password = self.realm.password('<PASSWORD>')
self.realm.kinit(self.realm.user_princ, password=password,
flags=["-f"])
name = gb.import_name(b'user', gb.NameType.kerberos_principal)
client_creds = gb.acquire_cred(name, usage="initiate").creds
cctx_res = gb.init_sec_context(
server_name, creds=client_creds,
flags=gb.RequirementFlag.delegate_to_peer)
self.realm.kinit(SERVICE_PRINCIPAL.decode("utf-8"), flags=["-k"])
server_creds = gb.acquire_cred(server_name, usage='both').creds
sctx_res = gb.accept_sec_context(cctx_res.token, server_creds)
self.assertTrue(gb.inquire_context(sctx_res.context).complete)
imp_resp = gb.acquire_cred_impersonate_name(sctx_res.delegated_creds,
server_name)
self.assertIsInstance(imp_resp, gb.AcquireCredResult)
self.assertIsInstance(imp_resp.creds, gb.Creds)
self.assertIn(gb.MechType.kerberos, imp_resp.mechs)
self.assertIsInstance(imp_resp.lifetime, int)
self.assertGreater(imp_resp.lifetime, 0)
@ktu.gssapi_extension_test('s4u', 'S4U')
@ktu.krb_minversion_test('1.11',
'returning delegated S4U2Proxy credentials',
provider='mit')
def test_always_get_delegated_creds(self):
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
self.realm.kinit(svc_princ, flags=['-k', '-f'])
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_token = gb.init_sec_context(target_name).token
# if our acceptor creds have a usage of both, we get
# s4u2proxy delegated credentials
server_creds = gb.acquire_cred(None, usage='both').creds
server_ctx_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
self.assertIsNotNone(server_ctx_resp)
self.assertIsInstance(server_ctx_resp.delegated_creds, gb.Creds)
@ktu.gssapi_extension_test('rfc5588', 'RFC 5588')
def test_store_cred_acquire_cred(self):
# we need to acquire a forwardable ticket
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
self.realm.kinit(svc_princ, flags=['-k', '-f'])
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_creds = gb.acquire_cred(None, usage='initiate').creds
client_ctx_resp = gb.init_sec_context(
target_name, creds=client_creds,
flags=gb.RequirementFlag.delegate_to_peer)
client_token = client_ctx_resp[3]
server_creds = gb.acquire_cred(None, usage='accept').creds
server_ctx_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
deleg_creds = server_ctx_resp.delegated_creds
self.assertIsNotNone(deleg_creds)
store_res = gb.store_cred(deleg_creds, usage='initiate',
mech=gb.MechType.kerberos,
set_default=True, overwrite=True)
self.assertIsNotNone(store_res)
if self.realm.provider.lower() != 'heimdal':
# Heimdal does not return this info as expected
self.assertEqual(store_res.usage, "initiate")
self.assertIn(gb.MechType.kerberos, store_res.mechs)
deleg_name = gb.inquire_cred(deleg_creds).name
acq_resp = gb.acquire_cred(deleg_name, usage='initiate')
self.assertIsNotNone(acq_resp)
@ktu.gssapi_extension_test('cred_store', 'credentials store')
def test_store_cred_into_acquire_cred(self):
CCACHE = 'FILE:{tmpdir}/other_ccache'.format(tmpdir=self.realm.tmpdir)
KT = '{tmpdir}/other_keytab'.format(tmpdir=self.realm.tmpdir)
store = {b'ccache': CCACHE.encode('UTF-8'),
b'keytab': KT.encode('UTF-8')}
princ_name = 'service/cs@' + self.realm.realm
self.realm.addprinc(princ_name)
self.realm.extract_keytab(princ_name, KT)
self.realm.kinit(princ_name, None, ['-k', '-t', KT])
initial_creds = gb.acquire_cred(None, usage='initiate').creds
# NB(sross): overwrite because the ccache doesn't exist yet
expected_usage = 'initiate'
store_kwargs = {}
if self.realm.provider.lower() == 'heimdal':
expected_usage = 'both'
store_kwargs['mech'] = gb.MechType.kerberos
store_kwargs['usage'] = 'initiate'
store_res = gb.store_cred_into(store, initial_creds, overwrite=True,
**store_kwargs)
self.assertIsNotNone(store_res.mechs)
self.assertEqual(store_res.usage, expected_usage)
name = gb.import_name(princ_name.encode('UTF-8'))
retrieve_res = gb.acquire_cred_from(store, name)
self.assertIsNotNone(retrieve_res)
self.assertIsNotNone(retrieve_res.creds)
self.assertIsInstance(retrieve_res.creds, gb.Creds)
self.assertIn(gb.MechType.kerberos, retrieve_res.mechs)
self.assertIsInstance(retrieve_res.lifetime, int)
def test_add_cred(self):
if sys.platform == 'darwin':
self.skipTest('macOS fails to find the credential')
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_ctx_resp = gb.init_sec_context(target_name)
client_token = client_ctx_resp[3]
del client_ctx_resp # free all the things (except the token)!
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name, usage='both')[0]
server_ctx_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
input_creds = gb.Creds()
imp_resp = gb.add_cred(input_creds,
server_ctx_resp[1],
gb.MechType.kerberos)
self.assertIsNotNone(imp_resp)
new_creds, actual_mechs, output_init_ttl, output_accept_ttl = imp_resp
self.assertIsInstance(new_creds, gb.Creds)
self.assertIn(gb.MechType.kerberos, actual_mechs)
self.assertIsInstance(output_init_ttl, int)
self.assertIsInstance(output_accept_ttl, int)
# NB(sross): we skip testing add_cred with mutate for the same reasons
# that testing add_cred in the high-level API is skipped
def test_inquire_creds(self):
name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
cred = gb.acquire_cred(name).creds
inq_resp = gb.inquire_cred(cred)
self.assertIsNotNone(inq_resp)
self.assertIsInstance(inq_resp.name, gb.Name)
if self.realm.provider.lower() == 'heimdal':
name = gb.import_name(self.realm.host_princ.encode('utf-8'),
gb.NameType.kerberos_principal)
self.assertTrue(gb.compare_name(name, inq_resp.name))
if sys.platform == 'darwin':
self.assertEqual(inq_resp.usage, "accept")
else:
self.assertIsInstance(inq_resp.lifetime, int)
self.assertEqual(inq_resp.usage, "both")
self.assertIn(gb.MechType.kerberos, inq_resp.mechs)
def test_create_oid_from_bytes(self):
kerberos_bytes = gb.MechType.kerberos.__bytes__()
new_oid = gb.OID(elements=kerberos_bytes)
self.assertEqual(new_oid, gb.MechType.kerberos)
del new_oid # make sure we can dealloc
def test_error_dispatch(self):
err_code1 = gb.ParameterReadError.CALLING_CODE
err_code2 = gb.BadNameError.ROUTINE_CODE
err = gb.GSSError(err_code1 | err_code2, 0)
self.assertIsInstance(err, gb.NameReadError)
self.assertEqual(err.maj_code, err_code1 | err_code2)
def test_inquire_names_for_mech(self):
res = gb.inquire_names_for_mech(gb.MechType.kerberos)
self.assertIsNotNone(res)
self.assertIn(gb.NameType.kerberos_principal, res)
def test_inquire_mechs_for_name(self):
name = gb.import_name(self.USER_PRINC,
gb.NameType.kerberos_principal)
res = gb.inquire_mechs_for_name(name)
self.assertIsNotNone(res)
self.assertIn(gb.MechType.kerberos, res)
@ktu.gssapi_extension_test('password', 'Password')
def test_acquire_cred_with_password(self):
password = self.realm.password('<PASSWORD>')
self.realm.kinit(self.realm.user_princ, password=password)
name = gb.import_name(b'user', gb.NameType.kerberos_principal)
imp_resp = gb.acquire_cred_with_password(name,
password.encode('UTF-8'))
self.assertIsNotNone(imp_resp)
imp_creds, actual_mechs, output_ttl = imp_resp
self.assertIsNotNone(imp_creds)
self.assertIsInstance(imp_creds, gb.Creds)
if sys.platform == 'darwin':
self.assertIn(gb.OID.from_int_seq('1.3.6.1.5.2.5'), actual_mechs)
else:
self.assertIn(gb.MechType.kerberos, actual_mechs)
self.assertIsInstance(output_ttl, int)
@ktu.gssapi_extension_test('password_add', 'Password (add)')
def test_add_cred_with_password(self):
password = self.realm.password('<PASSWORD>')
self.realm.kinit(self.realm.user_princ, password=password)
name = gb.import_name(b'user', gb.NameType.kerberos_principal)
input_creds = gb.Creds()
imp_resp = gb.add_cred_with_password(input_creds, name,
gb.MechType.kerberos,
password.encode('UTF-8'))
self.assertIsNotNone(imp_resp)
new_creds, actual_mechs, output_init_ttl, output_accept_ttl = imp_resp
self.assertIsInstance(new_creds, gb.Creds)
self.assertIn(gb.MechType.kerberos, actual_mechs)
self.assertIsInstance(output_init_ttl, int)
self.assertIsInstance(output_accept_ttl, int)
@ktu.gssapi_extension_test('rfc5587', 'RFC 5587')
def test_rfc5587(self):
if sys.platform == "darwin":
self.skipTest("too many edge cases on macOS")
mechs = gb.indicate_mechs_by_attrs(None, None, None)
self.assertIsInstance(mechs, set)
self.assertGreater(len(mechs), 0)
# We're validating RFC 5587 here: by iterating over all mechanisms,
# we can query their attributes and build a mapping of attr->{mechs}.
# To test indicate_mechs_by_attrs, we can use this mapping and
# ensure that, when the attribute is placed in a slot, we get the
# expected result (e.g., attr in have --> mechs are present).
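        # For illustration only (the exact OIDs vary by GSSAPI build), the
        # loop below leaves attrs_dict shaped roughly like
        #   {<GSS_C_MA_MECH_CONCRETE OID>: {<krb5 OID>, ...}, ...}
        # i.e. each mechanism attribute maps to the set of mechs that
        # advertise it.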
attrs_dict = {}
known_attrs_dict = {}
for mech in mechs:
self.assertIsInstance(mech, gb.OID)
inquire_out = gb.inquire_attrs_for_mech(mech)
mech_attrs = inquire_out.mech_attrs
known_mech_attrs = inquire_out.known_mech_attrs
self.assertIsInstance(mech_attrs, set)
self.assertIsInstance(known_mech_attrs, set)
# Verify that we get data for every available
# attribute. Testing the contents of a few known
# attributes is done in test_display_mech_attr().
for mech_attr in mech_attrs:
self.assertIsInstance(mech_attr, gb.OID)
display_out = gb.display_mech_attr(mech_attr)
self.assertIsInstance(display_out.name, bytes)
self.assertIsInstance(display_out.short_desc, bytes)
self.assertIsInstance(display_out.long_desc, bytes)
if mech_attr not in attrs_dict:
attrs_dict[mech_attr] = set()
attrs_dict[mech_attr].add(mech)
for mech_attr in known_mech_attrs:
self.assertIsInstance(mech_attr, gb.OID)
display_out = gb.display_mech_attr(mech_attr)
self.assertIsInstance(display_out.name, bytes)
self.assertIsInstance(display_out.short_desc, bytes)
self.assertIsInstance(display_out.long_desc, bytes)
if mech_attr not in known_attrs_dict:
known_attrs_dict[mech_attr] = set()
known_attrs_dict[mech_attr].add(mech)
for attr, expected_mechs in attrs_dict.items():
attrs = set([attr])
mechs = gb.indicate_mechs_by_attrs(attrs, None, None)
self.assertGreater(len(mechs), 0)
self.assertEqual(mechs, expected_mechs)
mechs = gb.indicate_mechs_by_attrs(None, attrs, None)
for expected_mech in expected_mechs:
self.assertNotIn(expected_mech, mechs)
if self.realm.provider.lower() != 'heimdal':
# Heimdal doesn't fully implement gss_indicate_mechs_by_attrs
for attr, expected_mechs in known_attrs_dict.items():
attrs = set([attr])
mechs = gb.indicate_mechs_by_attrs(None, None, attrs)
self.assertGreater(len(mechs), 0)
self.assertEqual(mechs, expected_mechs)
@ktu.gssapi_extension_test('rfc5587', 'RFC 5587')
def test_display_mech_attr(self):
test_attrs = [
# oid, name, short_desc, long_desc
# Taken from krb5/src/tests/gssapi/t_saslname
[gb.OID.from_int_seq("1.3.6.1.5.5.13.24"),
b"GSS_C_MA_CBINDINGS", b"channel-bindings",
b"Mechanism supports channel bindings."],
[gb.OID.from_int_seq("1.3.6.1.5.5.13.1"),
b"GSS_C_MA_MECH_CONCRETE", b"concrete-mech",
b"Mechanism is neither a pseudo-mechanism nor a composite "
b"mechanism."]
]
if self.realm.provider.lower() == 'heimdal':
test_attrs[0][3] = b""
test_attrs[1][3] = b"Indicates that a mech is neither a " \
b"pseudo-mechanism nor a composite mechanism"
for attr in test_attrs:
display_out = gb.display_mech_attr(attr[0])
self.assertEqual(display_out.name, attr[1])
self.assertEqual(display_out.short_desc, attr[2])
self.assertEqual(display_out.long_desc, attr[3])
@ktu.gssapi_extension_test('rfc5801', 'SASL Names')
def test_sasl_names(self):
mechs = gb.indicate_mechs()
for mech in mechs:
out = gb.inquire_saslname_for_mech(mech)
out_smn = out.sasl_mech_name
if out_smn:
self.assertIsInstance(out_smn, bytes)
self.assertGreater(len(out_smn), 0)
out_mn = out.mech_name
self.assertIsInstance(out_mn, bytes)
out_md = out.mech_description
self.assertIsInstance(out_md, bytes)
# Heimdal fails with Unknown mech-code on sanon
if not (self.realm.provider.lower() == 'heimdal' and
mech.dotted_form == '1.3.6.1.4.1.5322.26.1.110'):
cmp_mech = gb.inquire_mech_for_saslname(out_smn)
self.assertIsNotNone(cmp_mech)
# For some reason macOS sometimes returns this for mechs
if not (sys.platform == 'darwin' and
cmp_mech.dotted_form == '1.2.752.43.14.2'):
self.assertEqual(cmp_mech, mech)
@ktu.gssapi_extension_test('rfc4178', 'Negotiation Mechanism')
def test_set_neg_mechs(self):
all_mechs = gb.indicate_mechs()
spnego_mech = gb.OID.from_int_seq("1.3.6.1.5.5.2")
krb5_mech = gb.OID.from_int_seq("1.2.840.113554.1.2.2")
ntlm_mech = gb.OID.from_int_seq("1.3.6.1.4.1.311.2.2.10")
server_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
username = gb.import_name(name=b"user",
name_type=gb.NameType.user)
krb5_client_creds = gb.acquire_cred(
None, usage='initiate',
mechs=[krb5_mech, spnego_mech]).creds
try:
ntlm_client_creds = gb.acquire_cred_with_password(
name=username,
password=b'password',
mechs=[ntlm_mech, spnego_mech]).creds
except gb.GSSError:
self.skipTest('You do not have the GSSAPI gss-ntlmssp mech '
'installed')
server_creds = gb.acquire_cred(server_name, usage='accept',
mechs=all_mechs).creds
neg_resp = gb.set_neg_mechs(server_creds, [ntlm_mech])
self.assertIsNone(neg_resp)
client_ctx_resp = gb.init_sec_context(server_name,
creds=ntlm_client_creds,
mech=spnego_mech)
client_token = client_ctx_resp.token
server_ctx_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
self.assertIsNotNone(server_ctx_resp)
client_ctx_resp = gb.init_sec_context(server_name,
creds=krb5_client_creds,
mech=spnego_mech)
client_token = client_ctx_resp.token
self.assertRaises(gb.GSSError, gb.accept_sec_context, client_token,
acceptor_creds=server_creds)
neg_resp = gb.set_neg_mechs(server_creds, [krb5_mech])
self.assertIsNone(neg_resp)
client_ctx_resp = gb.init_sec_context(server_name,
creds=krb5_client_creds,
mech=spnego_mech)
client_token = client_ctx_resp.token
server_ctx_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
self.assertIsNotNone(server_ctx_resp)
client_ctx_resp = gb.init_sec_context(server_name,
creds=ntlm_client_creds,
mech=spnego_mech)
client_token = client_ctx_resp.token
self.assertRaises(gb.GSSError, gb.accept_sec_context, client_token,
acceptor_creds=server_creds)
@ktu.gssapi_extension_test('ggf', 'Global Grid Forum')
@ktu.gssapi_extension_test('s4u', 'S4U')
@ktu.krb_minversion_test('1.16',
'querying impersonator name of krb5 GSS '
'Credential using the '
'GSS_KRB5_GET_CRED_IMPERSONATOR OID',
provider='mit')
def test_inquire_cred_by_oid_impersonator(self):
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
self.realm.kinit(svc_princ, flags=['-k', '-f'])
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_token = gb.init_sec_context(target_name).token
# if our acceptor creds have a usage of both, we get
# s4u2proxy delegated credentials
server_creds = gb.acquire_cred(None, usage='both').creds
server_ctx_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
self.assertIsNotNone(server_ctx_resp)
self.assertIsNotNone(server_ctx_resp.delegated_creds)
self.assertIsInstance(server_ctx_resp.delegated_creds, gb.Creds)
# GSS_KRB5_GET_CRED_IMPERSONATOR
oid = gb.OID.from_int_seq("1.2.840.113554.172.16.58.3.14")
info = gb.inquire_cred_by_oid(server_ctx_resp.delegated_creds, oid)
self.assertIsInstance(info, list)
self.assertGreater(len(info), 0)
self.assertIsInstance(info[0], bytes)
self.assertEqual(info[0], b"%s@%s" % (
SERVICE_PRINCIPAL, self.realm.realm.encode('utf-8')))
@ktu.gssapi_extension_test('ggf', 'Global Grid Forum')
def test_inquire_sec_context_by_oid(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp1 = gb.init_sec_context(target_name)
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(ctx_resp1[3],
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=ctx_resp1[0],
input_token=server_tok)
client_ctx = client_resp2[0]
# GSS_C_INQ_SSPI_SESSION_KEY
        session_key_oid = gb.OID.from_int_seq("1.2.840.113554.1.2.2.5.5")
client_key = gb.inquire_sec_context_by_oid(client_ctx, session_key_oid)
server_key = gb.inquire_sec_context_by_oid(server_ctx, session_key_oid)
self.assertIsInstance(client_key, list)
self.assertGreater(len(client_key), 0)
self.assertIsInstance(server_key, list)
self.assertGreater(len(server_key), 0)
self.assertCountEqual(client_key, server_key)
@ktu.gssapi_extension_test('ggf', 'Global Grid Forum')
def test_inquire_sec_context_by_oid_should_raise_error(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp1 = gb.init_sec_context(target_name)
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(ctx_resp1[3],
acceptor_creds=server_creds)
client_resp2 = gb.init_sec_context(target_name,
context=ctx_resp1[0],
input_token=server_resp[3])
client_ctx = client_resp2[0]
invalid_oid = gb.OID.from_int_seq("1.2.3.4.5.6.7.8.9")
self.assertRaises(gb.GSSError, gb.inquire_sec_context_by_oid,
client_ctx, invalid_oid)
@ktu.gssapi_extension_test('ggf', 'Global Grid Forum')
@ktu.gssapi_extension_test('password', 'Add Credential with Password')
def test_set_sec_context_option(self):
if sys.platform == 'darwin':
self.skipTest("macOS NTLM does not implement this OID")
ntlm_mech = gb.OID.from_int_seq("1.3.6.1.4.1.311.2.2.10")
username = gb.import_name(name=b"user",
name_type=gb.NameType.user)
try:
cred = gb.acquire_cred_with_password(name=username,
password=b"password",
mechs=[ntlm_mech])
except gb.GSSError:
self.skipTest('You do not have the GSSAPI gss-ntlmssp mech '
'installed')
server = gb.import_name(name=b"server",
name_type=gb.NameType.hostbased_service)
orig_context = gb.init_sec_context(server, creds=cred.creds,
mech=ntlm_mech)[0]
# GSS_NTLMSSP_RESET_CRYPTO_OID_STRING
reset_mech = gb.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3")
out_context = gb.set_sec_context_option(reset_mech,
context=orig_context,
value=b"\x00" * 4)
self.assertIsInstance(out_context, gb.SecurityContext)
@ktu.gssapi_extension_test('ggf', 'Global Grid Forum')
@ktu.gssapi_extension_test('password', 'Add Credential with Password')
def test_set_sec_context_option_fail(self):
ntlm_mech = gb.OID.from_int_seq("1.3.6.1.4.1.311.2.2.10")
username = gb.import_name(name=b"user",
name_type=gb.NameType.user)
try:
cred = gb.acquire_cred_with_password(name=username,
password=b"password",
mechs=[ntlm_mech])
except gb.GSSError:
self.skipTest('You do not have the GSSAPI gss-ntlmssp mech '
'installed')
server = gb.import_name(name=b"server",
name_type=gb.NameType.hostbased_service)
context = gb.init_sec_context(server, creds=cred.creds,
mech=ntlm_mech)[0]
# GSS_NTLMSSP_RESET_CRYPTO_OID_STRING
reset_mech = gb.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3")
# will raise a GSSError if no data was passed in
self.assertRaises(gb.GSSError, gb.set_sec_context_option, reset_mech,
context)
@ktu.gssapi_extension_test('set_cred_opt', 'Kitten Set Credential Option')
@ktu.krb_minversion_test('1.14',
'GSS_KRB5_CRED_NO_CI_FLAGS_X was added in MIT '
'krb5 1.14', provider='mit')
def test_set_cred_option(self):
name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
# GSS_KRB5_CRED_NO_CI_FLAGS_X
no_ci_flags_x = gb.OID.from_int_seq("1.2.752.43.13.29")
orig_cred = gb.acquire_cred(name).creds
# nothing much we can test here apart from it doesn't fail and the
# id of the return cred is the same as the input one
output_cred = gb.set_cred_option(no_ci_flags_x, creds=orig_cred)
self.assertIsInstance(output_cred, gb.Creds)
@ktu.gssapi_extension_test('set_cred_opt', 'Kitten Set Credential Option')
def test_set_cred_option_should_raise_error(self):
name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
orig_cred = gb.acquire_cred(name).creds
# this is a fake OID and shouldn't work at all
invalid_oid = gb.OID.from_int_seq("1.2.3.4.5.6.7.8.9")
self.assertRaises(gb.GSSError, gb.set_cred_option, invalid_oid,
orig_cred, b"\x00")
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
@ktu.krb_provider_test(['mit'], 'Cannot revert ccache on Heimdal')
# https://github.com/heimdal/heimdal/issues/803
def test_krb5_ccache_name(self):
provider = self.realm.provider.lower()
new_ccache = os.path.join(self.realm.tmpdir, 'ccache-new')
new_env = self.realm.env.copy()
new_env['KRB5CCNAME'] = new_ccache
self.realm.kinit(self.realm.user_princ,
password=self.realm.password('<PASSWORD>'),
env=new_env)
old_ccache = gb.krb5_ccache_name(new_ccache.encode('utf-8'))
try:
if provider == 'heimdal':
# Heimdal never returns the old name - see above link
self.assertTrue(old_ccache is None)
else:
self.assertEqual(old_ccache.decode('utf-8'), self.realm.ccache)
cred_resp = gb.acquire_cred(usage='initiate').creds
princ_name = gb.inquire_cred(cred_resp, name=True).name
name = gb.display_name(princ_name, name_type=False).name
self.assertEqual(name, self.realm.user_princ.encode('utf-8'))
if provider != 'heimdal':
changed_ccache = gb.krb5_ccache_name(old_ccache)
self.assertEqual(changed_ccache.decode('utf-8'), new_ccache)
finally:
# Ensure original behaviour is back for other tests
gb.krb5_ccache_name(None)
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_resp = gb.init_sec_context(target_name, creds=cred_resp)
client_ctx = client_resp[0]
client_token = client_resp[3]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_token = server_resp[3]
gb.init_sec_context(target_name, context=client_ctx,
input_token=server_token)
initiator = gb.inquire_context(server_ctx,
initiator_name=True).initiator_name
initiator_name = gb.display_name(initiator, name_type=False).name
self.assertEqual(initiator_name, self.realm.user_princ.encode('utf-8'))
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
def test_krb5_export_lucid_sec_context(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(target_name)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token1,
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=client_ctx,
input_token=server_tok)
ctx = client_resp2[0]
self.assertRaises(gb.GSSError, gb.krb5_export_lucid_sec_context,
ctx, 0)
initiator_info = gb.krb5_export_lucid_sec_context(ctx, 1)
self.assertTrue(isinstance(initiator_info, gb.Krb5LucidContextV1))
self.assertEqual(initiator_info.version, 1)
self.assertTrue(initiator_info.is_initiator)
self.assertTrue(isinstance(initiator_info.endtime, int))
self.assertTrue(isinstance(initiator_info.send_seq, int))
self.assertTrue(isinstance(initiator_info.recv_seq, int))
self.assertEqual(initiator_info.protocol, 1)
self.assertEqual(initiator_info.rfc1964_kd, None)
self.assertTrue(isinstance(initiator_info.cfx_kd, gb.CfxKeyData))
self.assertTrue(isinstance(initiator_info.cfx_kd.ctx_key_type, int))
self.assertTrue(isinstance(initiator_info.cfx_kd.ctx_key, bytes))
self.assertTrue(isinstance(initiator_info.cfx_kd.acceptor_subkey_type,
int))
self.assertTrue(isinstance(initiator_info.cfx_kd.acceptor_subkey,
bytes))
acceptor_info = gb.krb5_export_lucid_sec_context(server_ctx, 1)
self.assertTrue(isinstance(acceptor_info, gb.Krb5LucidContextV1))
self.assertEqual(acceptor_info.version, 1)
self.assertFalse(acceptor_info.is_initiator)
self.assertTrue(isinstance(acceptor_info.endtime, int))
self.assertTrue(isinstance(acceptor_info.send_seq, int))
self.assertTrue(isinstance(acceptor_info.recv_seq, int))
self.assertEqual(acceptor_info.protocol, 1)
self.assertEqual(acceptor_info.rfc1964_kd, None)
self.assertTrue(isinstance(acceptor_info.cfx_kd, gb.CfxKeyData))
self.assertTrue(isinstance(acceptor_info.cfx_kd.ctx_key_type, int))
self.assertTrue(isinstance(acceptor_info.cfx_kd.ctx_key, bytes))
self.assertTrue(isinstance(acceptor_info.cfx_kd.acceptor_subkey_type,
int))
self.assertTrue(isinstance(acceptor_info.cfx_kd.acceptor_subkey,
bytes))
self.assertEqual(initiator_info.endtime, acceptor_info.endtime)
self.assertEqual(initiator_info.send_seq, acceptor_info.recv_seq)
self.assertEqual(initiator_info.recv_seq, acceptor_info.send_seq)
self.assertEqual(initiator_info.cfx_kd.ctx_key_type,
acceptor_info.cfx_kd.ctx_key_type)
self.assertEqual(initiator_info.cfx_kd.ctx_key,
acceptor_info.cfx_kd.ctx_key)
self.assertEqual(initiator_info.cfx_kd.acceptor_subkey_type,
acceptor_info.cfx_kd.acceptor_subkey_type)
self.assertEqual(initiator_info.cfx_kd.acceptor_subkey,
acceptor_info.cfx_kd.acceptor_subkey)
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
def test_krb5_extract_authtime_from_sec_context(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(target_name)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token1,
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=client_ctx,
input_token=server_tok)
ctx = client_resp2[0]
if self.realm.provider.lower() == 'heimdal':
# Heimdal doesn't store the ticket info on the initiator
client_authtime = server_authtime = \
gb.krb5_extract_authtime_from_sec_context(server_ctx)
self.assertRaises(gb.GSSError,
gb.krb5_extract_authtime_from_sec_context,
client_ctx)
else:
client_authtime = gb.krb5_extract_authtime_from_sec_context(ctx)
server_authtime = gb.krb5_extract_authtime_from_sec_context(
server_ctx)
self.assertTrue(isinstance(client_authtime, int))
self.assertTrue(isinstance(server_authtime, int))
self.assertEqual(client_authtime, server_authtime)
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
def test_krb5_extract_authz_data_from_sec_context(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_token = gb.init_sec_context(target_name)[3]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_ctx = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)[0]
# KRB5_AUTHDATA_IF_RELEVANT = 1
authz_data = gb.krb5_extract_authz_data_from_sec_context(server_ctx, 1)
self.assertTrue(isinstance(authz_data, bytes))
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
def test_krb5_import_cred(self):
# Ensuring we match the krb5 library to the GSSAPI library is a thorny
# problem. Avoid it by requiring test suite users to explicitly
# enable this test.
if not self.KRB5_LIB_PATH:
self.skipTest("Env var GSSAPI_KRB5_MAIN_LIB not defined")
creds = gb.Creds()
# Should fail if only creds are specified
self.assertRaises(ValueError, gb.krb5_import_cred, creds)
new_ccache = os.path.join(self.realm.tmpdir, 'ccache-new')
new_env = self.realm.env.copy()
new_env['KRB5CCNAME'] = new_ccache
self.realm.kinit(self.realm.user_princ,
password=self.realm.password('<PASSWORD>'),
env=new_env)
krb5 = ctypes.CDLL(self.KRB5_LIB_PATH)
krb5_ctx = ctypes.c_void_p()
krb5.krb5_init_context(ctypes.byref(krb5_ctx))
try:
ccache_ptr = ctypes.c_void_p()
err = krb5.krb5_cc_resolve(krb5_ctx, new_ccache.encode('utf-8'),
ctypes.byref(ccache_ptr))
self.assertEqual(err, 0)
try:
gb.krb5_import_cred(creds, cache=ccache_ptr.value)
# Creds will be invalid once the cc is closed so do this now
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_resp = gb.init_sec_context(target_name, creds=creds)
finally:
krb5.krb5_cc_close(krb5_ctx, ccache_ptr)
finally:
krb5.krb5_free_context(krb5_ctx)
client_ctx = client_resp[0]
client_token = client_resp[3]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token,
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_token = server_resp[3]
gb.init_sec_context(target_name, context=client_ctx,
input_token=server_token)
initiator = gb.inquire_context(server_ctx,
initiator_name=True).initiator_name
initiator_name = gb.display_name(initiator, name_type=False).name
self.assertEqual(initiator_name, self.realm.user_princ.encode('utf-8'))
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
def test_krb5_get_tkt_flags(self):
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(target_name)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name)[0]
server_resp = gb.accept_sec_context(client_token1,
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=client_ctx,
input_token=server_tok)
client_ctx = client_resp2[0]
if self.realm.provider.lower() == 'heimdal':
# Heimdal doesn't store the ticket info on the initiator
client_flags = server_flags = gb.krb5_get_tkt_flags(server_ctx)
self.assertRaises(gb.GSSError, gb.krb5_get_tkt_flags, client_ctx)
else:
client_flags = gb.krb5_get_tkt_flags(client_ctx)
server_flags = gb.krb5_get_tkt_flags(server_ctx)
self.assertTrue(isinstance(client_flags, int))
self.assertTrue(isinstance(server_flags, int))
self.assertEqual(client_flags, server_flags)
@ktu.gssapi_extension_test('krb5', 'Kerberos Extensions')
@ktu.krb_provider_test(['mit'], 'Cannot revert ccache on Heimdal')
# https://github.com/heimdal/heimdal/issues/803
def test_krb5_set_allowable_enctypes(self):
krb5_mech = gb.OID.from_int_seq("1.2.840.113554.1.2.2")
AES_128 = 0x11
AES_256 = 0x12
new_ccache = os.path.join(self.realm.tmpdir, 'ccache-new')
new_env = self.realm.env.copy()
new_env['KRB5CCNAME'] = new_ccache
self.realm.kinit(self.realm.user_princ,
password=self.realm.password('<PASSWORD>'),
env=new_env)
gb.krb5_ccache_name(new_ccache.encode('utf-8'))
try:
creds = gb.acquire_cred(usage='initiate',
mechs=[krb5_mech]).creds
finally:
gb.krb5_ccache_name(None)
gb.krb5_set_allowable_enctypes(creds, [AES_128])
target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
server_creds = gb.acquire_cred(server_name, usage='accept',
mechs=[krb5_mech])[0]
if self.realm.provider.lower() != 'heimdal':
# Will fail because the client only offers AES128
# Only seems to work on MIT and not Heimdal
ctx_resp = gb.init_sec_context(target_name, creds=creds)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
gb.krb5_set_allowable_enctypes(server_creds, [AES_256])
self.assertRaises(gb.GSSError, gb.accept_sec_context,
client_token1, acceptor_creds=server_creds)
gb.krb5_set_allowable_enctypes(server_creds, [AES_128, AES_256])
ctx_resp = gb.init_sec_context(target_name, creds=creds)
client_token1 = ctx_resp[3]
client_ctx = ctx_resp[0]
server_resp = gb.accept_sec_context(client_token1,
acceptor_creds=server_creds)
server_ctx = server_resp[0]
server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(target_name,
context=client_ctx,
input_token=server_tok)
ctx = client_resp2[0]
initiator_info = gb.krb5_export_lucid_sec_context(ctx, 1)
acceptor_info = gb.krb5_export_lucid_sec_context(server_ctx, 1)
self.assertEqual(AES_128, initiator_info.cfx_kd.ctx_key_type)
self.assertEqual(initiator_info.cfx_kd.ctx_key_type,
initiator_info.cfx_kd.acceptor_subkey_type)
self.assertEqual(acceptor_info.cfx_kd.ctx_key_type,
acceptor_info.cfx_kd.acceptor_subkey_type)
class TestIntEnumFlagSet(unittest.TestCase):
def test_create_from_int(self):
int_val = (gb.RequirementFlag.integrity |
gb.RequirementFlag.confidentiality)
fset = gb.IntEnumFlagSet(gb.RequirementFlag, int_val)
self.assertEqual(int(fset), int_val)
def test_create_from_other_set(self):
int_val = (gb.RequirementFlag.integrity |
gb.RequirementFlag.confidentiality)
fset1 = gb.IntEnumFlagSet(gb.RequirementFlag, int_val)
fset2 = gb.IntEnumFlagSet(gb.RequirementFlag, fset1)
self.assertEqual(fset1, fset2)
def test_create_from_list(self):
lst = [gb.RequirementFlag.integrity,
gb.RequirementFlag.confidentiality]
fset = gb.IntEnumFlagSet(gb.RequirementFlag, lst)
self.assertCountEqual(list(fset), lst)
def test_create_empty(self):
fset = gb.IntEnumFlagSet(gb.RequirementFlag)
self.assertEqual(len(fset), 0)
def _create_fset(self):
lst = [gb.RequirementFlag.integrity,
gb.RequirementFlag.confidentiality]
return gb.IntEnumFlagSet(gb.RequirementFlag, lst)
def test_contains(self):
fset = self._create_fset()
self.assertIn(gb.RequirementFlag.integrity, fset)
self.assertNotIn(gb.RequirementFlag.protection_ready, fset)
def test_len(self):
self.assertEqual(len(self._create_fset()), 2)
def test_add(self):
fset = self._create_fset()
self.assertEqual(len(fset), 2)
fset.add(gb.RequirementFlag.protection_ready)
self.assertEqual(len(fset), 3)
self.assertIn(gb.RequirementFlag.protection_ready, fset)
def test_discard(self):
fset = self._create_fset()
self.assertEqual(len(fset), 2)
fset.discard(gb.RequirementFlag.protection_ready)
self.assertEqual(len(fset), 2)
fset.discard(gb.RequirementFlag.integrity)
self.assertEqual(len(fset), 1)
self.assertNotIn(gb.RequirementFlag.integrity, fset)
def test_and_enum(self):
fset = self._create_fset()
self.assertTrue(fset & gb.RequirementFlag.integrity)
self.assertFalse(fset & gb.RequirementFlag.protection_ready)
def test_and_int(self):
fset = self._create_fset()
int_val = int(gb.RequirementFlag.integrity)
self.assertEqual(fset & int_val, int_val)
def test_and_set(self):
fset1 = self._create_fset()
fset2 = self._create_fset()
fset3 = self._create_fset()
fset1.add(gb.RequirementFlag.protection_ready)
fset2.add(gb.RequirementFlag.out_of_sequence_detection)
self.assertEqual(fset1 & fset2, fset3)
def test_or_enum(self):
fset1 = self._create_fset()
fset2 = fset1 | gb.RequirementFlag.protection_ready
self.assertLess(fset1, fset2)
self.assertIn(gb.RequirementFlag.protection_ready, fset2)
def test_or_int(self):
fset = self._create_fset()
int_val = int(gb.RequirementFlag.integrity)
self.assertEqual(fset | int_val, int(fset))
def test_or_set(self):
fset1 = self._create_fset()
fset2 = self._create_fset()
fset3 = self._create_fset()
fset1.add(gb.RequirementFlag.protection_ready)
fset2.add(gb.RequirementFlag.out_of_sequence_detection)
fset3.add(gb.RequirementFlag.protection_ready)
fset3.add(gb.RequirementFlag.out_of_sequence_detection)
self.assertEqual(fset1 | fset2, fset3)
def test_xor_enum(self):
fset1 = self._create_fset()
fset2 = fset1 ^ gb.RequirementFlag.protection_ready
fset3 = fset1 ^ gb.RequirementFlag.integrity
self.assertEqual(len(fset2), 3)
self.assertIn(gb.RequirementFlag.protection_ready, fset2)
self.assertEqual(len(fset3), 1)
self.assertNotIn(gb.RequirementFlag.integrity, fset3)
def test_xor_int(self):
fset = self._create_fset()
self.assertEqual(fset ^ int(gb.RequirementFlag.protection_ready),
int(fset) ^ gb.RequirementFlag.protection_ready)
self.assertEqual(fset ^ int(gb.RequirementFlag.integrity),
int(fset) ^ gb.RequirementFlag.integrity)
def test_xor_set(self):
fset1 = self._create_fset()
fset2 = self._create_fset()
fset1.add(gb.RequirementFlag.protection_ready)
fset2.add(gb.RequirementFlag.out_of_sequence_detection)
fset3 = fset1 ^ fset2
self.assertEqual(len(fset3), 2)
self.assertNotIn(gb.RequirementFlag.integrity, fset3)
self.assertNotIn(gb.RequirementFlag.confidentiality, fset3)
self.assertIn(gb.RequirementFlag.protection_ready, fset3)
self.assertIn(gb.RequirementFlag.out_of_sequence_detection, fset3)
class TestInitContext(_GSSAPIKerberosTestCase):
def setUp(self):
self.target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
def tearDown(self):
gb.release_name(self.target_name)
def test_basic_init_default_ctx(self):
ctx_resp = gb.init_sec_context(self.target_name)
self.assertIsNotNone(ctx_resp)
(ctx, out_mech_type,
out_req_flags, out_token, out_ttl, cont_needed) = ctx_resp
self.assertIsInstance(ctx, gb.SecurityContext)
self.assertEqual(out_mech_type, gb.MechType.kerberos)
self.assertIsInstance(out_req_flags, Set)
if sys.platform != 'darwin':
self.assertGreaterEqual(len(out_req_flags), 2)
self.assertGreater(len(out_token), 0)
self.assertGreater(out_ttl, 0)
self.assertIsInstance(cont_needed, bool)
gb.delete_sec_context(ctx)
class TestAcceptContext(_GSSAPIKerberosTestCase):
def setUp(self):
self.target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(self.target_name)
self.client_token = ctx_resp[3]
self.client_ctx = ctx_resp[0]
self.assertIsNotNone(self.client_ctx)
self.server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
self.server_creds = gb.acquire_cred(self.server_name)[0]
self.server_ctx = None
def tearDown(self):
gb.release_name(self.target_name)
gb.release_name(self.server_name)
gb.release_cred(self.server_creds)
gb.delete_sec_context(self.client_ctx)
if self.server_ctx is not None:
gb.delete_sec_context(self.server_ctx)
def test_basic_accept_context_no_acceptor_creds(self):
server_resp = gb.accept_sec_context(self.client_token)
self.assertIsNotNone(server_resp)
(self.server_ctx, name, mech_type, out_token,
out_req_flags, out_ttl, delegated_cred, cont_needed) = server_resp
self.assertIsInstance(self.server_ctx, gb.SecurityContext)
self.assertIsInstance(name, gb.Name)
self.assertEqual(mech_type, gb.MechType.kerberos)
self.assertGreater(len(out_token), 0)
self.assertIsInstance(out_req_flags, Set)
self.assertGreaterEqual(len(out_req_flags), 2)
self.assertGreater(out_ttl, 0)
self.assertIsInstance(cont_needed, bool)
if delegated_cred is not None:
self.assertIsInstance(delegated_cred, gb.Creds)
def test_basic_accept_context(self):
server_resp = gb.accept_sec_context(self.client_token,
acceptor_creds=self.server_creds)
self.assertIsNotNone(server_resp)
(self.server_ctx, name, mech_type, out_token,
out_req_flags, out_ttl, delegated_cred, cont_needed) = server_resp
self.assertIsInstance(self.server_ctx, gb.SecurityContext)
self.assertIsInstance(name, gb.Name)
self.assertEqual(mech_type, gb.MechType.kerberos)
self.assertGreater(len(out_token), 0)
self.assertIsInstance(out_req_flags, Set)
self.assertGreaterEqual(len(out_req_flags), 2)
self.assertGreater(out_ttl, 0)
self.assertIsInstance(cont_needed, bool)
if delegated_cred is not None:
self.assertIsInstance(delegated_cred, gb.Creds)
def test_channel_bindings(self):
bdgs = gb.ChannelBindings(application_data=b'abcxyz',
initiator_address_type=gb.AddressType.ip,
initiator_address=b'127.0.0.1',
acceptor_address_type=gb.AddressType.ip,
acceptor_address=b'127.0.0.1')
self.target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(self.target_name,
channel_bindings=bdgs)
self.client_token = ctx_resp[3]
self.client_ctx = ctx_resp[0]
self.assertIsNotNone(self.client_ctx)
self.server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
self.server_creds = gb.acquire_cred(self.server_name)[0]
server_resp = gb.accept_sec_context(self.client_token,
acceptor_creds=self.server_creds,
channel_bindings=bdgs)
self.assertIsNotNone(server_resp)
self.server_ctx = server_resp.context
def test_bad_channel_binding_raises_error(self):
if sys.platform == 'darwin':
self.skipTest('macOS does not raise error with validation')
bdgs = gb.ChannelBindings(application_data=b'abcxyz',
initiator_address_type=gb.AddressType.ip,
initiator_address=b'127.0.0.1',
acceptor_address_type=gb.AddressType.ip,
acceptor_address=b'127.0.0.1')
self.target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(self.target_name,
channel_bindings=bdgs)
self.client_token = ctx_resp[3]
self.client_ctx = ctx_resp[0]
self.assertIsNotNone(self.client_ctx)
self.server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
self.server_creds = gb.acquire_cred(self.server_name)[0]
bdgs.acceptor_address = b'127.0.1.0'
self.assertRaises(gb.GSSError, gb.accept_sec_context,
self.client_token, acceptor_creds=self.server_creds,
channel_bindings=bdgs)
class TestWrapUnwrap(_GSSAPIKerberosTestCase):
def setUp(self):
self.target_name = gb.import_name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
ctx_resp = gb.init_sec_context(self.target_name)
self.client_token1 = ctx_resp[3]
self.client_ctx = ctx_resp[0]
self.server_name = gb.import_name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
self.server_creds = gb.acquire_cred(self.server_name)[0]
server_resp = gb.accept_sec_context(self.client_token1,
acceptor_creds=self.server_creds)
self.server_ctx = server_resp[0]
self.server_tok = server_resp[3]
client_resp2 = gb.init_sec_context(self.target_name,
context=self.client_ctx,
input_token=self.server_tok)
self.client_token2 = client_resp2[3]
self.client_ctx = client_resp2[0]
def tearDown(self):
gb.release_name(self.target_name)
gb.release_name(self.server_name)
gb.release_cred(self.server_creds)
gb.delete_sec_context(self.client_ctx)
gb.delete_sec_context(self.server_ctx)
def test_import_export_sec_context(self):
tok = gb.export_sec_context(self.client_ctx)
self.assertIsInstance(tok, bytes)
self.assertGreater(len(tok), 0)
imported_ctx = gb.import_sec_context(tok)
self.assertIsInstance(imported_ctx, gb.SecurityContext)
self.client_ctx = imported_ctx # ensure that it gets deleted
def test_get_mic(self):
mic_token = gb.get_mic(self.client_ctx, b"some message")
self.assertIsInstance(mic_token, bytes)
self.assertGreater(len(mic_token), 0)
def test_basic_verify_mic(self):
mic_token = gb.get_mic(self.client_ctx, b"some message")
qop_used = gb.verify_mic(self.server_ctx, b"some message", mic_token)
self.assertIsInstance(qop_used, int)
# test a bad MIC
self.assertRaises(gb.GSSError, gb.verify_mic, self.server_ctx,
b"some other message", b"some invalid mic")
def test_wrap_size_limit(self):
with_conf = gb.wrap_size_limit(self.client_ctx, 100)
without_conf = gb.wrap_size_limit(self.client_ctx, 100,
confidential=False)
self.assertIsInstance(with_conf, int)
self.assertIsInstance(without_conf, int)
self.assertLess(without_conf, 100)
self.assertLess(with_conf, 100)
def test_basic_wrap_unwrap(self):
wrapped_message, conf = gb.wrap(self.client_ctx, b"test message")
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertIsInstance(wrapped_message, bytes)
self.assertGreater(len(wrapped_message), len("test message"))
unwrapped_message, conf, qop = gb.unwrap(self.server_ctx,
wrapped_message)
self.assertIsInstance(unwrapped_message, bytes)
self.assertEqual(unwrapped_message, b'test message')
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertIsInstance(qop, int)
self.assertGreaterEqual(qop, 0)
@ktu.gssapi_extension_test('dce', 'DCE (IOV/AEAD)')
def test_basic_iov_wrap_unwrap_prealloc(self):
init_data = b'some encrypted data'
init_other_data = b'some other encrypted data'
init_signed_info = b'some sig data'
init_message = gb.IOV((gb.IOVBufferType.sign_only, init_signed_info),
init_data, init_other_data, auto_alloc=False)
self.assertFalse(init_message[0].allocate)
self.assertFalse(init_message[4].allocate)
self.assertFalse(init_message[5].allocate)
conf = gb.wrap_iov_length(self.client_ctx, init_message)
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertGreaterEqual(len(init_message[0]), 1)
self.assertGreaterEqual(len(init_message[5]), 1)
conf = gb.wrap_iov(self.client_ctx, init_message)
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
        # make sure we didn't modify the original strings we passed in
self.assertEqual(init_data, b'some encrypted data')
self.assertEqual(init_other_data, b'some other encrypted data')
self.assertEqual(init_signed_info, b'some sig data')
self.assertNotEqual(init_message[2].value, b'some encrypted data')
self.assertNotEqual(init_message[3].value,
b'some other encrypted data')
conf, qop = gb.unwrap_iov(self.server_ctx, init_message)
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertIsInstance(qop, int)
self.assertEqual(init_message[1].value, init_signed_info)
self.assertEqual(init_message[2].value, init_data)
self.assertEqual(init_message[3].value, init_other_data)
@ktu.gssapi_extension_test('dce', 'DCE (IOV)')
def test_basic_iov_wrap_unwrap_autoalloc(self):
init_data = b'some encrypted data'
init_other_data = b'some other encrypted data'
init_signed_info = b'some sig data'
init_message = gb.IOV((gb.IOVBufferType.sign_only, init_signed_info),
init_data, init_other_data)
conf = gb.wrap_iov(self.client_ctx, init_message)
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
        # make sure we didn't modify the original strings we passed in
self.assertEqual(init_data, b'some encrypted data')
self.assertEqual(init_other_data, b'some other encrypted data')
self.assertEqual(init_signed_info, b'some sig data')
self.assertNotEqual(init_message[2].value, b'some encrypted data')
self.assertNotEqual(init_message[3].value,
b'some other encrypted data')
conf, qop = gb.unwrap_iov(self.server_ctx, init_message)
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertIsInstance(qop, int)
self.assertEqual(init_message[1].value, init_signed_info)
self.assertEqual(init_message[2].value, init_data)
self.assertEqual(init_message[3].value, init_other_data)
@ktu.gssapi_extension_test('dce_aead', 'DCE (AEAD)')
@ktu.krb_provider_test(['mit'], 'unwrapping AEAD stream')
def test_basic_aead_wrap_unwrap(self):
assoc_data = b'some sig data'
wrapped_message, conf = gb.wrap_aead(self.client_ctx, b"test message",
assoc_data)
self.assertIsInstance(wrapped_message, bytes)
self.assertGreater(len(wrapped_message), len('test message'))
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
unwrapped_message, conf, qop = \
gb.unwrap_aead(self.server_ctx, wrapped_message, assoc_data)
self.assertIsInstance(unwrapped_message, bytes)
self.assertEqual(unwrapped_message, b'test message')
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertIsInstance(qop, int)
self.assertGreaterEqual(qop, 0)
@ktu.gssapi_extension_test('dce_aead', 'DCE (AEAD)')
@ktu.krb_provider_test(['mit'], 'unwrapping AEAD stream')
def test_basic_aead_wrap_unwrap_no_assoc(self):
wrapped_message, conf = gb.wrap_aead(self.client_ctx, b"test message")
self.assertIsInstance(wrapped_message, bytes)
self.assertGreater(len(wrapped_message), len("test message"))
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
unwrapped_message, conf, qop = gb.unwrap_aead(self.server_ctx,
wrapped_message)
self.assertIsInstance(unwrapped_message, bytes)
self.assertEqual(unwrapped_message, b"test message")
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertIsInstance(qop, int)
self.assertGreaterEqual(qop, 0)
@ktu.gssapi_extension_test('dce_aead', 'DCE (AEAD)')
@ktu.krb_provider_test(['mit'], 'unwrapping AEAD stream')
def test_basic_aead_wrap_unwrap_bad_assoc_raises_error(self):
assoc_data = b'some sig data'
wrapped_message, conf = gb.wrap_aead(self.client_ctx, b"test message",
assoc_data)
self.assertIsInstance(wrapped_message, bytes)
self.assertGreater(len(wrapped_message), len("test message"))
self.assertIsInstance(conf, bool)
self.assertTrue(conf)
self.assertRaises(gb.BadMICError, gb.unwrap_aead, self.server_ctx,
wrapped_message, b'some other sig data')
@ktu.gssapi_extension_test('iov_mic', 'IOV MIC')
def test_get_mic_iov(self):
init_message = gb.IOV(b'some data',
(gb.IOVBufferType.sign_only, b'some sig data'),
gb.IOVBufferType.mic_token, std_layout=False)
gb.get_mic_iov(self.client_ctx, init_message)
self.assertEqual(init_message[2].type, gb.IOVBufferType.mic_token)
self.assertGreater(len(init_message[2].value), 0)
@ktu.gssapi_extension_test('iov_mic', 'IOV MIC')
def test_basic_verify_mic_iov(self):
init_message = gb.IOV(b'some data',
(gb.IOVBufferType.sign_only, b'some sig data'),
gb.IOVBufferType.mic_token, std_layout=False)
gb.get_mic_iov(self.client_ctx, init_message)
self.assertEqual(init_message[2].type, gb.IOVBufferType.mic_token)
self.assertGreater(len(init_message[2].value), 0)
qop_used = gb.verify_mic_iov(self.server_ctx, init_message)
self.assertIsInstance(qop_used, int)
@ktu.gssapi_extension_test('iov_mic', 'IOV MIC')
def test_verify_mic_iov_bad_mic_raises_error(self):
init_message = gb.IOV(b'some data',
(gb.IOVBufferType.sign_only, b'some sig data'),
(gb.IOVBufferType.mic_token, '<PASSWORD>'),
std_layout=False)
# test a bad MIC
self.assertRaises(gb.GSSError, gb.verify_mic_iov, self.server_ctx,
init_message)
@ktu.gssapi_extension_test('iov_mic', 'IOV MIC')
def test_get_mic_iov_length(self):
init_message = gb.IOV(b'some data',
(gb.IOVBufferType.sign_only, b'some sig data'),
gb.IOVBufferType.mic_token, std_layout=False,
auto_alloc=False)
gb.get_mic_iov_length(self.client_ctx, init_message)
self.assertEqual(init_message[2].type, gb.IOVBufferType.mic_token)
self.assertGreater(len(init_message[2].value), 0)
TEST_OIDS = {'SPNEGO': {'bytes': b'\053\006\001\005\005\002',
'string': '1.3.6.1.5.5.2'},
'KRB5': {'bytes': b'\052\206\110\206\367\022\001\002\002',
'string': '1.2.840.113554.1.2.2'},
'KRB5_OLD': {'bytes': b'\053\005\001\005\002',
'string': '1.3.5.1.5.2'},
'KRB5_WRONG': {'bytes': b'\052\206\110\202\367\022\001\002\002',
'string': '1.2.840.48018.1.2.2'},
'IAKERB': {'bytes': b'\053\006\001\005\002\005',
'string': '1.3.6.1.5.2.5'}}
class TestOIDTransforms(unittest.TestCase):
def test_decode_from_bytes(self):
for oid in TEST_OIDS.values():
o = gb.OID(elements=oid['bytes'])
self.assertEqual(repr(o), f"<OID {oid['string']}>")
def test_encode_from_string(self):
for oid in TEST_OIDS.values():
o = gb.OID.from_int_seq(oid['string'])
self.assertEqual(o.__bytes__(), oid['bytes'])
def test_encode_from_int_seq(self):
for oid in TEST_OIDS.values():
int_seq = oid['string'].split('.')
o = gb.OID.from_int_seq(int_seq)
self.assertEqual(o.__bytes__(), oid['bytes'])
def test_comparisons(self):
krb5 = gb.OID.from_int_seq(TEST_OIDS['KRB5']['string'])
krb5_other = gb.OID.from_int_seq(TEST_OIDS['KRB5']['string'])
spnego = gb.OID.from_int_seq(TEST_OIDS['SPNEGO']['string'])
# Purpose here is to test comparisons themselves - don't simplify
self.assertTrue(krb5 == krb5_other)
self.assertFalse(krb5 == spnego)
self.assertFalse(krb5 != krb5_other)
self.assertTrue(krb5 != spnego)
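# Illustrative note on the OID byte encodings listed in TEST_OIDS above
# (standard BER/DER rules; the concrete arithmetic is worked through here
# only as an example): the first byte packs the first two arcs as
# 40 * arc1 + arc2, so for KRB5 ('1.2.840.113554.1.2.2') it is
# 40 * 1 + 2 = 42 = 0o052; arcs above 127 are split into base-128 digits
# with the high bit set on all but the last, so 840 = 6 * 128 + 72
# becomes 0o206 0o110.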
| 1.898438
| 2
|
Code/arrayTest.py
|
Wolfcoder13/Drooper
| 0
|
12774813
|
import numpy
a = numpy.arange(150)
# a[0::2] *= numpy.sqrt(2)/2.0 * (numpy.cos(2) - numpy.sin(2))
a[0::2] *= 2
print(a)
| 2.90625
| 3
|
chapter6/shodan/shodan_api_rest.py
|
gabrielmahia/ushuhudAI
| 0
|
12774814
|
import shodan
import requests
SHODAN_API_KEY = ""
api = shodan.Shodan(SHODAN_API_KEY)
domain = 'www.python.org'
dnsResolve = 'https://api.shodan.io/dns/resolve?hostnames=' + domain + '&key=' + SHODAN_API_KEY
try:
resolved = requests.get(dnsResolve)
hostIP = resolved.json()[domain]
host = api.host(hostIP)
print("IP: %s" % host['ip_str'])
print("Organization: %s" % host.get('org', 'n/a'))
print("Operating System: %s" % host.get('os', 'n/a'))
for item in host['data']:
print("Port: %s" % item['port'])
print("Banner: %s" % item['data'])
except shodan.APIError as exception:
print('Error: %s' % exception)
| 2.828125
| 3
|
game.py
|
Catsuko/Westward
| 3
|
12774815
|
from actors.actions.hit_and_run_action import HitAndRunAction
from actors.actions.input_driven_action import InputDrivenAction
from actors.actions.shoot_at_action import ShootAtAction
from actors.actor_target import ActorTarget
from actors.components.components import Components
from actors.components.health import Health
from actors.components.inventory import Inventory
from actors.interactions.null_interaction import NullInteraction
from actors.projectile import Projectile
from actors.actions.move_action import MoveAction
from actors.actions.use_action import UseAction
from input.keyboard_input import KeyboardInput
from items.gun import Gun
from views.actor_camera import ActorCamera
from views.json_environment import JsonEnvironment
from views.point_camera import PointCamera
from views.pyxel.pyxel_renderer import PyxelRenderer
from views.pyxel.shaders.color_mapped_shader import ColorMappedShader
from views.pyxel.shaders.flicker_shader import FlickerShader
from views.pyxel.pyxel_area_view import PyxelAreaView
from views.pyxel.shaders.perlin_noise_shader import PerlinNoiseShader
from world.area_builder import AreaBuilder
from actors.actor import Actor
from world.rendered_area import RenderedArea
from utilities.countdown import Countdown
import threading
player_key = 'p'
input_action = InputDrivenAction({
'w': MoveAction(0, -1), 's': MoveAction(0, 1), 'a': MoveAction(-1, 0), 'd': MoveAction(1, 0),
'i': UseAction(0, -1), 'k': UseAction(0, 1), 'j': UseAction(-1, 0), 'l': UseAction(1, 0)
}, KeyboardInput())
gun = Gun(lambda aim_dir: Projectile(aim_dir, "*"))
inventory = Inventory(frozenset([gun]))
player_target = ActorTarget(player_key)
cowboy_components = Components(frozenset([inventory, Health(99, 99)]))
shoot_at_action = ShootAtAction(player_target, UseAction())
hit_and_run_action = HitAndRunAction(player_target, shoot_at_action, MoveAction(), 3, Countdown(4, 0))
bandit = Actor(hit_and_run_action, NullInteraction(), "b", cowboy_components)
player = Actor(input_action, NullInteraction(), player_key, cowboy_components)
mapped_shader = ColorMappedShader(JsonEnvironment('config/pyxel_environment.json'))
pyxel_view = PyxelAreaView(PyxelRenderer(range(8)), PerlinNoiseShader(), FlickerShader(mapped_shader, 4), mapped_shader)
camera = ActorCamera(player_key, PointCamera(0, 0, 6, pyxel_view))
# TODO: Action that waits for an actor to enter within a certain distance? Make enemies idle about!
area = RenderedArea(AreaBuilder().rectangle(11, 11)
.with_actor(player, 0, 0)
.with_open_space(11, 5)
.to_area(), camera)
def update_loop(a):
while True:
a = a.update()
thread = threading.Thread(target=lambda: update_loop(area))
thread.start()
pyxel_view.run(128, 128)
| 2.15625
| 2
|
users/migrations/0005_auto_20200811_0450.py
|
Emmanuel-9/Instagram
| 0
|
12774816
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-08-11 01:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0004_auto_20200809_1815'),
]
operations = [
migrations.AddField(
model_name='images',
name='created_time',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='images',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='images',
name='user_profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Profile'),
),
migrations.AddField(
model_name='profile',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 1.65625
| 2
|
activeusers/urls.py
|
Yuego/django-activeusers
| 0
|
12774817
|
<filename>activeusers/urls.py
from django.conf.urls import url
from activeusers import views
app_name = 'activeusers'
urlpatterns = [
url(r'^refresh/$', views.update_active_users, name='activeusers-refresh-active-users', ),
url(r'^refresh/json/$', views.get_active_users, name='activeusers-get-active-users', ),
]
| 1.757813
| 2
|
scem/gen.py
|
noukoudashisoup/score-EM
| 3
|
12774818
|
<gh_stars>1-10
"""Module for generative models"""
import torch
import torch.nn as nn
import torch.distributions as dists
from scem import stein, net
from scem import util
from abc import ABCMeta, abstractmethod
from torch.nn.parameter import Parameter
class ConditionalSampler(metaclass=ABCMeta):
"""Abstract class of conditional distributions"""
@abstractmethod
def sample(self, n_sample, X,
seed=3, *args, **kwargs):
"""Conditioned on the input X, generate
n_sample samples.
        This class represents a conditional
        distribution. Subclasses should
        implement samplers such that,
        given an input, they output a
        tensor of shape (n_sample, X.shape[0], ...).
"""
pass
def log_prob(self, X, Z, *args, **kwargs):
pass
class CSNoiseTransformer(ConditionalSampler,
nn.Module):
"""Conditional distribution of the form
    Z \sim F(X, n) where X is a conditioning
variable, n is noise, and F is a function
of those.
"""
def __init__(self):
super(CSNoiseTransformer, self).__init__()
@abstractmethod
def forward(self, noise, X, *args, **kwargs):
"""Define map F transforming noise and input X
"""
pass
@abstractmethod
def sample_noise(self, n_sample, n, seed=13):
"""Sample from the noise distribution
Returns:
            torch.Tensor of size [n_sample, n, in_shape]
"""
pass
@abstractmethod
def in_out_shapes(self):
"""Returns the tuple of the
respective shapes of the noise
        and the transformed noise.
"""
pass
class PTPPCAPosterior(ConditionalSampler):
"""Pytorch implementation of PPCA
posterior.
Attributes:
ppca:
PPCA object
"""
def __init__(self, ppca):
super(PTPPCAPosterior, self).__init__()
self.ppca = ppca
W = ppca.weight
_, dz = W.shape
var = ppca.var
cov = torch.pinverse(
torch.eye(dz) + (W.T @ W)/var)
self.cov = cov
    def _mean_cov(self, X):
        var = self.ppca.var
        cov = self.cov
        W = self.ppca.weight
        mean = (X@W)@cov / var
        return mean, cov
def sample(self, n_sample, X,
seed=3,
):
n = X.shape[0]
W = self.ppca.weight
_, dz = W.shape
mean, cov = self._mean_cov(X)
with util.TorchSeedContext(seed):
Z = (mean + torch.randn([n_sample, n, dz]) @ cov)
return Z
class PTCSGaussLinearMean(CSNoiseTransformer):
"""Gaussian distribution of the form
N(Ax+b, W W^T) where
W is a some matrix of dz x dz.
Attributes:
dx (int): dimensionality of the observable
variable
dz (int): dimensionality of the latent
mean_fn(nn.Module):
mean function, nn.Linear
W: torch parameter, matrix of size [dz, dz]
"""
def __init__(self, dx, dz, *args, **kwargs):
super(PTCSGaussLinearMean, self).__init__()
self.mean_fn = nn.Linear(dx, dz)
self.W = Parameter(torch.eye(dz))
self.dx = dx
self.dz = dz
def forward(self, noise, X):
W = self.W
mean = self.mean_fn(X)
out = noise @ W + mean
return out
def sample_noise(self, n_sample, X, seed=13):
n = X.shape[0]
return torch.randn(n_sample, n, self.dz)
def sample(self, n_sample, X, seed=3):
with util.TorchSeedContext(seed):
noise = self.sample_noise(n_sample, X)
return self.forward(noise, X)
def in_out_shapes(self):
return (self.dz, self.dz)
class CSGRBMBernoulliFamily(ConditionalSampler,
nn.Module):
"""Class representing a conditional distribution
of the form:
\prod_{j=1}^dz Bern(z_j; pj(X)), where
pj(X) = softmax(AjX + bj)
Attributes:
dx (int):
Conditioning variable's dimension
dz (int):
Output variables's dimension
"""
n_cat = 2
def __init__(self, dx, dz):
super(CSGRBMBernoulliFamily, self).__init__()
self.dx = dx
self.dz = dz
self.probs = nn.ModuleList(
[nn.Sequential(
nn.Linear(dx, self.n_cat),
nn.Softmax(dim=-1),
)
for _ in range(dz)
]
)
def forward(self, X):
out = [f(X)
for f in self.probs]
out = torch.stack(out, dim=0)
return out
def sample(self, n_sample, X,
seed=3, *args, **kwargs):
"""
Returns:
torch.Tensor: tensor of size
[n_sample,] + X.shape + [2,]
"""
probs = self.forward(X)
temp = torch.tensor([1.], dtype=X.dtype)
if self.training:
m = dists.RelaxedOneHotCategorical(
temp,
probs,
)
return m.rsample([n_sample]).permute(0, 2, 1, 3)
else:
m = dists.OneHotCategorical(probs)
return m.sample([n_sample]).permute(0, 2, 1, 3)
class CSGRBMPosterior(ConditionalSampler):
"""The posterior distribution of a Gaussian-Boltzmann
Machine.
Attributes:
grbm (ebm.GRBM)
W (torch.Tensor):
W parameter of grbm
        b (torch.Tensor):
            b parameter of grbm
c (torch.Tensor)
c parameter of grbm
"""
def __init__(self, grbm):
self.grbm = grbm
self.W = grbm.W
self.b = grbm.b
self.c = grbm.c
def sample(self, n_sample, X, seed=13):
"""
Returns:
torch.Tensor: tensor of size
[n_sample,] + X.shape + [2,]
"""
W = self.W
c = self.c
probs = torch.sigmoid(-(X@W+c))
probs = torch.stack([1.-probs, probs], dim=2)
m = dists.OneHotCategorical(probs)
with util.TorchSeedContext(seed):
H = m.sample([n_sample])
return H
class CSFactorisedGaussian(ConditionalSampler, nn.Module):
def __init__(self, dx, dz, dh):
super(CSFactorisedGaussian, self).__init__()
self.dx = dx
self.dz = dz
self.dh = dh
self.layer_1 = nn.Linear(dx, dh)
#self.layer_2 = nn.Linear(dh, dh)
self.layer_2_m = nn.Linear(dh, dz)
self.layer_2_v = nn.Linear(dh, dz)
def forward(self, X):
h = self.layer_1(X).relu()
#h = self.layer_2(h).relu()
m = self.layer_2_m(h)
v = self.layer_2_v(h)
v = nn.functional.softplus(v)
return m, v
def sample(self, n_sample, X, seed=3):
n = X.shape[0]
m, v = self.forward(X)
d = m.shape[1]
with util.TorchSeedContext(seed):
noise = torch.randn(n_sample, n, d)
return v * noise + m
def log_prob(self, X, Z):
m, v = self.forward(X)
return dists.Normal(m, v).log_prob(Z).sum(-1)
def likelihood(self, X):
m, v = self.forward(X)
return dists.Normal(m, v)
class Implicit(ConditionalSampler, nn.Module):
def __init__(self, dx, dz, dh):
super(Implicit, self).__init__()
self.dx = dx
self.dz = dz
self.dh = dh
self.layer_1 = nn.Linear(dx+dz, dh)
self.layer_2 = nn.Linear(dh, dz)
self.elu = nn.ELU()
def forward(self, X):
h = self.layer_1(X).relu()
# h = self.elu(self.layer_1(X))
m = self.layer_2(h)
return m
def sample(self, n_sample, X, seed=3):
n, d = X.shape
noise = torch.randn(n_sample, n, self.dz)
X = torch.stack([X]*n_sample, axis=0)
X = torch.cat([X, noise], -1)
return self.forward(X)
class CSNoiseTransformerAdapter(CSNoiseTransformer):
"""Construct a CSNoiseTransformer having a given
torch.nn.Module as the transformation function.
Attributes:
- module (torch.nn.Module):
A module serves as a forward function.
Assume that it has arguments f(X, noise)
- noise_sampler:
noise sampler
- in_out_shapes:
tuple of the input and output shapes of noise
- tensor_type:
defines a tensor type of the noise
"""
def __init__(self, module, noise_sampler, in_out_shapes, tensor_type=torch.cuda.FloatTensor):
super(CSNoiseTransformerAdapter, self).__init__()
self.module = module
self.noise_sampler = noise_sampler
        self._in_out_shapes = in_out_shapes
self.tensor_type = tensor_type
def forward(self, noise, X, *args, **kwargs):
return self.module.forward(noise, X, *args, **kwargs)
def sample_noise(self, n_sample, n, seed=13):
"""Returns (n_sample, n,)+in_out_shape[0] tensor"""
tt = self.tensor_type
noise = self.noise_sampler(n_sample, n, seed).type(tt)
return noise
    def in_out_shapes(self):
        return self._in_out_shapes
def sample(self, n_sample, X, seed=13):
n = X.shape[0]
noise = self.sample_noise(n_sample, n, seed)
Z = self.forward(noise, X)
return Z
class CSCategoricalMixture(CSNoiseTransformer):
def __init__(self, din, dh1, dh2, dout, dnoise,
n_classes, n_logits, temperature=1.):
super(CSCategoricalMixture, self).__init__()
self.din = din
self.dout = dout
self.dnoise = dnoise
self.n_logits = n_logits
self.n_classes = n_classes
self.feat = net.TwoLayerFC(din+dnoise, dh1, dh2, dout)
self.mlinear = net.MultipleLinear(dout, n_classes, n_logits,
bias=True)
self.temperature = temperature
def forward(self, noise, X):
n_sample = noise.shape[0]
X_ = torch.stack([X]*n_sample)
Xin = torch.cat([X_, noise], axis=-1)
return (self.feat(Xin))
def sample_noise(self, n_sample, n, seed=14):
noise = torch.randn(n_sample, n, self.dnoise)
return noise
def in_out_shapes(self):
return ((self.dnoise,), self.dout)
def sample(self, n_sample, X, seed=13):
n = X.shape[0]
noise = self.sample_noise(n_sample, n)
out = self.forward(noise, X).relu()
logits = self.mlinear(out) / self.temperature
if self.training:
m = dists.RelaxedOneHotCategorical(
self.temperature,
logits=logits,
)
sample = m.rsample()
# print(sample)
return sample
m = dists.OneHotCategorical(logits=logits)
sample = m.sample()
return sample
def main():
from scem.ebm import PPCA
seed = 13
torch.manual_seed(seed)
n = 200
dx = 4
dz = 2
X = torch.randn([n, dx])
Z = torch.ones([n, dz])
W = torch.randn([dx, dz])
var = torch.tensor([10.0])
ppca = PPCA(W, var)
s = ppca.score_joint_latent(X, Z)
ppca_post_score = -(Z@W.T@W-X@W)/var - Z
cs = PTPPCAPosterior(ppca)
# cs.apply(init_weights)
post_score_mse = torch.mean((s-ppca_post_score)**2)
print('Posterior score mse: {}'.format(post_score_mse))
n_sample = 300
assert isinstance(ppca, PPCA)
approx_score = stein.ApproximateScore(
ppca.score_joint_obs, cs)
marginal_score_mse = (torch.mean(
(approx_score(X, n_sample=n_sample)-ppca.score_marginal_obs(X))**2))
print('Marginal score mse: {}'.format(marginal_score_mse))
if __name__ == '__main__':
main()
| 2.640625
| 3
|
complexnn.py
|
iseeklin/Electromagnetic-Signal-Recognition-Using-Deep-Learning
| 0
|
12774819
|
<filename>complexnn.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class ComplexConv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(ComplexConv, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.padding = padding
## Model components
self.conv_re = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
self.conv_im = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
    def forward(self, x):  # shape of x: [batch, 2*channel, length], real half then imaginary half along dim 1
n = x.size()[1]
m = int(n/2)
x_real = x[:, :m]
x_imag = x[:, m:]
        # complex product: (Wr + i*Wi)(xr + i*xi) = (Wr*xr - Wi*xi) + i*(Wr*xi + Wi*xr)
        real = self.conv_re(x_real) - self.conv_im(x_imag)
        imaginary = self.conv_re(x_imag) + self.conv_im(x_real)
output = torch.cat((real, imaginary), dim=1)
return output
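# Minimal usage sketch (illustrative only; shapes and sizes below are assumptions):
# real and imaginary channels are stacked along dim 1, so a batch of 8 complex
# signals with 4 channels and 128 samples is passed as a [8, 8, 128] tensor.
if __name__ == '__main__':
    conv = ComplexConv(in_channel=4, out_channel=16, kernel_size=3, padding=1)
    x = torch.randn(8, 2 * 4, 128)  # first 4 channels: real part, last 4: imaginary part
    y = conv(x)
    print(y.shape)  # expected torch.Size([8, 32, 128]): 16 real + 16 imaginary output channels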
| 2.59375
| 3
|
Chat.py
|
TheTimgor/sadbot-3
| 0
|
12774820
|
<gh_stars>0
import json
import os
import pickle
from collections import Counter
from heapq import nlargest
from random import choice, sample
import math
import nltk
import numpy
from nltk import NaiveBayesClassifier
from nltk import word_tokenize
from nltk.parse import stanford
from nltk.tag import StanfordNERTagger
# suppress PyCharm's unresolved-reference warning for pattern.en
# noinspection PyUnresolvedReferences
from pattern.en import conjugate, tenses
from sacremoses import MosesDetokenizer
nltk.download('nps_chat')
from nltk.corpus import nps_chat
cwd = os.getcwd()
os.environ['CLASSPATH'] = os.getcwd() + '/nlp/stanford-parser-full-2018-10-17'
try:
with open('config.json') as f:
config = json.load(f)
except FileNotFoundError:
pass
history = []
vocab = {}
named_entities = []
detokenizer = MosesDetokenizer()
parser = stanford.StanfordParser(model_path=cwd+'/nlp/englishPCFG.ser.gz')
tagger = StanfordNERTagger(cwd+'/nlp/stanford-ner-2018-10-16/classifiers/english.muc.7class.distsim.crf.ser.gz',
cwd+'/nlp/stanford-ner-2018-10-16/stanford-ner-3.9.2.jar')
def traverse_for(tree, tags):
# print(tree)
if type(tree) == str:
return None
elif tree.label() in tags:
return tree.leaves()
elif tree[0]:
for a in tree:
trav = traverse_for(a, tags)
if trav is not None:
return trav
def sentence_features(s, v=vocab):
if type(s) == str:
s = word_tokenize(s)
s_words = set(s)
features = {}
for w in v:
features[f'contains({w})'] = w in s_words
return features
def generate_greeting_classifier(s):
train, test = s[100:], s[:100]
global greeting_classifier
greeting_classifier = NaiveBayesClassifier.train(train)
# print(nltk.classify.accuracy(greeting_classifier, test))
def generate_greeting_classifier_nps():
global greeting_classifier
try:
with open('greet_classifier.pickle', 'rb') as f:
greeting_classifier = pickle.load(f)
except FileNotFoundError:
v = set([w.lower() for w in nps_chat.words()])
posts = nps_chat.xml_posts()[:5000]
h = [(sentence_features(s.text.lower(), v=v), s.get('class') if s.get('class') in ['Greet', 'Bye'] else 'Other')
for s in posts]
generate_greeting_classifier(h)
with open('greet_classifier.pickle', 'wb') as f:
pickle.dump(greeting_classifier, f)
def classify_greeting(s):
v = set([w.lower() for w in nps_chat.words()])
return greeting_classifier.classify(sentence_features(s.lower(), v=v))
def classify_question(s):
if type(s) == str:
s = parser.parse_one(word_tokenize(s))
if traverse_for(s, ['SBARQ']):
return 'wh-question'
elif traverse_for(s, ['SQ']):
return 'y/n-question'
else:
return 'other'
def cosine_dic(dic1, dic2):
numerator = 0
dena = 0
for key1 in dic1:
val1 = dic1[key1]
numerator += val1*dic2.get(key1, 0.0)
dena += val1*val1
denb = 0
for val2 in dic2.values():
denb += val2*val2
try:
return numerator/math.sqrt(dena*denb)
except ZeroDivisionError:
return 0
def word_vectorize(sent):
vector = {}
words = word_tokenize(sent)
counts = dict(Counter(words))
for w in vocab:
if w in counts:
vector[w] = counts[w] / vocab[w]
else:
vector[w] = 0
# print({a: vector[a] for a in vector if vector[a] > 0})
return vector
def find_question_root(s):
if type(s) == str:
s = word_tokenize(s)
t = parser.parse_one(s)
# t.draw()
vp = traverse_for(t, ['VBP', 'VBD', 'VBZ', 'MD'])
np = traverse_for(t, ['NP'])
return np, vp
def fix_np(np):
np = [a.lower() for a in np]
if 'you' in np and ('i' in np or 'me' in np):
return np
if 'you' in np:
return ['i' if a == 'you' else a for a in np]
if 'i' in np:
return ['you' if a == 'i' else a for a in np]
return np
def fix_vp(np, vp):
verb = detokenizer.detokenize(vp)
tnss = tenses(verb)
if np == ['i']:
tns = [a for a in tnss if 2 in a][0]
return [conjugate(verb,
tense=tns[0],
person=1,
number=tns[2],
mood=tns[3],
aspect=tns[4])]
if np == ['you']:
tns = [a for a in tnss if 1 in a][0]
return [conjugate(verb,
tense=tns[0],
person=2,
number=tns[2],
mood=tns[3],
aspect=tns[4])]
return vp
def uninvert(s):
np, vp = find_question_root(s)
np = fix_np(np)
vp = fix_vp(np, vp)
# print(np,vp)
return detokenizer.detokenize(np)+' '+detokenizer.detokenize(vp)
def why_answer(s):
np, vp = find_question_root(s)
np = fix_np(np)
return 'because '+detokenizer.detokenize(np)
def build_model(h):
global model
model = Model(h)
global history
history = h
global vocab
for w in word_tokenize('\n'.join(h)):
if w.lower() in vocab:
vocab[w.lower()] += 1
else:
vocab[w.lower()] = 1
# print(vocab)
global named_entities
try:
with open('named.pickle', 'rb') as f:
named_entities = pickle.load(f)
except FileNotFoundError:
h_tokens = [word_tokenize(s) for s in h]
tagged = tagger.tag_sents(h_tokens)
named_entities = [tagged[0][0]]
for n_e in tagged:
for i in range(1, len(n_e)):
if n_e[i][1] == n_e[i - 1][1]:
named_entities[-1] = (named_entities[-1][0] + ' ' + n_e[i][0], n_e[i][1])
else:
named_entities.append(n_e[i])
# print(named_entities)
with open('named.pickle', 'wb') as f:
pickle.dump(named_entities, f)
generate_greeting_classifier_nps()
# print('finding greetings')
# greeting_classified = {s: classify_greeting(s) for s in h[:100]}
# print('found greetings')
global hellos, byes
# hellos = {s: greeting_classified[s] for s in greeting_classified if greeting_classified[s] == 'Greet'}
# byes = {s: greeting_classified[s] for s in greeting_classified if greeting_classified[s] == 'Bye'}
hellos = {s.text: s.get('class') for s in nps_chat.xml_posts() if s.get('class') == 'Greet'}
byes = {s.text: s.get('class') for s in nps_chat.xml_posts() if s.get('class') == 'Bye'}
print('ready')
class Model:
def __init__(self, hist, state_size=2):
self.model = []
self.state_size = state_size
if type(hist) == str:
hist = hist.split('\n')
for s in hist:
sent = word_tokenize(s)
sent.insert(0, '__begin__')
sent.insert(0, '__begin__')
sent.append('__end__')
s_model = []
for i in range(state_size - 1, len(sent) - 1):
state = sent[i - state_size + 1:i + 1]
s_model.append([[a.lower() for a in state], sent[i + 1]])
for p in s_model:
new = True
for m in self.model:
if m[0] == p[0]:
if p[1] in m[1]:
m[1][p[1]] += 1
else:
m[1][p[1]] = 1
new = False
if new:
self.model.append([p[0], {p[1]: 1}])
def make_sentence(self, seed='', threshold=0.4):
if type(seed) == str:
seed = word_tokenize(seed)
sent = ['__begin__', '__begin__'] + seed
while sent[-1] != '__end__':
weights = {}
for i in range(self.state_size, -1, -1):
state = [a.lower() for a in sent[-i:]]
# print('state: '+str(state))
for s in self.model:
# print(s[0][-i:], state)
if [a.lower() for a in s[0][-i:]] == state:
for w in s[1]:
# print(w)
if w in weights:
weights[w] += s[1][w]
else:
weights[w] = s[1][w]
if weights:
break
# print(weights)
counts = []
words = []
for w in weights:
counts.append(weights[w])
words.append(w)
total = sum(counts)
probs = [a/total for a in counts]
draw = numpy.random.choice(words, 1, p=probs)[0]
sent.append(draw)
# print('sent: ' + str(sent))
return detokenizer.detokenize(sent[2:-1])
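# Illustration of the Model above (the training sentences here are hypothetical):
# with state_size=2 each 2-word state maps to a weighted choice of next words,
# e.g. trained on "i like cats" and "i like dogs" it stores
# [['i', 'like'], {'cats': 1, 'dogs': 1}], and make_sentence() walks these
# states from '__begin__ __begin__' to '__end__', backing off to shorter
# suffixes of the current state when no full match exists.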
def generate_relevant_sentence(vector_in, s):
sentences = {}
for i in range(0, 200):
sentence = model.make_sentence(s)
v_sentence = word_vectorize(sentence)
sentences[sentence] = cosine_dic(vector_in, v_sentence)
closest_out = nlargest(20, sentences, key=sentences.get)
return choice(closest_out)
def get_response(m):
vector_m = word_vectorize(m)
t = parser.parse_one(word_tokenize(m))
q_typ = classify_question(t)
if q_typ == 'y/n-question':
return choice(['yes', 'yup', 'uh-huh', 'no', 'nope', 'naw'])
if q_typ == 'wh-question':
wh_phrase = traverse_for(t, ['WHADJP', 'WHAVP', 'WHNP', 'WHPP', 'WHADVP'])
wh_phrase = [w.lower() for w in wh_phrase]
if 'who' in wh_phrase or 'whose' in wh_phrase or 'who\'s' in wh_phrase or 'whom' in wh_phrase:
people = [w[0] for w in named_entities if w[1] == 'PERSON']
return choice(people)
if 'where' in wh_phrase:
places = [w[0] for w in named_entities if w[1] == 'LOCATION']
return choice(places)
if 'when' in wh_phrase:
times = [w[0] for w in named_entities if w[1] == 'DATE' or w[1] == 'TIME']
return choice(times)
if 'why' in wh_phrase:
seeder = why_answer(m)
return generate_relevant_sentence(vector_m, seeder)
if 'how' in wh_phrase or 'what' in wh_phrase:
seeder = uninvert(m)
return generate_relevant_sentence(vector_m, seeder)
g_typ = classify_greeting(m)
# print(g_typ)
if g_typ == 'Greet':
poss_hellos = {}
for s in hellos:
vector_s = word_vectorize(s)
poss_hellos[s] = cosine_dic(vector_s, vector_m)
largest = nlargest(10, poss_hellos, key=poss_hellos.get)
return choice(largest)
if g_typ == 'Bye':
poss_byes = {}
        for s in byes:
vector_s = word_vectorize(s)
poss_byes[s] = cosine_dic(vector_s, vector_m)
largest = nlargest(10, poss_byes, key=poss_byes.get)
return choice(largest)
sims = {}
for s in history:
if not config['prefix'] in s:
vector_s = word_vectorize(s)
sims[s] = cosine_dic(vector_m, vector_s)
# print(sims)
largest = nlargest(10, sims, key=sims.get)
# print(largest)
seeders = sample(largest, k=5)
sentences = {}
for seeder in seeders:
# print(seeder)
seeder = word_tokenize(seeder)[:2]
for i in range(0, 20):
sentence = model.make_sentence(seeder)
v_sentence = word_vectorize(sentence)
sentences[sentence] = cosine_dic(vector_m, v_sentence)
closest_out = nlargest(5, sentences, key=sentences.get)
return choice(closest_out)
| 2.359375
| 2
|
recipes/Python/577611_edit_dictionary_values_possibly_restrained/recipe-577611.py
|
tdiprima/code
| 2,023
|
12774821
|
<filename>recipes/Python/577611_edit_dictionary_values_possibly_restrained/recipe-577611.py
"""
DICTIONARY INTERFACE FOR EDITING VALUES
creates labels/edits/menubutton widgets in a TkFrame to edit dictionary values
use: apply(frame,dict,position)
"""
import Tkinter as tk
def cbMenu(controlV,value,btn= None):
controlV.set(str(value))
if not (btn== None):
btn.config(text= str(value))
def updateMB(ctrlV, value):
ctrlV.set(value)
def doLambda(f,*args):
"""Tips: Create lambda within for loop with fixed local variable
without interference across iterations"""
def g(): return f(*args)
return g
def apply(root,d,pos):
"""Creates interface for dictionnary d in root at given grid position """
"TODO: repercuter kwargs"
(x,y,w,h)= pos
lbs= []
saisies= dict()
entries= dict()
for (n,(k,v)) in enumerate(d.iteritems()):
assert (k not in saisies)
l= tk.Label(root,text=str(k))
l.grid(row=n+x,column=y)
if isinstance(v,list):
"""value= list => multiple choice => use menubutton"""
#saisies[k]= tk.StringVar(name=str(n),value= str(v[0]))
saisies[k]= tk.StringVar(value= str(v[0]))
ent=tk.Menubutton(root,textvariable=saisies[k],relief="sunken")
ent.m=tk.Menu(ent,tearoff=0)
ent.config(menu=ent.m)
for (kk,possible) in enumerate(v):
possibleSaved= "%s" %possible
ent.m.add_command(label=str(possible), command= doLambda(updateMB,saisies[k],str(d[k][kk]) ) )
print possible
else:
"""value is not a list => classical edit => use Entry"""
#saisies[k]= tk.StringVar(name=str(n),value= str(v))
saisies[k]= tk.StringVar(value= str(v))
ent= tk.Entry(textvariable=saisies[k])#,width=30)
ent.grid(row=n+x,column=y+1)
entries[k]= ent
return saisies
def get(strVarDict):
d= {}
for (k,v) in strVarDict.iteritems():
#try: v= float(v)
#except: pass
d[k]=v.get()
return d
def main():
"EXAMPLE"
root = tk.Tk()
#d= {'oui':1, 'non':'non'}
d= {'oui':1,'a':'b', 'non':['?','!non'],'mode':[1.1,2.1,3.1]}
v= tk.StringVar(value= "Open File Dialog")
m=tk.Menubutton(root,textvariable=v,relief="raised")
m.grid(row=2,column=1)
mm=tk.Menu(m,tearoff=0)
tk.Button(root, textvariable=v, command=lambda:v.set('oui')).grid(row=1,column=1)
mm.add_command(label="go", command=lambda: cbMenu(v,"non"))
m.config(menu=mm)
s= apply(root,d,(0,2,0,0))
print isinstance(d, dict)
root.mainloop()
#print d
print s
for (k,v) in s.iteritems():
print str(k), '->',str(v.get())
def testindependance():
root = tk.Tk()
d= {'oui':1,'a':'b', 'non':['?','!non'],'mode':[1.1,2.1,3.1]}
s= apply(root,d,(0,2,0,0))
dd= {'oui':1,'a':'b', 'non':['?','!non'],'mode':[1.1,2.1,3.1]}
ss= apply(root,dd,(0,5,0,0))
print "s =",s
print "ss=",ss
print isinstance(d, dict)
root.mainloop()
#print d
#print s
for (k,v) in s.iteritems():
print str(k), '->',str(v.get())
print "-"*10
for (k,v) in ss.iteritems():
print str(k), '->',str(v.get())
print "="*10
print get(s)
print get(ss)
if __name__ == '__main__':
main()
#testindependance()
| 2.765625
| 3
|
src/interface.py
|
luizeduardomr/ScrapingNews
| 0
|
12774822
|
import os
import PySimpleGUI as sg
sg.change_look_and_feel('DarkAmber') # colour
# layout of window
layout = [
[sg.Frame(layout=[
[sg.Radio('1. Estadao', 1, default=False, key='estadao'),
sg.Radio('2. Folha', 1,
default=False, key='folha'),
sg.Radio('3. Uol Notícias', 1, default=False, key='uol')]],
title='Selecione o site para a pesquisa', title_color='white',
relief=sg.RELIEF_SUNKEN, tooltip='Use these to set flags')],
[sg.Text('Nome do arquivo:'), sg.InputText(key='nomearquivo')],
[sg.Text('Palavras chaves:'), sg.InputText(key='palavrachave')],
[sg.Text('Quantidade de resultados:'), sg.InputText(key='quantidade')],
[sg.Submit('Pesquisar'), sg.Button('Cancelar')],
]
window = sg.Window('Mudanças Climáticas Search', layout) # make the window
event, values = window.read()
def Iniciar():
nomearquivo = values['nomearquivo']
palavrachave = values['palavrachave']
quantidade = values['quantidade']
count = 0
while count == 0:
if event in (None, 'Cancelar'):
count+=1
return 'Cancelou o programa'
elif values['estadao'] == True:
opcao = 'estadao'
count+=1
elif values['folha'] == True:
opcao = 'folha'
count+=1
elif values['uol'] == True:
opcao = 'uol'
count+=1
return nomearquivo, palavrachave, opcao, quantidade
window.close()
| 3.21875
| 3
|
features/steps/managers/kobiton_manager.py
|
lordkyzr/launchkey-python
| 9
|
12774823
|
<filename>features/steps/managers/kobiton_manager.py
import requests
from time import sleep
class Version:
def __init__(self, id, state=None, version=None, native_properties=None, latest=None):
"""
Kobiton App Version.
Note that no values are required based on the spec so any value can
default to None.
        See: https://api.kobiton.com/docs/#app
:param id:
:param state:
:param version:
:param native_properties:
:param latest:
"""
self.id = id
self.state = state
self.version = version
self.native_properties = native_properties
self.latest = latest
class App:
def __init__(self, id, name=None, state=None, created_at=None, private_access=None, os=None, created_by=None,
bypass=None, organization_id=None, icon_url=None, versions=None):
"""
Kobiton app
Note that no values are required based on the spec so any value can
default to None.
See: https://api.kobiton.com/docs/#app
:param id:
:param name:
:param state:
:param created_at:
:param private_access:
:param os:
:param created_by:
:param bypass:
:param organization_id:
:param icon_url:
:param versions:
"""
self.id = id
self.name = name
self.state = state
self.created_at = created_at
self.private_access = private_access
self.os = os
self.created_by = created_by
self.bypass = bypass
self.organization_id = organization_id
self.icon_url = icon_url
self.versions = versions
def __repr__(self):
return "App <id={id}, name=\"{name}\", state=\"{state}\">".format(
id=self.id,
name=self.name,
state=self.state
)
class Device:
def __init__(self, id, udid, is_booked, is_hidden, is_online, model_name, device_name,
resolution, platform_name, platform_version, installed_browsers, support,
device_image_url, is_favorite, is_cloud, is_my_org, is_my_own, hosted_by):
"""
        Kobiton device
Note that no values are required based on the spec so any value can
default to None.
See: https://api.kobiton.com/docs/#clouddevice
:param id:
:param udid:
:param is_booked:
:param is_hidden:
:param is_online:
:param model_name:
:param device_name:
:param resolution:
:param platform_name:
:param platform_version:
:param installed_browsers:
:param support:
:param device_image_url:
:param is_favorite:
:param is_cloud:
:param is_my_org:
:param is_my_own:
:param hosted_by:
"""
self.id = id
self.udid = udid
self.is_booked = is_booked
self.is_hidden = is_hidden
self.is_online = is_online
self.model_name = model_name
self.device_name = device_name
self.resolution = resolution
self.platform_name = platform_name
self.platform_version = platform_version
self.installed_browser = installed_browsers
self.support = support
self.device_image_url = device_image_url
self.is_favorite = is_favorite
self.is_cloud = is_cloud
self.is_my_org = is_my_org
self.is_my_own = is_my_own
self.hosted_by = hosted_by
def __repr__(self):
return "Device <{device_name}>".format(device_name=self.device_name)
class KobitonManager:
def __init__(self, username, sdk_key, url='https://api.kobiton.com', api_version='v1'):
"""
Manager for interacting with Kobiton
        :param username: Kobiton username
:param sdk_key: Kobiton sdk key associated with the given username
:param url: Kobiton API url
:param api_version: Kobiton API version
"""
self.__username = username
self.__sdk_key = sdk_key
self.__url = url
self.__api_version = api_version
def _create_request(self, method, endpoint, json=None, data=None,
params=None):
"""
        Creates a request to the Kobiton API
:param method: HTTP method to use
        :param endpoint: API endpoint to query, e.g. devices, sessions, user, app
:param json: Optional. JSON body data to include.
:param data: Optional. Dictionary, list of tuples, bytes, or file-like
object to send in the body.
:param params: Optional. GET parameters to include.
:return: Dictionary containing response data or boolean stating success
status if no data was returned.
"""
response = getattr(requests, method.lower())(
self.__url + "/" + self.__api_version + "/" + endpoint,
headers={
'Accept': 'application/json'
},
auth=(self.__username, self.__sdk_key),
data=data,
json=json,
params=params
)
response.raise_for_status()
return response.json() if response.text != "OK" else response.ok
def _generate_upload_url(self, filename):
"""
Generates an upload URL
https://api.kobiton.com/docs/#generate-upload-url
:param filename:
:return: Dictionary containing appPath and url (S3 bucket url).
"""
return self._create_request('post', 'apps/uploadUrl/', json={
"filename": filename
})
def _create_app(self, app_name, app_path):
"""
Creates an application to be accessed by Kobiton devices
https://api.kobiton.com/docs/#create-application-or-version
        :param app_name: Designated app filename, e.g. my_app.apk
:param app_path: App path returned by the _generate_upload_url()
:return: Dictionary containing filename and appId keys
"""
return self._create_request('post', 'apps', json={
"filename": app_name,
"appPath": app_path
})
def _upload_app_to_s3(self, app_path, s3_url):
"""
Uploads a given app to a S3 url
:param app_path: Filepath to the app to be uploaded.
:param s3_url: S3 URL to upload to. This url should have been returned
by _generate_upload_url().
:return: None
"""
with open(app_path, 'rb') as f:
data = f.read()
response = requests.put(
s3_url,
data=data,
headers={
'Content-Type': 'application/octet-stream',
'x-amz-tagging': 'unsaved=true'
}
)
response.raise_for_status()
def get_apps(self):
"""
Get list of applications which were added to the Apps Repo.
https://api.kobiton.com/docs/#get-applications
:return: List of kobiton_manager.App objects.
"""
return [
App(
app['id'],
app['name'],
app['state'],
created_at=app.get('createdAt'),
private_access=app.get('privateAccess'),
os=app.get('os'),
created_by=app.get('createdBy'),
bypass=app.get('bypass'),
organization_id=app.get('organizationId'),
icon_url=app.get('iconUrl'),
versions=[
Version(
version['id'],
version['state'],
version['version'],
version['nativeProperties'],
version.get('latest')
) for version in app.get('versions', [])
]
) for app in self._create_request('get', 'apps').get('apps', [])
]
def get_app(self, app_id):
"""
Get information about an application.
https://api.kobiton.com/docs/#get-an-application
:param app_id: The ID to the app
:return: kobiton_manager.App object
"""
app = self._create_request('get', 'apps/%s' % app_id)
return App(
app['id'],
app['name'],
app['state'],
created_at=app.get('createdAt'),
private_access=app.get('privateAccess'),
os=app.get('os'),
created_by=app.get('createdBy'),
bypass=app.get('bypass'),
organization_id=app.get('organizationId'),
icon_url=app.get('iconUrl'),
versions=[
Version(
version['id'],
version['state'],
version['version'],
version['nativeProperties'],
version.get('latest')
) for version in app.get('versions', [])
]
)
def upload_app(self, app_path, app_name=None, retrieve_app_status=False):
"""
Uploads an application via Kobiton's application upload flow:
https://docs.kobiton.com/basic/app-repository/integrate-apps-repo-with-ci/
:param app_path: Filepath to the app to be uploaded.
:param app_name: Optional. App name to label the uploaded app as.
:param retrieve_app_status: Whether to pull the full app information
after upload. If not, an app with only id and the uploaded version id
will be returned.
:return: kobiton_manager.App object
"""
app_name = app_name if app_name else app_path.split("/")[-1]
upload_data = self._generate_upload_url(app_name)
self._upload_app_to_s3(app_path, upload_data['url'])
app = self._create_app(app_name, upload_data['appPath'])
if retrieve_app_status:
try:
app = self.get_app(app['appId'])
except requests.HTTPError:
# We seem to be getting a 500 if we query
# immediately after creating the app
sleep(2)
app = self.get_app(app['appId'])
else:
app = App(app['appId'], versions=[Version(app['versionId'])])
return app
def delete_app(self, app_id):
"""
Deletes a given APP ID from Kobiton
:param app_id:
:return:
"""
return self._create_request('delete', 'apps/%s' % app_id)
def get_devices(self):
"""
Retrieves a list of Kobiton devices
:return: List of kobiton_manager.Device objects
"""
response = self._create_request(
'get',
'devices'
)
return [
Device(
device.get('id'),
device.get('udid'),
device.get('isBooked'),
device.get('isHidden'),
device.get('isOnline'),
device.get('modelName'),
device.get('deviceName'),
device.get('resolution'),
device.get('platformName'),
device.get('platformVersion'),
device.get('installedBrowsers'),
device.get('support'),
device.get('deviceImageUrl'),
device.get('isFavorite'),
device.get('isCloud'),
device.get('isMyOrg'),
device.get('isMyOwn'),
device.get('hostedBy')
) for device in response['cloudDevices']
]
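# Usage sketch (the credentials, key and file path below are hypothetical,
# shown only to illustrate the flow of the manager above):
#   manager = KobitonManager('my-username', 'my-sdk-key')
#   app = manager.upload_app('/tmp/my_app.apk', retrieve_app_status=True)
#   online = [d for d in manager.get_devices() if d.is_online and not d.is_booked]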
| 2.359375
| 2
|
main.py
|
LucasRibeiroRJBR/Modelo_Conexao_Python_Oracle
| 1
|
12774824
|
<filename>main.py<gh_stars>1-10
import cx_Oracle, os
try:
connection = cx_Oracle.connect(
user='PY',
password='<PASSWORD>',
dsn='localhost:1521/XE',
encoding='UTF-8'
)
print(connection.version)
while True:
id = input('\nDigite o ID do aluno (0 para sair) -> ')
os.system('cls')
if id == '0':
break
else:
pass
c = connection.cursor()
rows = c.execute(f'SELECT * FROM XPTO.STUDENTS WHERE ID = {id}').fetchall()
print(f'+{"-"*3}+{"-"*50}+{"-"*80}+')
print(f'|{"ID":^3}|{"NOME":^50}|{"E-MAIL":^80}|')
print(f'|{"-"*3}+{"-"*50}+{"-"*80}|')
print(f'|{rows[0][0]:^3}|{rows[0][1]:^50}|{rows[0][2]:^80}|')
print(f'+{"-"*3}+{"-"*50}+{"-"*80}+')
except:
pass
| 2.796875
| 3
|
src/djanban/apps/dev_environment/migrations/0003_auto_20160925_1811.py
|
diegojromerolopez/djanban
| 33
|
12774825
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-25 16:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('boards', '0028_auto_20160925_1809'),
('members', '0008_auto_20160923_2056'),
('dev_environment', '0002_auto_20160921_1748'),
]
operations = [
migrations.AlterModelOptions(
name='interruption',
options={'verbose_name': 'Interruption', 'verbose_name_plural': 'Interruptions'},
),
migrations.AlterIndexTogether(
name='interruption',
index_together=set([('datetime', 'board', 'member'), ('member', 'datetime', 'board')]),
),
]
| 1.585938
| 2
|
dashboard-backend/dashboard/mock_stats.py
|
2021hy-team6/dashboard
| 0
|
12774826
|
<filename>dashboard-backend/dashboard/mock_stats.py<gh_stars>0
import random
import string
import datetime
class MockStats:
def __init__(self, psql):
self.psql = psql
def get_random_text(self, length):
name = [random.choice(string.ascii_letters)
for _ in range(random.choice(range(length-2, length+2)))]
name[random.choice(range(3, length-3))] = ' '
name = ''.join(name).capitalize()
return name
def get_random_datetime(self, yyyy, mm, dd, timedelta):
dttm = datetime.datetime(yyyy, mm, dd) + random.random() * timedelta
return dttm.strftime("%Y-%m-%d %H:%M:%S")
def create_categories(self):
if self.psql.query('select count(*) as cnt from category')[0]['cnt'] > 0:
return {'super_category': self.psql.query('select * from super_category'),
'category': self.psql.query('select * from category')}
# Super Categories
sql_string = """INSERT INTO super_category
(sup_name, is_recyclable)
VALUES (%s, %s)"""
# Insertion
for _ in range(10):
self.psql.execute(sql_string, (self.get_random_text(10), True))
# Set litter
self.psql.execute(sql_string, ('Litter', False))
self.psql.execute(sql_string, ('Uncategorized', False))
# Categories
sql_string = """INSERT INTO category
(obj_name, sup_id)
VALUES (%s, %s)"""
# Get the range of super categories
sup_ids = [row['sup_id'] for row in self.psql.query('select sup_id from super_category')]
# Insertion
for sup_id in sup_ids:
for _ in range(random.choice(range(8, 15))):
self.psql.execute(sql_string, (self.get_random_text(15), sup_id))
return {'super_category': self.psql.query('select * from super_category'),
'category': self.psql.query('select * from category')}
def create_detections(self, yyyyMMdd='20211101', days=30, images=2000):
if self.psql.query('select count(*) as cnt from detection')[0]['cnt'] > 0:
return {'image': self.psql.query('select * from image'),
'detection': self.psql.query('select * from detection')}
# Image
sql_string = """INSERT INTO image
(msec, created_at)
VALUES (%s, timestamp %s)"""
year = int(yyyyMMdd[0:4])
month = int(yyyyMMdd[4:6])
day = int(yyyyMMdd[6:8])
for _ in range(images):
self.psql.execute(sql_string,
(random.choice(range(100, 300)),
self.get_random_datetime(year, month, day, datetime.timedelta(days=days))))
# Detection
sql_string = """INSERT INTO detection
(img_id, obj_name, score)
VALUES (%s, %s, %s)"""
img_ids = [row['img_id'] for row in self.psql.query('select img_id from image')]
obj_names = [row['obj_name'] for row in self.psql.query('select obj_name from category')]
# Insertion
for img_id in img_ids:
for _ in range(random.choice(range(1, 6))):
self.psql.execute(sql_string, (img_id, random.choice(obj_names), round(random.choice(range(550, 999))*0.001, 3)))
return {'image': self.psql.query('select * from image'),
'detection': self.psql.query('select * from detection')}
| 2.5625
| 3
|
tests/test_zuul_lint.py
|
pycontribs/zuul-lint
| 2
|
12774827
|
import pytest
import sh
def test_invalid():
try:
sh.python(["-m", "zuul_lint", "tests/data/zuul-config-invalid.yaml"])
except sh.ErrorReturnCode_1:
return
except sh.ErrorReturnCode as e:
pytest.fail(e)
pytest.fail("Expected to fail")
def test_valid():
try:
sh.python(["-m", "zuul_lint", "tests/data/zuul-config-valid.yaml"])
except sh.ErrorReturnCode as e:
pytest.fail(e)
| 2.3125
| 2
|
vega/search_space/networks/pytorch/customs/adelaide_nn/mobilenetv2_backbone.py
|
qixiuai/vega
| 12
|
12774828
|
<reponame>qixiuai/vega
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Backbone of mobilenet v2."""
from torchvision.models import MobileNetV2
import torch
import torch.nn as nn
class MobileNetV2Backbone(MobileNetV2):
"""Backbone of mobilenet v2."""
def __init__(self, load_path=None):
"""Construct MobileNetV3Tiny class.
:param load_path: path for saved model
"""
super(MobileNetV2Backbone, self).__init__()
self.features = nn.ModuleList(list(self.features)[:18])
if load_path is not None:
self.load_state_dict(torch.load(load_path), strict=False)
def forward(self, x):
"""Do an inference on MobileNetV2.
:param x: input tensor
:return: output tensor
"""
outs = []
for i, feature in enumerate(self.features):
x = feature(x)
if i in [3, 6, 13, 17]:
outs.append(x)
return outs
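if __name__ == '__main__':
    # Quick shape check (illustrative only): the backbone returns the feature
    # maps tapped at blocks 3, 6, 13 and 17, which for the default torchvision
    # MobileNetV2 settings have 24, 32, 96 and 320 channels at strides 4, 8, 16 and 32.
    backbone = MobileNetV2Backbone()
    feats = backbone(torch.randn(1, 3, 224, 224))
    print([f.shape for f in feats])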
| 2.15625
| 2
|
src/Python27Packages/PCC/PCC/params.py
|
lefevre-fraser/openmeta-mms
| 0
|
12774829
|
<gh_stars>0
from collections import namedtuple #here so we can use a "structure"-like entity
from numpy import *
import gaussquad
#*****************COMPUTATION OF QUADRATURE NODES AND WEIGHTS**************
def params(method=None, m=None, inpt=None, stvars=None):
node = zeros((inpt,max(m)))
weight = zeros((inpt,max(m)))
if method==4 or method==5:
for i in range(0,inpt):
node[i], weight[i] = gaussquad.gaussquad(m[i], stvars[i].dist, stvars[i].param[0], stvars[i].param[1])
if stvars[i].dist == 'BETA':
node[i] = node[i] * (stvars[i].param[3] - stvars[i].param[2]) + stvars[i].param[2]
return node,weight
# Copyright (c) 2011.
# Developed with the sponsorship of the Defense Advanced Research Projects Agency (DARPA).
# Permission is hereby granted, free of charge, to any person obtaining a copy of this data,
# including any software or models in source or binary form, as well as any drawings,
# specifications, and documentation (collectively "the Data"),
# to deal in the Data without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Data,
# and to permit persons to whom the Data is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS,
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
| 2.546875
| 3
|
keanu-python/tests/test_cast.py
|
rs992214/keanu
| 153
|
12774830
|
<gh_stars>100-1000
from keanu.vertex.vertex_casting import (cast_tensor_arg_to_double, cast_tensor_arg_to_integer,
cast_tensor_arg_to_boolean)
from keanu.vertex import cast_to_boolean_vertex, cast_to_integer_vertex, cast_to_double_vertex
from keanu.vartypes import (primitive_types, numpy_types, pandas_types)
import pytest
import numpy as np
import pandas as pd
from typing import Union, Callable
from keanu.vertex import Gaussian
from keanu.vertex.base import Double, Boolean, Integer
@pytest.mark.parametrize("value", [1, 1., True])
@pytest.mark.parametrize("cast_fn, expected_type",
[(cast_tensor_arg_to_double, float), (cast_tensor_arg_to_integer, int),
(cast_tensor_arg_to_boolean, bool), (cast_to_boolean_vertex, Boolean),
(cast_to_integer_vertex, Integer), (cast_to_double_vertex, Double)])
def test_scalar_cast(value: primitive_types, cast_fn: Callable, expected_type: type) -> None:
assert type(cast_fn(value)) == expected_type
@pytest.mark.parametrize("value", [
np.array([1]),
np.array([1.]),
np.array([True]),
np.array([[[1]]]),
np.array([[1, 4], [5, 38]]),
pd.DataFrame(data=[1]),
pd.DataFrame(data=[1.]),
pd.DataFrame(data=[True]),
pd.DataFrame(data=[[1, 2], [4, 5]]),
pd.Series(data=[1]),
pd.Series(data=[1.]),
pd.Series(data=[True]),
pd.Series(data=[1, 3, 4]),
])
@pytest.mark.parametrize("cast_fn, expected_type", [(cast_tensor_arg_to_double, np.floating),
(cast_tensor_arg_to_integer, np.integer),
(cast_tensor_arg_to_boolean, np.bool_)])
def test_nonscalar_tensor_cast(value: Union[numpy_types, pandas_types], cast_fn: Callable, expected_type: type) -> None:
assert cast_fn(value).dtype == expected_type
@pytest.mark.parametrize("value", [
np.array([1]),
np.array([1.]),
np.array([True]),
np.array([[[1]]]),
np.array([[1, 4], [5, 38]]),
pd.DataFrame(data=[1]),
pd.DataFrame(data=[1.]),
pd.DataFrame(data=[True]),
pd.DataFrame(data=[[1, 2], [4, 5]]),
pd.Series(data=[1]),
pd.Series(data=[1.]),
pd.Series(data=[True]),
pd.Series(data=[1, 3, 4]),
])
@pytest.mark.parametrize("cast_fn, expected_type", [(cast_to_double_vertex, Double), (cast_to_integer_vertex, Integer),
(cast_to_boolean_vertex, Boolean)])
def test_nonscalar_vertex_cast(value: Union[numpy_types, pandas_types], cast_fn: Callable, expected_type: type) -> None:
assert type(cast_fn(value)) == expected_type
@pytest.mark.parametrize("cast_fn, cast_to_type",
[(cast_tensor_arg_to_double, float), (cast_tensor_arg_to_integer, int),
(cast_tensor_arg_to_boolean, bool)])
def test_cant_pass_vertex_to_cast_tensor_arg(cast_fn: Callable, cast_to_type: type) -> None:
gaussian = Gaussian(0., 1.)
with pytest.raises(TypeError, match=r"Cannot cast {} to {}".format(type(gaussian), cast_to_type)):
cast_fn(gaussian)
| 2.171875
| 2
|
chap8/data/gen_mxnet_imglist.py
|
wang420349864/dlcv_for_beginners
| 1,424
|
12774831
|
import os
import sys
input_path = sys.argv[1].rstrip(os.sep)
output_path = sys.argv[2]
filenames = os.listdir(input_path)
with open(output_path, 'w') as f:
for i, filename in enumerate(filenames):
filepath = os.sep.join([input_path, filename])
label = filename[:filename.rfind('.')].split('_')[1]
line = '{}\t{}\t{}\n'.format(i, label, filepath)
f.write(line)
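# Example (hypothetical filenames, for illustration): for an input file named
# 000001_3.jpg the label is the part after the underscore, and each output line
# follows MXNet's .lst format "index\tlabel\tpath", e.g.
#   0\t3\t<input_path>/000001_3.jpg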
| 2.84375
| 3
|
forward/schechter.py
|
rprollins/forward
| 0
|
12774832
|
<filename>forward/schechter.py
import numpy as np
from collections import namedtuple
SchechterParameters = namedtuple('SchechterParameters', ['a_phi', 'b_phi', 'a_m', 'b_m', 'alpha'])
def dv_domega_dz(z, cosmology):
    # differential comoving volume element: dV / (dOmega dz) = d_H * d_M^2 / E(z)
    d_h = cosmology.hubble_distance
    d_m = cosmology.comoving_transverse_distance(z)
    e_fac = cosmology.inv_efunc(z)
    return d_h * d_m * d_m * e_fac
def schechter(m, z, parameters):
phi_star = parameters.b_phi * np.exp(parameters.a_phi*z)
m_star = parameters.a_m * z + parameters.b_m
out = 0.4 * np.log(10) * phi_star
out = out * np.power(10, 0.4*(m_star-m)*(parameters.alpha+1))
out = out * np.exp(-np.power(10, 0.4*(m_star-m)))
return out
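# Usage sketch (the parameter values below are made up for illustration only):
if __name__ == '__main__':
    params = SchechterParameters(a_phi=-0.1, b_phi=1e-3, a_m=-0.5, b_m=-20.5, alpha=-1.3)
    m = np.linspace(-24, -16, 5)
    print(schechter(m, z=0.5, parameters=params))  # number density per magnitude bin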
| 2.453125
| 2
|
StinoStarter.py
|
huangxuantao/MyStino
| 2
|
12774833
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Documents
#
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import re
import sublime
import sublime_plugin
st_version = int(sublime.version())
if st_version < 3000:
import stino
else:
from . import stino
class SketchListener(sublime_plugin.EventListener):
def __init__(self):
super(SketchListener, self).__init__()
self.sketch_files_dict = {}
self.file_view_dict = {}
pattern_text = r'^(\S*?):([0-9]+?):'
self.pattern = re.compile(pattern_text, re.M | re.S)
def on_activated(self, view):
stino.main.set_status(view)
def on_close(self, view):
monitor_module = stino.pyarduino.base.serial_monitor
if stino.st_console.is_monitor_view(view):
name = view.name()
serial_port = name.split('-')[1].strip()
if serial_port in monitor_module.serials_in_use:
cur_serial_monitor = monitor_module.serial_monitor_dict.get(
serial_port, None)
if cur_serial_monitor:
cur_serial_monitor.stop()
monitor_module.serials_in_use.remove(serial_port)
def on_selection_modified(self, view):
view_name = view.name()
if view_name.startswith('build|') or view_name.startswith('upload|'):
view_selection = view.sel()
region = view_selection[0]
region = view.line(region)
text = view.substr(region)
matches = list(self.pattern.finditer(text))
if matches:
view_selection.clear()
view_selection.add(region)
match = matches[0]
file_path, line_no = match.groups()
if os.path.isfile(file_path):
file_view = view.window().open_file(file_path)
error_point = file_view.text_point(int(line_no) - 1, 0)
region = file_view.line(error_point)
selection = file_view.sel()
selection.clear()
selection.add(region)
file_view.show(error_point)
def on_modified(self, view):
if st_version < 3000:
flag = sublime.DRAW_OUTLINED
else:
flag = sublime.DRAW_NO_FILL
view_name = view.name()
if view_name.startswith('build|') or view_name.startswith('upload|'):
sketch_path = view_name.split('|')[1]
files = self.sketch_files_dict.get(sketch_path, [])
for file_path in files:
file_view = self.file_view_dict.get(file_path, None)
if file_view in sublime.active_window().views():
key = 'stino.' + file_path
file_view.erase_regions(key)
console_regions = []
file_regions_dict = {}
files = []
text = view.substr(sublime.Region(0, view.size()))
matches = self.pattern.finditer(text)
for match in matches:
cur_point = match.start()
line_region = view.line(cur_point)
console_regions.append(line_region)
file_path, line_no = match.groups()
file_view = view.window().open_file(file_path)
error_point = file_view.text_point(int(line_no) - 1, 0)
line_region = file_view.line(error_point)
if not file_path in files:
files.append(file_path)
self.file_view_dict[file_path] = file_view
regions = file_regions_dict.setdefault(file_path, [])
if not line_region in regions:
regions.append(line_region)
file_regions_dict[file_path] = regions
view.add_regions('build_error', console_regions, 'string',
'circle', flag)
self.sketch_files_dict[sketch_path] = files
for file_path in files:
key = 'stino.' + file_path
file_view = self.file_view_dict.get(file_path)
regions = file_regions_dict.get(file_path, [])
file_view.add_regions(key, regions, 'string', 'circle',
flag)
if regions:
region = regions[0]
file_view.show(region)
class ShowArduinoMenuCommand(sublime_plugin.WindowCommand):
def run(self):
show_arduino_menu = stino.settings.get('show_arduino_menu', True)
stino.settings.set('show_arduino_menu', not show_arduino_menu)
stino.main.create_menus()
def is_checked(self):
show_arduino_menu = stino.settings.get('show_arduino_menu', True)
return show_arduino_menu
class UpdateMenuCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.update_menu()
class NewSketchCommand(sublime_plugin.WindowCommand):
def run(self):
caption = stino.i18n.translate('Name for New Sketch:')
self.window.show_input_panel(caption, '', self.on_done, None, None)
def on_done(self, sketch_name):
stino.main.new_sketch(self.window, sketch_name)
class OpenSketchCommand(sublime_plugin.WindowCommand):
def run(self, sketch_path):
new_window = stino.settings.get('open_project_in_new_window', False)
if new_window:
sublime.run_command('new_window')
window = sublime.windows()[-1]
else:
window = self.window
stino.main.open_sketch(window, sketch_path)
class ImportLibraryCommand(sublime_plugin.TextCommand):
def run(self, edit, library_path):
stino.main.import_library(self.view, edit, library_path)
class ShowSketchFolderCommand(sublime_plugin.TextCommand):
def run(self, edit):
file_path = self.view.file_name()
if file_path:
dir_path = os.path.dirname(file_path)
url = 'file://' + dir_path
sublime.run_command('open_url', {'url': url})
class CompileSketchCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.handle_sketch(self.view, stino.main.build_sketch)
class UploadSketchCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.handle_sketch(self.view, stino.main.upload_sketch)
class UploadUsingProgrammerCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.handle_sketch(self.view, stino.main.upload_sketch,
using_programmer=True)
class SetExtraFlagCommand(sublime_plugin.WindowCommand):
def run(self):
caption = stino.i18n.translate('Extra compilation flags:')
extra_flag = stino.settings.get('extra_flag', '')
self.window.show_input_panel(caption, extra_flag, self.on_done,
None, None)
def on_done(self, extra_flag):
stino.settings.set('extra_flag', extra_flag)
class ToggleFullCompilationCommand(sublime_plugin.WindowCommand):
def run(self):
build_verbose = stino.settings.get('full_compilation', False)
stino.settings.set('full_compilation', not build_verbose)
def is_checked(self):
build_verbose = stino.settings.get('full_compilation', False)
return build_verbose
class ShowCompilationOutputCommand(sublime_plugin.WindowCommand):
def run(self):
build_verbose = stino.settings.get('build_verbose', False)
stino.settings.set('build_verbose', not build_verbose)
def is_checked(self):
build_verbose = stino.settings.get('build_verbose', False)
return build_verbose
class ShowUploadOutputCommand(sublime_plugin.WindowCommand):
def run(self):
upload_verbose = stino.settings.get('upload_verbose', False)
stino.settings.set('upload_verbose', not upload_verbose)
def is_checked(self):
upload_verbose = stino.settings.get('upload_verbose', False)
return upload_verbose
class VerifyCodeCommand(sublime_plugin.WindowCommand):
def run(self):
verify_code = stino.settings.get('verify_code', False)
stino.settings.set('verify_code', not verify_code)
def is_checked(self):
verify_code = stino.settings.get('verify_code', False)
return verify_code
class ToggleBareGccOnlyCommand(sublime_plugin.WindowCommand):
def run(self):
bare_gcc = stino.settings.get('bare_gcc', False)
stino.settings.set('bare_gcc', not bare_gcc)
def is_checked(self):
bare_gcc = stino.settings.get('bare_gcc', False)
return bare_gcc
class ChooseBuildFolderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.change_build_dir(self.window)
class SelectBoardCommand(sublime_plugin.WindowCommand):
def run(self, board_id):
stino.main.change_board(self.window, board_id)
def is_checked(self, board_id):
target_board_id = stino.settings.get('target_board_id', '')
return board_id == target_board_id
class SelectSubBoardCommand(sublime_plugin.WindowCommand):
def run(self, option_index, sub_board_id):
stino.main.change_sub_board(self.window, option_index, sub_board_id)
def is_checked(self, option_index, sub_board_id):
target_board_id = stino.settings.get('target_board_id', '')
target_sub_board_ids = stino.settings.get(target_board_id, [])
return sub_board_id in target_sub_board_ids
class SelectProgrammerCommand(sublime_plugin.WindowCommand):
def run(self, programmer_id):
stino.main.change_programmer(programmer_id)
def is_checked(self, programmer_id):
target_programmer_id = stino.settings.get('target_programmer_id', '')
return programmer_id == target_programmer_id
class BurnBootloaderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.burn_bootloader(self.window)
class SelectSerialPortCommand(sublime_plugin.WindowCommand):
def run(self, serial_port):
stino.settings.set('serial_port', serial_port)
stino.main.set_status(self.window.active_view())
def is_checked(self, serial_port):
target_serial_port = stino.settings.get('serial_port', '')
return serial_port == target_serial_port
class RunSerialMonitorCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.toggle_serial_monitor(self.window)
def is_checked(self):
monitor_module = stino.pyarduino.base.serial_monitor
state = False
serial_port = stino.settings.get('serial_port', '')
if serial_port in monitor_module.serials_in_use:
serial_monitor = monitor_module.serial_monitor_dict.get(
serial_port)
if serial_monitor and serial_monitor.is_running():
state = True
return state
class SendSerialMessageCommand(sublime_plugin.WindowCommand):
def run(self):
caption = stino.i18n.translate('Send:')
self.window.show_input_panel(caption, '', self.on_done, None, None)
def on_done(self, text):
stino.main.send_serial_message(text)
class ChooseBaudrateCommand(sublime_plugin.WindowCommand):
def run(self, baudrate):
stino.settings.set('baudrate', baudrate)
def is_checked(self, baudrate):
target_baudrate = stino.settings.get('baudrate', 9600)
return baudrate == target_baudrate
class ChooseLineEndingCommand(sublime_plugin.WindowCommand):
def run(self, line_ending):
stino.settings.set('line_ending', line_ending)
def is_checked(self, line_ending):
target_line_ending = stino.settings.get('line_ending', '\n')
return line_ending == target_line_ending
class ChooseDisplayModeCommand(sublime_plugin.WindowCommand):
def run(self, display_mode):
stino.settings.set('display_mode', display_mode)
def is_checked(self, display_mode):
target_display_mode = stino.settings.get('display_mode', 'Text')
return display_mode == target_display_mode
class AutoFormatCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.run_command('reindent', {'single_line': False})
class ArchiveSketchCommand(sublime_plugin.TextCommand):
def run(self, edit):
file_path = self.view.file_name()
if file_path:
sketch_path = os.path.dirname(file_path)
stino.main.archive_sketch(self.view.window(), sketch_path)
class ChooseArduinoFolderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.select_arduino_dir(self.window)
class ChangeSketchbookFolderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.change_sketchbook_dir(self.window)
class ToggleGlobalSettings(sublime_plugin.WindowCommand):
def run(self):
global_settings = stino.settings.get('global_settings', True)
stino.settings.set('global_settings', not global_settings)
def is_checked(self):
return True
class ToggleBigProject(sublime_plugin.WindowCommand):
def run(self):
big_project = stino.settings.get('big_project', False)
stino.settings.set('big_project', not big_project)
stino.main.update_menu()
def is_checked(self):
big_project = stino.settings.get('big_project', False)
return big_project
class ToggleOpenProjectInNewWindowCommand(sublime_plugin.WindowCommand):
def run(self):
new_window = stino.settings.get('open_project_in_new_window', False)
stino.settings.set('open_project_in_new_window', not new_window)
def is_checked(self):
new_window = stino.settings.get('open_project_in_new_window', False)
return new_window
class SelectLanguageCommand(sublime_plugin.WindowCommand):
def run(self, lang_id):
stino.i18n.change_lang(lang_id)
stino.main.create_menus()
def is_checked(self, lang_id):
target_lang_id = stino.settings.get('lang_id', 'en')
return lang_id == target_lang_id
class OpenRefCommand(sublime_plugin.WindowCommand):
def run(self, url):
url = stino.main.get_url(url)
sublime.run_command('open_url', {'url': url})
class FindInReferenceCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.find_in_ref(self.view)
class StinoDocumentsCommand(sublime_plugin.WindowCommand):
def run(self):
sublime.run_command('open_url',
{'url': 'https://github.com/Robot-Will/Stino'})
class AboutStinoCommand(sublime_plugin.WindowCommand):
def run(self):
sublime.run_command('open_url',
{'url': 'https://github.com/Robot-Will/Stino'})
class NoneCommandCommand(sublime_plugin.WindowCommand):
def run(self):
pass
def is_enabled(self):
return False
class PanelOutputCommand(sublime_plugin.TextCommand):
def run(self, edit, text):
pos = self.view.size()
self.view.insert(edit, pos, text)
self.view.show(pos)
class ShowItemListCommand(sublime_plugin.WindowCommand):
def run(self, item_type):
stino.main.show_items_panel(self.window, item_type)
| 2.03125
| 2
|
soybean/utils.py
|
lcgong/soybean
| 2
|
12774834
|
<filename>soybean/utils.py
import re
import os
import socket
from sys import modules
from sqlblock.utils.json import json_dumps, json_loads
from rocketmq.client import Message
from .exceptions import InvalidGroupId, InvalidTopicName
VALID_NAME_PATTERN = re.compile("^[%|a-zA-Z0-9_-]+$")
VALID_NAME_STR = (
"allowing only numbers, uppercase and lowercase letters,"
" '%', '|', '-' and '_' symbols"
)
def create_jsonobj_msg(topic, jsonobj, key=None, tag=None, props=None):
msg_obj = Message(topic)
if isinstance(key, str):
msg_obj.set_keys(key.encode("utf-8"))
if isinstance(tag, str):
msg_obj.set_tags(tag.encode("utf-8"))
if isinstance(props, dict):
for k, v in props.items():
msg_obj.set_property(k, v)
msg_obj.set_body(json_dumps(jsonobj).encode("utf-8"))
return msg_obj
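# Minimal usage sketch (topic, key and tag values are illustrative only):
# msg = create_jsonobj_msg('order-events', {'order_id': 42}, key='42', tag='created')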
def check_topic_name(name):
if not name:
raise InvalidTopicName("The topic name is empty")
if not VALID_NAME_PATTERN.match(name):
raise InvalidTopicName(
f"the topic name '{name}' contains illegal characters, {VALID_NAME_STR}")
if len(name) > 127:
raise InvalidTopicName(
"the topic name is longer than name max length 127.")
def check_group_id(name):
if not name:
raise InvalidGroupId("The group_id is empty")
if not VALID_NAME_PATTERN.match(name):
raise InvalidGroupId(
f"the group_id '{name}' contains illegal characters, {VALID_NAME_STR}")
if len(name) > 255:
raise InvalidGroupId(
"the group_id is longer than name max length 255.")
def make_group_id(channel_name, handler_func, depth=None):
channel_name = pinyin_translate(channel_name)
module_name = handler_func.__module__.replace(".", "-")
func_name = handler_func.__qualname__.replace(".", "-")
module_name = pinyin_translate(module_name)
func_name = pinyin_translate(func_name)
depth = f"-{depth}" if depth is not None else ""
return f"{channel_name}%{module_name}-{func_name}{depth}"
def make_instance_id():
return f"{socket.gethostname()}_{os.getpid()}"
from pypinyin import NORMAL as NORMAL_PINYIN
from pypinyin.converter import DefaultConverter
from pypinyin.seg.simpleseg import seg as pinyin_seg
def pinyin_translate(s):
"""
    Convert the Chinese characters in a string to pinyin; each character's pinyin
    is capitalised and the pieces are joined camel-case style, e.g. '中文' becomes 'ZhongWen'.
"""
converter = DefaultConverter()
segments = pinyin_seg(s)
translated = []
for s in segments:
if not s:
continue
tt = converter.convert(s, NORMAL_PINYIN,
heteronym=False,
errors='default',
strict=True)
t = tt[0][0]
if s == t:
translated.append(s)
else:
translated.append(t[0].upper() + t[1:])
return "".join(translated)
| 2.640625
| 3
|
tests/properties/test_hexagonal.py
|
kei0822kei/twinpy
| 0
|
12774835
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is pytest for twinpy.properties.hexagonal.
"""
from copy import deepcopy
import numpy as np
from twinpy.properties import hexagonal
a = 2.93
c = 4.65
def test_check_hexagonal_lattice(ti_cell_wyckoff_c):
"""
Check check_hexagonal_lattice.
"""
hexagonal_lattice = ti_cell_wyckoff_c[0]
hexagonal.check_hexagonal_lattice(lattice=hexagonal_lattice)
def test_check_cell_is_hcp(ti_cell_wyckoff_c, ti_cell_wyckoff_d):
"""
Check check_cell_is_hcp.
"""
for cell in [ti_cell_wyckoff_c, ti_cell_wyckoff_d]:
hexagonal.check_cell_is_hcp(cell=cell)
def test_convert_direction():
"""
Check convert_direction_from_four_to_three
and convert_direction_from_three_to_four.
Note:
Let basis vectors for hexagonal lattice be a_1, a_2 and c,
a_1 = [1,0,0] = 1/3[2,-1,-1,0].
"""
def _test_convert_direction_from_three_to_four(three, four_expected):
_four = hexagonal.convert_direction_from_three_to_four(
three=three)
np.testing.assert_allclose(_four, four_expected)
def _test_convert_direction_from_four_to_three(four, three_expected):
_three = hexagonal.convert_direction_from_four_to_three(
four=four)
np.testing.assert_allclose(_three, three_expected)
a_1_three = np.array([1.,0.,0.])
a_1_four = np.array([2.,-1.,-1.,0.]) / 3.
_test_convert_direction_from_three_to_four(three=a_1_three,
four_expected=a_1_four)
_test_convert_direction_from_four_to_three(four=a_1_four,
three_expected=a_1_three)
def test_hexagonal_direction(ti_cell_wyckoff_c):
"""
Check HexagonalDirection.
"""
def _test_reset_indices(hex_dr, three):
_hex_dr = deepcopy(hex_dr)
_hex_dr.reset_indices(three=three)
_three_expected = _hex_dr.three
np.testing.assert_allclose(three, _three_expected)
def _test_inverse(hex_dr):
_inv_hex_dr = deepcopy(hex_dr)
_inv_hex_dr.inverse()
_three = hex_dr.three
_inv_three = _inv_hex_dr.three
np.testing.assert_allclose(_three, _inv_three*(-1.))
def _test_get_cartesian(hex_dr, cart_expected):
_cart = hex_dr.get_cartesian(normalize=False)
_cart_normalized = hex_dr.get_cartesian(normalize=True)
_norm = np.linalg.norm(_cart_normalized)
np.testing.assert_allclose(_cart, cart_expected)
np.testing.assert_allclose(_norm, 1.)
lattice = ti_cell_wyckoff_c[0]
three_a1 = np.array([1.,0.,0.]) # a_1
three_c = np.array([0.,0.,1.]) # c
a1_cart = np.array([a,0.,0.]) # cartesian coordinate for vector a_1
hex_dr_a1 = hexagonal.HexagonalDirection(lattice=lattice, three=three_a1)
_test_reset_indices(hex_dr=hex_dr_a1,
three=three_c)
_test_inverse(hex_dr=hex_dr_a1)
_test_get_cartesian(hex_dr=hex_dr_a1, cart_expected=a1_cart)
def test_convert_plane():
"""
Check convert_plane_from_four_to_three
and convert_plane_from_three_to_four.
Note:
(10-12) plane is equal to (102).
"""
def _test_convert_plane_from_three_to_four(three, four_expected):
_four = hexagonal.convert_plane_from_three_to_four(
three=three)
np.testing.assert_allclose(_four, four_expected)
def _test_convert_plane_from_four_to_three(four, three_expected):
_three = hexagonal.convert_plane_from_four_to_three(
four=four)
np.testing.assert_allclose(_three, three_expected)
twin_three = np.array([1.,0.,2.])
twin_four = np.array([1.,0.,-1.,2.])
_test_convert_plane_from_three_to_four(three=twin_three,
four_expected=twin_four)
_test_convert_plane_from_four_to_three(four=twin_four,
three_expected=twin_three)
def test_hexagonal_plane(ti_cell_wyckoff_c):
"""
Check HexagonalPlane.
"""
def _test_reset_indices(hex_pln, four):
_hex_pln = deepcopy(hex_pln)
_hex_pln.reset_indices(four=four)
_four = _hex_pln.four
np.testing.assert_allclose(_four, four)
def _test_inverse(hex_pln):
_inv_hex_pln = deepcopy(hex_pln)
_inv_hex_pln.inverse()
four = hex_pln.four
_inv_four = _inv_hex_pln.four
np.testing.assert_allclose(_inv_four, four*(-1))
def _test_get_distance_from_plane(hex_pln, frac_coord, d_expected):
_d = hex_pln.get_distance_from_plane(frac_coord=frac_coord)
np.testing.assert_allclose(_d, d_expected)
def _test_get_plane_interval(hex_pln, d_expected):
_d = hex_pln.get_plane_interval()
np.testing.assert_allclose(_d, d_expected)
lattice = ti_cell_wyckoff_c[0]
basal_four = np.array([0.,0.,0.,1.])
twin_four = np.array([1.,0.,-1.,2.])
hex_pln_basal = hexagonal.HexagonalPlane(lattice=lattice,
four=basal_four)
hex_pln_twin = hexagonal.HexagonalPlane(lattice=lattice,
four=twin_four)
c_three = np.array([0.,0.,1.])
_test_reset_indices(hex_pln=hex_pln_twin,
four=basal_four)
_test_inverse(hex_pln=hex_pln_twin)
_test_get_distance_from_plane(hex_pln=hex_pln_basal,
frac_coord=c_three,
d_expected=c)
_test_get_plane_interval(hex_pln=hex_pln_basal,
d_expected=c)
| 2.671875
| 3
|
geetools/cloud_mask.py
|
bworstell/gee_tools
| 4
|
12774836
|
<reponame>bworstell/gee_tools<filename>geetools/cloud_mask.py
# !/usr/bin/env python
# coding=utf-8
from __future__ import print_function
from . import tools
from . import decision_tree
import ee
from . import __version__
from .bitreader import BitReader
import ee.data
if not ee.data._initialized: ee.Initialize()
# options for BitReaders for known collections
# 16 bits
BITS_MODIS09GA = {
'0-1': {0:'clear', 1:'cloud', 2:'mix'},
'2': {1:'shadow'},
'8-9': {1:'small_cirrus', 2:'average_cirrus', 3:'high_cirrus'},
'13': {1:'adjacent'},
'15': {1:'snow'}
}
# 16 bits
BITS_MODIS13Q1 = {
'0-1': {0:'good_qa'},
'2-5': {0:'highest_qa'},
'8': {1:'adjacent'},
'10': {1:'cloud'},
'14': {1:'snow'},
'15': {1:'shadow'}
}
# USGS SURFACE REFLECTANCE
# 8 bits
BITS_LANDSAT_CLOUD_QA = {
'0': {1:'ddv'},
'1': {1:'cloud'},
'2': {1:'shadow'},
'3': {1:'adjacent'},
'4': {1:'snow'},
'5': {1:'water'}
}
# USGS SURFACE REFLECTANCE
# 16 bits
BITS_LANDSAT_PIXEL_QA = {
'1': {1:'clear'},
'2': {1:'water'},
'3': {1:'shadow'},
'4': {1:'snow'},
'5': {1:'cloud'},
'6-7':{3:'high_confidence_cloud'}
}
# USGS SURFACE REFLECTANCE L8
BITS_LANDSAT_PIXEL_QA_L8 = {
'1': {1:'clear'},
'2': {1:'water'},
'3': {1:'shadow'},
'4': {1:'snow'},
'5': {1:'cloud'},
'6-7':{3:'high_confidence_cloud'},
'8-9':{3:'cirrus'},
'10': {1:'occlusion'}
}
# USGS TOA
BITS_LANDSAT_BQA = {
'4': {1:'cloud'},
'5-6': {3:'high_confidence_cloud'},
'7-8': {3:'shadow'},
'9-10': {3:'snow'}
}
# USGS TOA L8
BITS_LANDSAT_BQA_L8 = {
'4': {1:'cloud'},
'5-6': {3:'high_confidence_cloud'},
'7-8': {3:'shadow'},
'9-10': {3:'snow'},
'11-12': {3:'cirrus'}
}
# SENTINEL 2
BITS_SENTINEL2 = {
'10':{1:'cloud'},
'11':{1:'cirrus'}
}
def decode_bits_ee(bit_reader, qa_band):
"""
:param bit_reader: the bit reader
:type bit_reader: BitReader
:param qa_band: name of the band that holds the bit information
:type qa_band: str
:return: a function to map over a collection. The function adds all
categories masks as new bands
"""
options = ee.Dictionary(bit_reader.info)
categories = ee.List(bit_reader.all_categories)
def wrap(image):
def eachcat(cat, ini):
ini = ee.Image(ini)
qa = ini.select(qa_band)
# get data for category
data = ee.Dictionary(options.get(cat))
lshift = ee.Number(data.get('lshift'))
length = ee.Number(data.get('bit_length'))
decoded = ee.Number(data.get('shifted'))
# move = places to move bits right and left back
move = lshift.add(length)
# move bits right and left
rest = qa.rightShift(move).leftShift(move)
# subtract the rest
norest = qa.subtract(rest)
# right shift to compare with decoded data
to_compare = norest.rightShift(lshift) ## Image
# compare if is equal, return 0 if not equal, 1 if equal
mask = to_compare.eq(decoded)
# rename to the name of the category
qa_mask = mask.select([0], [cat])
return ini.addBands(qa_mask)
return ee.Image(categories.iterate(eachcat, image))
return wrap
def general_mask(options, reader, qa_band, update_mask=True,
add_mask_band=True, add_every_mask=False,
all_masks_name='mask'):
""" General function to get a bit mask band given a set of options
a bit reader and the name of the qa_band
:param options: options to decode
:param reader: the bit reader
:param qa_band: the name of the qa band
:param updateMask: whether to update the mask for all options or not
:param addBands: whether to add the mask band for all options or not
:return: a function to map over a collection
"""
encoder = decode_bits_ee(reader, qa_band)
opt = ee.List(options)
clases = ("'{}', "*len(options))[:-2].format(*options)
# Property when adding every band
msg_eb = "Band called 'mask' is for {} and was computed by geetools" \
" version {} (https://github.com/gee-community/gee_tools)"
prop_eb = ee.String(msg_eb.format(clases, __version__))
prop_name_eb = ee.String('{}_band'.format(all_masks_name))
def add_every_bandF(image, encoded):
return image.addBands(encoded).set(prop_name_eb, prop_eb)
def get_all_mask(encoded):
# TODO: put this function in tools
initial = encoded.select([ee.String(opt.get(0))])
rest = ee.List(opt.slice(1))
def func(cat, ini):
ini = ee.Image(ini)
new = encoded.select([cat])
return ee.Image(ini.Or(new))
all_masks = ee.Image(rest.iterate(func, initial)) \
.select([0], [all_masks_name])
mask = all_masks.Not()
return mask
# 0 0 1
if not add_every_mask and not update_mask and add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
mask = get_all_mask(encoded)
return image.addBands(mask)
# 0 1 0
elif not add_every_mask and update_mask and not add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
mask = get_all_mask(encoded)
return image.updateMask(mask)
# 0 1 1
elif not add_every_mask and update_mask and add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
mask = get_all_mask(encoded)
return image.updateMask(mask).addBands(mask)
# 1 0 0
elif add_every_mask and not update_mask and not add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
return add_every_bandF(image, encoded)
# 1 0 1
elif add_every_mask and not update_mask and add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
mask = get_all_mask(encoded)
return add_every_bandF(image, encoded).addBands(mask)
# 1 1 0
elif add_every_mask and update_mask and not add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
mask = get_all_mask(encoded)
updated = image.updateMask(mask)
with_bands = add_every_bandF(updated, encoded)
return with_bands
# 1 1 1
elif add_every_mask and update_mask and add_mask_band:
def wrap(image):
encoded = encoder(image).select(opt)
mask = get_all_mask(encoded)
updated = image.updateMask(mask)
with_bands = add_every_bandF(updated, encoded)
return with_bands.addBands(mask)
return wrap
def modis09ga(options=('cloud', 'mix', 'shadow', 'snow'), update_mask=True,
add_mask_band=True, add_every_mask=False):
""" Function for masking MOD09GA and MYD09GA collections
:return: a function to use in a map function over a collection
"""
reader = BitReader(BITS_MODIS09GA, 16)
return general_mask(options, reader, 'state_1km',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def modis13q1(options=('cloud', 'adjacent', 'shadow', 'snow'),
update_mask=True, add_mask_band=True, add_every_mask=False):
""" Function for masking MOD13Q1 and MYD13Q1 collections
:return: a function to use in a map function over a collection
"""
reader = BitReader(BITS_MODIS13Q1, 16)
return general_mask(options, reader, 'DetailedQA',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def landsat457SR_cloudQA(options=('cloud', 'adjacent', 'shadow', 'snow'),
update_mask=True, add_mask_band=True, add_every_mask=False):
reader = BitReader(BITS_LANDSAT_CLOUD_QA, 8)
return general_mask(options, reader, 'sr_cloud_qa',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def landsat457SR_pixelQA(options=('cloud', 'shadow', 'snow'),
update_mask=True, add_mask_band=True, add_every_mask=False):
reader = BitReader(BITS_LANDSAT_PIXEL_QA, 16)
return general_mask(options, reader, 'pixel_qa',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def landsat8SR_pixelQA(options=('cloud', 'shadow', 'snow', 'cirrus'),
update_mask=True, add_mask_band=True, add_every_mask=False):
reader = BitReader(BITS_LANDSAT_PIXEL_QA_L8, 16)
return general_mask(options, reader, 'pixel_qa',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def landsat457TOA_BQA(options=('cloud', 'shadow', 'snow'),
update_mask=True, add_mask_band=True, add_every_mask=False):
reader = BitReader(BITS_LANDSAT_BQA, 16)
return general_mask(options, reader, 'BQA',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def landsat8TOA_BQA(options=('cloud', 'shadow', 'snow', 'cirrus'),
update_mask=True, add_mask_band=True, add_every_mask=False):
reader = BitReader(BITS_LANDSAT_BQA_L8, 16)
return general_mask(options, reader, 'BQA',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
def sentinel2(options=('cloud', 'cirrus'), update_mask=True,
add_mask_band=True, add_every_mask=False):
reader = BitReader(BITS_SENTINEL2, 16)
return general_mask(options, reader, 'QA60',
update_mask=update_mask,
add_mask_band=add_mask_band,
add_every_mask=add_every_mask)
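# Minimal usage sketch for the collection helpers above (assumes the Earth Engine
# client is initialised; the collection id is an assumption and may differ in your
# environment):
# s2 = ee.ImageCollection('COPERNICUS/S2').filterDate('2018-01-01', '2018-02-01')
# s2_masked = s2.map(sentinel2(options=('cloud', 'cirrus')))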
def compute(image, mask_band, bits, options=None, name_all='all_masks'):
""" Compute bits using a specified band, a bit's relation and a list of
options
:param image: the image that holds the bit mask band
:type image: ee.Image
:param mask_band: the name of the band that holds the bits mask
:type mask_band: str
:param bits: relation between name and bit
:type bits: dict
:param options: list of 'bits' to compute. Example: ['cloud', 'snow']. If
None, will use all keys of the relation's dict
:type options: list
:param name_all: name for the band that holds the final mask. Default:
'all_masks'
:type name_all: str
:return: The computed mask
:rtype: ee.Image
"""
# cast params in case they are not EE objects
bits_dict = ee.Dictionary(bits)
opt = ee.List(options) if options else bits_dict.keys()
image = ee.Image(image).select(mask_band)
first = ee.Image.constant(0).select([0], [name_all]) # init image
# function for iterate over the options
def for_iterate(option, ini):
i = ee.Image(ini) # cast ini
all = i.select([name_all])
# bits relation dict contains the option?
cond = bits_dict.contains(option)
def for_true():
""" function to execute if condition == True """
# get the mask for the option
mask = tools.image.compute_bits(image, bits_dict.get(option),
bits_dict.get(option),
option)
# name the mask
# mask = ee.Image(mask).select([0], [option])
newmask = all.Or(mask)
# return ee.Image(all.Or(mask)).addBands(mask)
return tools.image.replace(i, name_all, newmask).addBands(mask)
return ee.Image(ee.Algorithms.If(cond, for_true(), i))
good_pix = ee.Image(opt.iterate(for_iterate, first))
# return good_pix.Not()
return good_pix
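# Minimal usage sketch for compute() (the band name and bit positions are
# illustrative assumptions, mirroring the 'pixel_qa' relation used further below):
# bits = {'cloud': 5, 'shadow': 3}
# good_pix = compute(image, 'pixel_qa', bits, options=['cloud', 'shadow'])
# masked = image.updateMask(good_pix.select('all_masks').Not())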
def hollstein_S2(options=('cloud', 'snow', 'shadow', 'water', 'cirrus'),
name='hollstein', addBands=False, updateMask=True):
""" Compute Hollstein Decision tree for detecting clouds, clouds shadow,
cirrus, snow and water in Sentinel 2 imagery
:param options: masks to apply. Options: 'cloud', 'shadow', 'snow',
'cirrus', 'water'
:type options: list
:param name: name of the band that will hold the final mask. Default: 'hollstein'
:type name: str
:param addBands: add all bands to the image. Default: False
:type addBands: bool
:param updateMask: update the mask of the Image. Default: True
:type updateMask: bool
:return: a function for applying the mask
:rtype: function
"""
def difference(a, b):
def wrap(img):
return img.select(a).subtract(img.select(b))
return wrap
def ratio(a, b):
def wrap(img):
return img.select(a).divide(img.select(b))
return wrap
def compute_dt(img):
# 1
b3 = img.select('B3').lt(3190)
# 2
b8a = img.select('B8A').lt(1660)
r511 = ratio('B5', 'B11')(img).lt(4.33)
# 3
s1110 = difference('B11', 'B10')(img).lt(2550)
b3_3 = img.select('B3').lt(5250)
r210 = ratio('B2','B10')(img).lt(14.689)
s37 = difference('B3', 'B7')(img).lt(270)
# 4
r15 = ratio('B1', 'B5')(img).lt(1.184)
s67 = difference('B6', 'B7')(img).lt(-160)
b1 = img.select('B1').lt(3000)
r29 = ratio('B2', 'B9')(img).lt(0.788)
s911 = difference('B9', 'B11')(img).lt(210)
s911_2 = difference('B9', 'B11')(img).lt(-970)
snow = {'snow':[['1',0], ['22',0], ['34',0]]}
cloud = {'cloud-1':[['1',0], ['22',1],['33',1],['44',1]],
'cloud-2':[['1',0], ['22',1],['33',0],['45',0]]}
cirrus = {'cirrus-1':[['1',0], ['22',1],['33',1],['44',0]],
'cirrus-2':[['1',1], ['21',0],['32',1],['43',0]]}
shadow = {'shadow-1':[['1',1], ['21',1],['31',1],['41',0]],
'shadow-2':[['1',1], ['21',1],['31',0],['42',0]],
'shadow-3':[['1',0], ['22',0],['34',1],['46',0]]}
water = {'water':[['1',1], ['21',1],['31',0],['42',1]]}
all = {'cloud':cloud,
'snow': snow,
'shadow':shadow,
'water':water,
'cirrus':cirrus}
final = {}
for option in options:
final.update(all[option])
dtf = decision_tree.binary(
{'1':b3,
'21':b8a, '22':r511,
'31':s37, '32':r210, '33':s1110, '34':b3_3,
'41': s911_2, '42':s911, '43':r29, '44':s67, '45':b1, '46':r15
}, final, name)
results = dtf
if updateMask and addBands:
return img.addBands(results).updateMask(results.select(name))
elif addBands:
return img.addBands(results)
elif updateMask:
return img.updateMask(results.select(name))
return compute_dt
def dark_pixels(green, swir2, threshold=0.25):
""" Detect dark pixels from green and swir2 band
:param green: name of the green band
:type green: str
:param swir2: name of the swir2 band
:type swir2: str
:param threshold: threshold value from which are considered dark pixels
:type threshold: float
:return: a function
"""
def wrap(img):
return img.normalizedDifference([green, swir2]).gt(threshold)
return wrap
### DEPRECATED FUNCTIONS ###
# GENERIC APPLICATION OF MASKS
# LEDAPS
def ledaps(image):
""" Function to use in Surface Reflectance Collections computed by
LEDAPS
Use:
`masked = collection.map(cloud_mask.ledaps)`
"""
cmask = image.select('QA')
valid_data_mask = tools.image.compute_bits(cmask, 1, 1, 'valid_data')
cloud_mask = tools.image.compute_bits(cmask, 2, 2, 'cloud')
snow_mask = tools.image.compute_bits(cmask, 4, 4, 'snow')
good_pix = cloud_mask.eq(0).And(valid_data_mask.eq(0)).And(snow_mask.eq(0))
result = image.updateMask(good_pix)
return result
def landsatSR(options=('cloud', 'shadow', 'adjacent', 'snow'), name='sr_mask',
addBands=False, updateMask=True):
""" Function to use in Landsat Surface Reflectance Collections:
LANDSAT/LT04/C01/T1_SR, LANDSAT/LT05/C01/T1_SR, LANDSAT/LE07/C01/T1_SR,
LANDSAT/LC08/C01/T1_SR
:param options: masks to apply. Options: 'cloud', 'shadow', 'adjacent',
'snow'
:type options: list
    :param name: name of the band that will hold the final mask. Default: 'sr_mask'
:type name: str
:param addBands: add all bands to the image. Default: False
:type addBands: bool
:param updateMask: update the mask of the Image. Default: True
:type updateMask: bool
:return: a function for applying the mask
:rtype: function
"""
sr = {'bits': ee.Dictionary({'cloud': 1, 'shadow': 2, 'adjacent': 3, 'snow': 4}),
'band': 'sr_cloud_qa'}
pix = {'bits': ee.Dictionary({'cloud': 5, 'shadow': 3, 'snow': 4}),
'band': 'pixel_qa'}
# Parameters
options = ee.List(options)
def wrap(image):
bands = image.bandNames()
contains_sr = bands.contains('sr_cloud_qa')
good_pix = ee.Image(ee.Algorithms.If(contains_sr,
compute(image, sr['band'], sr['bits'], options, name_all=name),
compute(image, pix['band'], pix['bits'], options, name_all=name)))
mask = good_pix.select([name]).Not()
if addBands and updateMask:
return image.updateMask(mask).addBands(good_pix)
elif addBands:
return image.addBands(good_pix)
elif updateMask:
return image.updateMask(mask)
else:
return image
return wrap
| 1.820313
| 2
|
demos/python/sdk_wireless_camera_control/docs/conf.py
|
hoehnp/OpenGoPro
| 0
|
12774837
|
# conf.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:50 UTC 2021
project = "Open GoPro Python SDK"
copyright = "2020, GoPro Inc."
author = "<NAME>"
version = "0.5.8"
release = "0.5.8"
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
pygments_style = "sphinx"
html_static_path = ["_static"]
extensions = [
"sphinx.ext.autodoc",
"sphinxcontrib.napoleon",
"sphinx_rtd_theme",
"sphinx.ext.autosectionlabel",
]
html_theme = "sphinx_rtd_theme"
html_context = {
"display_github": True,
}
| 1.070313
| 1
|
HackerRank/Python/Maximum_Element.py
|
GoTo-Coders/Competitive-Programming
| 4
|
12774838
|
<reponame>GoTo-Coders/Competitive-Programming
# Link --> https://www.hackerrank.com/challenges/maximum-element/problem
# Code:
def getMax(operations):
maximum = 0
temp = []
answer = []
for i in operations:
if i != '2' and i != '3':
numbers = i.split()
number = int(numbers[1])
temp.append(number)
if number > maximum:
maximum = number
elif i == '2':
temp.pop()
if len(temp) != 0:
maximum = max(temp)
else:
maximum = 0
else:
answer.append(maximum)
return answer
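# Small illustrative check (not an official test case):
# getMax(['1 97', '2', '1 20', '2', '1 26', '1 20', '2', '3', '1 91', '3'])
# is expected to return [26, 91].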
| 3.984375
| 4
|
plot_compo.py
|
AHinterding/etf-loader
| 0
|
12774839
|
<reponame>AHinterding/etf-loader<filename>plot_compo.py
import datetime as dt
from etf_mapper import CompoMapper
if __name__ == '__main__':
mapper = CompoMapper()
plot_date = dt.date.today() # Download data first before running!
mapper.plot(plot_date, 'WOOD')
| 2.125
| 2
|
models/base_trainer.py
|
P0lyFish/noise2-series
| 4
|
12774840
|
<gh_stars>1-10
import os
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
# for debugging purpose
# import cv2
# import numpy as np
# from utils import util
logger = logging.getLogger('base')
class BaseTrainer():
def __init__(self, opt):
self.opt = opt
self.device = torch.device('cuda' if opt['gpu_ids'] is not None
else 'cpu')
self.is_train = opt['is_train']
self.schedulers = []
self.optimizers = []
def feed_data(self, data):
self.LQ = data['LQ'].to(self.device)
if 'HQ' in data.keys():
self.HQ = data['HQ'].to(self.device)
# for debugging purpose
# LQ = util.tensor2img(self.LQ[0])
# HQ = util.tensor2img(self.HQ[0])
# cv2.imwrite('debug.png', np.hstack((LQ, HQ)))
def optimize_parameters(self):
pass
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict['LQ'] = self.LQ.detach()[0].float().cpu()
out_dict['GT'] = self.HQ.detach()[0].float().cpu()
out_dict['pred'] = self.pred.detach()[0].float().cpu()
return out_dict
def get_current_log(self):
return self.log_dict
def load(self):
if self.opt['path']['pretrain_model_G']:
load_path_G = self.opt['path']['pretrain_model_G']
if load_path_G is not None:
logger.info('Loading model for G [{:s}]\
...'.format(load_path_G))
self.load_network(load_path_G, self.netG,
self.opt['path']['strict_load'])
def save(self, iter_label):
self.save_network(self.netG, 'G', iter_label)
def print_network(self):
s, n = self.get_network_description(self.netG)
if isinstance(self.netG, nn.DataParallel):
net_struc_str = '{} - {}'.format(
self.netG.__class__.__name__,
self.netG.module.__class__.__name__
)
else:
net_struc_str = '{}'.format(self.netG.__class__.__name__)
if self.rank <= 0:
logger.info('Network G structure: {}, \
with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
def _get_init_lr(self):
"""Get the initial lr, which is set by the scheduler"""
init_lr_groups_l = []
for optimizer in self.optimizers:
init_lr_groups_l.append([v['initial_lr']
for v in optimizer.param_groups])
return init_lr_groups_l
def update_learning_rate(self, cur_iter, warmup_iter=-1):
for scheduler in self.schedulers:
scheduler.step()
# set up warm-up learning rate
if cur_iter < warmup_iter:
# get initial lr for each group
init_lr_g_l = self._get_init_lr()
# modify warming-up learning rates
warm_up_lr_l = []
for init_lr_g in init_lr_g_l:
warm_up_lr_l.append([v / warmup_iter * cur_iter
for v in init_lr_g])
# set learning rate
self._set_lr(warm_up_lr_l)
def get_current_learning_rate(self):
return [param_group['lr'] for param_group in
self.optimizers[0].param_groups]
def get_network_description(self, network):
"""Get the string and total parameters of the network"""
if isinstance(network, nn.DataParallel) or\
isinstance(network, DistributedDataParallel):
network = network.module
return str(network),\
sum(map(lambda x: x.numel(), network.parameters()))
def save_network(self, network, network_label, iter_label):
save_filename = '{}_{}.pth'.format(iter_label, network_label)
save_path = os.path.join(self.opt['path']['models'], save_filename)
# print('xxx {}'.format(len(list(network.parameters()))))
if isinstance(network, nn.DataParallel) or\
isinstance(network, DistributedDataParallel):
network = network.module
state_dict = network.state_dict()
for key, param in state_dict.items():
state_dict[key] = param.cpu()
torch.save(state_dict, save_path)
def load_network(self, load_path, network, strict=True, prefix=''):
if isinstance(network, nn.DataParallel) or\
isinstance(network, DistributedDataParallel):
network = network.module
load_net = torch.load(load_path)
load_net_clean = OrderedDict() # remove unnecessary 'module.'
for k, v in load_net.items():
if k.startswith('module.'):
load_net_clean[k[7:]] = v
else:
load_net_clean[k] = v
load_net.update(load_net_clean)
model_dict = network.state_dict()
for k, v in load_net.items():
k = prefix + k
if (k in model_dict) and (v.shape == model_dict[k].shape):
model_dict[k] = v
else:
print('Load failed:', k)
network.load_state_dict(model_dict, strict=True)
def save_training_state(self, epoch, iter_step):
"""Save training state during training,
which will be used for resuming"""
state = {'epoch': epoch, 'iter': iter_step,
'schedulers': [], 'optimizers': []}
for s in self.schedulers:
state['schedulers'].append(s.state_dict())
for o in self.optimizers:
state['optimizers'].append(o.state_dict())
save_filename = '{}.state'.format(iter_step)
save_path = os.path.join(self.opt['path']['training_state'],
save_filename)
torch.save(state, save_path)
def resume_training(self, resume_state):
"""Resume the optimizers and schedulers for training"""
resume_optimizers = resume_state['optimizers']
resume_schedulers = resume_state['schedulers']
assert len(resume_optimizers) == len(self.optimizers),\
'Wrong lengths of optimizers'
assert len(resume_schedulers) == len(self.schedulers),\
'Wrong lengths of schedulers'
for i, o in enumerate(resume_optimizers):
self.optimizers[i].load_state_dict(o)
for i, s in enumerate(resume_schedulers):
self.schedulers[i].load_state_dict(s)
def test(self):
self.netG.eval()
with torch.no_grad():
self.pred = self.netG(self.LQ)
self.netG.train()
| 2.1875
| 2
|
tests/get_fix_rate_for_amount_test.py
|
k0t3n/changelly_api
| 7
|
12774841
|
<filename>tests/get_fix_rate_for_amount_test.py
import pytest
import requests_mock
from changelly_api.conf import API_ROOT_URL
from changelly_api.exceptions import AmountGreaterThanMaximum, AmountLessThanMinimum
@requests_mock.Mocker(kw='requests_mock')
def test(api, get_fix_rate_for_amount_data, **kwargs):
r_mock = kwargs['requests_mock']
r_mock.post(API_ROOT_URL, json=get_fix_rate_for_amount_data['response'])
response = api.get_fix_rate_for_amount(get_fix_rate_for_amount_data['request'])
assert response == get_fix_rate_for_amount_data['response']['result']
@requests_mock.Mocker(kw='requests_mock')
def test_invalid_minimum_amount(api, get_fix_rate_for_amount_data, **kwargs):
minimum_amount = 10
r_mock = kwargs['requests_mock']
data = {
'error': {
'code': -32600,
'message': f'invalid amount: minimal amount is {minimum_amount}'
}
}
r_mock.post(API_ROOT_URL, json=data)
with pytest.raises(AmountLessThanMinimum) as error:
api.get_fix_rate_for_amount(get_fix_rate_for_amount_data['request'])
assert error.value.threshold_value == minimum_amount
@requests_mock.Mocker(kw='requests_mock')
def test_invalid_maximum_amount(api, get_fix_rate_for_amount_data, **kwargs):
maximum_amount = 10
r_mock = kwargs['requests_mock']
response = {
'error': {
'code': -32600,
'message': f'invalid amount: maximal amount is {maximum_amount}'
}
}
r_mock.post(API_ROOT_URL, json=response)
with pytest.raises(AmountGreaterThanMaximum) as error:
api.get_fix_rate_for_amount(get_fix_rate_for_amount_data['request'])
assert error.value.threshold_value == maximum_amount
| 2.296875
| 2
|
lab10-2.py
|
hanna56/Algorithm-lecture
| 2
|
12774842
|
<filename>lab10-2.py
# Doubly linked list node insertion (implementing insertBefore())
class Node:
def __init__(self, item):
self.data = item
self.prev = None
self.next = None
class DoublyLinkedList:
def __init__(self):
self.nodeCount = 0
self.head = Node(None)
self.tail = Node(None)
self.head.prev = None
self.head.next = self.tail
self.tail.prev = self.head
self.tail.next = None
def traverse(self):
result = []
curr = self.head
while curr.next.next:
curr = curr.next
result.append(curr.data)
return result
def insertBefore(self, next, newNode):
prev = next.prev
prev.next = newNode
next.prev = newNode
newNode.prev = prev
newNode.next = next
self.nodeCount += 1
return True
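# Minimal usage sketch (illustrative only):
# dll = DoublyLinkedList()
# dll.insertBefore(dll.tail, Node(1))        # list: [1]
# dll.insertBefore(dll.tail, Node(3))        # list: [1, 3]
# dll.insertBefore(dll.tail.prev, Node(2))   # insert before the node holding 3 -> [1, 2, 3]
# print(dll.traverse())                      # expected: [1, 2, 3]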
def solution(x):
return 0
| 3.9375
| 4
|
panda/dataframe/pearson_r_dataframe.py
|
vaibhavg12/python
| 0
|
12774843
|
<reponame>vaibhavg12/python
import pandas as pd
path = "C:\\Users\\gv01\\Desktop\\googleSync\\LEarning\\Udacity\\Data Scientists Foundation\\python\\Resources\\"
filename = 'nyc-subway-weather.csv'
subway_df = pd.read_csv(path+filename)
def correlation(x, y):
'''
Fill in this function to compute the correlation between the two
input variables. Each input is either a NumPy array or a Pandas
Series.
correlation = average of (x in standard units) times (y in standard units)
Remember to pass the argument "ddof=0" to the Pandas std() function!
'''
std_x = (x-x.mean())/x.std(ddof=0)
std_y = (y-y.mean())/y.std(ddof=0)
return (std_x * std_y).mean()
entries = subway_df['ENTRIESn_hourly']
cum_entries = subway_df['ENTRIESn']
rain = subway_df['meanprecipi']
temp = subway_df['meantempi']
print (correlation(entries, rain))
print (correlation(entries, temp))
print (correlation(rain, temp))
print (correlation(entries, cum_entries))
| 4.0625
| 4
|
Institute/database_handler.py
|
harshraj22/smallProjects
| 2
|
12774844
|
import json
INSTITUTION_TEMPLATE = '''
{
"Institution":{
"Students":{
},
"Teachers":{
},
"Quizzes":{
"DataStructures":{
},
"Algorithms":{
},
"MachineLearning":{
}
}
}
}
'''
class DatabaseHandler:
def __init__(self):
        # add a try/except block in case the file does not exist
with open('database.json') as f:
self.institute_data = json.load(f)
def get_students_list(self):
return self.institute_data['Institution']['Students']
def get_teachers_list(self):
return self.institute_data['Institution']['Teachers']
def update_teachers_list(self, teachers_list):
        # should check that teachers_list is in the required format
self.institute_data['Institution']['Teachers'] = teachers_list
with open('database.json', 'w') as f:
json.dump(self.institute_data, f, indent=2)
def update_students_list(self, students_list):
        # should check that students_list is in the required format
self.institute_data['Institution']['Students'] = students_list
with open('database.json', 'w') as f:
json.dump(self.institute_data, f, indent=2)
def get_subjects_list(self):
'''
        returns the list of subjects for which quizzes are available
'''
Quizzes_dict = self.institute_data['Institution']['Quizzes']
subjects_lists = list(Quizzes_dict.keys())
return subjects_lists
def get_subject_quiz(self,subject):
'''
        returns the quizzes for the given subject
'''
return self.institute_data['Institution']['Quizzes'][subject]
def get_tests_list(self):
        ''' Return the quizzes dictionary: each key is a subject name and each
        subject maps to the quizzes it contains. '''
return self.institute_data['Institution']['Quizzes']
def add_new_quiz(self, quizzes_list):
        # should check that quizzes_list is in the required format
self.institute_data['Institution']['Quizzes'] = quizzes_list
with open('database.json', 'w') as f:
json.dump(self.institute_data, f, indent=2)
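# Minimal usage sketch (assumes a 'database.json' shaped like INSTITUTION_TEMPLATE
# exists in the working directory):
# handler = DatabaseHandler()
# print(handler.get_subjects_list())  # e.g. ['DataStructures', 'Algorithms', 'MachineLearning']
# ds_quizzes = handler.get_subject_quiz('DataStructures')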
| 3.15625
| 3
|
scripts/proppr-helpers/pronghorn-wrapper.py
|
TeamCohen/ProPPR
| 138
|
12774845
|
<reponame>TeamCohen/ProPPR<gh_stars>100-1000
import sys
import os
import shutil
import getopt
import logging
import subprocess
import util as u
def makebackup(f):
bi=1
backup = "%s.%d" % (f,bi)
#backup_parent = "./"
#if f[0] == "/": backup_parent=""
#if f.rfind("/") > 0: backup_parent += f[:f.rfind("/")]
while os.path.isfile(backup):#backup in os.listdir(backup_parent):
bi+=1
backup = "%s.%d" % (f,bi)
return backup
if __name__=="__main__":
logging.basicConfig(level=logging.INFO)
#usage: the following arguments, followed by a "+" and a list
#of any remaining arguments to pass back to calls of the 'proppr'
#script in invokeProppr
argspec = ["src=", "src2=", "dst=", "dst2=", "stem=",
"C=", "n", #global proppr opts
"model=", "numIters=",
]
try:
optlist,args = getopt.getopt(sys.argv[1:], 'x', argspec)
except getopt.GetoptError as err:
print 'option error: ',str(err)
sys.exit(-1)
optdict = dict(optlist)
optdict['PROPPR_ARGS'] = args[1:]
queries = optdict['--src']
dbFile = optdict['--src2']
modelFile = optdict['--dst']
paramsFile = optdict['--dst2']
stem = optdict['--stem']
modelType = optdict['--model']
numIters = int(optdict['--numIters'])
eta = 1.0
if "--eta" in args:
i=args.index("--eta")
eta = float(args[i+1])
optdict['PROPPR_ARGS'] = args[1:i]+args[i+2:]
# make ground file
groundFile = stem+".grounded"
u.invokeProppr(optdict,'ground',queries,groundFile)
# make gradient file
gradFile = stem+".gradient"
u.invokeProppr(optdict,'gradient',groundFile,gradFile,"--epochs","0")
for i in range(numIters):
logging.info('training pass %i' % i)
# update pronghorn model
u.invokeHelper(optdict,'pronghorn.py',"update",gradFile,paramsFile,dbFile,modelFile,modelType,"--eta","%g"%eta)
# backup paramsFile
backup = makebackup(paramsFile)
if "--n" not in optdict:
shutil.copyfile(paramsFile,backup)
# proppr update
u.invokeProppr(optdict,'gradient',groundFile,gradFile,"--epochs","1","--initParams",backup,"--params",paramsFile,"--srw","ppr:eta=%g" % eta)
eta = eta * 0.8
# update pronghorn model
u.invokeHelper(optdict,'pronghorn.py',"update",gradFile,paramsFile,dbFile,modelFile,modelType)
| 2.375
| 2
|
allies/management/commands/strip_allies.py
|
kevincornish/HeckGuide
| 4
|
12774846
|
<gh_stars>1-10
from django.core.management.base import BaseCommand, CommandError
from api import HeckfireApi, TokenException
from django.conf import settings
from allies.models import Ally
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Strip a users allies via supplied username and token'
def add_arguments(self, parser):
parser.add_argument('username', type=str)
parser.add_argument('token', type=int)
def handle(self, *args, **options):
"""
This class finds all allies owned by given username in heckguide db,
then attempts to purchase all found allies.
"""
staytoken = settings.STAY_ALIVE_TOKEN
if options['token'] == 1:
token = settings.HECKFIRE_API_TOKEN
elif options['token'] == 106:
token = settings.TOKEN_106
elif options['token'] == 10:
token = settings.TOKEN_10
elif options['token'] == 92:
token = settings.TOKEN_92
elif options['token'] == 99:
token = settings.TOKEN_99
elif options['token'] == 128:
token = settings.TOKEN_128
elif options['token'] == 129:
token = settings.TOKEN_129
elif options['token'] == 121:
token = settings.TOKEN_121
elif options['token'] == 130:
token = settings.TOKEN_130
username = options['username']
user_list = Ally.objects.filter(owner__username__iexact=username).values("user_id", "cost", "username")
api = HeckfireApi(token=token, staytoken=staytoken)
try:
for user in user_list:
username = user['username']
cost = user['cost']
user_id = user['user_id']
try:
logger.info(f"Buying {username}, Cost: {cost}")
api.collect_loot()
api.buy_ally(user_id, cost)
api.stay_alive()
except TokenException as e:
logger.info(f"Exception: {e}")
except IndexError as e:
logger.info(f"User does not exist")
| 2.265625
| 2
|
metric/rapid/observations.py
|
NCAR/metric
| 0
|
12774847
|
<filename>metric/rapid/observations.py
"""
Module containing code to work with Rapid observational data
"""
from netCDF4 import Dataset, num2date, date2num
import datetime
import numpy as np
import metric.utils as utils
class RapidObs(object):
""" Template class to interface with observed ocean transports """
def __init__(self, f, time_avg=None, mindt=None, maxdt=None):
""" Create instance holding ocean transport data """
self.f = f
self.time_avg = time_avg
self.mindt = mindt
self.maxdt = maxdt
self._read_data()
def _read_data(self):
""" Abstract method to read data and apply time averaging """
pass
def _read_dates(self):
""" Abstract method to initialized dates """
pass
def _ym_dates(self):
""" Return yearly mean date time objects """
ym_dates = []
for yr in range(self.yy.min(), self.yy.max()+1):
ind = (self.yy == yr)
if ind.any():
ym_dates.append(datetime.datetime(yr, 7, 1))
return np.array(ym_dates)
def _mm_dates(self):
""" Return monthly mean date time objects """
mm_dates = []
for yr in range(self.yy.min(), self.yy.max()+1):
for mon in range(1,12+1):
ind = (self.yy == yr) & (self.mm == mon)
if ind.any():
mm_dates.append(datetime.datetime(yr, mon, 15))
return np.array(mm_dates)
def _calc_ym(self, data, profile=False):
""" Return yearly mean values """
ym_data = []
for yr in range(self.yy.min(), self.yy.max()+1):
ind = (self.yy == yr)
if ind.any():
if profile:
ym_data.append(np.mean(data[ind,:],axis=0))
else:
ym_data.append(np.mean(data[ind]))
return np.array(ym_data)
def _calc_mm(self, data, profile=False):
""" Return monthly mean values """
mm_data = []
for yr in range(self.yy.min(), self.yy.max()+1):
for mon in range(1,12+1):
ind = (self.yy == yr) & (self.mm == mon)
if ind.any():
if profile:
mm_data.append(np.mean(data[ind,:],axis=0))
else:
mm_data.append(np.mean(data[ind]))
return np.array(mm_data)
def _readnc(self, ncvar):
""" Read variable from netcdf file """
nc = Dataset(self.f)
data = nc.variables[ncvar][:]
nc.close()
return data
class StreamFunctionObs(RapidObs):
"""
Sub-class to hold overturning streamfunction observations
from the RAPID-MOCHA-WBTS array at 26N.
Data source:
https://www.bodc.ac.uk/data/published_data_library/catalogue/
10.5285/35784047-9b82-2160-e053-6c86abc0c91b/
Data reference:
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.;
<NAME>.; <NAME>. (2016). Atlantic meridional
overturning circulation observed by the RAPID-MOCHA-WBTS
(RAPID-Meridional Overturning Circulation and Heatflux
Array-Western Boundary Time Series) array at 26N from
2004 to 2015. British Oceanographic Data Centre - Natural
Environment Research Council, UK. doi:10/bkzc.
"""
def _read_data(self):
""" Read data and apply time averaging """
self._read_dates()
self.z = self._readnc('depth')
if self.time_avg is None:
self.dates = self.original_dates
self.sf = self._readnc('stream_function_mar').transpose()
elif self.time_avg == 'monthly':
self.dates = self._mm_dates()
self.sf = self._calc_mm(self._readnc('stream_function_mar').transpose(),
profile=True)
elif self.time_avg == 'yearly':
self.dates = self._ym_dates()
self.sf = self._calc_ym(self._readnc('stream_function_mar').transpose(),
profile=True)
else:
print(self.time_avg)
raise ValueError('time_avg must be "monthly" or "yearly"')
if (self.mindt is not None) and (self.maxdt is not None):
tind = utils.get_dateind(self.dates, self.mindt, self.maxdt)
self.sf = self.sf[tind,:]
self.dates = self.dates[tind]
def _read_dates(self):
""" Read date information from file """
nc = Dataset(self.f)
t = nc.variables['time']
self.original_dates = num2date(t[:],units=t.units)
self.hh = np.array([dt.hour for dt in self.original_dates], dtype=np.int)
self.dd = np.array([dt.day for dt in self.original_dates], dtype=np.int)
self.mm = np.array([dt.month for dt in self.original_dates], dtype=np.int)
self.yy = np.array([dt.year for dt in self.original_dates], dtype=np.int)
def write_to_netcdf(self, ncfile):
""" Write observation data to netcdf file """
# Open ncfile and create coords
dataset = Dataset(ncfile, 'w', format='NETCDF4_CLASSIC')
zdim = dataset.createDimension('depth', self.z.size)
tdim = dataset.createDimension('time', None)
# Create time coordinate
time = dataset.createVariable('time',np.float64,(tdim.name,))
time.units = 'hours since 0001-01-01 00:00:00.0'
time.calendar = 'gregorian'
time[:] = date2num(self.dates, time.units, calendar=time.calendar)
# Create depth coordinate
z = dataset.createVariable('depth',np.float64,(zdim.name,))
z.units = 'm'
z[:] = self.z
# Create streamfunction variable
sf = dataset.createVariable('stream_function_mar',np.float64,(tdim.name, zdim.name))
sf.units = 'Sv'
sf[:] = self.sf
# Close file
print('SAVING: {}'.format(ncfile))
dataset.close()
class TransportObs(RapidObs):
"""
Sub-class to hold volume transport observations
from the RAPID-MOCHA-WBTS array at 26N.
Data source:
https://www.bodc.ac.uk/data/published_data_library/catalogue/
10.5285/35784047-9b82-2160-e053-6c86abc0c91b/
Data reference:
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.;
<NAME>.; <NAME>. (2016). Atlantic meridional
overturning circulation observed by the RAPID-MOCHA-WBTS
(RAPID-Meridional Overturning Circulation and Heatflux
Array-Western Boundary Time Series) array at 26N from
2004 to 2015. British Oceanographic Data Centre - Natural
Environment Research Council, UK. doi:10/bkzc.
"""
def _read_data(self):
""" Read data and apply time averaging """
self._read_dates()
if self.time_avg is None:
self.dates = self.original_dates
self.ekman = self._readnc('t_ek10')
self.umo = self._readnc('t_umo10')
self.fc = self._readnc('t_gs10')
self.moc = self._readnc('moc_mar_hc10')
elif self.time_avg == 'monthly':
self.dates = self._mm_dates()
self.ekman = self._calc_mm(self._readnc('t_ek10'))
self.umo = self._calc_mm(self._readnc('t_umo10'))
self.fc = self._calc_mm(self._readnc('t_gs10'))
self.moc = self._calc_mm(self._readnc('moc_mar_hc10'))
elif self.time_avg == 'yearly':
self.dates = self._ym_dates()
self.ekman = self._calc_ym(self._readnc('t_ek10'))
self.umo = self._calc_ym(self._readnc('t_umo10'))
self.fc = self._calc_ym(self._readnc('t_gs10'))
self.moc = self._calc_ym(self._readnc('moc_mar_hc10'))
else:
print(self.time_avg)
raise ValueError('time_avg must be "monthly" or "yearly"')
if (self.mindt is not None) and (self.maxdt is not None):
tind = utils.get_dateind(self.dates, self.mindt, self.maxdt)
self.ekman = self.ekman[tind]
self.umo = self.umo[tind]
self.fc = self.fc[tind]
self.moc = self.moc[tind]
self.dates = self.dates[tind]
def _read_dates(self):
""" Read date information from file """
nc = Dataset(self.f)
t = nc.variables['time']
self.original_dates = num2date(t[:],units=t.units)
self.hh = np.array([dt.hour for dt in self.original_dates], dtype=np.int)
self.dd = np.array([dt.day for dt in self.original_dates], dtype=np.int)
self.mm = np.array([dt.month for dt in self.original_dates], dtype=np.int)
self.yy = np.array([dt.year for dt in self.original_dates], dtype=np.int)
def write_to_netcdf(self, ncfile):
""" Write observation data to netcdf file """
# Open ncfile and create coords
dataset = Dataset(ncfile, 'w', format='NETCDF4_CLASSIC')
tdim = dataset.createDimension('time', None)
# Create time coordinate
time = dataset.createVariable('time',np.float64,(tdim.name,))
time.units = 'hours since 0001-01-01 00:00:00.0'
time.calendar = 'gregorian'
time[:] = date2num(self.dates, time.units, calendar=time.calendar)
# Create variables
ek = dataset.createVariable('t_ek10',np.float64,(tdim.name,))
ek.units = 'Sv'
ek[:] = self.ekman
umo = dataset.createVariable('t_umo10',np.float64,(tdim.name,))
umo.units = 'Sv'
umo[:] = self.umo
fc = dataset.createVariable('t_gs10',np.float64,(tdim.name,))
fc.units = 'Sv'
fc[:] = self.fc
moc = dataset.createVariable('t_moc_mar_hc10',np.float64,(tdim.name,))
moc.units = 'Sv'
moc[:] = self.moc
# Close file
print('SAVING: {}'.format(ncfile))
dataset.close()
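# Minimal usage sketch (the file name is an assumption; any RAPID transports
# netCDF file with the variables read above should work):
# obs = TransportObs('moc_transports.nc', time_avg='monthly')
# print(obs.dates[0], obs.moc[0])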
class HeatTransportObs(RapidObs):
"""
Sub-class to hold meridional heat transport observations
from the RAPID-MOCHA-WBTS array at 26N.
Data source:
https://www.rsmas.miami.edu/users/mocha/mocha_results.htm
Data reference:
http://journals.ametsoc.org/doi/abs/10.1175/2010JCLI3997.1
"""
def _read_data(self):
"""
Read data at original frequency or calculate a time-average
"""
self._read_dates()
self.z = self._readnc('z')
if self.time_avg is None:
self.dates = self.original_dates
self.q_eddy = self._readnc('Q_eddy') / 1e15
self.q_ek = self._readnc('Q_ek') / 1e15
self.q_fc = self._readnc('Q_fc') / 1e15
self.q_gyre = self._readnc('Q_gyre') / 1e15
self.q_geoint = self._readnc('Q_int') / 1e15
self.q_mo = self._readnc('Q_mo') / 1e15
self.q_ot = self._readnc('Q_ot') / 1e15
self.q_sum = self._readnc('Q_sum') / 1e15
self.q_wbw = self._readnc('Q_wedge') / 1e15
self.t_basin = self._readnc('T_basin')
self.v_basin = self._readnc('V_basin')
self.v_fc = self._readnc('V_fc')
elif self.time_avg == 'monthly':
self.dates = self._mm_dates()
self.q_eddy = self._calc_mm(self._readnc('Q_eddy')) / 1e15
self.q_ek = self._calc_mm(self._readnc('Q_ek')) / 1e15
self.q_fc = self._calc_mm(self._readnc('Q_fc')) / 1e15
self.q_gyre = self._calc_mm(self._readnc('Q_gyre')) / 1e15
self.q_geoint = self._calc_mm(self._readnc('Q_int')) / 1e15
self.q_mo = self._calc_mm(self._readnc('Q_mo')) / 1e15
self.q_ot = self._calc_mm(self._readnc('Q_ot')) / 1e15
self.q_sum = self._calc_mm(self._readnc('Q_sum')) / 1e15
self.q_wbw = self._calc_mm(self._readnc('Q_wedge')) / 1e15
self.t_basin = self._calc_mm(self._readnc('T_basin'), profile=True)
self.v_basin = self._calc_mm(self._readnc('V_basin'), profile=True)
self.v_fc = self._calc_mm(self._readnc('V_fc'), profile=True)
elif self.time_avg == 'yearly':
self.dates = self._ym_dates()
self.q_eddy = self._calc_ym(self._readnc('Q_eddy')) / 1e15
self.q_ek = self._calc_ym(self._readnc('Q_ek')) / 1e15
self.q_fc = self._calc_ym(self._readnc('Q_fc')) / 1e15
self.q_gyre = self._calc_ym(self._readnc('Q_gyre')) / 1e15
self.q_geoint = self._calc_ym(self._readnc('Q_int')) / 1e15
self.q_mo = self._calc_ym(self._readnc('Q_mo')) / 1e15
self.q_ot = self._calc_ym(self._readnc('Q_ot')) / 1e15
self.q_sum = self._calc_ym(self._readnc('Q_sum')) / 1e15
self.q_wbw = self._calc_ym(self._readnc('Q_wedge')) / 1e15
self.t_basin = self._calc_ym(self._readnc('T_basin'), profile=True)
self.v_basin = self._calc_ym(self._readnc('V_basin'), profile=True)
self.v_fc = self._calc_ym(self._readnc('V_fc'), profile=True)
else:
            raise ValueError(
                'time_avg must be None, "monthly" or "yearly", got {!r}'.format(self.time_avg))
if (self.mindt is not None) and (self.maxdt is not None):
tind = utils.get_dateind(self.dates, self.mindt, self.maxdt)
self.q_eddy = self.q_eddy[tind]
self.q_ek = self.q_ek[tind]
self.q_fc = self.q_fc[tind]
self.q_gyre = self.q_gyre[tind]
self.q_geoint = self.q_geoint[tind]
self.q_mo = self.q_mo[tind]
self.q_ot = self.q_ot[tind]
self.q_sum = self.q_sum[tind]
self.q_wbw = self.q_wbw[tind]
self.t_basin = self.t_basin[tind,:]
self.v_basin = self.v_basin[tind,:]
self.v_fc = self.v_fc[tind,:]
self.dates = self.dates[tind]
def _read_dates(self):
""" Read date information from file """
dts = []
        self.hh = np.array(self._readnc('hour'), dtype=int)
        self.dd = np.array(self._readnc('day'), dtype=int)
        self.mm = np.array(self._readnc('month'), dtype=int)
        self.yy = np.array(self._readnc('year'), dtype=int)
for ndt in range(len(self.hh)):
dts.append(datetime.datetime(
self.yy[ndt], self.mm[ndt], self.dd[ndt], self.hh[ndt],0,0))
self.original_dates = np.array(dts)
def write_to_netcdf(self, ncfile):
""" Write observation data to netcdf file """
# Open ncfile and create coords
dataset = Dataset(ncfile, 'w', format='NETCDF4_CLASSIC')
tdim = dataset.createDimension('time', None)
zdim = dataset.createDimension('depth', self.z.size)
# Create time coordinate
time = dataset.createVariable('time',np.float64,(tdim.name,))
time.units = 'hours since 0001-01-01 00:00:00.0'
time.calendar = 'gregorian'
time[:] = date2num(self.dates, time.units, calendar=time.calendar)
# Create depth coordinate
z = dataset.createVariable('depth',np.float64,(zdim.name,))
z.units = 'm'
z[:] = self.z
# Create variables
q_eddy = dataset.createVariable('Q_eddy',np.float64,(tdim.name,))
q_eddy.units = 'PW'
q_eddy[:] = self.q_eddy
q_ek = dataset.createVariable('Q_ek',np.float64,(tdim.name,))
q_ek.units = 'PW'
q_ek[:] = self.q_ek
q_fc = dataset.createVariable('Q_fc',np.float64,(tdim.name,))
q_fc.units = 'PW'
q_fc[:] = self.q_fc
q_gyre = dataset.createVariable('Q_gyre',np.float64,(tdim.name,))
q_gyre.units = 'PW'
q_gyre[:] = self.q_gyre
q_geoint = dataset.createVariable('Q_int',np.float64,(tdim.name,))
q_geoint.units = 'PW'
q_geoint[:] = self.q_geoint
q_mo = dataset.createVariable('Q_mo',np.float64,(tdim.name,))
q_mo.units = 'PW'
q_mo[:] = self.q_mo
q_ot = dataset.createVariable('Q_ot',np.float64,(tdim.name,))
q_ot.units = 'PW'
q_ot[:] = self.q_ot
q_sum = dataset.createVariable('Q_sum',np.float64,(tdim.name,))
q_sum.units = 'PW'
q_sum[:] = self.q_sum
q_wbw = dataset.createVariable('Q_wedge',np.float64,(tdim.name,))
q_wbw.units = 'PW'
q_wbw[:] = self.q_wbw
t_basin = dataset.createVariable('T_basin',np.float64,(tdim.name,zdim.name,))
t_basin.units = 'degC'
t_basin[:] = self.t_basin
v_basin = dataset.createVariable('V_basin',np.float64,(tdim.name,zdim.name,))
v_basin.units = 'Sv/m'
v_basin[:] = self.v_basin
v_fc = dataset.createVariable('V_fc',np.float64,(tdim.name,zdim.name,))
v_fc.units = 'Sv/m'
v_fc[:] = self.v_fc
# Close file
print('SAVING: {}'.format(ncfile))
dataset.close()
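# Usage sketch for HeatTransportObs (illustrative only; the file name and the
# constructor keywords are assumptions inferred from the attributes used above,
# so check the RapidObs base class for the real signature). The heat transport
# components are scalars per time step, while t_basin, v_basin and v_fc are
# 2-D (time, depth) profiles indexed against self.z.
#
#   obs = HeatTransportObs('mocha_mht_data.nc', time_avg='monthly')
#   print(obs.q_sum.mean())              # monthly-mean total heat transport (PW)
#   print(obs.z.size, obs.v_basin.shape)
#   obs.write_to_netcdf('mocha_mht_monthly.nc')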
class FloridaCurrentObs(RapidObs):
"""
Class to hold Florida current transport estimates derived from
submarine cable measurements.
Data source:
http://www.aoml.noaa.gov/phod/floridacurrent/data_access.php
The Florida Current cable and section data are made freely available
on the Atlantic Oceanographic and Meteorological Laboratory web page
(www.aoml.noaa.gov/phod/floridacurrent/) and are funded by the DOC-NOAA
Climate Program Office - Ocean Observing and Monitoring Division.
The project scientists would also appreciate it if you informed us of
any publications or presentations that you prepare using this data.
Continued funding of this project depends on us being able to justify
to NOAA (and hence the US Congress) the usefulness of this data.
"""
def _read_data(self):
""" Read data and apply time averaging """
self._read_dates()
        if self.time_avg is None:
            self.dates = self.original_dates
            self.fc = self._readnc('florida_current_transport')
elif self.time_avg == 'monthly':
self.dates = self._mm_dates()
self.fc = self._calc_mm(self._readnc('florida_current_transport'))
elif self.time_avg == 'yearly':
self.dates = self._ym_dates()
self.fc = self._calc_ym(self._readnc('florida_current_transport'))
else:
            raise ValueError(
                'time_avg must be None, "monthly" or "yearly", got {!r}'.format(self.time_avg))
if (self.mindt is not None) and (self.maxdt is not None):
tind = utils.get_dateind(self.dates, self.mindt, self.maxdt)
self.fc = self.fc[tind]
self.dates = self.dates[tind]
def _read_dates(self):
""" Read date information from file """
nc = Dataset(self.f)
t = nc.variables['time']
self.original_dates = num2date(t[:],units=t.units)
        self.hh = np.array([dt.hour for dt in self.original_dates], dtype=int)
        self.dd = np.array([dt.day for dt in self.original_dates], dtype=int)
        self.mm = np.array([dt.month for dt in self.original_dates], dtype=int)
        self.yy = np.array([dt.year for dt in self.original_dates], dtype=int)
def write_to_netcdf(self, ncfile):
""" Write observation data to netcdf file """
# Open ncfile and create coords
dataset = Dataset(ncfile, 'w', format='NETCDF4_CLASSIC')
tdim = dataset.createDimension('time', None)
# Create time coordinate
time = dataset.createVariable('time',np.float64,(tdim.name,))
time.units = 'hours since 0001-01-01 00:00:00.0'
time.calendar = 'gregorian'
time[:] = date2num(self.dates, time.units, calendar=time.calendar)
# Create variables
fc = dataset.createVariable('florida_current_transport',np.float64,(tdim.name,))
fc.units = 'Sv'
fc[:] = self.fc
# Close file
print('SAVING: {}'.format(ncfile))
dataset.close()
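# Usage sketch for FloridaCurrentObs (illustrative only; file names and keyword
# arguments are placeholders inferred from the attributes used above):
#
#   fc_obs = FloridaCurrentObs('FC_cable_transport.nc', time_avg='yearly')
#   print(fc_obs.dates[0], fc_obs.fc.mean())    # annual-mean cable transport (Sv)
#   fc_obs.write_to_netcdf('FC_cable_transport_yearly.nc')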
| 2.640625
| 3
|
pc-containers-get-filtered-CSV-export.py
|
antoinesylvia/pc-toolbox
| 2
|
12774848
|
from __future__ import print_function
import os
from pprint import pprint
try:
input = raw_input
except NameError:
pass
import argparse
import pc_lib_api
import pc_lib_general
import json
import pandas
from datetime import datetime, date, time
from pathlib import Path
# --Execution Block-- #
# --Parse command line arguments-- #
parser = argparse.ArgumentParser(prog='rltoolbox')
parser.add_argument(
'-u',
'--username',
type=str,
help='*Required* - Prisma Cloud API Access Key ID that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-p',
'--password',
type=str,
help='*Required* - Prisma Cloud API Secret Key that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-url',
'--uiurl',
type=str,
help='*Required* - Base URL used in the UI for connecting to Prisma Cloud. '
'Formatted as app.prismacloud.io or app2.prismacloud.io or app.eu.prismacloud.io, etc. '
'You can also input the api version of the URL if you know it and it will be passed through.')
parser.add_argument(
'-url_compute',
'--uiurl_compute',
type=str,
help='*Required* - Base URL used in the UI for connecting to Prisma Cloud Compute. '
         'Formatted as region.cloud.twistlock.com/identifier. '
'Retrieved from Compute->Manage->System->Downloads->Path to Console')
parser.add_argument(
'-y',
'--yes',
action='store_true',
help='(Optional) - Override user input for verification (auto answer for yes).')
args = parser.parse_args()
# --End parse command line arguments-- #
# --Main-- #
# Get login details worked out
pc_settings = pc_lib_general.pc_login_get(args.username, args.password, args.uiurl, args.uiurl_compute)
# Verification (override with -y)
if not args.yes:
print()
    print('Ready to execute commands against your Prisma Cloud tenant.')
verification_response = str(input('Would you like to continue (y or yes to continue)?'))
continue_response = {'yes', 'y'}
print()
if verification_response not in continue_response:
pc_lib_general.pc_exit_error(400, 'Verification failed due to user response. Exiting...')
# Sort out API Login
print('API - Getting authentication token...', end='')
pc_settings = pc_lib_api.pc_jwt_get(pc_settings)
print('Done.')
# Get containers list
print('API - Getting containers list...', end='')
pc_settings, response_package = pc_lib_api.api_containers_get(pc_settings)
file_name = "containers_list_filtered_" + str(datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) + ".csv"
file_path = os.path.join(Path.home(), "prisma-compute-exports")
containers = response_package['data']
data_header = "Application,Hostname,Cluster,Image Name,Namespace"
print("Exporting data to: " + os.path.join(file_path, file_name))
pc_lib_general.pc_file_write_csv(file_name, data_header, file_path)
for container in containers:
data_info_hostname = container['hostname']
data_info_namespace = container['info']['namespace']
data_info_cluster = container['info']['cluster']
data_info_imageName = container['info']['imageName']
data_info_app = container['info']['app']
data_line = data_info_app + "," + data_info_hostname + "," + data_info_cluster + "," + data_info_imageName + "," + data_info_namespace
pc_lib_general.pc_file_write_csv(file_name, data_line, file_path)
print('Done.')
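# Example invocation (the key, secret, and console paths below are placeholders;
# substitute values for your own tenant):
#
#   python pc-containers-get-filtered-CSV-export.py \
#       -u <access_key_id> -p <secret_key> \
#       -url app.prismacloud.io \
#       -url_compute us-east1.cloud.twistlock.com/us-1-1111111 \
#       -y
#
# The CSV is written to ~/prisma-compute-exports/containers_list_filtered_<timestamp>.csv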
| 2.609375
| 3
|
leetcode/algorithms/maximum-depth-of-binary-tree.py
|
yasserglez/programming-problems
| 2
|
12774849
|
<filename>leetcode/algorithms/maximum-depth-of-binary-tree.py<gh_stars>1-10
# https://leetcode.com/problems/maximum-depth-of-binary-tree/
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxDepth(self, root: Optional[TreeNode]) -> int:
if not root:
return 0
else:
return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
r = TreeNode(3)
r.left = TreeNode(9)
r.right = TreeNode(20)
r.right.left = TreeNode(15)
r.right.right = TreeNode(7)
s = Solution()
print(s.maxDepth(r))
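# A hypothetical alternative (not part of the original solution): the same result
# via an iterative depth-first traversal, which avoids Python's recursion limit
# on very deep trees.
def max_depth_iterative(root: Optional[TreeNode]) -> int:
    max_depth = 0
    stack = [(root, 1)] if root else []
    while stack:
        node, depth = stack.pop()
        max_depth = max(max_depth, depth)
        if node.left:
            stack.append((node.left, depth + 1))
        if node.right:
            stack.append((node.right, depth + 1))
    return max_depth

print(max_depth_iterative(r))  # expected to print 3, matching the recursive answer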
| 3.859375
| 4
|
sunpy/io/special/asdf/tags/tests/test_coordinate_frames.py
|
Cubostar/sunpy
| 0
|
12774850
|
<gh_stars>0
import os
import platform
from distutils.version import LooseVersion
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import CartesianRepresentation
import sunpy.coordinates.frames as frames
from sunpy.tests.helpers import asdf_entry_points
asdf = pytest.importorskip('asdf', '2.0.2')
from asdf.tests.helpers import assert_roundtrip_tree # isort:skip
sunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))
@pytest.fixture(params=sunpy_frames)
@asdf_entry_points
def coordframe_scalar(request):
frame = request.param
if frame._default_representation is CartesianRepresentation:
data = np.random.random(3) * u.km
else:
data = np.random.random(2) * u.arcsec
return frame(*data, obstime='2018-01-01T00:00:00')
@pytest.fixture(params=sunpy_frames)
@asdf_entry_points
def coordframe_array(request):
frame = request.param
if frame._default_representation is CartesianRepresentation:
data = np.random.random((3, 10)) * u.km
else:
data = np.random.random((2, 10)) * u.arcsec
return frame(*data, obstime='2018-01-01T00:00:00')
def test_hgc_100():
    # Test that HeliographicCarrington is populated with Earth as the observer
    # when loading an older schema (1.0.0)
test_file = os.path.join(os.path.dirname(__file__), "hgc_100.asdf")
with asdf.open(test_file) as input_asdf:
hgc = input_asdf['hgc']
assert isinstance(hgc, frames.HeliographicCarrington)
if hgc.obstime is None:
assert hgc.observer == 'earth'
else:
assert hgc.observer.object_name == 'earth'
# Skip these two tests on windows due to a weird interaction with atomicfile
# and tmpdir
skip_windows_asdf = pytest.mark.skipif(
(LooseVersion(asdf.__version__) < LooseVersion("2.3.1")
and platform.system() == 'Windows'),
reason="See https://github.com/spacetelescope/asdf/pull/632")
@skip_windows_asdf
@asdf_entry_points
def test_saveframe(coordframe_scalar, tmpdir):
tree = {'frame': coordframe_scalar}
assert_roundtrip_tree(tree, tmpdir)
@skip_windows_asdf
@asdf_entry_points
def test_saveframe_arr(coordframe_array, tmpdir):
tree = {'frame': coordframe_array}
assert_roundtrip_tree(tree, tmpdir)
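# For reference, a minimal sketch (not part of the test suite) of the round trip
# that assert_roundtrip_tree exercises; the file name and frame choice are
# illustrative only, and it is kept commented out so importing this module does
# no file I/O.
#
#   tree = {'frame': frames.Helioprojective(0 * u.arcsec, 0 * u.arcsec,
#                                           obstime='2018-01-01T00:00:00')}
#   asdf.AsdfFile(tree).write_to('frame_example.asdf')
#   with asdf.open('frame_example.asdf') as restored:
#       print(restored['frame'])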
| 2.09375
| 2
|