max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
Oscar/Helpers/Watchdog.py | onderogluserdar/boardInstrumentFramework | 16 | 12778951 |
##############################################################################
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# File Abstract:
# Sends an 'I am alive' message periodically
#
##############################################################################
import xml.dom.minidom
from xml.parsers.expat import ExpatError
from Helpers import Log
from Data import ConnectionPoint
from Data.ConnectionPoint import ConnectionType
from Helpers import Target
from Helpers import TargetManager
from Helpers import ThreadManager
from Helpers import Configuration
from Helpers import VersionMgr
from Util import Time
from Util import Sleep
#############
# This class sends a heartbeat (watchdog re-arm) to all upstream Oscars, so we don't time out
#############
class WatchdogTimer(object):
def __init__(self):
name = "Watchdog Timer Thread"
self.__WorkerThread = ThreadManager.GetThreadManager().CreateThread(name,self.WatchdogProc)
ThreadManager.GetThreadManager().StartThread(name)
def WatchdogProc(self,fnKillSignalled,userData):
lastUpdate = 0
interval = Configuration.get().GetTimeoutPeriod() * 0.25 # send a watchdog at 4x rate of timeout
buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
buffer = buffer + "<Oscar Type=\"WatchdogTimer\">"
buffer = buffer + "<Version>1.0</Version>"
buffer = buffer + "<Port>"+str(Configuration.get().GetUpstreamConnection().getPort())+"</Port>"
buffer = buffer + "</Oscar>"
while not fnKillSignalled(): # run until signalled to end - call passed function to check for the signal
if lastUpdate < Time.GetCurrMS() - interval:
TargetManager.GetTargetManager().BroadcastUpstreamToType(buffer,ConnectionType.UpstreamOscar) # send heartbeat to all upstream Oscars
lastUpdate = Time.GetCurrMS()
Sleep.Sleep(0.25) #snooze for 250 ms
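# Illustrative sketch only (not part of the original module): the same heartbeat payload
# built with the standard-library ElementTree API instead of string concatenation, using
# the Configuration accessors already imported above.
def _example_heartbeat_payload():
    import xml.etree.ElementTree as ET
    root = ET.Element("Oscar", Type="WatchdogTimer")
    ET.SubElement(root, "Version").text = "1.0"
    ET.SubElement(root, "Port").text = str(Configuration.get().GetUpstreamConnection().getPort())
    return '<?xml version="1.0" encoding="utf-8"?>' + ET.tostring(root, encoding="unicode")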
#############
# This class sends connection info to everything downstream periodically
# those downstream things (other Oscars and Marvins) use this to send packets back
#############
class ConnectionUpdateTimer(object):
def __init__(self):
name = "Connection Update Timer Thread"
self.__WorkerThread = ThreadManager.GetThreadManager().CreateThread(name,self.WorkerProc)
ThreadManager.GetThreadManager().StartThread(name)
def WorkerProc(self,fnKillSignalled,userData):
lastUpdate = 0
interval = Configuration.get().GetConnectionUpdateInterval()
buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
buffer = buffer + "<Oscar Type=\"ConnectionInformation\">"
buffer = buffer + "<Version>1.0</Version>"
buffer = buffer + "<OscarVersion>" + VersionMgr.ReadVer() + "</OscarVersion>"
buffer = buffer + "<ID>" + Configuration.get().GetID()+"</ID>"
buffer = buffer + "<Port>"+str(Configuration.get().GetDownstreamConnection().getPort())+"</Port>"
buffer = buffer + "</Oscar>"
#<?xml version="1.0" encoding="utf-8"?>
#<Oscar Type="ConnectionInformation">
# <Version>1.0</Version>
# <ID>Foo</ID>
# <Port>Port</Port>
#</Oscar>
while not fnKillSignalled(): # run until signalled to end - call passed function to check for the signal
if lastUpdate < Time.GetCurrMS() - interval:
TargetManager.GetTargetManager().BroadcastDownstream(buffer,True,None) # send Connection Data to all downstream things (Oscars & Marvins)
lastUpdate = Time.GetCurrMS()
Configuration.get().RescanTargets()
else:
Sleep.Sleep(0.25)
TargetManager.GetTargetManager().CheckForRemovalOfDynamicMarvins()
| 1.476563 | 1 |
bunny/ext/const.py | senpai-development/SenpaiSlasher | 0 | 12778952 |
from numbers import Number
from string import ascii_letters
__version__ = None
class Version:
    """A simplified class to return a formatted version string."""
    def __new__(cls, *args) -> str:
        # Numeric parts are joined with dots; an optional trailing letter
        # (e.g. a pre-release tag such as "b") is appended without a dot.
        if isinstance(args[-1], str) and args[-1] in ascii_letters:
            return ".".join(str(arg) for arg in args[:-1]) + args[-1]
        if all(isinstance(arg, Number) for arg in args):
            return ".".join(str(arg) for arg in args)
| 3.484375 | 3 |
baselines/maml_torch/utils.py | mikehuisman/metadl | 26 | 12778953 |
<reponame>mikehuisman/metadl<filename>baselines/maml_torch/utils.py
import tensorflow as tf
def create_grads_shell(model):
""" Create list of gradients associated to each trainable layer in model.
Returns:
-------
list_grads, array-like : each element of this list is a tensor representing
the associated layer's gradient.
"""
list_grads = []
for layer in model.trainable_variables :
list_grads.append(tf.Variable(tf.zeros_like(layer)))
return list_grads
def reset_grads(meta_grads):
"""Reset the variable that contains the meta-learner gradients.
Arguments:
----------
meta_grads : list of tf.Variable
Note : Each element is guaranteed to remain a tf.Variable. Using
tf.zeros_like on tf.Variable does not transform the element to
tf.Tensor
"""
for ele in meta_grads :
ele.assign(tf.zeros_like(ele))
def app_custom_grads(model, inner_gradients, lr):
""" Apply gradient update to the model's parameters using inner_gradients.
"""
i = 0
#print(inner_gradients)
for k, layer in enumerate(model.layers) :
if 'kernel' in dir(layer) :
#print(layer.kernel.shape)
layer.kernel.assign_sub(tf.multiply(lr, inner_gradients[i]))
i+=1
elif 'normalization' in layer.name:
layer.trainable_weights[0].assign_sub(\
tf.multiply(lr, inner_gradients[i]))
i+=1
if 'bias' in dir(layer):
layer.bias.assign_sub(tf.multiply(lr, inner_gradients[i]))
i+=1
elif 'normalization' in layer.name:
layer.trainable_weights[1].assign_sub(\
tf.multiply(lr, inner_gradients[i]))
i+=1
| 2.484375 | 2 |
src/changie/utils.py | ZaX51/changie | 0 | 12778954 |
def read_file(file):
with open(file, "r") as f:
return f.read()
def write_file(file, s):
with open(file, "w+") as f:
return f.write(s)
| 3.328125 | 3 |
Emmanuel/Notebooks/Eda.py | daye-oa/Data-Science-Projects | 0 | 12778955 |
my first work
| 1.492188 | 1 |
evosax/experimental/decodings/random.py | RobertTLange/evosax | 102 | 12778956 |
import jax
import chex
from typing import Union, Optional
from .decoder import Decoder
from ...utils import ParameterReshaper
class RandomDecoder(Decoder):
def __init__(
self,
num_encoding_dims: int,
placeholder_params: Union[chex.ArrayTree, chex.Array],
rng: chex.PRNGKey = jax.random.PRNGKey(0),
rademacher: bool = False,
identity: bool = False,
n_devices: Optional[int] = None,
):
super().__init__(
num_encoding_dims, placeholder_params, identity, n_devices
)
self.rademacher = rademacher
# Instantiate base reshaper class
self.base_reshaper = ParameterReshaper(
placeholder_params, identity, n_devices
)
# Sample a random matrix - Gaussian or Rademacher (+1/-1)
if not self.rademacher:
self.project_matrix = jax.random.normal(
rng, (self.num_encoding_dims, self.base_reshaper.total_params)
)
else:
self.project_matrix = jax.random.rademacher(
rng, (self.num_encoding_dims, self.base_reshaper.total_params)
)
def reshape(self, x: chex.Array) -> chex.ArrayTree:
"""Perform reshaping for random projection case."""
# 1. Project parameters to raw dimensionality using pre-sampled matrix
project_x = (
x @ self.project_matrix
) # (popsize, num_enc_dim) x (num_enc_dim, num_dims)
# 2. Reshape using base reshaper class
x_reshaped = self.base_reshaper.reshape(project_x)
return x_reshaped
def reshape_single(self, x: chex.Array) -> chex.ArrayTree:
"""Reshape a single flat vector using random projection matrix."""
x_re = x.reshape(1, self.num_encoding_dims)
# 1. Project parameters to raw dimensionality using pre-sampled matrix
project_x = (x_re @ self.project_matrix).squeeze()
# 2. Reshape using base reshaper class
x_reshaped = self.base_reshaper.reshape_single(project_x)
return x_reshaped
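# Minimal usage sketch (hypothetical parameter shapes; assumes evosax's ParameterReshaper
# accepts this placeholder pytree as in the constructor above):
if __name__ == "__main__":
    placeholder = {
        "w": jax.numpy.zeros((4, 2)),
        "b": jax.numpy.zeros((2,)),
    }
    decoder = RandomDecoder(num_encoding_dims=3, placeholder_params=placeholder)
    pop = jax.random.normal(jax.random.PRNGKey(1), (8, 3))  # (popsize, num_encoding_dims)
    trees = decoder.reshape(pop)             # pytree with a leading popsize axis
    single = decoder.reshape_single(pop[0])  # pytree for one population member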
| 2.53125 | 3 |
ryu/lib/packet/wifi.py | SyedDanialAliShah/ryu | 0 | 12778957 |
<reponame>SyedDanialAliShah/ryu
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WiFi message parser/serializer
"""
import struct
from ryu.lib import stringify
from ryu.lib import type_desc
from . import packet_base
from . import ether_types
WIFI_PORT = 8000
WIFI_CTOC_PORT = 8001
class WiFiMsg(packet_base.PacketBase):
_HEADER_FMT = "!BBHI"
_MIN_LEN = struct.calcsize(_HEADER_FMT)
association = dict()
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Code | Identifier | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | Authenticator |
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Attributes...
# +-+-+-+-+-+-+-+-+-+-+-+-+-
def __init__(self, client=None, bssid=None, ssid=None, rssi=None,
target_bssid=None, target_rssi=None, load=None, target_load=None):
super(WiFiMsg, self).__init__()
self.client = client
self.bssid = bssid
self.ssid = ssid
self.rssi = rssi
self.target_bssid = target_bssid
self.target_rssi = target_rssi
self.load = load
self.target_load = target_load
@classmethod
def parser(cls, buf):
(ver_opt_len, flags, protocol,
vni) = struct.unpack_from(cls._HEADER_FMT, buf)
version = ver_opt_len >> 6
# The Opt Len field expressed in four byte multiples.
opt_len = (ver_opt_len & 0x3F) * 4
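# The payload is parsed below as a comma-separated record (hypothetical example values):
#   "<client>,<bssid>,<ssid>,<rssi>,<target_bssid>,<target_rssi>,<load>,<target_load>"
# e.g. "sta1,00:00:00:00:00:01,mininet-wifi,-70,00:00:00:00:00:02,-60,2,1"
# Note that the last two characters of target_bssid are later read as the ME host number.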
msg_ = buf.split(',')
client = msg_[0] #client name: useful for Mininet-WiFi
bssid = msg_[1]
ssid = msg_[2]
rssi = msg_[3]
target_bssid = msg_[4]
target_rssi = msg_[5]
load = msg_[6]
target_load = msg_[7]
msg = cls(client, bssid, ssid, rssi, target_bssid, target_rssi, load, target_load)
#Interprocess Communication between SDN and ME platform
rssi=int(rssi)
target_rssi=int(target_rssi)
target_MEhost=int(target_bssid[-2:])
target_load=int(target_load)
#if rssi<-65 and target_rssi<-85 and target_bssid==02:
if rssi<-65 and target_rssi>-85 and target_load+1<5:
#load = int(load)
#target_load = int(target_load)
#if load>target_load+1:
import zmq
context = zmq.Context()
#print("Connecting to Server on port 5555")
socket = context.socket(zmq.REQ)
socket.connect("tcp://10.0.0.109:5530")
#print('Sending Hello')
socket.send('Instantiate Docker Application at ME Host {0} of Network {0}'.format(target_MEhost))
#elif rssi<-65 and target_rssi<-85 and target_bssid==03:
#import zmq
#context = zmq.Context()
#socket = context.socket(zmq.REQ)
#socket.connect("tcp://10.0.0.109:5557")
#socket.send('Instantiate Docker at ME Host 3 of Network 3')
#print('Waiting for answer')
#message = socket.recv()
elif rssi<-65 and target_rssi>-85 and target_load+1>5:
import zmq
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://10.0.0.109:5530")
#print('Sending Hello')
socket.send('Instantiate Docker Application at ME Host {0} of Network {0}'.format(target_MEhost))
from . import ethernet
WiFiMsg._TYPES = ethernet.ethernet._TYPES
WiFiMsg.register_packet_type(ethernet.ethernet,
ether_types.ETH_TYPE_TEB)
return (msg, WiFiMsg.get_packet_type(protocol),
buf[cls._MIN_LEN + opt_len:])
def serialize(self, payload=None, prev=None):
tunnel_options = bytearray()
for o in self.options:
tunnel_options += o.serialize()
self.opt_len = len(tunnel_options)
# The Opt Len field expressed in four byte multiples.
opt_len = self.opt_len // 4
return (struct.pack(self._HEADER_FMT,
(self.version << 6) | opt_len,
self.flags, self.protocol, self.vni << 8)
+ tunnel_options)
class WiFiCtoCMsg(packet_base.PacketBase):
_HEADER_FMT = "!BBHI"
_MIN_LEN = struct.calcsize(_HEADER_FMT)
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Code | Identifier | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | Authenticator |
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Attributes...
# +-+-+-+-+-+-+-+-+-+-+-+-+-
def __init__(self, client=None, bssid=None):
super(WiFiCtoCMsg, self).__init__()
self.client = client
self.bssid = bssid
@classmethod
def parser(cls, buf):
(ver_opt_len, flags, protocol,
vni) = struct.unpack_from(cls._HEADER_FMT, buf)
version = ver_opt_len >> 6
# The Opt Len field expressed in four byte multiples.
opt_len = (ver_opt_len & 0x3F) * 4
msg_ = buf.split(',')
client = msg_[0] #client name: useful for Mininet-WiFi
bssid = msg_[1]
msg = cls(client, bssid)
from . import ethernet
WiFiCtoCMsg._TYPES = ethernet.ethernet._TYPES
WiFiCtoCMsg.register_packet_type(ethernet.ethernet,
ether_types.ETH_TYPE_TEB)
return (msg, WiFiCtoCMsg.get_packet_type(protocol),
buf[cls._MIN_LEN + opt_len:])
def serialize(self, payload=None, prev=None):
tunnel_options = bytearray()
for o in self.options:
tunnel_options += o.serialize()
self.opt_len = len(tunnel_options)
# The Opt Len field expressed in four byte multiples.
opt_len = self.opt_len // 4
return (struct.pack(self._HEADER_FMT,
(self.version << 6) | opt_len,
self.flags, self.protocol, self.vni << 8)
+ tunnel_options)
class Option(stringify.StringifyMixin, type_desc.TypeDisp):
"""
Tunnel Options
"""
_OPTION_PACK_STR = "!BBH"
_OPTION_LEN = struct.calcsize(_OPTION_PACK_STR)
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Option Class | Type |R|R|R| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Variable Option Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
def __init__(self, option_class=None, type_=None, length=0):
super(Option, self).__init__()
if option_class is None or type_ is None:
(option_class, type_) = self._rev_lookup_type(self.__class__)
self.option_class = option_class
self.type = type_
self.length = length
@classmethod
def parse_value(cls, buf):
# Sub-classes should override this method, if needed.
return {}
def serialize_value(self):
# Sub-classes should override this method, if needed.
return b''
@classmethod
def parser(cls, buf):
(option_class, type_,
length) = struct.unpack_from(cls._OPTION_PACK_STR, buf[:cls._OPTION_LEN])
# The Length field expressed in four byte multiples.
length *= 4
subcls = Option._lookup_type((option_class, type_))
print(option_class)
return (
subcls(option_class=option_class, type_=type_, length=length,
**subcls.parse_value(
buf[cls._OPTION_LEN:cls._OPTION_LEN + length])),
buf[cls._OPTION_LEN + length:])
def serialize(self, _payload=None, _prev=None):
data = self.serialize_value()
self.length = len(data)
# The Length field expressed in four byte multiples.
length = self.length // 4
return (struct.pack(self._OPTION_PACK_STR, int(self.option_class),
self.type, length) + data)
@Option.register_unknown_type()
class OptionDataUnknown(Option):
"""
Unknown Option Class and Type specific Option
"""
def __init__(self, buf, option_class=None, type_=None, length=0):
super(OptionDataUnknown, self).__init__(option_class=option_class,
type_=type_,
length=length)
self.buf = buf
@classmethod
def parse_value(cls, buf):
return {"buf": buf}
def serialize_value(self):
return self.buf
| 2.0625 | 2 |
model/attention/width_att.py | dora-alvarado/wanet-retinal-vessel-segmentation | 2 | 12778958 |
##############################################################################
# Created by: <NAME>
# Email: <EMAIL>
#
# Note: This code was heavily inspired from https://github.com/junfu1115/DANet
##############################################################################
from __future__ import division
from torch.nn import Module, Conv2d, Parameter, Softmax
import torch
import torch.nn as nn
torch_ver = torch.__version__[:3]
__all__ = ['PAM', 'CAM']
class PAM(Module):
""" Position attention module"""
def __init__(self, in_dim, squeezing=8):
super(PAM, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim // squeezing, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim // squeezing, kernel_size=1, dilation=2)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out
return out
class CAM(Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM, self).__init__()
self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
n = proj_query.shape[-1]
avg = torch.mean(proj_query, dim=2, keepdim=True).repeat([1,1,proj_query.shape[-1]])
proj_query -=avg
proj_key = proj_query.permute(0, 2, 1)
energy = torch.bmm(1/n*proj_query, proj_key)
attention = self.softmax(energy)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out
return out
class WAM(nn.Module):
def __init__(self, in_channels, out_channels, squeezing_factor=4, squeezing_factor_pam=8, norm_layer=nn.BatchNorm2d):
super(WAM, self).__init__()
inter_channels = in_channels // squeezing_factor
self.conv5a = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv5c = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.sa = PAM(inter_channels, squeezing=squeezing_factor_pam)
self.sc = CAM(inter_channels)
self.conv51 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv52 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv8 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))
def forward(self, x):
feat1 = self.conv5a(x)
sa_feat = self.sa(feat1)
sa_conv = self.conv51(sa_feat)
feat2 = self.conv5c(x)
sc_feat = self.sc(feat2)
sc_conv = self.conv52(sc_feat)
feat_sum = sa_conv + sc_conv
sasc_output = self.conv8(feat_sum)
return sasc_output
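# Minimal usage sketch (hypothetical tensor sizes): WAM fuses the position (PAM) and
# channel (CAM) attention branches and projects to out_channels.
if __name__ == "__main__":
    wam = WAM(in_channels=64, out_channels=1)
    features = torch.randn(2, 64, 48, 48)  # B x C x H x W
    out = wam(features)
    print(out.shape)                       # expected: torch.Size([2, 1, 48, 48])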
| 2.53125 | 3 |
tests/dummy_storage.py | voidfiles/ssshelf | 0 | 12778959 |
<gh_stars>0
class DummyStorage(object):
def __init__(self, *args, **kwargs):
self.create_key_call_count = 0
self.get_key_call_count = 0
self.get_keys_call_count = 0
self.remove_key_call_count = 0
self.remove_keys_call_count = 0
async def create_key(self, *args, **kwargs):
self.create_key_call_count += 1
self.create_key_called_with = {
'args': args,
'kwargs': kwargs
}
async def remove_key(self, *args, **kwargs):
self.remove_key_call_count += 1
async def remove_keys(self, *args, **kwargs):
self.remove_keys_call_count += 1
def _set_get_key(self, data=None, metadata=None):
self._next_data = data
self._next_metadata = metadata
async def get_key(self, storage_key):
self.get_key_call_count += 1
return {
'data': self._next_data,
'metadata': self._next_metadata,
}
def _set_get_keys(self, keys):
self._next_keys = keys
async def get_keys(self, prefix, max_keys=200, after=None):
self.get_keys_call_count += 1
return self._next_keys
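# Minimal usage sketch (hypothetical async test, e.g. run with asyncio or pytest-asyncio):
# the dummy records call counts and arguments so assertions can be made without real storage.
import asyncio

async def _example():
    storage = DummyStorage()
    storage._set_get_key(data=b"payload", metadata={"k": "v"})
    await storage.create_key("some-key", data=b"payload")
    result = await storage.get_key("some-key")
    assert storage.create_key_call_count == 1
    assert result["data"] == b"payload"

if __name__ == "__main__":
    asyncio.run(_example())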
| 2.5 | 2 |
src/algorithms/04-graph-algorithms/graph.py | SamVanhoutte/python-musings | 0 | 12778960 |
<filename>src/algorithms/04-graph-algorithms/graph.py
import numpy as np
from enum import Enum
class VertexState(Enum):
Open = 0
Wip = 1
Closed = -1
class Vertex:
def __init__(self, n):
self.name = n
self.state = VertexState.Open
def print(self):
print('Vertex', self.name, ':', self.state)
class Graph:
def __init__(self):
# def __init__(self, n):
self.vertices = {}
self.edges = []
self.edge_indices = {}
def add_vertex(self, vertex):
if isinstance(vertex, Vertex) and vertex.name not in self.vertices:
self.vertices[vertex.name] = vertex
for edge in self.edges:
edge.append(0)
self.edges.append([0] * (len(self.edges)+1))
self.edge_indices[vertex.name] = len(self.edge_indices)
return True
else:
print('vertex', vertex.name, 'not added')
return False
def add_edge(self, u, v, weight=1):
if u in self.vertices and v in self.vertices:
self.edges[self.edge_indices[u]][self.edge_indices[v]] = weight
self.edges[self.edge_indices[v]][self.edge_indices[u]] = weight
return True
else:
print('could not add edge', u, v)
return False
def get_neighbors(self, vertex_name):
if vertex_name in self.vertices:
vertex_edges = np.array(list(self.edges[self.edge_indices[vertex_name]]))
edge_array = np.array(list(self.vertices.keys()))
neighboring_vertices = [self.vertices.get(key) for key in list(edge_array[vertex_edges > 0])]
return [v for v in neighboring_vertices if v.state != VertexState.Closed and v.name != vertex_name]
def print_graph(self):
for v, i in sorted(self.edge_indices.items()):
print(str(v) + ' ', end='')
for j in range(len(self.edges)):
print(self.edges[i][j], end='')
print(' ')
def print_status(self):
for vertex in self.vertices.values():
vertex.print()
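# Minimal usage sketch (hypothetical vertices): build a small undirected graph and
# query neighbours via the adjacency matrix maintained above.
if __name__ == "__main__":
    g = Graph()
    for vertex_name in ("A", "B", "C"):
        g.add_vertex(Vertex(vertex_name))
    g.add_edge("A", "B")
    g.add_edge("B", "C", weight=2)
    g.print_graph()
    print([v.name for v in g.get_neighbors("B")])  # expected: ['A', 'C']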
| 3.515625 | 4 |
app/user/migrations/0046_auto_20170826_0132.py | Sovol2018/sovolo | 2 | 12778961 |
<filename>app/user/migrations/0046_auto_20170826_0132.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-25 16:32
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0045_auto_20170826_0132'),
]
operations = [
migrations.AlterField(
model_name='userreviewlist',
name='rating',
field=models.IntegerField(choices=[(5, '5'), (3, '3'), (1, '1'), (2, '2'), (4, '4')], validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)]),
),
]
| 1.671875 | 2 |
scraper/storage_spiders/viettelstorevn.py | chongiadung/choinho | 0 | 12778962 |
<gh_stars>0
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='col-lg-7 col-md-7 col-sm-7 col-xs-7 produce-info']/div[@class='row'][1]/div/h1",
'price' : "//div/span[@id='_price_new436']",
'category' : "//div[@class='row history']/span[@class='text1']/a",
'description' : "//div[@id='owl-feature']|//div[@class='row digital ']",
'images' : "//div[@id='owl-feature']//img[1]/@src|//div[@class='row digital ']//img[1]/@src",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'viettelstore.vn'
allowed_domains = ['viettelstore.vn']
start_urls = ['https://viettelstore.vn/']
tracking_url = ''
sitemap_urls = ['http://viettelstore.vn/sitemap.xml']
sitemap_rules = [('http://viettelstore\.vn/.+-pid\d+\.html', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-pid+\d+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['/danh-muc/[a-zA-Z0-9-]+\.html']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 2.015625 | 2 |
model.py | ckyeungac/DeepIRT | 38 | 12778963 |
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib import layers
# Assumption: tfd (used for the normal-ogive model in _create_loss) refers to
# TensorFlow Probability's distributions module.
import tensorflow_probability as tfp
tfd = tfp.distributions
from memory import DKVMN
from utils import getLogger
# set logger
logger = getLogger('Deep-IRT-model')
def tensor_description(var):
"""Returns a compact and informative string about a tensor.
Args:
var: A tensor variable.
Returns:
a string with type and size, e.g.: (float32 1x8x8x1024).
"""
description = '(' + str(var.dtype.name) + ' '
sizes = var.get_shape()
for i, size in enumerate(sizes):
description += str(size)
if i < len(sizes) - 1:
description += 'x'
description += ')'
return description
class DeepIRTModel(object):
def __init__(self, args, sess, name="KT"):
self.args = args
self.sess = sess
self.name = name
self.create_model()
def create_model(self):
self._create_placeholder()
self._influence()
self._create_loss()
self._create_optimizer()
self._add_summary()
def _create_placeholder(self):
logger.info("Initializing Placeholder")
self.q_data = tf.placeholder(tf.int32, [self.args.batch_size, self.args.seq_len], name='q_data')
self.qa_data = tf.placeholder(tf.int32, [self.args.batch_size, self.args.seq_len], name='qa_data')
self.label = tf.placeholder(tf.float32, [self.args.batch_size, self.args.seq_len], name='label')
def _influence(self):
# Initialize Memory
logger.info("Initializing Key and Value Memory")
with tf.variable_scope("Memory"):
init_key_memory = tf.get_variable(
'key_memory_matrix', [self.args.memory_size, self.args.key_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
init_value_memory = tf.get_variable(
'value_memory_matrix', [self.args.memory_size, self.args.value_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
# Broadcast value-memory matrix to shape (batch_size, memory_size, value_memory_state_dim)
init_value_memory = tf.tile(  # tile the value memory once per batch element
tf.expand_dims(init_value_memory, 0),  # add the batch axis
tf.stack([self.args.batch_size, 1, 1])
)
logger.debug("Shape of init_value_memory = {}".format(init_value_memory.get_shape()))
logger.debug("Shape of init_key_memory = {}".format(init_key_memory.get_shape()))
# Initialize DKVMN
self.memory = DKVMN(
memory_size=self.args.memory_size,
key_memory_state_dim=self.args.key_memory_state_dim,
value_memory_state_dim=self.args.value_memory_state_dim,
init_key_memory=init_key_memory,
init_value_memory=init_value_memory,
name="DKVMN"
)
# Initialize Embedding
logger.info("Initializing Q and QA Embedding")
with tf.variable_scope('Embedding'):
q_embed_matrix = tf.get_variable(
'q_embed', [self.args.n_questions+1, self.args.key_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
qa_embed_matrix = tf.get_variable(
'qa_embed', [2*self.args.n_questions+1, self.args.value_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
# Embedding to Shape (batch size, seq_len, memory_state_dim(d_k or d_v))
logger.info("Initializing Embedding Lookup")
q_embed_data = tf.nn.embedding_lookup(q_embed_matrix, self.q_data)
qa_embed_data = tf.nn.embedding_lookup(qa_embed_matrix, self.qa_data)
logger.debug("Shape of q_embed_data: {}".format(q_embed_data.get_shape()))
logger.debug("Shape of qa_embed_data: {}".format(qa_embed_data.get_shape()))
sliced_q_embed_data = tf.split(
value=q_embed_data, num_or_size_splits=self.args.seq_len, axis=1
)
sliced_qa_embed_data = tf.split(
value=qa_embed_data, num_or_size_splits=self.args.seq_len, axis=1
)
logger.debug("Shape of sliced_q_embed_data[0]: {}".format(sliced_q_embed_data[0].get_shape()))
logger.debug("Shape of sliced_qa_embed_data[0]: {}".format(sliced_qa_embed_data[0].get_shape()))
pred_z_values = list()
student_abilities = list()
question_difficulties = list()
reuse_flag = False
logger.info("Initializing Influence Procedure")
for i in range(self.args.seq_len):
# To reuse linear vectors
if i != 0:
reuse_flag = True
# Get the query and content vector
q = tf.squeeze(sliced_q_embed_data[i], 1)
qa = tf.squeeze(sliced_qa_embed_data[i], 1)
logger.debug("qeury vector q: {}".format(q))
logger.debug("content vector qa: {}".format(qa))
# Attention, correlation_weight: Shape (batch_size, memory_size)
self.correlation_weight = self.memory.attention(embedded_query_vector=q)
logger.debug("correlation_weight: {}".format(self.correlation_weight))
# Read process, read_content: (batch_size, value_memory_state_dim)
self.read_content = self.memory.read(correlation_weight=self.correlation_weight)
logger.debug("read_content: {}".format(self.read_content))
# Write process, new_memory_value: Shape (batch_size, memory_size, value_memory_state_dim)
self.new_memory_value = self.memory.write(self.correlation_weight, qa, reuse=reuse_flag)
logger.debug("new_memory_value: {}".format(self.new_memory_value))
# Build the feature vector -- summary_vector
mastery_level_prior_difficulty = tf.concat([self.read_content, q], 1)
self.summary_vector = layers.fully_connected(
inputs=mastery_level_prior_difficulty,
num_outputs=self.args.summary_vector_output_dim,
scope='SummaryOperation',
reuse=reuse_flag,
activation_fn=tf.nn.tanh
)
logger.debug("summary_vector: {}".format(self.summary_vector))
# Calculate the student ability level from summary vector
student_ability = layers.fully_connected(
inputs=self.summary_vector,
num_outputs=1,
scope='StudentAbilityOutputLayer',
reuse=reuse_flag,
activation_fn=None
)
# Calculate the question difficulty level from the question embedding
question_difficulty = layers.fully_connected(
inputs=q,
num_outputs=1,
scope='QuestionDifficultyOutputLayer',
reuse=reuse_flag,
activation_fn=tf.nn.tanh
)
# Prediction
pred_z_value = 3.0 * student_ability - question_difficulty
pred_z_values.append(pred_z_value)
student_abilities.append(student_ability)
question_difficulties.append(question_difficulty)
self.pred_z_values = tf.reshape(
tf.stack(pred_z_values, axis=1),
[self.args.batch_size, self.args.seq_len]
)
self.student_abilities = tf.reshape(
tf.stack(student_abilities, axis=1),
[self.args.batch_size, self.args.seq_len]
)
self.question_difficulties = tf.reshape(
tf.stack(question_difficulties, axis=1),
[self.args.batch_size, self.args.seq_len]
)
logger.debug("Shape of pred_z_values: {}".format(self.pred_z_values))
logger.debug("Shape of student_abilities: {}".format(self.student_abilities))
logger.debug("Shape of question_difficulties: {}".format(self.question_difficulties))
def _create_loss(self):
logger.info("Initializing Loss Function")
# convert into 1D
label_1d = tf.reshape(self.label, [-1])
pred_z_values_1d = tf.reshape(self.pred_z_values, [-1])
student_abilities_1d = tf.reshape(self.student_abilities, [-1])
question_difficulties_1d = tf.reshape(self.question_difficulties, [-1])
# find the label index that is not masking
index = tf.where(tf.not_equal(label_1d, tf.constant(-1., dtype=tf.float32)))
# masking
filtered_label = tf.gather(label_1d, index)
filtered_z_values = tf.gather(pred_z_values_1d, index)
filtered_student_abilities = tf.gather(student_abilities_1d, index)
filtered_question_difficulties = tf.gather(question_difficulties_1d, index)
logger.debug("Shape of filtered_label: {}".format(filtered_label))
logger.debug("Shape of filtered_z_values: {}".format(filtered_z_values))
logger.debug("Shape of filtered_student_abilities: {}".format(filtered_student_abilities))
logger.debug("Shape of filtered_question_difficulties: {}".format(filtered_question_difficulties))
if self.args.use_ogive_model:
# make prediction using normal ogive model
dist = tfd.Normal(loc=0.0, scale=1.0)
self.pred = dist.cdf(pred_z_values_1d)
filtered_pred = dist.cdf(filtered_z_values)
else:
self.pred = tf.math.sigmoid(pred_z_values_1d)
filtered_pred = tf.math.sigmoid(filtered_z_values)
# convert the prediction probability to logit, i.e., log(p/(1-p))
epsilon = 1e-6
clipped_filtered_pred = tf.clip_by_value(filtered_pred, epsilon, 1.-epsilon)
filtered_logits = tf.log(clipped_filtered_pred/(1-clipped_filtered_pred))
# cross entropy loss
cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=filtered_logits,
labels=filtered_label
)
)
self.loss = cross_entropy
def _create_optimizer(self):
with tf.variable_scope('Optimizer'):
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate)
gvs = self.optimizer.compute_gradients(self.loss)
clipped_gvs = [(tf.clip_by_norm(grad, self.args.max_grad_norm), var) for grad, var in gvs]
self.train_op = self.optimizer.apply_gradients(clipped_gvs)
def _add_summary(self):
tf.summary.scalar('Loss', self.loss)
self.tensorboard_writer = tf.summary.FileWriter(
logdir=self.args.tensorboard_dir,
graph=self.sess.graph
)
model_vars = tf.trainable_variables()
total_size = 0
total_bytes = 0
model_msg = ""
for var in model_vars:
# if var.num_elements() is None or [] assume size 0.
var_size = var.get_shape().num_elements() or 0
var_bytes = var_size * var.dtype.size
total_size += var_size
total_bytes += var_bytes
model_msg += ' '.join(
[var.name,
tensor_description(var),
'[%d, bytes: %d]' % (var_size, var_bytes)]
)
model_msg += '\n'
model_msg += 'Total size of variables: %d \n' % total_size
model_msg += 'Total bytes of variables: %d \n' % total_bytes
logger.info(model_msg)
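# Worked numeric sketch of the prediction rule used above (values are hypothetical):
# the model follows an IRT-style link, p = sigmoid(3.0 * student_ability - question_difficulty).
#   student_ability = 0.8, question_difficulty = 0.5
#   z = 3.0 * 0.8 - 0.5 = 1.9
#   p = 1 / (1 + exp(-1.9)) ~= 0.87, i.e. roughly an 87% chance of a correct response.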
| 2.59375 | 3 |
functions/nag_function_is_output.py | daviddoret/pyxag | 1 | 12778964 |
<reponame>daviddoret/pyxag<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 27 12:15:58 2019
@author: david
"""
def nag_function_is_output(name):
"""
Check whether a NAG function name is of type output
"""
if name[0:1] == 'o':
return True
else:
return False
| 2.65625 | 3 |
app.py | Sk70249/Book-Data-Scraprer | 1 | 12778965 |
import requests
from pages.book_pages import AllBooksPage
# For extracting data from single page of a Website
page_content = requests.get("http://books.toscrape.com/index.html").content
page = AllBooksPage(page_content)
books = page.books
# For extracting data from multiple pages of a Website
for p_num in range(1, page.totalpages): # extract data from the remaining pages
url = f"http://books.toscrape.com/catalogue/page-{p_num+1}.html"
page_content = requests.get(url).content
page = AllBooksPage(page_content)
books.extend(page.books)
USER_CHOICE ="""Enter the choice accordingly:-
- "b" for printing BEST BOOKS
- "c" for printing CHEAPEST BOOKS
- "o" for printing ALL BOOKS CONTENT
- "q" for EXIT
Enter your choice:"""
def print_best_books():
best_books = sorted(books, key=lambda x: x.rating * -1)[:10] # Top 10 highest rated books
for book in best_books:
print(book)
def print_cheapest_books():
cheap_books = sorted(books, key=lambda x: x.price)[:10] # Top 10 least price books
for book in cheap_books:
print(book)
def overall_content():
for book in books:
print(book)
user_choices = {
"b": print_best_books,
"c": print_cheapest_books,
"o": overall_content
}
def menu():
user_input = input(USER_CHOICE).strip()
while user_input!="q":
if user_input in ("b", "c", "o"):
user_choices[user_input]()
else:
print("<Wrong Input: Please! Enter correct choice>")
user_input = input(USER_CHOICE)
#Driver Function
if __name__ == "__main__":
menu()
| 3.625 | 4 |
meadow/meadow/tests/utils/test_book_searcher.py | digital-gachilib/meadow | 0 | 12778966 |
<gh_stars>0
from django.test import TestCase
from meadow.models import Book
from meadow.tests.factories.book import BookFactory
from meadow.utils.book_searcher import book_preview, search_by_title
class BookPreviewTestCase(TestCase):
def test_book_preview_book_exists(self):
some_book = BookFactory()
result = book_preview(some_book.id)
self.assertEqual(result["title"], some_book.title)
self.assertEqual(result["description"], some_book.description)
self.assertEqual(result["author"]["first_name"], some_book.author.first_name)
self.assertEqual(result["author"]["last_name"], some_book.author.last_name)
def test_book_preview_book_doesnot_exist(self):
some_book = BookFactory()
# there is definitely no book with invalid_id in the DB
invalid_id = some_book.id + 1
# the function should raise an exception if the id is invalid
with self.assertRaises(Book.DoesNotExist):
book_preview(invalid_id)
class BookSearchTestCase(TestCase):
def test_search_empty_title(self):
books = [BookFactory() for _ in range(5)]
title = ""
result = search_by_title(title)
self.assertEqual(len(books), len(result))
def test_search_some_unique_title(self):
books = [BookFactory() for _ in range(5)]
book_to_search = books[1]
title = book_to_search.title
result = search_by_title(title)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].title, title)
def test_search_title_doesnot_exist(self):
[BookFactory() for _ in range(5)]
title = "Some cook title which doesn't exist in DB"
result = search_by_title(title)
self.assertEqual(result, [])
| 2.765625 | 3 |
tangoObjects.py | 15-411/Tango | 2 | 12778967 |
# tangoREST.py
#
# Implements objects used to pass state within Tango.
#
import redis
import pickle
import Queue
import logging
from datetime import datetime, timedelta
from config import Config
redisConnection = None
# Pass in an existing connection to redis, sometimes necessary for testing.
def getRedisConnection(connection=None):
global redisConnection
if redisConnection is None:
if connection:
redisConnection = connection
return redisConnection
redisConnection = redis.StrictRedis(
host=Config.REDIS_HOSTNAME, port=Config.REDIS_PORT, db=0)
return redisConnection
class InputFile():
"""
InputFile - Stores pointer to the path on the local machine and the
name of the file on the destination machine
"""
def __init__(self, localFile, destFile):
self.localFile = localFile
self.destFile = destFile
def __repr__(self):
return "InputFile(localFile: %s, destFile: %s)" % (self.localFile,
self.destFile)
class TangoMachine():
"""
TangoMachine - A description of the Autograding Virtual Machine
"""
def __init__(self, name="DefaultTestVM", image=None, vmms=None,
network=None, cores=None, memory=None, fallback_instance_type=None, disk=None,
domain_name=None, ec2_id=None, resume=None, id=None,
instance_id=None):
self.name = name
self.image = image
self.network = network
self.cores = cores
self.memory = memory
self.fallback_instance_type = fallback_instance_type
self.disk = disk
self.vmms = vmms
self.domain_name = domain_name
self.ec2_id = ec2_id
self.resume = resume
self.id = id
self.instance_id = instance_id
# The following attributes can instruct vmms to set the test machine
# aside for further investigation.
self.keepForDebugging = False
self.notes = None
def __repr__(self):
return "TangoMachine(image: %s, vmms: %s, id: %s)" % (self.image, self.vmms, self.id)
class TangoJob():
"""
TangoJob - A job that is to be run on a TangoMachine
"""
def __init__(self, vm=None,
outputFile=None, name=None, limitingKey=None, input=None,
notifyURL=None, timeout=0,
maxOutputFileSize=Config.MAX_OUTPUT_FILE_SIZE,
accessKeyId=None, accessKey=None):
self.assigned = False
self.retries = 0
self.vm = vm
if input is None:
self.input = []
else:
self.input = input
self.outputFile = outputFile
self.name = name
self.limitingKey = limitingKey
self.notifyURL = notifyURL
self.timeout = timeout
self.trace = []
self.maxOutputFileSize = maxOutputFileSize
self._remoteLocation = None
self.accessKeyId = accessKeyId
self.accessKey = accessKey
self.tm = datetime.now()
self.startTime = None
self.endTime = None
def makeAssigned(self):
self.syncRemote()
self.assigned = True
self.updateRemote()
def makeUnassigned(self):
self.syncRemote()
self.assigned = False
self.updateRemote()
def isNotAssigned(self):
self.syncRemote()
return not self.assigned
def appendTrace(self, trace_str):
# trace attached to the object can be retrieved and sent to the REST API caller
self.syncRemote()
self.trace.append("%s|%s" % (datetime.now().ctime(), trace_str))
self.updateRemote()
def setId(self, new_id):
self.id = new_id
if self._remoteLocation is not None:
dict_hash = self._remoteLocation.split(":")[0]
key = self._remoteLocation.split(":")[1]
dictionary = TangoDictionary(dict_hash)
dictionary.delete(key)
self._remoteLocation = dict_hash + ":" + str(new_id)
self.updateRemote()
# Record in the job object that now is the time the job started.
def recordStartTime(self):
self.syncRemote()
self.startTime = datetime.now()
self.updateRemote()
# Record in the job object that now is the time the job completed.
def recordEndTime(self):
self.syncRemote()
self.endTime = datetime.now()
self.updateRemote()
# Calculate the running time of the job.
# If the job hasn't started (as determined by the presence of the startTime
# field), then return the timedelta value corresponding to 0.
# If the job has started but not finished (as determined by the presence of
# the endTime field), then return the timedelta between startTime and now.
# If the job has finished, then return the timedelta between startTime and
# endTime.
def runningTime(self):
if self.startTime == None:
return timedelta()
if self.endTime == None:
return datetime.now() - self.startTime
return self.endTime - self.startTime
def syncRemote(self):
if Config.USE_REDIS and self._remoteLocation is not None:
dict_hash = self._remoteLocation.split(":")[0]
key = self._remoteLocation.split(":")[1]
dictionary = TangoDictionary(dict_hash)
temp_job = dictionary.get(key)
if temp_job:
self.updateSelf(temp_job)
def updateRemote(self):
if Config.USE_REDIS and self._remoteLocation is not None:
dict_hash = self._remoteLocation.split(":")[0]
key = self._remoteLocation.split(":")[1]
dictionary = TangoDictionary(dict_hash)
dictionary.set(key, self)
def updateSelf(self, other_job):
self.assigned = other_job.assigned
self.retries = other_job.retries
self.vm = other_job.vm
self.input = other_job.input
self.outputFile = other_job.outputFile
self.name = other_job.name
self.limitingKey = other_job.limitingKey
self.notifyURL = other_job.notifyURL
self.timeout = other_job.timeout
self.trace = other_job.trace
self.maxOutputFileSize = other_job.maxOutputFileSize
self.startTime = other_job.startTime
self.endTime = other_job.endTime
def TangoIntValue(object_name, obj):
if Config.USE_REDIS:
return TangoRemoteIntValue(object_name, obj)
else:
return TangoNativeIntValue(object_name, obj)
class TangoRemoteIntValue():
def __init__(self, name, value, namespace="intvalue"):
"""The default connection parameters are: host='localhost', port=6379, db=0"""
self.__db = getRedisConnection()
self.key = '%s:%s' % (namespace, name)
cur_val = self.__db.get(self.key)
if cur_val is None:
self.set(value)
def increment(self):
return self.__db.incr(self.key)
def get(self):
return int(self.__db.get(self.key))
def set(self, val):
return self.__db.set(self.key, val)
class TangoNativeIntValue():
def __init__(self, name, value, namespace="intvalue"):
self.key = '%s:%s' % (namespace, name)
self.val = value
def increment(self):
self.val = self.val + 1
return self.val
def get(self):
return self.val
def set(self, val):
self.val = val
return val
def TangoQueue(object_name):
if Config.USE_REDIS:
return TangoRemoteQueue(object_name)
else:
return Queue.Queue()
class TangoRemoteQueue():
"""Simple Queue with Redis Backend"""
def __init__(self, name, namespace="queue"):
"""The default connection parameters are: host='localhost', port=6379, db=0"""
self.__db = getRedisConnection()
self.key = '%s:%s' % (namespace, name)
self.name = name
# for debugging. return a readable string representation
def dump(self):
unpickled_obj = self.__db.lrange(self.key, 0, -1)
objs = []
for obj in unpickled_obj:
objs.append(pickle.loads(obj))
return objs
def qsize(self):
"""Return the approximate size of the queue."""
return self.__db.llen(self.key)
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return self.qsize() == 0
def put(self, item):
"""Put item into the queue."""
pickled_item = pickle.dumps(item)
self.__db.rpush(self.key, pickled_item)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args block is true and timeout is None (the default), block
if necessary until an item is available."""
if block:
item = self.__db.blpop(self.key, timeout=timeout)
else:
item = self.__db.lpop(self.key)
# if item:
# item = item[1]
item = pickle.loads(item)
return item
def make_empty(self):
while True:
item = self.__db.lpop(self.key)
if item is None:
break
def get_nowait(self):
"""Equivalent to get(False)."""
return self.get(False)
def __getstate__(self):
ret = {}
ret['key'] = self.key
return ret
def __setstate__(self, dict):
self.__db = getRedisConnection()
self.__dict__.update(dict)
# This is an abstract class that decides on
# if we should initiate a TangoRemoteDictionary or TangoNativeDictionary
# Since there are no abstract classes in Python, we use a simple method
def TangoDictionary(object_name):
if Config.USE_REDIS:
return TangoRemoteDictionary(object_name)
else:
return TangoNativeDictionary()
# Dictionary that maintains a separate dictionary D.
# Suppose the original dictionary contains mappings k --> v.
# Then a wrapping dictionary D will still contain mappings k --> v,
# but also maintains a side dictionary D' with mappings f(v) --> (k, v).
# This dictionary D' is stored as the "wrapped" field.
# f should not change over the relevant lifetime of the value.
class WrappingDictionary():
def __init__(self, object_name, dictionary, f):
self.wrapped = TangoDictionary(object_name)
self.f = f
self.dictionary = dictionary
def set(self, id, obj):
self.wrapped.set(self.f(obj), id)
return self.dictionary.set(id, obj)
def get(self, id):
return self.dictionary.get(id)
def getWrapped(self, k):
id = self.wrapped.get(k)
val = self.dictionary.get(id)
if id is None or val is None:
return None
else:
return (id, val)
def keys(self):
return self.dictionary.keys()
def values(self):
return self.dictionary.values()
def delete(self, id):
self.wrapped.delete(self.f(self.dictionary.get(id)))
return self.dictionary.delete(id)
def _clean(self):
self.wrapped._clean()
return self.dictionary._clean()
def iteritems(self):
return self.dictionary.iteritems();
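# Minimal usage sketch (hypothetical jobs; assumes Config.USE_REDIS is False so that
# TangoDictionary returns a TangoNativeDictionary):
#   jobs = WrappingDictionary("liveJobs_wrapped", TangoNativeDictionary(), lambda job: job.name)
#   jobs.set(1, TangoJob(name="job-a"))
#   jobs.get(1).name          # -> "job-a"           (forward lookup by id)
#   jobs.getWrapped("job-a")  # -> (1, <TangoJob>)   (reverse lookup via f(v) = job.name)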
class TangoRemoteDictionary():
def __init__(self, object_name):
self.r = getRedisConnection()
self.hash_name = object_name
self.log = logging.getLogger("TangoRemoteDictionary")
def set(self, id, obj):
pickled_obj = pickle.dumps(obj)
if hasattr(obj, '_remoteLocation'):
obj._remoteLocation = self.hash_name + ":" + str(id)
self.r.hset(self.hash_name, str(id), pickled_obj)
return str(id)
def get(self, id):
if str(id) in self.r.hkeys(self.hash_name):
unpickled_obj = self.r.hget(self.hash_name, str(id))
obj = pickle.loads(unpickled_obj)
return obj
else:
return None
def keys(self):
return self.r.hkeys(self.hash_name)
def values(self):
vals = self.r.hvals(self.hash_name)
valslist = []
for val in vals:
valslist.append(pickle.loads(val))
return valslist
def delete(self, id):
self._remoteLocation = None
self.r.hdel(self.hash_name, id)
def _clean(self):
# only for testing
self.r.delete(self.hash_name)
def iteritems(self):
# find all non-empty spots in the job id spectrum (actual jobs) and sort
# by the time of creation to prevent starvation of jobs with larger ids
return iter(sorted([(i, self.get(i)) for i in xrange(1,Config.MAX_JOBID+1)
if self.get(i) != None], key=lambda x: x[1].tm))
class TangoNativeDictionary():
def __init__(self):
self.dict = {}
def set(self, id, obj):
self.dict[str(id)] = obj
def get(self, id):
if str(id) in self.dict.keys():
return self.dict[str(id)]
else:
return None
def keys(self):
return self.dict.keys()
def values(self):
return self.dict.values()
def delete(self, id):
if str(id) in self.dict.keys():
del self.dict[str(id)]
def iteritems(self):
return iter(sorted([(i, self.get(i)) for i in xrange(1,Config.MAX_JOBID+1)
if self.get(i) != None], key=lambda x: x[1].tm))
def _clean(self):
# only for testing
return
| 2.515625 | 3 |
test.py | DITDSI/Projet1 | 0 | 12778968 |
print("test avec git")
| 0.714844 | 1 |
einvoice/einvoice/data/create_sample_data.py | BPC-OpenSourceTools/Discovery-Tools | 2 | 12778969 |
<gh_stars>1-10
#!/usr/bin/env python3
# pylint: disable=R0902, W1514
# disabling "Too many instance attributes," "using open without specifying and
# endcoding,"" which is a known bug in pylint.
# File: create_sample_data.py
# About: Create test e-Invoices using fake data sets.
# Development: <NAME>
# Date: 2021-06-22 (June 22, 2021)
#
"""
Classes and functions to generate sample/test e-Invoices.
Test data is generated in two ways.
1. Read in from a file.
2. Additional dummy data is generated on the fly by the Faker package.
Data generated by Faker is new every run. Data sets from the CSV files
are static.
Data items are combined into an e-Invoice line item and stored as a list.
The output of this sample list of line items is written to a JSON file.
Usage:
gen = CreateSampleData()
line_items = gen.create_sample_list_items(count=10)
"""
import csv
import random
import logging
from json import dumps
from faker import Faker
from einvoice.app_logging import create_logger
# from einvoice.line_item import LineItem
# from einvoice.party_address import Address
class CreateSampleData:
"""An instance of the CreateSampleData object.
The job of this class/object is to generate sample data for
an e-Invoice.
Args:
Attributes:
items[]: A list of line_items to populate an e-Invoice. Populated by
reading in from a CSV file.
f.per_item[]: A list of item sizes/groups/types to
populate an e-Invoice.
Populated by reading in from a CSV file.
Returns:
Raises:
"""
def __init__(self):
self.log = create_logger("create_sample_data")
self.log.info("Generating e-Invoice Data!")
self.fake = Faker()
self.companies = []
self.org_id = ""
self.name = ""
self.address_1 = ""
self.address_2 = ""
self.city = ""
self.state = ""
self.zip_code = ""
self.company = ""
self.reader = None
self.items = []
self.per_item = []
self.line_items = []
self.line_item_id = ""
self.line_item_quantity = 0
self.line_item_per_item = ""
self.line_item_price_per_item = 0.0
self.line_item_name = ""
self.line_item_total = 0.0
self.sample_line_item = ""
def generate_fake_address(self, count=1):
"""Generate as many fake addresses as requested.
Args:
count:
The number of addresses requested.
Raises:
Returns:
A list of JSON entries with each one representing an
address.
"""
self.log = create_logger("create_sample_data")
Faker.seed(0)
# self.companies = []
for _ in range(count):
self.org_id = self.fake.bothify(text='????-######',
letters='ACDEFGHIJKLMNOPQR'
'SQSTeUVWXYZ')
self.name = self.fake.company()
self.address_1 = "Attn: " + self.fake.name()
self.address_2 = self.fake.street_address()
self.city = self.fake.city()
self.state = self.fake.state()
self.zip_code = self.fake.postcode()
# Create a JSON string of the Company.
self.company = str({"org_id": self.org_id, "name": self.name,
"address_1": self.address_1,
"address_2": self.address_2,
"city": self.city, "state": self.state,
"zip_code": self.zip_code})
self.companies.append(self.company)
self.log.debug("Created a data for company: %s", self.company)
return self.companies
def create_sample_list_items(self, count=1):
"""Generate as many fake line_items as requested.
Args:
_count:
The number of line_items requested.
Raises:
Returns:
A list of JSON entries with each one representing a line item.
"""
self.log = create_logger("create_sample_data")
with open('./item_list.csv', newline='') as csvfile:
self.reader = csv.reader(csvfile)
for row in self.reader:
self.items.append(row)
with open('./per_item_list.csv', newline='') as csvfile:
self.reader = csv.reader(csvfile)
for row in self.reader:
self.per_item.append(row)
for _ in range(count):
self.line_item_id = self.fake.bothify(text='??????-###',
letters='ACDEFGHIJKLMN'
'OPQRSQSTeUVWXYZ')
self.line_item_quantity = random.randint(1, 10)
self.line_item_per_item = random.choice(self.per_item)
self.line_item_price_per_item = (random.randint(100, 10000))/100
self.line_item_name = random.choice(self.items)
self.line_item_total = (self.line_item_quantity
* self.line_item_price_per_item)
# Create a JSON string of the sample_line_item
self.sample_line_item = str({'Item ID': self.line_item_id,
'Quantity': self.line_item_quantity,
'Per Item': self.line_item_per_item,
'Price per Item':
self.line_item_price_per_item,
'Item': self.line_item_name,
'Total': self.line_item_total})
self.line_items.append(self.sample_line_item)
self.log.debug("Created line item entry: %s",
self.sample_line_item)
return self.line_items
def write_json_to_file(self, json_object):
"""Writes data to a json file."""
self.log = create_logger("create_sample_data")
if len(json_object) < 1:
self.log.debug("Risk of EOB with no objects to write.")
return
for i, obj in enumerate(json_object):
json_str = dumps(obj.__dict__)
logging.debug("List item %s: %s", str(i), json_str)
print(json_str)
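# Minimal usage sketch (assumes item_list.csv and per_item_list.csv exist next to the
# script, as the methods above require):
if __name__ == "__main__":
    generator = CreateSampleData()
    addresses = generator.generate_fake_address(count=3)
    line_items = generator.create_sample_list_items(count=5)
    print(addresses[0])
    print(line_items[0])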
| 2.578125 | 3 |
playlist/myPlaylist.py | seanomisteal/PythonPlay | 0 | 12778970 |
<filename>playlist/myPlaylist.py
import plistlib
def main():
filename = "test-data\maya.xml"
#findDuplicates(filename)
#filenames = ("test-data\pl1.xml", "test-data\pl2.xml")
#findCommonTracks(filenames)
filename = "test-data\mymusic.xml"
plotStats(filename)
def plotStats(filename):
# read in a playlist
plist = plistlib.readPlist(filename)
# get the tracks from the playlist
tracks = plist['Tracks']
# create list of song ratings and track durations
ratings = []
durations = []
# iterate through the tracks
for trackId, track in tracks.items():
try:
ratings.append(track['Album Rating'])
durations.append(track['Total Time'])
except:
pass
# ensure that valid data was collected
if ratings == [] or durations == []:
print("No valid Album Rating/Duration data in %s." % filename)
return
def findCommonTracks(filenames):
trackNameSets = []
for filename in filenames:
trackNames = set()
plist = plistlib.readPlist(filename)
tracks = plist["Tracks"]
for trackId, track in tracks.items():
try:
trackNames.add(track['Name'])
except:
pass
trackNameSets.append(trackNames)
# get the set of common tracks
commonTracks = set.intersection(*trackNameSets)
# write to file
if len(commonTracks) > 0:
f = open("common.txt", "wb")
for val in commonTracks:
s = "%s\n" % val
f.write(s.encode("UTF-8"))
f.close()
print("%d common tracks found" % len(commonTracks))
else:
print("No common tracks!")
def findDuplicates(filename):
print('Finding duplicate tracks in %s...' % filename)
# read in a playlist
plist = plistlib.readPlist(filename)
# get the tracks from the Tracks dictionary
tracks = plist['Tracks']
# create a track name dictionary
trackNames = {}
# iterate through the tracks
for trackId, track in tracks.items():
try:
name = track['Name']
duration = track['Total Time']
if (name, duration//1000) in trackNames:
count = trackNames[(name, duration//1000)]
trackNames[(name, duration//1000)] = count+1
else:
trackNames[(name, duration//1000)] = 1
except:
pass
dups = []
for k, v in trackNames.items():
if v > 1:
dups.append((v, k))
# save duplicates to a file
if len(dups) > 0:
print("Found %d duplicates. Track names saved to dup.txt" % len(dups))
else:
print("No duplicate tracks found!")
f = open("dups.txt", "w")
for val in dups:
f.write("[%d] %s\n" % (val[0], val[1]))
f.close()
# main method
if __name__ == '__main__':
main()
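# Note: plistlib.readPlist (used above) was deprecated in Python 3.4 and removed in 3.9.
# A minimal sketch of the modern replacement, which returns the same parsed structure:
#   with open(filename, 'rb') as fp:
#       plist = plistlib.load(fp)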
| 3.375 | 3 |
subastas_repo/personas/models.py | diegoduncan21/subastas | 0 | 12778971 |
# -*- coding: utf-8 -*-
from django.db import models
from model_utils import Choices
class Persona(models.Model):
nombres = models.CharField(max_length=100, blank=True, null=True)
apellidos = models.CharField(max_length=100, blank=True, null=True)
razon_social = models.CharField(max_length=100, blank=True, null=True)
dni = models.CharField(max_length=10, blank=True, null=True, unique=True)
cuit = models.CharField(max_length=15, blank=True, null=True)
domicilio = models.ForeignKey("Domicilio")
telefono = models.CharField(max_length=20)
def __unicode__(self):
return "%s, %s (%s)" % (self.apellidos, self.nombres, self.dni)
class Titulo(models.Model):
nombre = models.CharField(max_length=100)
def __unicode__(self):
return self.nombre
class Profesional(models.Model):
nombres = models.CharField(max_length=100)
apellidos = models.CharField(max_length=100)
dni = models.CharField('DNI',
max_length=10,
blank=True,
null=True)
titulo = models.ForeignKey(Titulo, blank=True, null=True)
matricula = models.CharField('Número Matrícula',
max_length=50,
blank=True,
null=True)
telefono = models.CharField('Teléfono',
max_length=20,
blank=True,
null=True)
def __unicode__(self):
return "%s, %s" % (self.apellidos, self.nombres)
class Meta:
verbose_name_plural = 'Profesionales'
class Domicilio(models.Model):
direccion = models.CharField(max_length=80)
descripcion = models.TextField(blank=True, null=True)
localidad = models.ForeignKey('Localidad')
def __unicode__(self):
return self.direccion
class Localidad(models.Model):
nombre = models.CharField(max_length=50)
codigo_postal = models.CharField(max_length=15)
def __unicode__(self):
return "%s (%s)" % (self.nombre, self.codigo_postal)
class Meta:
verbose_name_plural = 'Localidades'
| 2.171875
| 2
|
exp_runner/interfaces.py
|
slipnitskaya/exp-runner
| 3
|
12778972
|
<filename>exp_runner/interfaces.py
import abc
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
from typing import Iterable
from typing import NoReturn
class Dataset(abc.ABC):
@abc.abstractmethod
def __getitem__(self, index: int) -> Any:
pass
@abc.abstractmethod
def __len__(self) -> int:
pass
@property
@abc.abstractmethod
def training(self) -> bool:
pass
class Saver(abc.ABC):
@abc.abstractmethod
def save(self, report: List[Dict[str, Any]]) -> NoReturn:
pass
class Metric(abc.ABC):
@abc.abstractmethod
def __call__(self,
y_true: Iterable[Union[float, int]],
y_pred: Iterable[Union[float, int]]) -> Union[float, Tuple[float, ...]]:
pass
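# Hedged sketch, not part of exp-runner: a toy in-memory dataset and an
# accuracy metric implementing the interfaces above, just to illustrate the
# expected shapes of the abstract methods. All names below are assumptions.
class InMemoryDataset(Dataset):

    def __init__(self, samples: List[Any], training: bool = True) -> None:
        self.samples = samples
        self._training = training

    def __getitem__(self, index: int) -> Any:
        return self.samples[index]

    def __len__(self) -> int:
        return len(self.samples)

    @property
    def training(self) -> bool:
        return self._training


class Accuracy(Metric):

    def __call__(self,
                 y_true: Iterable[Union[float, int]],
                 y_pred: Iterable[Union[float, int]]) -> float:
        # fraction of matching labels
        pairs = list(zip(y_true, y_pred))
        return sum(t == p for t, p in pairs) / len(pairs)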
| 2.546875
| 3
|
kadal/reliability_analysis/akmcs.py
|
timjim333/KADAL
| 7
|
12778973
|
<filename>kadal/reliability_analysis/akmcs.py
import time
import numpy as np
import matplotlib.pyplot as plt
from kadal.misc.sampling.samplingplan import realval
from kadal.testcase.RA.testcase import evaluate
class AKMCS:
"""Create AK-MCS model for reliability analysis
(Active Kriging - Monte Carlo Simulation)
Args:
krigobj (kriging_model.Kriging): Kriging object for AKMCS
analysis.
akmcsInfo (dict): Dictionary that contains AKMCS model
information, with the items:
akmcsInfo["init_samp"] (np.ndarray): Initial Monte-Carlo
population.
akmcsInfo["maxupdate"] (int): Maximum number of updates.
Defaults to 120.
akmcsInfo["problem"] (str): Type of case.
Returns:
updatedX (np.ndarray): updated samples.
minUiter (np.ndarray): minimum U value for each iteration
"""
def __init__(self, krigobj, akmcsInfo):
"""
Initialize akmcs
Args:
krigobj (kriging_model.Kriging): Kriging object for AKMCS
analysis.
akmcsInfo (dict): Dictionary that contains AKMCS model
information, with the keys:
akmcsInfo["init_samp"] (np.ndarray): Initial
Monte-Carlo population.
akmcsInfo["maxupdate"] (int): Maximum number of updates.
Defaults to 120.
akmcsInfo["problem"] (str): Type of case.
"""
akmcsInfo = akmcsInfocheck(akmcsInfo)
self.krigobj = krigobj
self.akmcsInfo = akmcsInfo
self.init_samp = akmcsInfo["init_samp"]
self.maxupdate = akmcsInfo["maxupdate"]
self.nsamp = np.size(self.init_samp, axis=0)
self.Gx = np.zeros(shape=[self.nsamp, 1])
self.sigmaG = np.zeros(shape=[self.nsamp, 1])
self.stop_criteria = 100 # assign large number
self.logging = None
def run(
self,
autoupdate=True,
disp=True,
savedatato=None,
logging=False,
saveimageto=None,
plotdatapos=None,
plotdataneg=None,
loggingAPIkey=None,
logname=None,
logworkspace=None,
):
"""
Run AKMCS analysis
Args:
autoupdate (bool): Perform automatic update on design space or not. Default to True.
disp (bool): Display progress or not. Default to True.
savedatato (str): Filename to save update data. e.g.: 'filename.csv'
Return:
None
"""
# logging
if logging:
# disable logging
print("Logging feature is currently disabled.")
pass
else:
pass
# Calculate Gx and SigmaG
# Split init_samp to avoid memory error
krig_initsamp = self.krigobj.KrigInfo["X"]
t1 = time.time()
run_times = int(np.ceil(self.nsamp / 10000))
for i in range(run_times):
start = i * 10000
if i != (run_times - 1):
stop = (i + 1) * 10000
else:
stop = self.nsamp
init_samp = self.init_samp[start:stop, :]
gx, sigmag = self.krigobj.predict(init_samp, ["pred", "s"])
self.Gx[start:stop, :] = gx
self.sigmaG[start:stop, :] = sigmag
t2 = time.time()
# Calculate probability of failure
self.Pf = self.pfcalc()
# Calculate learning function U
self.lfucalc()
self.stopcrit()
self.updateX = np.array([self.xnew])
self.minUiter = np.array([self.minU])
if disp:
print(f"Done iter no: 0, Pf: {self.Pf}, minU: {self.minU}")
# Update samples automatically
while autoupdate:
labeladded = False
for i_update in range(self.maxupdate):
# Evaluate new samples and append into Kriging object information
t = time.time()
ynew = evaluate(self.xnew, type=self.akmcsInfo["problem"])
self.krigobj.KrigInfo["y"] = np.vstack(
(self.krigobj.KrigInfo["y"], ynew)
)
self.krigobj.KrigInfo["X"] = np.vstack(
(self.krigobj.KrigInfo["X"], self.xnew)
)
self.krigobj.KrigInfo["nsamp"] += 1
# standardize model and train updated kriging model
t3 = time.time()
self.krigobj.standardize()
self.krigobj.train(disp=False)
t4 = time.time()
# Calculate Gx and SigmaG
# Split init_samp to avoid memory error
run_times = int(np.ceil(self.nsamp / 10000))
for ii in range(run_times):
start = ii * 10000
if ii != (run_times - 1):
stop = (ii + 1) * 10000
else:
stop = self.nsamp
init_samp = self.init_samp[start:stop, :]
gx, sigmag = self.krigobj.predict(init_samp, ["pred", "s"])
self.Gx[start:stop, :] = gx
self.sigmaG[start:stop, :] = sigmag
t5 = time.time()
# Calculate Pf, COV and LFU
self.Pf = self.pfcalc()
self.cov = self.covpf()
self.lfucalc()
self.stopcrit()
t6 = time.time()
# Update variables
self.updateX = np.vstack((self.updateX, self.xnew))
self.minUiter = np.vstack((self.minUiter, self.minU))
elapsed = time.time() - t
if disp:
print(f"iter no: {i_update+1}, Pf: {self.Pf}, "
f"stopcrit: {self.stop_criteria}, "
f"time(s): {elapsed}, ynew: {ynew}")
if logging:
self.logging.log_parameter(
"krigtype", self.krigobj.KrigInfo["type"]
)
outdict = {
"Prob_fail": self.Pf,
"stopcrit": self.stop_criteria,
"time(s)": elapsed,
}
self.logging.log_metrics(outdict, step=i_update + 1)
if savedatato is not None:
temparray = np.array([i_update, self.Pf, self.stop_criteria, elapsed])
if i_update == 0:
totaldata = temparray[:]
else:
totaldata = np.vstack((totaldata, temparray))
filename = savedatato
np.savetxt(
filename,
totaldata,
delimiter=",",
header="iter,Pf,stopcrit,time(s)",
)
else:
pass
if saveimageto is not None:
imagefile = saveimageto + str(i_update) + ".PNG"
title = "Pf = " + str(self.Pf)
plt.figure(0, figsize=[10, 9])
if not labeladded:
plt.scatter(
plotdatapos[:, 0],
plotdatapos[:, 1],
c="yellow",
label="Feasible",
)
plt.scatter(
plotdataneg[:, 0],
plotdataneg[:, 1],
c="cyan",
label="Infeasible",
)
plt.scatter(
krig_initsamp[:, 0],
krig_initsamp[:, 1],
c="red",
label="Initial Kriging Population",
)
plt.scatter(
self.updateX[:, 0],
self.updateX[:, 1],
s=75,
c="black",
marker="x",
label="Update",
)
labeladded = True
else:
plt.scatter(plotdatapos[:, 0], plotdatapos[:, 1], c="yellow")
plt.scatter(plotdataneg[:, 0], plotdataneg[:, 1], c="cyan")
plt.scatter(krig_initsamp[:, 0], krig_initsamp[:, 1], c="red")
plt.scatter(
self.updateX[:, 0],
self.updateX[:, 1],
s=75,
c="black",
marker="x",
)
plt.xlabel("X1", fontsize=18)
plt.ylabel("X2", fontsize=18)
plt.tick_params(axis="both", which="both", labelsize=16)
plt.legend(loc=1, prop={"size": 15})
plt.title(title, fontdict={"fontsize": 20})
plt.savefig(imagefile, format="png")
else:
pass
# Break condition
if self.stop_criteria <= 0.05 and i_update >= 15:
break
else:
pass
print(f"COV: {self.cov}")
if self.cov <= 0.05:
break
else:
pass
break # temporary break for debugging, delete/comment this line later
def pfcalc(self):
nGless = len([i for i in self.Gx if i <= 0])
nsamp = np.size(self.init_samp, axis=0)
Pf = nGless / nsamp
return Pf
def covpf(self):
nmc = np.size(self.init_samp, axis=0)
if self.Pf == 0:
cov = 1000
else:
cov = np.sqrt((1 - self.Pf) / (self.Pf * nmc))
return cov
def lfucalc(self):
self.U = abs(self.Gx) / self.sigmaG.reshape(-1, 1)
self.minU = np.min(self.U)
minUloc = np.argmin(self.U)
self.xnew = self.init_samp[minUloc, :]
def stopcrit(self):
nsamp = np.size(self.init_samp, axis=0)
temp1 = self.Gx - 1.96 * self.sigmaG.reshape(-1, 1)
temp2 = self.Gx + 1.96 * self.sigmaG.reshape(-1, 1)
pfp = len([i for i in temp1 if i <= 0]) / nsamp
pfn = len([i for i in temp2 if i <= 0]) / nsamp
pf0 = len([i for i in self.Gx if i <= 0]) / nsamp
if pf0 == 0:
self.stop_criteria = 100
else:
self.stop_criteria = (pfp - pfn) / pf0
def mcpopgen(
lb=None,
ub=None,
n_order=6,
n_coeff=1,
type="random",
ndim=2,
stddev=1,
mean=0,
rand_seed=None,
):
if rand_seed is not None:
np.random.seed(rand_seed)
nmc = int(n_coeff * 10 ** n_order)
if type.lower() == "normal" or type.lower() == "gaussian":
pop = stddev * np.random.randn(nmc, ndim) + mean
elif type.lower() == "lognormal":
var = stddev ** 2
sigma = np.sqrt(np.log(var / (mean ** 2) + 1))
mu = np.log((mean ** 2) / np.sqrt(var + mean ** 2))
pop = np.exp(sigma * np.random.randn(nmc, ndim) + mu)
elif type.lower() == "gumbel":
beta = (stddev / np.pi) * np.sqrt(6)
pop = np.random.gumbel(mean, beta, (nmc, ndim))
elif type.lower() == "random":
if lb is None or ub is None:
raise ValueError("type 'random' is selected, please provide lower "
"and upper bound values")
else:
pop = realval(lb, ub, np.random.rand(nmc, len(lb)))
else:
raise ValueError("Monte Carlo sampling type not supported")
return pop
def akmcsInfocheck(akmcsInfo):
""" Helper function to check the AKMCS dictionary.
Checks akmcsInfo dict and sets default values, if required
parameters are not supplied.
Args:
akmcsInfo (dict): Dictionary that contains AKMCS information.
Returns:
akmcsInfo: Checked/Modified AKMCS Information.
"""
if "init_samp" not in akmcsInfo:
raise ValueError('akmcsInfo["init_samp"] must be defined')
else:
pass
if "maxupdate" not in akmcsInfo:
akmcsInfo["maxupdate"] = 120
print("Maximum update is set to ", akmcsInfo["maxupdate"])
else:
print("Maximum update is set to ", akmcsInfo["maxupdate"], " by user.")
if "problem" not in akmcsInfo:
raise ValueError('akmcsInfo["problem"] must be defined')
else:
pass
return akmcsInfo
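if __name__ == "__main__":
    # Hedged usage sketch (not from the original module): draw a small
    # lognormal Monte-Carlo population with mcpopgen. The parameter values
    # below are illustrative assumptions only.
    demo_pop = mcpopgen(type="lognormal", ndim=2, n_order=3, n_coeff=1,
                        mean=1.0, stddev=0.2, rand_seed=42)
    print(demo_pop.shape)  # (1000, 2)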
| 2.390625
| 2
|
sandbox/test_Linux.py
|
gwiederhecker/MPh
| 1
|
12778974
|
<gh_stars>1-10
"""
Tests running a stand-alone Comsol client on Linux.
The script does not depend on MPh, but starts the Comsol client
directly via the Java bridge JPype. Paths to the Comsol installation
are hard-coded for an installation of Comsol 5.6 at the default
location. Other versions can be tested by editing the assignment to
the `root` variable.
Even though this script sets up all environment variables just like
the Comsol documentation suggests for Java development with the
Eclipse IDE (on pages 23 and 916 in the Programming Reference Manual
of Comsol 5.6), it still fails to work unless the user exports
`LD_LIBRARY_PATH` in the shell before starting the script, e.g., by
adding the following lines at the end of `.bashrc`:
```shell
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\
/usr/local/comsol56/multiphysics/lib/glnxa64:\
/usr/local/comsol56/multiphysics/ext/graphicsmagick/glnxa64
```
It is odd that this is necessary because, as this script demonstrates,
the Java VM is well aware of the above environment variable even if
the user does not explicitly `export` it. It is also not Java that has
trouble finding the external (non-Java) libraries. The issue seems to
occur because the libraries themselves, as they are being loaded
dynamically, have trouble finding each other.
Unfortunately, this means that, on Linux, MPh does not work "out of
the box", but must rely on the user to intervene, at least as far as
the stand-alone client is concerned.
"""
import jpype
import jpype.imports
from packaging import version
from pathlib import Path
import os
if version.parse(jpype.__version__) >= version.parse('1.2.2_dev0'):
import jpype.config
jpype.config.destroy_jvm = False
else:
import atexit
@atexit.register
def exit_JVM():
if jpype.isJVMStarted():
jpype.java.lang.Runtime.getRuntime().exit(0)
print('Setting environment variables.')
root = Path('/usr/local/comsol56/multiphysics')
lib = str(root/'lib'/'glnxa64')
gcc = str(root/'lib'/'glnxa64'/'gcc')
ext = str(root/'ext'/'graphicsmagick'/'glnxa64')
cad = str(root/'ext'/'cadimport'/'glnxa64')
pre = str(root/'java'/'glnxa64'/'jre'/'lib'/'amd64'/'libjsig.so')
var = 'LD_LIBRARY_PATH'
if var in os.environ:
path = os.environ[var].split(os.pathsep)
else:
path = []
if lib not in path:
os.environ[var] = os.pathsep.join([lib, gcc, ext, cad] + path)
vars = ('MAGICK_CONFIGURE_PATH', 'MAGICK_CODER_MODULE_PATH',
'MAGICK_FILTER_MODULE_PATH')
for var in vars:
os.environ[var] = ext
os.environ['LD_PRELOAD'] = pre
os.environ['LC_NUMERIC'] = os.environ['LC_ALL'] = 'C'
print(f'Starting Comsol\'s Java VM via JPype {jpype.__version__}.')
jvm = root/'java'/'glnxa64'/'jre'/'lib'/'amd64'/'server'/'libjvm.so'
jpype.startJVM(str(jvm), classpath=str(root/'plugins'/'*'))
print('Inspecting environment from the Java side.')
path = jpype.java.lang.System.getProperty('java.library.path') or ''
print('Java library search path is:')
for folder in path.split(os.pathsep):
print(f' {folder}')
path = jpype.java.lang.System.getenv('PATH') or ''
print('System binary search path is:')
for folder in path.split(os.pathsep):
print(f' {folder}')
path = jpype.java.lang.System.getenv('LD_LIBRARY_PATH') or ''
print('System library search path is:')
for folder in path.split(os.pathsep):
print(f' {folder}')
print('Starting stand-alone Comsol client.')
from com.comsol.model.util import ModelUtil as client
client.initStandalone(False)
client.loadPreferences()
print('Testing if Comsol can load shared libraries.')
from com.comsol.nativejni import FlNativeUtil
FlNativeUtil.ensureLibIsLoaded()
print('Loading Comsol model.')
tag = client.uniquetag('model')
model = client.load(tag, '../tests/capacitor.mph')
print('Loading external image.')
tags = [str(tag) for tag in model.func().tags()]
names = [model.func(tag).label() for tag in tags]
tag = tags[names.index('test_function')]
model.func(tag).discardData()
model.func(tag).set('filename', '../tests/gaussian.tif')
model.func(tag).importData()
print('Solving model.')
for tag in model.study().tags():
model.study(tag).run()
| 2.25
| 2
|
data/studio21_generated/introductory/4746/starter_code.py
|
vijaykumawat256/Prompt-Summarization
| 0
|
12778975
|
def fisHex(name):
| 1.0625
| 1
|
backend/phonebook/employees/views.py
|
unmade/phonebook
| 0
|
12778976
|
from rest_framework import generics
from .models import Employee
from .serializers import EmployeeSerializer
class EmployeeListAPIView(generics.ListAPIView):
queryset = Employee.objects.select_name().select_job().prefetch_contacts().prefetch_secretaries()
serializer_class = EmployeeSerializer
search_fields = ('firstname__name', 'patronymic__name', 'surname__name')
filter_fields = ('company', 'center', 'division', 'phones__number', 'emails__email')
class EmployeeRetrieveAPIView(generics.RetrieveAPIView):
queryset = Employee.objects.select_name().select_job().prefetch_contacts().prefetch_secretaries()
serializer_class = EmployeeSerializer
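# Hedged usage sketch (an assumption, not part of this repository): these views
# are typically exposed in a urls.py along the following lines. Route paths and
# names are illustrative; django.urls.path requires Django >= 2.0.
from django.urls import path

urlpatterns = [
    path('employees/', EmployeeListAPIView.as_view(), name='employee-list'),
    path('employees/<int:pk>/', EmployeeRetrieveAPIView.as_view(), name='employee-detail'),
]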
| 2.140625
| 2
|
upscale/api/keys.py
|
nl5887/upscale
| 1
|
12778977
|
<reponame>nl5887/upscale<filename>upscale/api/keys.py
import sys, getopt, os
import argparse
import shlex
import yaml
import git
import shutil
import subprocess
from jinja2 import Template
import tempfile
import base64
import logging
from sqlalchemy.sql import exists
from sqlalchemy.sql import and_, or_, not_
from upscale import config
config = config.config
from upscale.db.model import (Session, Namespace, Project, Key)
def get(namespace_arg, application_arg):
session = Session()
namespace = session.query(Namespace).filter(Namespace.name==namespace_arg).one()
application = namespace.projects.filter(Project.name == application_arg).one()
key = application.key
return (key.public_key)
def add(namespace_arg, name_arg, public_arg):
session = Session()
namespace = session.query(Namespace).filter(Namespace.name==namespace_arg).one()
if (namespace.keys.filter(and_(Key.name==name_arg, Key.active==True)).first()):
raise Exception('Key already exists')
if (namespace.keys.filter(and_(Key.public==public_arg, Key.active==True)).first()):
raise Exception('Key already exists')
# validate public key
values = public_arg.split()
if (len(values)==2):
data = base64.decodestring(values[1])
if (data[4:11] != values[0]):
raise Exception("Invalid ssh key")
elif (len(values)==3):
data = base64.decodestring(values[1])
if (data[4:11] != values[0]):
raise Exception("Invalid ssh key")
else:
raise Exception("Invalid ssh key")
key = Key()
key.name = name_arg
key.public = public_arg
key.active = True
namespace.keys.append(key)
session.commit()
# update git repository for namespace
update(namespace)
def delete(namespace_arg, name_arg, ):
session = Session()
namespace = session.query(Namespace).filter(Namespace.name==namespace_arg).one()
key = namespace.keys.filter(and_(Key.name==name_arg, Key.active==True)).first()
if not key:
raise Exception('Key not found.')
key.active=False
session.commit()
update(namespace)
def list(namespace_arg):
session = Session()
namespace = session.query(Namespace).filter(Namespace.name==namespace_arg).one()
return (namespace.keys.filter(Key.active==True))
# should be actually using a celery task on right (git) host
def update(namespace):
import subprocess
from jinja2 import Template
home = os.path.join(config['data'], namespace.name, 'home')
s = subprocess.Popen(['su', '-s', '/bin/sh', namespace.name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, )
logging.debug(s.communicate(Template(
"""
# remove existing keys
echo "{{ config['public-key'] }} " > {{home}}/.ssh/authorized_keys
# recreate
{% for key in keys %}
echo "{{ key.public }}" >> {{home}}/.ssh/authorized_keys
{% endfor %}
""").render(namespace=namespace, keys=namespace.keys.filter(Key.active==True), config=config, home=home )))
| 2.28125
| 2
|
scarletio/utils/compact.py
|
HuyaneMatsu/scarletio
| 3
|
12778978
|
<filename>scarletio/utils/compact.py
__all__ = ()
# Test for pypy bug:
# https://foss.heptapod.net/pypy/pypy/issues/3239
class dummy_init_tester:
def __new__(cls, value):
return object.__new__(cls)
__init__ = object.__init__
try:
dummy_init_tester(None)
except TypeError:
NEEDS_DUMMY_INIT = True
else:
NEEDS_DUMMY_INIT = False
del dummy_init_tester
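# Hedged illustration (an assumption, not part of scarletio): a class that
# overrides `__new__` can consume the flag like this to stay compatible with
# the pypy bug probed above.
class _dummy_init_example:
    def __new__(cls, value):
        self = object.__new__(cls)
        self.value = value
        return self

    if NEEDS_DUMMY_INIT:
        def __init__(self, *positional_parameters, **keyword_parameters):
            # no-op replacement for `object.__init__` on affected pypy builds
            pass
    else:
        __init__ = object.__init__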
| 1.976563
| 2
|
ncbi_taxonomy/gi_to_taxon.py
|
dacuevas/bioinformatics
| 0
|
12778979
|
<reponame>dacuevas/bioinformatics
#!/usr/local/bin/python3
# gi_to_taxon.py
# Collect taxonomy information for given GI numbers
#
# Author: <NAME> (<EMAIL>)
# Created on 07 Aug 2017
# Updated on 08 Aug 2017
from __future__ import print_function, absolute_import, division
import sys
import os
import time
import datetime
import argparse
import taxon
###############################################################################
# FUNCTION DEFINITIONS
###############################################################################
def timestamp():
"""
Return time stamp.
"""
t = time.time()
fmt = '[%Y-%m-%d %H:%M:%S]'
return datetime.datetime.fromtimestamp(t).strftime(fmt)
def print_status(msg, end='\n'):
"""
Print status message.
"""
print('{} {}'.format(timestamp(), msg), file=sys.stderr, end=end)
sys.stderr.flush()
def check_file(file_path, directory_path=None):
"""
Check if file exists.
"""
if directory_path:
file_path = os.path.join(directory_path, file_path)
return os.path.isfile(file_path)
def exit_script(num=1):
"""
Exit script.
"""
sys.exit(num)
###############################################################################
# ARGUMENT PARSING
###############################################################################
desc = 'Collect taxonomy information for GI numbers'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('gifile', help='Input file of GI numbers. Gi numbers '
'should be second value on each line')
parser.add_argument('outdir', help='Output directory')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose output')
args = parser.parse_args()
# Check that input file exists
if not os.path.isfile(args.gifile):
print(args.gifile, 'does not exist', file=sys.stderr)
parser.print_usage()
exit_script()
# Check that output directory exists
if not os.path.isdir(args.outdir):
print(args.outdir, 'does not exist', file=sys.stderr)
parser.print_usage()
exit_script()
# Global variables
gi_file = args.gifile
out_dir = args.outdir
vbs = args.verbose
###############################################################################
# LOAD INPUT FILE
###############################################################################
# Create log file
log = open(os.path.join(args.outdir, 'log.txt'), 'w', buffering=1)
log.write(timestamp() + ' Starting script\n')
gi_counts = {} # Hold gi numbers and counts
# Read in GI file
print_status('Loading input file')
with open(args.gifile) as f:
for li, l in enumerate(f, start=1):
if vbs:
print('Reading entry', li, end='\r', file=sys.stderr)
gi = l.split()[1].split('|')[1]
if gi in gi_counts:
gi_counts[gi] += 1
else:
gi_counts[gi] = 1
print_status('Finished input file')
print_status('Loaded {} unique GIs'.format(len(gi_counts)))
###############################################################################
# LOAD NCBI TAXONOMY FILES
###############################################################################
print_status('Loading NCBI taxonomy files')
print_status('Loading GI => TAXID database')
gi_to_taxid = taxon.readGiTaxId('nucl')
print_status('GI => TAXID database loaded')
print_status('Converting GIs to TAXIDs')
taxids = {}
for i, gi in enumerate(gi_counts, start=1):
if vbs:
print('Converted', i, 'out of ', len(gi_counts), 'GI values',
end='\r', file=sys.stderr)
try:
tid = gi_to_taxid[gi]
taxids[gi] = tid
except KeyError:
msg = 'GI ' + gi + ' not found in gi_taxid file'
log.write(msg + '\n')
continue
print_status('GI to TAXID conversion complete')
# gi_to_taxid.clear() # Clear to remove from memory
print_status('Loading TAXID => NAME database')
names, blastnames = taxon.readNames()
# Not using blastnames currently, so deleting
blastnames.clear()
print_status('TAXID => NAME database loaded')
print_status('Loading taxonomy info database')
taxa = taxon.readTaxa()
print_status('Taxonomy info database loaded')
###############################################################################
# CONNECT GI TO TAXONOMY INFO
###############################################################################
all_data = {} # Holds all data for output
num_taxid = len(taxids)
print_status('Gathering taxonomy information')
for ti, tax_id in enumerate(taxids.values(), start=1):
if vbs:
print('Working on tax ID ', ti, 'out of ', num_taxid,
end='\r', file=sys.stderr)
try:
curr_node = taxa[tax_id]
except KeyError:
msg = 'TAXID ' + tax_id + ' not found in nodes file'
log.write(msg + '\n')
continue
start_tid = tax_id
all_data[start_tid] = {'species': None,
'genus': None,
'family': None,
'order': None,
'class': None}
# Find all taxonomy hierarchy for this tax id
# Loop until Phylum is reached. Phylum is right above Class
# End at rank 1 in case
while curr_node.rank != 'phylum' and curr_node.parent != 1:
curr_name = ''
try:
curr_name = names[curr_node.taxid].name
except KeyError:
msg = 'TAXID ' + curr_node.taxid + ' not found in names file'
log.write(msg + '\n')
# Set name
all_data[start_tid][curr_node.rank] = curr_name
# Get parent and repeat
curr_node = taxa[curr_node.parent]
print_status('Completed taxonomy information')
###############################################################################
# OUTPUT
###############################################################################
out_file = os.path.join(out_dir, 'tax_info.tsv')
print_status('Creating output file ' + out_file)
with open(out_file, 'w') as f:
# Header info
f.write('gi\tcount\tspecies\tgenus\tfamily\torder\tclass\n')
for gi, tax_id in taxids.items():
count = str(gi_counts[gi])
species = all_data[tax_id]['species']
genus = all_data[tax_id]['genus']
family = all_data[tax_id]['family']
order = all_data[tax_id]['order']
clss = all_data[tax_id]['class']
# Check if any are None; set to some default value
default = ''
if species is None:
species = default
if genus is None:
genus = default
if family is None:
family = default
if order is None:
order = default
if clss is None:
clss = default
f.write('\t'.join([gi, count, species, genus, family, order, clss]) +
'\n')
log.write(timestamp() + ' Script complete\n')
log.close()
print_status('Script complete!')
| 2.234375
| 2
|
bookorbooks/country/api/views/country_views.py
|
talhakoylu/SummerInternshipBackend
| 1
|
12778980
|
<reponame>talhakoylu/SummerInternshipBackend
from rest_framework.generics import ListAPIView, RetrieveAPIView
from country.models import Country
from country.api.serializers import CountrySerializer, CountryDetailWithCitySerializer
class CountryListAPIView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
class CountryDetailAPIView(RetrieveAPIView):
queryset = Country.objects.all()
serializer_class = CountryDetailWithCitySerializer
lookup_url_kwarg = 'code'
lookup_field = 'code__iexact'
| 2.234375
| 2
|
src/deephaven_ib/_internal/short_rates.py
|
deephaven-examples/deephaven-ib
| 2
|
12778981
|
<reponame>deephaven-examples/deephaven-ib<filename>src/deephaven_ib/_internal/short_rates.py<gh_stars>1-10
"""Functionality for working with short rates."""
import ftplib
import html
import tempfile
from deephaven import read_csv
from deephaven.table import Table
class IBFtpWriter:
"""Writer for downloading text pipe-separated-value files from the IB FTP site.
Closing the writer causes the temporary file containing the data to be deleted.
"""
header: str
source: str
file: tempfile.NamedTemporaryFile
def __init__(self):
self.header = None
self.source = None
self.file = tempfile.NamedTemporaryFile(mode="w", suffix=".psv")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def file_name(self) -> str:
"""Name of the temporary file."""
return self.file.name
def flush(self) -> None:
"""Flush writes to the temporary file."""
self.file.flush()
def close(self) -> None:
"""Close the temporary file. This makes the temporary file unavailable."""
self.file.close()
def write(self, line: str) -> None:
"""Write a line to the temporary file."""
if line.startswith("#BOF") or line.startswith("#EOF"):
return
line = html.unescape(line)
# REMOVE TRAILING "|" that breaks CSV parser
# https://github.com/deephaven/deephaven-core/issues/1800
if line.endswith("|"):
line = line[:-1]
if line.startswith("#"):
line = f"Source|{line[1:]}"
if self.header is None:
self.header = line
self.file.write(f"{line}\n")
elif self.header != line:
raise Exception(f"Mismatched headers: {self.header} {line}")
else:
return
else:
self.file.write(f"{self.source}|{line}\n")
def load_short_rates() -> Table:
"""Downloads the short rates from the IB FTP site and returns them as a table."""
host: str = "ftp3.interactivebrokers.com"
user: str = "shortstock"
with ftplib.FTP(host=host, user=user) as ftp, IBFtpWriter() as p:
try:
files = ftp.nlst("*.txt")
for file in files:
p.source = file[:-4]
res = ftp.retrlines(f'RETR {file}', p.write)
if not res.startswith('226 Transfer complete'):
raise Exception(f"FTP download failed: {user}@{host} {file} {res}")
except ftplib.all_errors as e:
print('FTP error:', e)
p.flush()
return read_csv(p.file_name(), delimiter="|") \
.rename_columns([
"Sym=SYM",
"Currency=CUR",
"Name=NAME",
"Contract=CON",
"RebateRate=REBATERATE",
"FeeRate=FEERATE",
"Available=AVAILABLE"])
| 3.140625
| 3
|
esteganography/fileManipulation.py
|
giovaninppc/MC920
| 1
|
12778982
|
<reponame>giovaninppc/MC920
import numpy as np
from skimage import io
def openImage(path: str, gray: bool = False):
return io.imread(path, as_gray = gray)
def saveImage(path: str, img):
io.imsave(path, img)
def openTextFile(path):
file = open(path, 'r')
txt = file.read()
file.close()
return txt
def writeTextFile(path, text):
file = open(path, 'w+')
file.write(text)
file.close()
| 2.78125
| 3
|
ceph-plugins/check_ceph_health.py
|
DeltaBG/icinga2-plugins
| 2
|
12778983
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2016 SWITCH http://www.switch.ch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import re
import json
__version__ = '1.7.0'
# default ceph values
CEPH_ADM_COMMAND = '/usr/sbin/cephadm'
CEPH_COMMAND = '/usr/bin/ceph'
# nagios exit code
STATUS_OK = 0
STATUS_WARNING = 1
STATUS_ERROR = 2
STATUS_UNKNOWN = 3
def main():
# parse args
parser = argparse.ArgumentParser(description="'ceph health' nagios plugin.")
parser.add_argument('-e','--exe', help='ceph executable [%s]' % CEPH_COMMAND)
parser.add_argument('-A','--admexe', help='cephadm executable [%s]' % CEPH_ADM_COMMAND)
parser.add_argument('--cluster', help='ceph cluster name')
parser.add_argument('-c','--conf', help='alternative ceph conf file')
parser.add_argument('-m','--monaddress', help='ceph monitor address[:port]')
parser.add_argument('-i','--id', help='ceph client id')
parser.add_argument('-n','--name', help='ceph client name')
parser.add_argument('-k','--keyring', help='ceph client keyring file')
parser.add_argument('--check', help='regexp of which check(s) to check (luminous+) '
"Can be inverted, e.g. '^((?!(PG_DEGRADED|OBJECT_MISPLACED)$).)*$'")
parser.add_argument('-w','--whitelist', help='whitelist regexp for ceph health warnings')
parser.add_argument('-d','--detail', help="exec 'ceph health detail'", action='store_true')
parser.add_argument('-V','--version', help='show version and exit', action='store_true')
parser.add_argument('-a','--cephadm', help='uses cephadm to execute the command', action='store_true')
parser.add_argument('-s','--skip-muted', help='skip muted checks', action='store_true')
args = parser.parse_args()
# validate args
cephadm_exec = args.admexe if args.admexe else CEPH_ADM_COMMAND
ceph_exec = args.exe if args.exe else CEPH_COMMAND
if args.cephadm:
if not os.path.exists(cephadm_exec):
print("ERROR: cephadm executable '%s' doesn't exist" % cephadm_exec)
return STATUS_UNKNOWN
else:
if not os.path.exists(ceph_exec):
print("ERROR: ceph executable '%s' doesn't exist" % ceph_exec)
return STATUS_UNKNOWN
if args.version:
print('version %s' % __version__)
return STATUS_OK
if args.conf and not os.path.exists(args.conf):
print("ERROR: ceph conf file '%s' doesn't exist" % args.conf)
return STATUS_UNKNOWN
if args.keyring and not os.path.exists(args.keyring):
print("ERROR: keyring file '%s' doesn't exist" % args.keyring)
return STATUS_UNKNOWN
# build command
ceph_health = [ceph_exec]
if args.cephadm:
# Prepend the command with the cephadm binary and the shell command
ceph_health = [cephadm_exec, 'shell'] + ceph_health
if args.monaddress:
ceph_health.append('-m')
ceph_health.append(args.monaddress)
if args.cluster:
ceph_health.append('--cluster')
ceph_health.append(args.cluster)
if args.conf:
ceph_health.append('-c')
ceph_health.append(args.conf)
if args.id:
ceph_health.append('--id')
ceph_health.append(args.id)
if args.name:
ceph_health.append('--name')
ceph_health.append(args.name)
if args.keyring:
ceph_health.append('--keyring')
ceph_health.append(args.keyring)
ceph_health.append('health')
if args.detail:
ceph_health.append('detail')
ceph_health.append('--format')
ceph_health.append('json')
#print(ceph_health)
# exec command
p = subprocess.Popen(ceph_health,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output, err = p.communicate()
try:
output = json.loads(output)
except ValueError:
output = dict()
# parse output
# print "output:", output
#print "err:", err
if output:
ret = STATUS_OK
msg = ""
extended = []
if 'checks' in output:
#luminous
for check,status in output['checks'].items():
# skip check if not selected
if args.check and not re.search(args.check, check):
continue
if args.skip_muted and ('muted' in status and status['muted']):
continue
check_detail = "%s( %s )" % (check, status['summary']['message'])
if status["severity"] == "HEALTH_ERR":
extended.append(msg)
msg = "CRITICAL: %s" % check_detail
ret = STATUS_ERROR
continue
if args.whitelist and re.search(args.whitelist,status['summary']['message']):
continue
check_msg = "WARNING: %s" % check_detail
if not msg:
msg = check_msg
ret = STATUS_WARNING
else:
extended.append(check_msg)
else:
#pre-luminous
for status in output["summary"]:
if status != "HEALTH_OK":
if status == "HEALTH_ERROR":
msg = "CRITICAL: %s" % status['summary']
ret = STATUS_ERROR
continue
if args.whitelist and re.search(args.whitelist,status['summary']):
continue
if not msg:
msg = "WARNING: %s" % status['summary']
ret = STATUS_WARNING
else:
extended.append("WARNING: %s" % status['summary'])
if msg:
print(msg)
else:
print("HEALTH OK")
if extended: print('\n'.join(extended))
return ret
elif err:
# read only first line of error
one_line = err.split('\n')[0]
if '-1 ' in one_line:
idx = one_line.rfind('-1 ')
print('ERROR: %s: %s' % (ceph_exec, one_line[idx+len('-1 '):]))
else:
print(one_line)
return STATUS_UNKNOWN
if __name__ == "__main__":
sys.exit(main())
| 2.125
| 2
|
scripts/experiment_synth_nparity.py
|
accosmin/zob
| 6
|
12778984
|
from config import *
from experiment import *
# initialize experiment:
# - classification problem: predict the parity bit of binary inputs
cfg = config.config()
exp = experiment(cfg.expdir + "/synth_nparity", trials = 10)
exp.set_task(cfg.task_synth_nparity(n = 8, count = 10000))
# loss functions
exp.add_loss("logistic", cfg.loss("s-logistic"))
# trainers
epochs = 100
patience = 100
epsilon = 1e-6
for solver in cfg.batch_solvers():
exp.add_trainer("batch_{}".format(solver), cfg.batch_trainer(solver, epochs, patience, epsilon))
for solver in cfg.stoch_solvers():
exp.add_trainer("stoch_{}".format(solver), cfg.stoch_trainer(solver, epochs, patience, epsilon))
# models
output = {"name":"output","type":"affine","omaps":1,"orows":1,"ocols":1}
fc1 = {"name":"fc1","type":"affine","omaps":16,"orows":1,"ocols":1}
fc2 = {"name":"fc2","type":"affine","omaps":32,"orows":1,"ocols":1}
fc3 = {"name":"fc3","type":"affine","omaps":64,"orows":1,"ocols":1}
ac1 = {"name":"ac1","type":"act-snorm"}
ac2 = {"name":"ac2","type":"act-snorm"}
ac3 = {"name":"ac3","type":"act-snorm"}
mlp0 = {"nodes": [output], "model": []}
mlp1 = {"nodes": [fc1, ac1, output], "model": [["fc1", "ac1", "output"]]}
mlp2 = {"nodes": [fc1, ac1, fc2, ac2, output], "model": [["fc1", "ac1", "fc2", "ac2", "output"]]}
mlp3 = {"nodes": [fc1, ac1, fc2, ac2, fc3, ac3, output], "model": [["fc1", "ac1", "fc2", "ac2", "fc3", "ac3", "output"]]}
exp.add_model("mlp0", mlp0)
exp.add_model("mlp1", mlp1)
exp.add_model("mlp2", mlp2)
exp.add_model("mlp3", mlp3)
# train all configurations
exp.train_all()
# compare configurations
exp.summarize_by_trainers("stoch", "stoch_*")
exp.summarize_by_trainers("batch", "batch_*")
exp.summarize_by_trainers("all", ".*")
exp.summarize_by_models("all", ".*")
| 2.34375
| 2
|
mesh_tensorflow/transformer/vocab_embeddings.py
|
bmaier96/mesh
| 0
|
12778985
|
<filename>mesh_tensorflow/transformer/vocab_embeddings.py
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different ways to go from token ids to hidden states and states to logits."""
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class FactorizedVocabEmbedding(object):
"""Factorizes the embedding matrix with projection to a small inner dimension.
Like ALBERT (https://arxiv.org/abs/1909.11942).
Interface matches mesh_tensorflow.transformer VocabEmbedding object.
"""
def __init__(self,
mesh,
vocab_dim,
output_dim,
variable_dtype,
name,
ensemble_dim,
inner_dimension_size=gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights` with an
option to factorize the embedding matrix.
Args:
mesh: a mtf.Mesh
vocab_dim: a mtf.Dimension
output_dim: a mtf.Dimension
variable_dtype: a mtf.VariableDType
name: a string
ensemble_dim: a mtf.Dimension
inner_dimension_size: a positive integer, the size of the inner dimension
of the embedding matrix
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._inner_dim = mtf.Dimension("inner_vocab", inner_dimension_size)
self._factor1 = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=self._inner_dim,
variable_dtype=variable_dtype,
name="{}1".format(name),
ensemble_dim=ensemble_dim,
initializer=tf.random_normal_initializer(
stddev=inner_dimension_size**-0.25))
self._factor2 = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._inner_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}2".format(name),
ensemble_dim=ensemble_dim,
initializer=tf.random_normal_initializer(
stddev=inner_dimension_size**-0.25))
def ids_to_embedding(self, ids, context):
del context
tmp = mtf.gather(self._factor1, ids, self._vocab_dim)
return mtf.einsum([tmp, self._factor2], reduced_dims=[self._inner_dim])
def hidden_to_logits(self, hidden, context):
del context
hidden *= self._output_dim.size**-0.5
tmp = mtf.einsum([hidden, self._factor2], reduced_dims=[self._output_dim])
return mtf.einsum([tmp, self._factor1], reduced_dims=[self._inner_dim])
class _Cluster(object):
"""Helper class for adaptive embeddings specifying a cluster of tokens.
Essentially a wrapper around a vocab embedding for the cluster with additional
metadata so that we can apply the embedding to the actual ids and hidden
states.
"""
def __init__(self, embedding, start_token_id, end_token_id):
"""Cluster constructor.
Args:
embedding: a FactorizedVocabEmbedding or transformer.VocabEmbedding, the
vocab embedding to use for the cluster
start_token_id: an integer, the inclusive id of the first token in the
cluster
end_token_id: an integer, the exclusive id of the last token in the
cluster
"""
self._embedding = embedding
self._start_token_id = start_token_id
self._end_token_id = end_token_id
def ids_to_embedding(self, ids, context):
"""Ids to embeddings with ids not in cluster mapped to the zero vector."""
ids -= self._start_token_id
# The mtf.gather in the embedding's ids_to_embedding implementation will
# cause the one hot representations of tokens greater than cluster vocab
# dimension size to be the zero vector. Thus the embeddings for those tokens
# will be the zero vector.
ids = mtf.where(mtf.greater_equal(ids, 0), ids, self._end_token_id)
return self._embedding.ids_to_embedding(ids, context)
def hidden_to_logits(self, hidden, context):
"""Returns the logits for tokens within the cluster."""
return self._embedding.hidden_to_logits(hidden, context)
@gin.configurable
class AdaptiveVocabEmbedding(object):
"""A vocab embedding assigning variable capacity to clusters of tokens.
Similar to the adaptive input representations in this paper
(https://arxiv.org/abs/1809.10853). However, they use an adaptive softmax to
compute logits while this embedding uses a regular softmax.
The idea is to create clusters of tokens and assign different capacity to
different clusters by factoring their embedding matrices to different inner
dimensions.
The clustering can be done by word frequency with more frequent tokens getting
higher capacity. In this implementation, token ids of clusters must be
contiguous in the vocabulary.
Interface matches mesh_tensorflow.transformer VocabEmbedding object.
"""
def __init__(self,
mesh,
vocab_dim,
output_dim,
variable_dtype,
name,
ensemble_dim,
clusters=gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
The clustering parameters are specified by the `clusters` argument. It is a
list of dicts with keys "token_count" and "embedding_size". Token count
specifies the number of tokens in the cluster, and embedding size specifies
the hidden dimension size of its embedding.
For example, let's say we have a vocab size of 500k and pass as clusters:
[
{"token_count": 50000, "embedding_size": 1024},
{"token_count": 100000, "embedding_size": 256},
{"token_count": 350000, "embedding_size": 64},
]
Then tokens with ids 0 (inclusive) to 50k (exclusive) will be in the first
cluster with embedding size of 1024, tokens with ids 50k to 150k will be in
the second cluster with embedding size of 256, and tokens with ids 150k to
500k will be in the third cluster with embedding size of 64.
Args:
mesh: a mtf.Mesh
vocab_dim: a mtf.Dimension
output_dim: a mtf.Dimension
variable_dtype: a mtf.VariableDType
name: a string
ensemble_dim: a mtf.Dimension
clusters: a list(dict), specification of the clusters
Raises:
ValueError: The sum of the token counts across the clusters does not equal
the vocabulary size.
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
token_counts = [cluster["token_count"] for cluster in clusters]
if sum(token_counts) != vocab_dim.size:
raise ValueError(
"The cluster token counts {} do not sum to the vocab size {}.".format(
token_counts, vocab_dim.size))
self._clusters = []
start_token_id = 0
for i, cluster in enumerate(clusters):
token_count = cluster["token_count"]
embedding_size = cluster["embedding_size"]
cluster_vocab_dim = mtf.Dimension(vocab_dim.name, token_count)
if embedding_size == self._output_dim.size:
# In this case we don't need to up project from the embedding space to
# the model state space.
cluster_embedding = transformer.VocabEmbedding(
mesh=mesh,
vocab_dim=cluster_vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_{}".format(name, i),
ensemble_dim=ensemble_dim)
else:
cluster_embedding = FactorizedVocabEmbedding(
mesh=mesh,
vocab_dim=cluster_vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_{}".format(name, i),
ensemble_dim=ensemble_dim,
inner_dimension_size=embedding_size)
self._clusters.append(
_Cluster(
embedding=cluster_embedding,
start_token_id=start_token_id,
end_token_id=start_token_id + token_count))
start_token_id += token_count
def ids_to_embedding(self, ids, context):
# Ids not in each cluster will be mapped to the zero vector. Since clusters
# are disjoint, this sum is correct.
return sum(
cluster.ids_to_embedding(ids, context) for cluster in self._clusters)
def hidden_to_logits(self, hidden, context):
# Each cluster returns the logits for only the tokens with itself, so their
# concatenation is the full logits.
return mtf.concat(
[
cluster.hidden_to_logits(hidden, context=context)
for cluster in self._clusters
],
concat_dim_name=self._vocab_dim.name,
)
@gin.configurable
class MixtureOfSoftmaxes(object):
"""Embedding with the token distributions as a weighted mixture of softmaxes.
Expressing the token distributions in this way improves expressiveness and
enables the matrix of token probabilities given all contexts to be high rank.
The vocab embedding is the same as the default, which is just a simple
embedding.
See https://arxiv.org/pdf/1711.03953.pdf for more details.
"""
def __init__(self,
mesh: mtf.Mesh,
vocab_dim: mtf.Dimension,
output_dim: mtf.Dimension,
variable_dtype: mtf.VariableDType,
name: str,
ensemble_dim: mtf.Dimension,
num_softmaxes: int = gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
Args:
mesh: the mesh used to layout the tensors.
vocab_dim: the dimension corresponding to vocabulary.
output_dim: the dimension corresponding to the model
hidden states.
variable_dtype: the datatype information for the
variables used in the embedding tensors.
name: a name to base variable names off of.
ensemble_dim: the dimension used for ensembling.
Absolutely no guarantees that this code will work with ensembling.
num_softmaxes: a positive int, the number of components to use in the
mixture.
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._copy_output_dim = mtf.Dimension("_{}_copy".format(output_dim.name),
output_dim.size)
self._components_dim = mtf.Dimension("softmax_components", num_softmaxes)
self._embedding_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_embedding_weights".format(name),
ensemble_dim=ensemble_dim)
self._mixture_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._components_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_mixture_weights".format(name),
ensemble_dim=ensemble_dim)
self._context_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._copy_output_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_context_weights".format(name),
ensemble_dim=([ensemble_dim] if ensemble_dim else []) +
[self._components_dim])
def ids_to_embedding(self, ids: mtf.Tensor, context) -> mtf.Tensor:
del context
return mtf.gather(self._embedding_weights, ids, self._vocab_dim)
def hidden_to_logits(self, hidden: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Function called by mtf transformer to get the logits.
Note that we are taking the log of a mixture of softmaxes. The logits will
then go through a softmax. This could potentially run into numerical
stability issues. If that happens, try setting the activation_dtype to
float32.
Args:
hidden: hidden model states of the final decoder layer.
context: the context used for the call to the
transformer.
Returns:
The logits.
"""
del context
hidden *= self._output_dim.size**-0.5
component_prior_logits = mtf.einsum([hidden, self._mixture_weights],
reduced_dims=[self._output_dim])
component_contexts = mtf.einsum([
mtf.rename_dimension(hidden, self._output_dim.name,
self._copy_output_dim.name),
self._context_weights,
],
reduced_dims=[self._copy_output_dim])
component_contexts = mtf.tanh(component_contexts)
component_logits = mtf.einsum([component_contexts, self._embedding_weights],
reduced_dims=[self._output_dim])
component_prior_logits = mtf.log_softmax(
component_prior_logits, reduced_dim=self._components_dim)
component_logits = mtf.log_softmax(
component_logits, reduced_dim=self._vocab_dim)
logits = component_prior_logits + component_logits
logits = mtf.reduce_logsumexp(logits, reduced_dim=self._components_dim)
return logits
@gin.configurable
class Mixtape(object):
"""Embedding that uses Mixtape in computing logits.
Expressing the token distributions in this way improves expressiveness and
enables the matrix of token probabilities given all contexts to be high rank.
Mixtape has the advantage of added efficiency over other methods such as
mixture of softmax.
The vocab embedding is the same as the default, which is just a simple embedding.
See
https://papers.nips.cc/paper/9723-mixtape-breaking-the-softmax-bottleneck-efficiently.pdf
for more details.
"""
def __init__(self,
mesh: mtf.Mesh,
vocab_dim: mtf.Dimension,
output_dim: mtf.Dimension,
variable_dtype: mtf.VariableDType,
name: str,
ensemble_dim: mtf.Dimension,
extra_ids: int = 0,
dropout_rate: float = 0.0,
gate_embedding_size: int = gin.REQUIRED,
frequent_token_fraction: float = 0.1,
noise_std_dev: float = 0.0):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
Mixtape shares gates for low frequency tokens to improve efficiency. Since
our vocabs are sorted in decreasing order of frequency with sentinels
appended to the end, we need to do a little trick to ensure that the
sentinels are treated as high frequency. If you want to treat the sentinels
as low frequency tokens, then pass in zero for `extra_ids`.
Args:
mesh: the mesh used to layout the tensors.
vocab_dim: the dimension corresponding to vocabulary.
output_dim: the dimension corresponding to the model hidden states.
variable_dtype: the datatype information for the variables used in the
embedding tensors.
name: a name to base variable names off of.
ensemble_dim: the dimension used for ensembling. Absolutely no guarantees
that this code will work with ensembling.
extra_ids: a non-negative integer, the number of sentinels at the end of
the vocab.
dropout_rate: a float between 0 and 1, the rate to use for dropout.
gate_embedding_size: a positive integer, the size to use for embedding for
the gates. It is usually chosen to be much smaller than d_model.
frequent_token_fraction: a float between 0 and 1, what fraction of tokens
to consider as high frequency and not share gates for.
noise_std_dev: a non-negative float, the standard deviation of the
Gaussian noise to add to the pre-activation priors.
"""
self._extra_ids = extra_ids
self._dropout_rate = dropout_rate
self._noise_std_dev = noise_std_dev
self._mesh = mesh
self._vocab_dim = vocab_dim
self._frequent_vocab_dim = mtf.Dimension(
vocab_dim.name, int(frequent_token_fraction * vocab_dim.size))
self._rare_vocab_dim = mtf.Dimension(
vocab_dim.name, vocab_dim.size - self._frequent_vocab_dim.size)
self._output_dim = output_dim
self._copy_output_dim = mtf.Dimension("_{}_copy".format(output_dim.name),
output_dim.size)
self._pre_gates_dim = mtf.Dimension("gates", 3)
self._gates_dim = mtf.Dimension("gates", 4)
self._gate_embedding_dim = mtf.Dimension("gate_embedding",
gate_embedding_size)
self._embedding_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_embedding_weights".format(name),
ensemble_dim=ensemble_dim)
ensemble_dims = [ensemble_dim] if ensemble_dim else []
self._context_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._copy_output_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_context_weights".format(name),
ensemble_dim=ensemble_dims + [self._gates_dim])
self._context_weights_bias = mtf.get_variable(
mesh,
name="{}_context_weights_bias".format(name),
shape=mtf.Shape(ensemble_dims + [self._gates_dim, output_dim]),
dtype=variable_dtype,
initializer=tf.zeros_initializer())
self._prior_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._gate_embedding_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_prior_weights".format(name),
ensemble_dim=ensemble_dims + [self._pre_gates_dim])
self._prior_weights_bias = mtf.get_variable(
mesh,
name="{}_prior_weights_bias".format(name),
shape=mtf.Shape(ensemble_dims +
[self._pre_gates_dim, self._gate_embedding_dim]),
dtype=variable_dtype,
initializer=tf.zeros_initializer())
self._prior_vocab_vector = mtf.get_variable(
mesh,
name="{}_prior_vocab_vector".format(name),
shape=mtf.Shape(ensemble_dims +
[self._frequent_vocab_dim, self._gate_embedding_dim]),
dtype=variable_dtype,
initializer=tf.random_normal_initializer())
self._prior_gates_vector = mtf.get_variable(
mesh,
name="{}_prior_gates_vector".format(name),
shape=mtf.Shape(ensemble_dims + [self._pre_gates_dim, output_dim]),
dtype=variable_dtype,
initializer=tf.random_normal_initializer())
self._prior_bias = mtf.get_variable(
mesh,
name="{}_prior_bias".format(name),
shape=mtf.Shape(ensemble_dims +
[self._frequent_vocab_dim, self._pre_gates_dim]),
dtype=variable_dtype,
initializer=tf.random_normal_initializer())
def ids_to_embedding(self, ids: mtf.Tensor, context) -> mtf.Tensor:
del context
return mtf.gather(self._embedding_weights, ids, self._vocab_dim)
def _sigmoid_tree(self, tensor):
"""Create probability distribution along gates dim using a sigmoid tree."""
gamma = mtf.split(
mtf.sigmoid(tensor), self._pre_gates_dim, self._pre_gates_dim.size)
return mtf.concat([
gamma[0] * gamma[1],
gamma[0] * (1 - gamma[1]),
(1 - gamma[0]) * gamma[2],
(1 - gamma[0]) * (1 - gamma[2]),
], self._gates_dim.name)
def _dropout(self, tensor, context):
if context.train and self._dropout_rate != 0.0:
return mtf.dropout(
tensor,
1.0 - self._dropout_rate,
noise_shape=tensor.shape - context.length_dim)
return tensor
def _rearrange_sentinels(self, logits):
"""Reorder along the vocab dim so the last few tokens don't share gates."""
if not self._extra_ids:
return logits
sentinels, nonsentinels = mtf.split(
logits, self._vocab_dim,
[self._extra_ids, self._vocab_dim.size - self._extra_ids])
return mtf.concat([nonsentinels, sentinels], self._vocab_dim.name)
def hidden_to_logits(self, hidden: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Function called by mtf transformer to get the logits.
Args:
hidden: an mtf.Tensor, hidden model states of the final decoder layer.
context: a transformer.Context, the context used for the call to the
transformer.
Returns:
An mtf.Tensor, the logits.
"""
hidden *= self._output_dim.size**-0.5
component_contexts = mtf.einsum([
mtf.rename_dimension(hidden, self._output_dim.name,
self._copy_output_dim.name),
self._context_weights,
],
reduced_dims=[self._copy_output_dim])
component_contexts = mtf.tanh(component_contexts +
self._context_weights_bias)
component_logits = mtf.einsum([component_contexts, self._embedding_weights],
reduced_dims=[self._output_dim])
component_logits = self._dropout(component_logits, context)
prior_tanh = mtf.tanh(
mtf.einsum([self._prior_weights, hidden],
reduced_dims=[self._output_dim]) + self._prior_weights_bias)
prior_tanh = self._dropout(prior_tanh, context)
prior_shared_logits = mtf.einsum([self._prior_gates_vector, hidden],
reduced_dims=[self._output_dim])
prior_frequent_vocab_logits = (
mtf.einsum([self._prior_vocab_vector, prior_tanh]) +
prior_shared_logits + self._prior_bias)
prior_logits = mtf.concat([
prior_frequent_vocab_logits,
mtf.ones(
self._mesh,
mtf.Shape([self._rare_vocab_dim]),
dtype=prior_shared_logits.dtype) * prior_shared_logits
], self._vocab_dim.name)
if context.train and self._noise_std_dev != 0.0:
prior_logits += mtf.random_normal(
self._mesh, prior_logits.shape, stddev=self._noise_std_dev)
prior_proportions = self._sigmoid_tree(prior_logits)
logits = mtf.einsum([component_logits, prior_proportions],
reduced_dims=[self._gates_dim])
return self._rearrange_sentinels(logits)
| 2.0625
| 2
|
config.py
|
kia-kia/IC-MLNet
| 2
|
12778986
|
from easydict import EasyDict
D = EasyDict()
D.num_gpus = 4
D.batch_size = 24
D.epochs = 80
D.decay_epochs = 20
D.decay_rate = 0.5
D.learning_rate = 1e-3
D.input_dataset = 'ec_pf_tp_AT24_33x33_025' #'multiorigin_cf_tp_AT24_33x33_025'
D.block_type = 'nolocal2d' # nolocal2d conv2d
D.merge_type = 'add' # concat add
D.model_dir = './summary_and_ckpt/'
D.is_test = False
D.is_cross = False
D.sub_dir = 'cross/'
D.data_dir = './datasets/'
D.num_filters = 64
D.cut_dim = 16
D.input_h = 33
D.input_w = 33
D.splited_channel = 50
D.input_channel = 50
D.out_channel = 1
D.res_dense_block = 4
D.dense_block = 3
D.in_dense_layers = 4
D.enable_function = False
D.model_name_reg = "model.ckpt"
| 1.554688
| 2
|
dens_lim.py
|
wdeshazer/gt3
| 1
|
12778987
|
<reponame>wdeshazer/gt3<gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 20:24:53 2018
@author: max
"""
from __future__ import division
import numpy as np
from scipy.special import jv
import sys
from math import sqrt
import matplotlib.pyplot as plt
def calc_quadratic(a, b, c):
y1 = -b * (1 + sqrt(1 - 4 * (a * c / b ** 2))) / (2 * a)
y2 = -b * (1 - sqrt(1 - 4 * (a * c / b ** 2))) / (2 * a)
return y1, y2
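# Hedged sanity check, not part of the original script: the helper returns both
# roots of a*y**2 + b*y + c = 0, e.g. y**2 - 3*y + 2 has roots 2 and 1.
_r1, _r2 = calc_quadratic(1, -3, 2)
assert abs(_r1 - 2.0) < 1e-9 and abs(_r2 - 1.0) < 1e-9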
class DensityLimit:
def __init__(self, core, nbi):
sys.dont_write_bytecode = True
self.r = core.r[:, 0]
self.a = core.a
self.eq44(core, nbi)
pass
def eq44(self, core, nbi):
def av(X, p=self):
numerator = np.sum(p.r * X * jv(0, 5.5*p.r/p.a)*p.a/(len(p.r)-1))
denominator = np.sum(p.r * jv(0, 5.5*p.r/p.a)*p.a/(len(p.r)-1))
return numerator / denominator
ni = core.n.i[:, 0]
Ti = core.T.i.J[:, 0]
n0 = ni[0]
n_av = av(ni)
f = n0/n_av
chi = core.chi.bohm[:,0]
chi_hat = av(ni*chi)/n_av
D = 0.5
D_hat = av(ni*D)/n_av
a = core.a
g = ni/n0
fz = 0.05
Lz = core.Lz.t[:, 0]
dLzdT = core.Lz.ddT.t[:, 0]
sv_fus = core.sv.fus.dd[:, 0]
dsv_fus_dT = core.sv.fus.d_dT.dd[:, 0]
Ua = 0
H_aux = nbi.beams.D1.dPdV.v1D.W
H_ohm = 0
dHdT = 3/(2*Ti)*(H_ohm - H_aux)
dHdn = 0
nn = core.n.n.tot[:, 0]
sv_ion = core.sv.ion.st[:, 0]
dsv_ion_dT = core.sv.ion.d_dT.st[:, 0]
dSdn = nn*sv_ion
dSdT = ni*nn*dsv_ion_dT
if False:
print 'sv_ion = ', sv_ion
print 'nn = ', nn
print 'ni = ',ni
print 'Ti = ',Ti
print 'a = ',a
print 'dSdn = ',dSdn
print 'D_hat = ',D_hat
print 'dHdn = ',dHdn
print 'Ua = ',Ua
print 'sv_fus = ',sv_fus
print 'fz = ',fz
print 'Lz = ',Lz
ya = 3*av(Ti)*(av(dSdn) - (5.5/a)**2*D_hat) - av(dHdn + 2*ni*(1/4*Ua*sv_fus - fz*Lz))
yb = 3*av(ni)*(av(dSdn) - (5.5/a)**2*D_hat) + 3*av(Ti)*av(dSdT) - av(dHdT + ni**2 * (1/4*Ua*dsv_fus_dT + fz*(-dLzdT)))
yc = 3*av(ni)*av(dSdT)
if False:
print 'ya = ',ya
print 'yb = ',yb
print 'yc = ',yc
print
y1, y2 = calc_quadratic(ya, yb, yc)
if False:
print
print 'y1 = ',y1
print 'y2 = ',y2
t1y1 = chi_hat * (5.5/a)**2 * av(g) + 2 * y1 * (fz*av(g*Lz) - av(1/4*Ua*g*sv_fus))
t2y1 = 2 * av(g**2 * (1/4*Ua*dsv_fus_dT + fz*(-dLzdT)))
t3y1 = 4*(av(-dHdT)-y1*av(dHdn)) * av(g**2*(1/4*Ua*dsv_fus_dT + fz*(-dLzdT)))
t4y1 = chi_hat * (5.5/a)**2*av(g) + 2*y1*(fz*av(g*Lz) - av(1/4*Ua*g*sv_fus))**2
t1y2 = chi_hat * (5.5/a)**2 * av(g) + 2 * y2 * (fz*av(g*Lz) - av(1/4*Ua*g*sv_fus))
t2y2 = 2 * av(g**2 * (1/4*Ua*dsv_fus_dT + fz*(-dLzdT)))
t3y2 = 4*(av(-dHdT)-y2*av(dHdn)) * av(g**2*(1/4*Ua*dsv_fus_dT + fz*(-dLzdT)))
t4y2 = chi_hat * (5.5/a)**2 * av(g) + 2 * y2 * (fz * av(g * Lz) - av(1/4 * Ua * g * sv_fus))**2
if False:
print
print 't1y1 = ',t1y1
print 't2y1 = ',t2y1
print 't3y1 = ',t3y1
print 't4y1 = ',t4y1
print 't1y2 = ',t1y2
print 't2y2 = ',t2y2
print 't3y2 = ',t3y2
print 't4y2 = ',t4y2
nlim1 = (1 / f) * (t1y1 / t2y1) * (1 + sqrt(1 + t3y1 / t4y1))
nlim2 = (1 / f) * (t1y2 / t2y2) * (1 + sqrt(1 + t3y2 / t4y2))
nlim3 = (1 / f) * (t1y1 / t2y1) * (1 - sqrt(1 + t3y1 / t4y1))
nlim4 = (1 / f) * (t1y2 / t2y2) * (1 - sqrt(1 + t3y2 / t4y2))
if False:
print
print 'nlim1 = ',nlim1
print 'nlim2 = ',nlim2
print 'nlim3 = ',nlim3
print 'nlim4 = ',nlim4
print
nlim = min(i for i in [nlim1, nlim2, nlim3, nlim4] if i > 0)
if n_av > nlim:
print 'n_av = ',n_av
print 'nlim = ',nlim
print 'Disruption predicted: YES'
else:
print 'n_av = ',n_av
print 'nlim = ',nlim
print 'Disruption predicted: NO'
print
| 2.5
| 2
|
sideboard/_version.py
|
EliAndrewC/sideboard
| 0
|
12778988
|
from __future__ import unicode_literals
__version__ = '0.1.0'
| 1.085938
| 1
|
nipype/interfaces/tests/test_auto_SelectFiles.py
|
nicholsn/nipype
| 1
|
12778989
|
<gh_stars>1-10
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.io import SelectFiles
def test_SelectFiles_inputs():
input_map = dict(base_directory=dict(),
force_lists=dict(usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
raise_on_empty=dict(usedefault=True,
),
sort_filelist=dict(usedefault=True,
),
)
inputs = SelectFiles.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SelectFiles_outputs():
output_map = dict()
outputs = SelectFiles.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 2.09375
| 2
|
binary_search_tree.py
|
alexsmartens/algorithms
| 0
|
12778990
|
# This binary_search_tree.py is an implementation of a binary search tree based on the ideas from CLRS, Chapter 12
from tree_visualization import tree_visualize
class Binary_tree:
def __init__(self):
self.root = None
def node(self, key, p=None, left=None, right=None):
return {
"key": key,
"p": p,
"left": left,
"right": right
}
def tree_insert(self, key):
new_node = self.node(key)
p = None
node = self.root
while node is not None:
p = node
if new_node["key"] < node["key"]:
node = node["left"]
else:
node = node["right"]
new_node["p"] = p
if p is None:
# Tree is empty
self.root = new_node
elif new_node["key"] < p["key"]:
p["left"] = new_node
else:
p["right"] = new_node
def inorder_tree_walk(self, alternative_root="not provided"):
if alternative_root == "not provided":
root = self.root
else:
root = alternative_root
if root is not None:
self.inorder_tree_walk(root["left"])
print(str(root["key"]) + ", ", end="")
self.inorder_tree_walk(root["right"])
def tree_search(self, k):
node = self.root
while node is not None and node["key"] != k:
if k < node["key"]:
node = node["left"]
else:
node = node["right"]
return node
def tree_minimum(self, node=None):
if node is None:
node = self.root
while node["left"] is not None:
node = node["left"]
return node
def tree_maximum(self, node=None):
if node is None:
node = self.root
while node["right"] is not None:
node = node["right"]
return node
def tree_successor(self, node=None):
if node is None:
node = self.root
if node["right"] is not None:
return self.tree_minimum(node["right"])
p = node["p"]
while p is not None and p["right"] is not None and node["key"] == p["right"]["key"]:
node = p
p = node["p"]
return p
def transplant(self, u, v):
if u["p"] is None:
self.root = v
elif u["p"]["left"] is not None and u["key"] == u["p"]["left"]["key"]:
u["p"]["left"] = v
else:
u["p"]["right"] = v
if v is not None:
v["p"] = u["p"]
def tree_delete(self, k):
z = self.tree_search(k)
if z is None:
return z
if z["left"] is None:
self.transplant(z, z["right"])
elif z["right"] is None:
self.transplant(z, z["left"])
else:
y = self.tree_minimum(z["right"])
if y["p"]["key"] != z["key"]:
self.transplant(y, y["right"])
y["right"] = z["right"]
if y["right"] is not None:
y["right"]["p"] = y
self.transplant(z, y)
y["left"] = z["left"]
y["left"]["p"] = y
# Running simple examples
my_tree = Binary_tree()
my_tree.tree_insert(18)
my_tree.tree_insert(14)
my_tree.tree_insert(25)
my_tree.tree_insert(1)
my_tree.tree_insert(21)
my_tree.tree_insert(19)
my_tree.tree_insert(12)
my_tree.tree_insert(23)
my_tree.tree_insert(16)
print("my_tree.root " + str(my_tree.root))
my_tree.inorder_tree_walk()
tree_visualize(my_tree.root)
# tree_visualize(my_tree.root, True)
print("my_tree.tree_search(18)[key]: " + str(my_tree.tree_search(18)["key"]))
print("my_tree.tree_minimum()[key]: " + str(my_tree.tree_minimum()["key"]))
print("my_tree.tree_maximum()[key]: " + str(my_tree.tree_maximum()["key"]))
print("my_tree.tree_successor()[key]: " + str(my_tree.tree_successor()["key"]))
print("my_tree.tree_successor(my_tree.tree_search(1))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(1))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(16))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(16))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(18))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(18))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(12))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(12))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(21))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(21))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(23))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(23))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(25)): " + str(my_tree.tree_successor(my_tree.tree_search(25))))
tree_visualize(my_tree.root)
my_tree.tree_delete(25)
my_tree.tree_delete(14)
my_tree.tree_delete(18)
my_tree.tree_delete(1)
my_tree.tree_insert(18)
tree_visualize(my_tree.root)
| 4.15625
| 4
|
laskea/config.py
|
sthagen/laskea
| 1
|
12778991
|
<reponame>sthagen/laskea
"""Configuration API for laskea."""
import copy
import json
import os
import pathlib
import sys
from typing import Dict, List, Mapping, Tuple, no_type_check
import jmespath
import laskea
import laskea.api.jira as api
TEMPLATE_EXAMPLE = """\
{
"table": {
"column": {
"fields": [
"Key",
"Summary",
"Custom Field Name",
"Custom Field Other"
],
"field_map": {
"key": [
"key",
"key"
],
"summary": [
"summary",
"fields.summary"
],
"custom field name": [
"customfield_11501",
"fields.customfield_11501"
],
"custom field other": [
"customfield_13901",
"fields.customfield_13901[].value"
]
},
"lf_only": true,
"join_string": " <br>"
}
},
"remote": {
"is_cloud": false,
"user": "",
"token": "",
"base_url": "https://remote-jira-instance.example.com/"
},
"local": {
"markers": "[[[fill ]]] [[[end]]]",
"quiet": false,
"verbose": false,
"strict": false
},
"tabulator": {
"overview": {
"base_url": "https://example.com/metrics/",
"path": "$year$/kpi-table-$year$.json",
"years": [2022],
"matrix": [
["section", "Section", False, "L"],
["name", "Name", False, "L"],
["unit", "Unit", False, "C"],
["all", "ALL", True, "R"],
["pr1", "PR1", True, "R"],
["pr2", "PR2", True, "R"],
["pr3", "PR3", True, "R"],
["description", "Description", False, "L"]
]
},
"metrics": {
"base_url": "https://example.com/metrics/",
"paths": {
"review_effectivity": "$year$/review_effectivity/kpi-review_effectivity-per_product-report-$year$.json",
"sprint_effectivity": "$year$/sprint_effectivity/kpi-sprint_effectivity-per_product-report-$year$.json",
"task_traceability": "$year$/task_traceability/kpi-task_traceability-per_product-report-$year$.json",
},
"years": [2021, 2022],
"matrix": [
["month", "Month", False, "L"],
["all", "ALL", True, "R"],
["pr1", "PR1", True, "R"],
["pr2", "PR2", True, "R"],
["pr3", "PR3", True, "R"],
["trend_all", "±ALL", True, "R"],
["trend_pr1", "±PR1", True, "R"],
["trend_pr2", "±PR2", True, "R"],
["trend_pr3", "±PR3", True, "R"]
]
}
}
}
"""
def generate_template() -> str:
"""Return template of a well-formed JSON configuration."""
return TEMPLATE_EXAMPLE
@no_type_check
def load_configuration(configuration: Dict[str, object]) -> Dict[str, str]:
"""LaterAlligator."""
if not configuration:
print('Warning: Requested load from empty configuration', file=sys.stderr)
return {}
source_of = {}
column_fields = jmespath.search('table.column.fields[]', configuration)
if column_fields:
source_of['column_fields'] = 'config'
api.BASE_COL_FIELDS = copy.deepcopy(column_fields)
column_fields = os.getenv(f'{laskea.APP_ENV}_COL_FIELDS', '')
if column_fields:
source_of['column_fields'] = 'env'
api.BASE_COL_FIELDS = json.loads(column_fields)
field_map = jmespath.search('table.column.field_map', configuration)
if field_map:
source_of['field_map'] = 'config'
api.BASE_COL_MAPS = copy.deepcopy(field_map)
field_map = os.getenv(f'{laskea.APP_ENV}_COL_MAPS', '')
if field_map:
source_of['field_map'] = 'env'
api.BASE_COL_MAPS = json.loads(field_map)
lf_only = jmespath.search('table.column.lf_only', configuration)
if lf_only:
source_of['lf_only'] = 'config'
api.BASE_LF_ONLY = lf_only
lf_only = os.getenv(f'{laskea.APP_ENV}_LF_ONLY', '')
if lf_only:
source_of['lf_only'] = 'env'
api.BASE_LF_ONLY = lf_only
join_string = jmespath.search('table.column.join_string', configuration)
if join_string:
source_of['join_string'] = 'config'
api.BASE_JOIN_STRING = join_string
join_string = os.getenv(f'{laskea.APP_ENV}_JOIN_STRING', '')
if join_string:
source_of['join_string'] = 'env'
api.BASE_JOIN_STRING = join_string
remote_user = jmespath.search('remote.user', configuration)
if remote_user:
source_of['remote_user'] = 'config'
api.BASE_USER = remote_user
remote_user = os.getenv(f'{laskea.APP_ENV}_USER', '')
if remote_user:
source_of['remote_user'] = 'env'
api.BASE_USER = remote_user
remote_token = jmespath.search('remote.token', configuration)
if remote_token:
source_of['remote_token'] = 'config' # nosec
api.BASE_TOKEN = remote_token
remote_token = os.getenv(f'{laskea.APP_ENV}_TOKEN', '')
if remote_token:
source_of['remote_token'] = 'env' # nosec
api.BASE_TOKEN = remote_token
remote_base_url = jmespath.search('remote.base_url', configuration)
if remote_base_url:
source_of['remote_base_url'] = 'config'
api.BASE_URL = remote_base_url
remote_base_url = os.getenv(f'{laskea.APP_ENV}_BASE_URL', '')
if remote_base_url:
source_of['remote_base_url'] = 'env'
api.BASE_URL = remote_base_url
local_markers = jmespath.search('local.markers', configuration)
if local_markers:
source_of['local_markers'] = 'config'
laskea.BASE_MARKERS = local_markers
local_markers = os.getenv(f'{laskea.APP_ENV}_MARKERS', '')
if local_markers:
source_of['local_markers'] = 'env'
laskea.BASE_MARKERS = local_markers
verbose = bool(jmespath.search('local.verbose', configuration))
if verbose:
source_of['verbose'] = 'config'
laskea.DEBUG = verbose
verbose = bool(os.getenv(f'{laskea.APP_ENV}_DEBUG', ''))
if verbose:
source_of['verbose'] = 'env'
laskea.DEBUG = verbose
is_cloud = bool(jmespath.search('remote.is_cloud', configuration))
if is_cloud:
source_of['is_cloud'] = 'config'
laskea.IS_CLOUD = is_cloud
is_cloud = bool(os.getenv(f'{laskea.APP_ENV}_IS_CLOUD', ''))
if is_cloud:
source_of['is_cloud'] = 'env'
laskea.IS_CLOUD = is_cloud
strict = bool(jmespath.search('local.strict', configuration))
if strict:
source_of['strict'] = 'config'
laskea.STRICT = strict
strict = bool(os.getenv(f'{laskea.APP_ENV}_STRICT', ''))
if strict:
source_of['strict'] = 'env'
laskea.STRICT = strict
quiet = bool(jmespath.search('local.quiet', configuration))
if quiet:
source_of['quiet'] = 'config'
laskea.QUIET = quiet
if source_of['verbose'] == 'config':
laskea.DEBUG = quiet
quiet = bool(os.getenv(f'{laskea.APP_ENV}_QUIET', ''))
if quiet:
source_of['quiet'] = 'env'
laskea.QUIET = quiet
source_of['verbose'] = 'env'
laskea.DEBUG = quiet
if 'tabulator' in configuration:
laskea.TABULATOR = copy.deepcopy(configuration['tabulator'])
return source_of
@no_type_check
def discover_configuration(conf: str) -> Tuple[Dict[str, object], str]:
"""Try to retrieve the configuration following the "(explicit, local, parents, home)
first wun wins" strategy."""
configuration = None
if conf:
cp = pathlib.Path(conf)
if not cp.is_file() or not cp.stat().st_size:
            print('Given configuration path is not a file or is empty', file=sys.stderr)
sys.exit(2)
if not laskea.QUIET:
print(f'Reading configuration file {cp} as requested...', file=sys.stderr)
with cp.open() as handle:
configuration = json.load(handle)
else:
cn = laskea.DEFAULT_CONFIG_NAME
cwd = pathlib.Path.cwd().resolve()
for pp in (cwd, *cwd.parents):
cp = pp / cn
if cp.is_file() and cp.stat().st_size:
if not laskea.QUIET:
print(f'Reading from discovered configuration path {cp}', file=sys.stderr)
with cp.open() as handle:
configuration = json.load(handle)
return configuration, str(cp)
cp = pathlib.Path.home() / laskea.DEFAULT_CONFIG_NAME
if cp.is_file() and cp.stat().st_size:
if not laskea.QUIET:
print(
f'Reading configuration file {cp} from home directory at {pathlib.Path.home()} ...',
file=sys.stderr,
)
with cp.open() as handle:
configuration = json.load(handle)
return configuration, str(cp)
if not laskea.QUIET:
print(
                f'User home configuration path {cp} is not a file or is empty - ignoring configuration data',
file=sys.stderr,
)
return configuration, str(cp)
@no_type_check
def report_context(command: str, transaction_mode: str, vector: List[str]) -> None:
"""DRY."""
if laskea.QUIET:
return
print(f'Command: ({command})', file=sys.stderr)
print(f'- Transaction mode: ({transaction_mode})', file=sys.stderr)
print('Environment(variable values):', file=sys.stderr)
app_env_user = f'{laskea.APP_ENV}_USER'
app_env_token = f'{laskea.APP_ENV}_TOKEN'
app_env_base_url = f'{laskea.APP_ENV}_BASE_URL'
app_env_col_fields = f'{laskea.APP_ENV}_COL_FIELDS'
app_env_col_maps = f'{laskea.APP_ENV}_COL_MAPS'
app_env_markers = f'{laskea.APP_ENV}_MARKERS'
app_env_lf_only = f'{laskea.APP_ENV}_LF_ONLY'
app_env_join_string = f'{laskea.APP_ENV}_JOIN_STRING'
empty = ''
print(f'- {laskea.APP_ENV}_USER: ({os.getenv(app_env_user, empty)})', file=sys.stderr)
print(
f'- {laskea.APP_ENV}_TOKEN: ({laskea.FAKE_SECRET if len(os.getenv(app_env_token, empty)) else empty})',
file=sys.stderr,
)
print(f'- {laskea.APP_ENV}_BASE_URL: ({os.getenv(app_env_base_url, empty)})', file=sys.stderr)
print(f'- {laskea.APP_ENV}_COL_FIELDS: ({os.getenv(app_env_col_fields, empty)})', file=sys.stderr)
print(f'- {laskea.APP_ENV}_COL_MAPS: ({os.getenv(app_env_col_maps, empty)})', file=sys.stderr)
print(f'- {laskea.APP_ENV}_MARKERS: ({os.getenv(app_env_markers, empty)})', file=sys.stderr)
print(f'- {laskea.APP_ENV}_LF_ONLY: ({os.getenv(app_env_lf_only, empty)})', file=sys.stderr)
print(f'- {laskea.APP_ENV}_JOIN_STRING: ({os.getenv(app_env_join_string, empty)})', file=sys.stderr)
print('Effective(variable values):', file=sys.stderr)
print(f'- RemoteUser: ({api.BASE_USER})', file=sys.stderr)
print(f'- RemoteToken: ({"*" * len(api.BASE_PASS)})', file=sys.stderr)
print(f'- RemoteBaseURL: ({api.BASE_URL})', file=sys.stderr)
print(f'- ColumnFields(table): ({api.BASE_COL_FIELDS})', file=sys.stderr)
print(f'- ColumnMaps(remote->table): ({api.BASE_COL_MAPS})', file=sys.stderr)
print(f'- Markers(pattern): ({laskea.BASE_MARKERS})', file=sys.stderr)
print(f'- lf_only: ({laskea.BASE_LF_ONLY})', file=sys.stderr)
print(f'- join_string: ({laskea.BASE_JOIN_STRING})', file=sys.stderr)
print(f'- CallVector: ({vector})', file=sys.stderr)
@no_type_check
def report_sources_of_effective_configuration(source_of: Dict[str, str], header: str) -> None:
"""DRY."""
if laskea.QUIET:
return
print(header, file=sys.stderr)
print('# --- BEGIN ---', file=sys.stderr)
print(json.dumps(source_of, indent=2), file=sys.stderr)
print('# --- E N D ---', file=sys.stderr)
@no_type_check
def safe_report_configuration(configuration: Dict[str, object], header: str) -> None:
"""DRY."""
if laskea.QUIET:
return
print(header, file=sys.stderr)
print('# --- BEGIN ---', file=sys.stderr)
fake_configuration = copy.deepcopy(configuration)
if jmespath.search('remote.token', fake_configuration):
        fake_configuration['remote']['token'] = laskea.FAKE_SECRET  # noqa
print(json.dumps(fake_configuration, indent=2), file=sys.stderr)
print('# --- E N D ---', file=sys.stderr)
@no_type_check
def create_and_report_effective_configuration(header: str) -> None:
"""DRY."""
if laskea.QUIET:
return
effective = {
'table': {
'column': {
'fields': copy.deepcopy(api.BASE_COL_FIELDS),
'field_map': copy.deepcopy(api.BASE_COL_MAPS),
'lf_only': api.BASE_LF_ONLY,
'join_string': api.BASE_JOIN_STRING,
},
},
'remote': {
'is_cloud': api.BASE_IS_CLOUD,
'user': api.BASE_USER,
'token': '',
'base_url': api.BASE_URL,
},
'local': {
'markers': laskea.BASE_MARKERS,
'quiet': laskea.QUIET,
'verbose': laskea.DEBUG,
'strict': laskea.STRICT,
},
}
safe_report_configuration(effective, header)
def process(conf: str, options: Mapping[str, bool]) -> None:
"""SPOC."""
configuration, cp = discover_configuration(conf)
verbose = bool(options.get('verbose', ''))
if configuration is not None:
if laskea.DEBUG or verbose:
safe_report_configuration(configuration, f'Loaded configuration from {cp}:')
source_of = load_configuration(configuration)
if laskea.DEBUG or verbose:
report_sources_of_effective_configuration(source_of, f'Configuration source after loading from {cp}:')
if not laskea.QUIET:
print('Configuration interface combined file, environment, and commandline values!', file=sys.stderr)
create_and_report_effective_configuration(
f'Effective configuration combining {cp}, environment variables, and defaults:'
)
print(
f'INFO: Upstream JIRA instance is addressed per {"cloud" if api.BASE_IS_CLOUD else "server"} rules',
file=sys.stderr,
)
| 1.78125
| 2
|
services/database.py
|
njncalub/logistiko
| 0
|
12778992
|
<filename>services/database.py<gh_stars>0
from core import settings
from data.services import DataService
def get_database():
db = DataService(engine=settings.DATABASE_URL)
return db
db_service = get_database()
| 1.75
| 2
|
splashgen/components/CTAButton.py
|
ndejong/splashgen
| 246
|
12778993
|
from splashgen import Component
class CTAButton(Component):
def __init__(self, link: str, text: str) -> None:
self.link = link
self.text = text
def render(self) -> str:
return f'<a href="{self.link}" class="btn btn-primary btn-lg px-4">{self.text}</a>'
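# A minimal usage sketch (not part of the original file): it relies only on the CTAButton
# class defined above, and the link and button text below are invented for illustration.
if __name__ == "__main__":
    button = CTAButton(link="https://example.com/signup", text="Sign up")
    # Given render() above, this prints:
    # <a href="https://example.com/signup" class="btn btn-primary btn-lg px-4">Sign up</a>
    print(button.render())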
| 2.53125
| 3
|
tensorforce/core/parameters/ornstein_uhlenbeck.py
|
stheid/tensorforce
| 1
|
12778994
|
<reponame>stheid/tensorforce
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import util
from tensorforce.core.parameters import Parameter
class OrnsteinUhlenbeck(Parameter):
"""
Ornstein-Uhlenbeck process.
Args:
name (string): Module name
(<span style="color:#0000C0"><b>internal use</b></span>).
dtype ("bool" | "int" | "long" | "float"): Tensor type
(<span style="color:#0000C0"><b>internal use</b></span>).
theta (float > 0.0): Theta value
(<span style="color:#00C000"><b>default</b></span>: 0.15).
sigma (float > 0.0): Sigma value
(<span style="color:#00C000"><b>default</b></span>: 0.3).
mu (float): Mu value
(<span style="color:#00C000"><b>default</b></span>: 0.0).
absolute (bool): Absolute value
(<span style="color:#00C000"><b>default</b></span>: false).
min_value (dtype-compatible value): Lower parameter value bound
(<span style="color:#0000C0"><b>internal use</b></span>).
max_value (dtype-compatible value): Upper parameter value bound
(<span style="color:#0000C0"><b>internal use</b></span>).
summary_labels ('all' | iter[string]): Labels of summaries to record
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
"""
def __init__(
self, name, dtype, theta=0.15, sigma=0.3, mu=0.0, absolute=False, min_value=None,
max_value=None, summary_labels=None
):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.absolute = absolute
super().__init__(
name=name, dtype=dtype, min_value=min_value, max_value=max_value,
summary_labels=summary_labels
)
def min_value(self):
if self.absolute:
return util.py_dtype(dtype=self.dtype)(0.0)
else:
            return super().min_value()
def final_value(self):
return util.py_dtype(dtype=self.dtype)(self.mu)
def parameter_value(self, step):
self.process = self.add_variable(
name='process', dtype='float', shape=(), is_trainable=False, initializer=self.mu
)
delta = self.theta * (self.mu - self.process) + self.sigma * tf.random.normal(shape=())
if self.absolute:
parameter = self.process.assign(value=tf.math.abs(x=(self.process + delta)))
else:
parameter = self.process.assign_add(delta=delta)
if self.dtype != 'float':
parameter = tf.dtypes.cast(x=parameter, dtype=util.tf_dtype(dtype=self.dtype))
else:
parameter = tf.identity(input=parameter)
return parameter
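# A plain-NumPy sketch of the same discrete Ornstein-Uhlenbeck update that parameter_value()
# applies above, appended for illustration only; the step count and parameter values are
# arbitrary assumptions and nothing here is used by the class.
def _ou_update_sketch(steps=100, theta=0.15, sigma=0.3, mu=0.0, absolute=False):
    import numpy as np
    # Discrete update with unit time step: x <- x + theta * (mu - x) + sigma * N(0, 1)
    x = mu
    values = []
    for _ in range(steps):
        x = x + theta * (mu - x) + sigma * np.random.normal()
        if absolute:
            x = abs(x)
        values.append(x)
    return values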
| 2.375
| 2
|
recupero/migrations/0003_tipoprestacion_anio_update.py
|
cluster311/ggg
| 6
|
12778995
|
# Generated by Django 2.2.4 on 2019-11-03 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recupero', '0002_auto_20191103_1159'),
]
operations = [
migrations.AddField(
model_name='tipoprestacion',
name='anio_update',
field=models.PositiveIntegerField(default=2019, help_text='Si viene del nomenclador indicar de que versión es'),
),
]
| 1.546875
| 2
|
MyWatchList/views/ErrorsHandler.py
|
fgl-foundation/MovieDB
| 0
|
12778996
|
from django.shortcuts import render
def error_404(request,*args, **argv):
data = {}
return render(request, 'error_404.html', data)
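# A hedged wiring sketch (not part of the original file): Django resolves custom 404 handling
# through a module-level handler404 in the project's root URLconf; the dotted path below is
# assumed from this file's location and may not match the actual project layout.
#
#   # urls.py (project root URLconf)
#   handler404 = 'MyWatchList.views.ErrorsHandler.error_404'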
| 1.632813
| 2
|
bento/commands/tests/test_command_contexts.py
|
cournape/Bento
| 55
|
12778997
|
<reponame>cournape/Bento
from bento.commands.registries \
import \
_RegistryBase
from bento.compat.api import moves
class Test_RegistryBase(moves.unittest.TestCase):
def test_simple(self):
registry = _RegistryBase()
registry.register_category("dummy", lambda: 1)
registry.register_callback("dummy", "dummy_func", lambda: 2)
self.assertEqual(registry.callback("dummy", "dummy_func")(), 2)
self.assertEqual(registry.callback("dummy", "non_existing_dummy_func")(), 1)
def test_double_registration(self):
registry = _RegistryBase()
registry.register_category("dummy", lambda: 1)
self.assertRaises(ValueError, lambda: registry.register_category("dummy", lambda: 2))
self.assertEqual(registry.callback("dummy", "non_existing_dummy_func")(), 1)
def test_missing_category(self):
registry = _RegistryBase()
self.assertRaises(ValueError, lambda: registry.register_callback("dummy", "dummy_func", lambda: 2))
self.assertRaises(ValueError, lambda: registry.callback("dummy", "dummy_func"))
def test_default_callback(self):
registry = _RegistryBase()
registry.register_category("dummy", lambda: 1)
self.assertEqual(registry.default_callback("dummy"), 1)
self.assertRaises(ValueError, lambda: registry.default_callback("non_existing_category"))
| 2.40625
| 2
|
glassyiffpy/horni.py
|
Deltara3/glassyiffpy
| 0
|
12778998
|
import requests, random, time
from bs4 import BeautifulSoup
#These functions are what I should have used in the first place lol
def getter(url): #extracts images from a url and returns all the images as a list
try:
imglist = []
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
mydivs = (soup.find_all("div", {"class": "section-content"}))
bs = BeautifulSoup(str(mydivs), 'html.parser')
images = bs.find_all('img')
for img in images:
#print(img)
if img.has_attr('data-src'):
#print(img['data-src'])
imglist.append(img['data-src'])
else:
#print("fuck")
pass
#print(imglist, " - ---- -- -")
return imglist
except Exception as e:
print(e)
def pager(start, num=3): # fetches successive pages until at least `num` image urls are collected
    nummy = 1
    imlist = getter(start+str(nummy))
    while len(imlist) < num:
        nummy += 1
        imlist.extend(getter(start + str(nummy)))
    resultP = imlist[:num]
    return resultP
#class horni:
#Go to horny jail
# main = "https://yiff-party.com/"
def randomIMG(): # this function is an abomination and I should have used getter() and pager() instead but I'm too lazy to change it now
try:
listofimg = []
pageNUM = random.randint(5,480)
page = requests.get(f"https://yiff-party.com/page/{pageNUM}/")
soup = BeautifulSoup(page.content, 'html.parser')
mydivs = (soup.find_all("div", {"class": "section-content"}))
bs = BeautifulSoup(str(mydivs), 'html.parser')
images = bs.find_all('img')
for img in images:
#print(img)
if img.has_attr('data-src'):
#print(img['data-src'])
listofimg.append(img['data-src'])
else:
#print("fuck")
pass
result = random.choice(listofimg)
#print(result)
return result
except Exception as e:
print(e)
def newest(cat="main"): # this function is even more of an abomination and I should have used getter() and pager() instead but I'm too lazy to change it now
# It returns the newest image and only the newest image
try:
listofimg = []
if "gay" in cat:
page = requests.get("https://yiff-party.com/genre/male-male/")
elif "lesbian" in cat:
page = requests.get("https://yiff-party.com/genre/female-female/")
elif "straight" in cat:
page = requests.get("https://yiff-party.com/genre/male-female/")
elif "animated" in cat:
page = requests.get("https://yiff-party.com/category/yiff-animated/")
elif "anthro" in cat:
page = requests.get("https://yiff-party.com/genre/anthro/")
elif "feral" in cat:
page = requests.get("https://yiff-party.com/genre/feral/")
else:
page = requests.get("https://yiff-party.com/")
soup = BeautifulSoup(page.content, 'html.parser')
mydivs = (soup.find_all("div", {"class": "section-content"}))
bs = BeautifulSoup(str(mydivs), 'html.parser')
images = bs.find_all('img')
for img in images:
#print(img)
if img.has_attr('data-src'):
#print(img['data-src'])
listofimg.append(img['data-src'])
else:
#print("fuck")
pass
output = listofimg[0]
return output
except Exception as e:
print(e)
def stream(cat="main"):
if "gay" in cat:
url ="https://yiff-party.com/genre/male-male/"
elif "lesbian" in cat:
url = "https://yiff-party.com/genre/female-female/"
elif "straight" in cat:
url = "https://yiff-party.com/genre/male-female/"
elif "animated" in cat:
url = "https://yiff-party.com/category/yiff-animated/"
elif "anthro" in cat:
url = "https://yiff-party.com/genre/anthro/"
elif "feral" in cat:
url = "https://yiff-party.com/genre/feral/page/"
else:
url = "https://yiff-party.com/"
base = getter(url)
del(base[0])
while True:
face = getter(url)
if face == base:
time.sleep(600)
else:
for i in face:
if i in base:
pass
else:
yield i
base = face
time.sleep(600)
def yiff(num, cat="main"):
try:
listofimg = []
if "gay" in cat:
listofimg.append(pager("https://yiff-party.com/genre/male-male/page/", num))
elif "lesbian" in cat:
listofimg.append(pager("https://yiff-party.com/genre/female-female/page/", num))
elif "straight" in cat:
listofimg.append(pager("https://yiff-party.com/genre/male-female/page/", num))
elif "animated" in cat:
listofimg.append(pager("https://yiff-party.com/category/yiff-animated/page/", num))
elif "anthro" in cat:
listofimg.append(pager("https://yiff-party.com/genre/anthro/page/", num))
elif "feral" in cat:
listofimg.append(pager("https://yiff-party.com/genre/feral/page/", num))
else:
listofimg.append(pager("https://yiff-party.com/page/", num))
return(listofimg)
except Exception as e:
print(e)
def help():
print("""Welcome to the horniest python package every written!
This code is designed to help you interact with yiff-party.com without having to without having to write your own code. It can pull your chosen number of the latest images from any of the 6 categories. It can pull a random image from any category and it also provide's a live feature called 'stream' which allows you to iterate over subbmissions as they are uploaded to the website!
Usage:
print(horni.randomIMG())
> result will be a random image url
    print(horni.newest("gay"))
    > result will be the newest image url in the 'gay' category.
    You can input any of the six categories or 'main' for the main page which includes all categories
(gay/lesbian/straight/animated/anthro/feral/main)
for image in horni.yiff(50,"anthro"):
print(image)
>this will return a list of 50 images in the anthro category
for image in horni.stream("main"):
print(image)
    >This loop will run forever, printing out the image urls as they are uploaded to the site.
This code was originally written by Glass-Paramedic for qweter1006
:)
""")
| 2.828125
| 3
|
mundo3-EstruturasCompostas/072-NumeroPorExtenso.py
|
jonasht/CursoEmVideo-CursoDePython3
| 0
|
12778999
|
<reponame>jonasht/CursoEmVideo-CursoDePython3<filename>mundo3-EstruturasCompostas/072-NumeroPorExtenso.py
# Python Exercise 072:
# Create a program that has a tuple completely filled with the numbers written out in words, from zero to twenty.
# The program must read a number from the keyboard (between 0 and 20) and display it written out in words.
n = -1
numero = ('Zero', 'um', 'dois', 'tres', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze',
'doze', 'treze', 'quatorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')
while True:
n = int(input('digite um numero [0:20]:'))
if 0 <= n <=20:
break
print('o numero deve ser de 0 a vinte somente')
print('=-'*25)
print(f'O Numero digitado foi {numero[n]}')
| 3.96875
| 4
|
senic_hub/backend/tests/test_setup_config.py
|
neelotpalnag/senic-hub
| 2
|
12779000
|
from unittest import mock
from pytest import fixture
@fixture
def url(route_url):
return route_url('configuration')
@mock.patch('senic_hub.backend.commands.supervisor.program_status')
@mock.patch('senic_hub.backend.commands.supervisor.start_program')
@mock.patch('senic_hub.backend.views.config.sleep')
@mock.patch('senic_hub.backend.views.config.stop_program')
def test_setup_create_config_returns_200(
stop_program_mock, sleep_mock, start_program_mock, program_status_mock,
url, browser):
program_status_mock.return_value = 'STOPPED'
assert browser.post_json(url, {}, status=200)
stop_program_mock.assert_called_once_with('device_discovery')
start_program_mock.assert_has_calls([
mock.call('nuimo_app')
])
@mock.patch('senic_hub.backend.views.config.subprocess.Popen')
def test_setup_delete_config_returns_200_and_creates_files(
subprocess_mock, url, browser):
assert browser.delete(url, status=200)
subprocess_mock.assert_called_with(['/usr/bin/senic_hub_factory_reset'])
| 2.203125
| 2
|
print_pdfs_dynamic_website.py
|
hhalaby/web-crawling-automation
| 2
|
12779001
|
<reponame>hhalaby/web-crawling-automation<filename>print_pdfs_dynamic_website.py
import os.path
import random
import string
import time
import ait
import pyautogui
from selenium import webdriver
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
USERNAME = ''
PASSWORD = ''
URL = ''
SAVE_PATH = ''
COOKIES_BANNER_XPATH = ''
USERNAME_TEXTBOX_ID = ''
PASSWORD_TEXTBOX_ID = ''
LOGIN_BUTTON_ID = ''
BROWSE_BUTTON_1_ID = ''
SELECT_LIST_ID = ''
ITEMS_OF_INTEREST = []
SUB_ITEMS_OF_INTEREST = []
MATCHING_CSS_SELECTOR_FOR_SUB_ITEMS_OF_INTEREST = "[id*='POS_']"
MATCHING_CSS_SELECTOR_FOR_LIST_LEAVES = "[id*='LEAF_']"
SKIP_OVERWRITE_EXISTING = True
def wait():
time.sleep(random.uniform(0.3, 0.6))
class Crawl:
def __init__(self):
chrome_options = Options()
# chrome_options.add_argument("--disable-extensions")
# chrome_options.add_argument("--disable-gpu")
# chrome_options.add_argument("--disable-notifications")
chrome_options.add_argument('--kiosk-printing')
# chrome_options.add_argument('--disable-print-preview')
# chrome_options.add_experimental_option('prefs', prefs)
chrome_options.add_argument("--user-agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ("
"KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36 Edg/93.0.961.52'")
self.driver = webdriver.Chrome("chromedriver_v94.exe", options=chrome_options)
# self.driver.maximize_window()
self.vars = {}
self.username = USERNAME
self.password = PASSWORD
self.url_login = URL
self.items_of_interest = ITEMS_OF_INTEREST
self.sub_items_of_interest = SUB_ITEMS_OF_INTEREST
self.save_path = SAVE_PATH
self.valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
# self.driver.execute_script("window.alert = function() {};")
def teardown_method(self):
self.driver.quit()
self.driver.refresh()
def login(self):
self.driver.get(url=self.url_login)
wait()
self.driver.find_element(By.XPATH, COOKIES_BANNER_XPATH).click()
wait()
username = self.driver.find_element(By.ID, USERNAME_TEXTBOX_ID)
username.send_keys(self.username)
wait()
password = self.driver.find_element(By.ID, PASSWORD_TEXTBOX_ID)
        password.send_keys(self.password)
wait()
self.driver.find_element(By.ID, LOGIN_BUTTON_ID).click()
def print_asset(self):
wait()
self.driver.find_element(By.ID, BROWSE_BUTTON_1_ID).click()
wait()
select_list = Select(self.driver.find_element(By.ID, SELECT_LIST_ID))
for item in self.items_of_interest:
select_list.select_by_visible_text(item)
wait()
print("Clicking Sub-items")
            self.sub_items_list = self.driver.find_elements(By.CSS_SELECTOR,
                                                            MATCHING_CSS_SELECTOR_FOR_SUB_ITEMS_OF_INTEREST)
print(f'number of sub items 1st round: {len(self.sub_items_list)}')
self.sub_items_already_clicked = []
for sub_item in self.sub_items_list:
if sub_item.text not in self.sub_items_of_interest:
self.sub_items_already_clicked.append(sub_item)
print(f'number of clicked sub items 1st round: {len(self.sub_items_already_clicked)}')
while len(self.sub_items_list) != len(self.sub_items_already_clicked):
for sub_item in self.sub_items_list:
if sub_item not in self.sub_items_already_clicked:
# time.sleep(0.5)
attempts = 0
while attempts < 3:
try:
time.sleep(0.1)
sub_item.click()
break
except StaleElementReferenceException as e:
print(e)
print(f'Stale Element: {sub_item}')
time.sleep(0.1)
except ElementClickInterceptedException as e:
print(e)
time.sleep(0.1)
attempts += 1
self.sub_items_already_clicked.append(sub_item)
time.sleep(0.1)
                self.sub_items_list = self.driver.find_elements(By.CSS_SELECTOR,
                                                                MATCHING_CSS_SELECTOR_FOR_SUB_ITEMS_OF_INTEREST)
print(f'# of sub items: {len(self.sub_items_list)}')
print(f'# of clicked sub items: {len(self.sub_items_already_clicked)}')
print("Finished clicking sub items")
            leaf_list = self.driver.find_elements(By.CSS_SELECTOR, MATCHING_CSS_SELECTOR_FOR_LIST_LEAVES)
print(f'number of leaves: {len(leaf_list)}')
for leaf in leaf_list:
time.sleep(0.5)
attempts = 0
while attempts < 3:
try:
leaf.click()
break
except ElementClickInterceptedException as e:
print(e)
time.sleep(0.1)
except UnexpectedAlertPresentException as e:
print(e)
break
except StaleElementReferenceException as e:
print(e)
print(f'Stale Element: {leaf}')
attempts += 1
time.sleep(0.5)
print(f'Getting name of leaf # {leaf_list.index(leaf)}')
name = item + '_'
try:
name += self.driver.find_element(By.ID, 'titleId').text
except UnexpectedAlertPresentException as e:
print(e)
time.sleep(0.5)
ait.press('\n')
continue
name = "".join(x for x in name if x in self.valid_chars)
                name = name.replace(' ', '_')
path = self.save_path + name + '.pdf'
# if item exists skip save
if SKIP_OVERWRITE_EXISTING and os.path.isfile(path):
continue
print(f'Printing leaf # {leaf_list.index(leaf)}')
self.driver.find_element(By.CLASS_NAME, 'printbtn').click()
time.sleep(0.1)
                attempts = 0
                while attempts < 3:
                    try:
                        self.driver.switch_to.window(self.driver.window_handles[1])
                        break
                    except IndexError as e:
                        print(e)
                        attempts += 1
                        time.sleep(0.5)
self.driver.execute_script('window.print();')
time.sleep(0.5)
pyautogui.typewrite(name)
time.sleep(0.5)
ait.press('\n')
self.driver.close()
time.sleep(0.2)
                self.driver.switch_to.window(self.driver.window_handles[0])
# start_time = time.time()
test = Crawl()
test.login()
test.print_asset()
# elapsed_time = (time.time() - start_time)/60
# print(f'{elapsed_time:.1f} minutes')
| 2.671875
| 3
|
python/redmine.py
|
Y05H1/rtv
| 0
|
12779002
|
<filename>python/redmine.py
# -*- coding: utf-8 -*-
import json
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
class RedmineAnalyzer(object):
def __init__(self, rc):
self.rc = rc
def _get_id(self, path='', list_name='', key='', name=''):
ret = self.rc.get(path=path)
lists = json.loads(ret.text)
for l in lists[list_name]:
if l[key] == name:
return l['id']
return -1
def _get_list(self, path, list_name, key):
ret = self.rc.get(path=path)
lists = json.loads(ret.text)
ret_list = []
for l in lists[list_name]:
ret_list.append(l[key])
return ret_list
def get_user_id(self, name=''):
return self._get_id(path='redmine/users.json', list_name='users', key='login', name=name)
def get_user_list(self):
return self._get_list(path='redmine/users.json', list_name='users', key='login')
def get_project_id(self, name=''):
return self._get_id(path='redmine/projects.json', list_name='projects', key='name', name=name)
def get_status_id(self, name=''):
return self._get_id(path='redmine/issue_statuses.json', list_name='issue_statuses', key='name', name=name)
def get_status_list(self):
return self._get_list(path='redmine/issue_statuses.json', list_name='issue_statuses', key='name')
def get_version_id(self, name=''):
return self._get_id(path='redmine/projects/1/versions.json', list_name='versions', key='name', name=name)
def get_versions_list(self):
ret = self.rc.get(path='redmine/projects/1/versions.json')
lists = json.loads(ret.text)
ret_list = []
for l in lists['versions']:
if ( l['status'] == 'open' ):
ret_list.append(l['name'])
return ret_list
def get_roadmap_status(self, versions_list):
roadmap_list = []
for version in versions_list:
vid = self.get_version_id(name=version)
query = 'fixed_version_id=' + str(vid) + '&status_id=*&limit=100'
ret = self.rc.get(path='redmine/issues.json?'+query)
tickets = json.loads(ret.text)
fixed_total = 0
persons = []
closed_tickets = 0
for t in tickets['issues']:
fixed_total += t['done_ratio']
p = t['assigned_to']['name'].split(" ")[1].lower() if t.has_key('assigned_to') else ''
persons.append(p)
if ( t.has_key('closed_on') ):
closed_tickets += 1
status = round(( fixed_total / ( 100.0 * tickets['total_count'] ) ) * 100.0,1) if tickets['total_count'] != 0 else 0
total = tickets['total_count']
persons = list(set(persons))
ret = self.rc.get(path='redmine/versions/'+ str(vid) +'.json')
tickets = json.loads(ret.text)
due_date = tickets['version']['due_date'] if tickets['version'].has_key('due_date') else '2999-12-31'
name = tickets['version']['name']
roadmap_list.append( {'version': version, 'status': status, 'author': persons,
'total': total, 'unclosed': total - closed_tickets, 'due': due_date} )
return sorted(roadmap_list, key=lambda x:x['due'], reverse=False)
def get_tickets_list(self, query):
ret = self.rc.get(path='redmine/issues.json?limit=100&'+query)
tickets = json.loads(ret.text)
rslt = []
for t in tickets['issues']:
assign = t['assigned_to']['name'].split(" ")[1].lower() if t.has_key('assigned_to') else ''
author = t['author']['name'].split(" ")[1].lower() if t.has_key('author') else ''
created = t['created_on'].split("T")[0]
updated = t['updated_on'].split("T")[0]
estimated = t['estimated_hours'] if t.has_key('estimated_hours') else 0
term = 0
if ( t.has_key('closed_on') ):
closed_date = dateutil.parser.parse(t['closed_on'])
created_date = dateutil.parser.parse(t['created_on'])
dt = closed_date - created_date
term = dt.days + 1
rslt.append({'id':t['id'], 'subject':t['subject'], 'assign':assign, 'author':author, 'term':term, 'status':t['status']['name'],
'tracker':t['tracker']['name'], 'project':t['project']['name'], 'created':created, 'updated':updated,
'estimated':estimated, 'done_ratio':t['done_ratio']})
return rslt
def get_tickets_count_per_user(self, user_list, status_list, query=''):
rslt = {}
for user in user_list:
uid = self.get_user_id(name=user)
rslt[user] = {}
total_tickets = 0
for status in status_list:
sid = self.get_status_id(name=status)
q = query + '&assigned_to_id=' + str(uid) + '&status_id=' + str(sid)
ret = self.rc.get(path='redmine/issues.json?'+q)
lists = json.loads(ret.text)
rslt[user][status] = lists['total_count']
total_tickets += lists['total_count']
rslt[user]['Total'] = total_tickets
return rslt
def get_tickets_count(self, query):
ret = self.rc.get(path='redmine/issues.json?'+query)
tickets = json.loads(ret.text)
return tickets['total_count']
def get_tickets_transition_per_tracker(self, search=[], query='', label=''):
data = []
for s in search:
created_tickets = self.get_tickets_count(query='status_id=*&created_on=<='+s+'&'+query)
closed_tickets = self.get_tickets_count(query='status_id=*&closed_on=<='+s+'&'+query)
data.append(created_tickets - closed_tickets)
return {'label': label, 'data': data}
def get_tickets_lifetime(self, max=1000, query=''):
q = 'status_id=closed&limit=100&' + query
pages = self.get_tickets_count(query=q)/100 + 1
tickets = []
#print pages
for p in range(pages):
ret = self.rc.get(path='redmine/issues.json?'+'page='+str(p+1)+'&'+q)
ret2 = json.loads(ret.text)
tickets.extend(ret2['issues'])
term = []
term_map = {}
for t in tickets:
closed_date = dateutil.parser.parse(t['closed_on'])
created_date = dateutil.parser.parse(t['created_on'])
dt = closed_date - created_date
day = dt.days + 1 if dt.days < max else max
term.append(day)
if not term_map.has_key(day):
term_map[day] = 0
term_map[day] += 1
if len(term) == 0:
return term_map
np_data = np.array(term)
for i in range(1,np.max(np_data)+1):
if not term_map.has_key(i):
term_map[i] = 0
return term_map
def get_updated_ticket_count(self, search_month=[], status='', query='', label=''):
rslt = []
for m in search_month:
nextmonth = m + relativedelta(months=1)
q = 'status_id=*&'+status+'=><'+m.strftime('%Y-%m-%d')+'|'+nextmonth.strftime('%Y-%m-%d') + '&' + query
rslt.append(self.get_tickets_count(query=q))
return {'label':label, 'data':rslt}
def get_updated_tickets_transition(self, term):
sm = self.get_term(term=term)
monthly_created_tickets = self.get_updated_ticket_count(search_month=sm, status='created_on', label='created')
monthly_closed_tickets = self.get_updated_ticket_count(search_month=sm, status='closed_on', label='closed')
monthly_updated_tickets_data = []
monthly_updated_tickets_data.append(monthly_created_tickets)
monthly_updated_tickets_data.append(monthly_closed_tickets)
monthly_updated_tickets_desc = self.get_term(term=term, mode='%Y-%m')
return (monthly_updated_tickets_desc, monthly_updated_tickets_data)
def get_reporter_list(self, user_list=[], query=''):
rslt = {}
for user in user_list:
uid = self.get_user_id(name=user)
q = query + '&author_id=' + str(uid) + '&status_id=*'
rslt[user] = self.get_tickets_count(query=q)
return rslt
def set_date(self, day, val=1):
if val==0:
val = 1
return date(day.year, day.month, val)
def get_term(self, term, inc=False, per=1, mode=''):
search = []
ignores = [1]
today = date.today()
target = self.set_date(today, 1)
for i in range(term):
target2 = target - relativedelta(months=i)
for j in range(per):
target3 = self.set_date(target2, 30/per*j)
ignores.append( 30/per*j )
if today < target3:
continue
target4 = target3.strftime(mode) if mode != '' else target3
search.append( target4 )
if inc and today.day not in ignores:
td = today.strftime(mode) if mode != '' else today
search.append(td)
search.sort()
#search.reverse()
return search
def get_gantt_data(self, query):
ret = self.rc.get(path='redmine/issues.json?limit=100&'+query)
tickets = json.loads(ret.text)
pdict = {}
plist = []
tlist = []
rslt = {}
for t in tickets['issues']:
group = t['project']['id']
if not pdict.has_key(group):
plist.append({'id':group, 'content':t['project']['name']})
pdict[group] = True
type = 'point' if ( t['status']['id'] == 1 ) else 'range'
start = t['created_on'].split("T")[0]
end = t['closed_on'] if ( t.has_key('closed_on') ) else t['updated_on'].split("T")[0]
end = dateutil.parser.parse(end) + relativedelta(days=1)
classname = ''
if ( t['status']['id'] == 1 ):
classname = 'created'
elif ( t.has_key('closed_on') ):
classname = 'closed'
else:
classname = 'updated'
content = '#' + str(t['id']) + ' ' +t['subject']
tlist.append({'group':group, 'content':content, 'start':start, 'end':end.strftime('%Y-%m-%d'), 'className':classname, 'type':type});
#return sorted(roadmap_list, key=lambda x:x['due'], reverse=False)
#plist = sorted(plist, key=lambda x:x['id'])
rslt['groups'] = sorted(plist, key=lambda x:x['id'])
rslt['items'] = tlist
return rslt
def get_gantt_term(self, before=10, after=3):
end_date = date.today() + relativedelta(days=after)
start_date = end_date - relativedelta(days=before)
return [start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d')]
def get_trackers_list(self):
return self._get_list(path='redmine/trackers.json', list_name='trackers', key='name')
def get_trackers_id(self, name=''):
return self._get_id(path='redmine/trackers.json', list_name='trackers', key='id', name=name)
def get_tickets_transition(self, search, trackers):
ret = []
tr = self.get_tickets_transition_per_tracker(search=search, label='total')
ret.append(tr)
for tracker in trackers:
id = self.get_trackers_id(name=tracker)
tr = self.get_tickets_transition_per_tracker(search=search, label=tracker, query='tracker_id='+str(id))
ret.append(tr)
return ret
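# A minimal usage sketch appended for illustration (not part of the original file).
# RedmineAnalyzer only relies on a client exposing get(path=...) that returns a response
# object with a .text attribute containing JSON; the requests-based client, base URL and
# API key below are invented assumptions.
class _SketchClient(object):
    def __init__(self, base_url, api_key):
        self.base_url = base_url
        self.api_key = api_key
    def get(self, path=''):
        import requests
        # Redmine's REST API accepts the key via the X-Redmine-API-Key header.
        return requests.get(self.base_url + path, headers={'X-Redmine-API-Key': self.api_key})
# Example (kept as a comment so nothing runs on import):
#   analyzer = RedmineAnalyzer(_SketchClient('https://redmine.example.com/', 'api-key'))
#   print(analyzer.get_user_list())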
| 2.578125
| 3
|
lib/checkpoint.py
|
kaolin/rigor
| 5
|
12779003
|
""" Saved progress for Rigor, allowing users to resume long-running runs that fail part way through """
import rigor.logger
import tempfile
import time
import cPickle as pickle
import os
kPickleProtocol = pickle.HIGHEST_PROTOCOL
class Checkpoint(object):
""" Saved checkpoint results, loaded from a file """
def __init__(self, timestamp, parameters, seen, results):
"""
:param timestamp: when the checkpoint file was first created
:param dict parameters: parameters used in the original run
:param set(int) seen: a set of IDs that have been checkpointed already, to make it easy to skip duplicate evaluations
:param results: the saved results
"""
self.timestamp = timestamp
self.parameters = parameters
self.seen = seen
self.results = results
class NullCheckpointer(object):
"""
Does nothing. Used in place of actual checkpointer to make code simpler in :py:class:`~rigor.Runner`.
"""
def log(self, id, entry, flush=True):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
pass
class Checkpointer(object):
"""
Saves progress of algorithm evaluations in a file, to be loaded later
if there is an error and the evaluation is interrupted
"""
def __init__(self, parameters, checkpoint_file=None, delete_on_success=True):
"""
:param parameters: parameters that were used to generate checkpointed results
:param file checkpoint_file: open file to use for checkpointing, or :py:class:`None` to create a new one
:param delete_on_success: whether to delete the checkpoint file when closed
"""
self._logger = rigor.logger.get_logger('.'.join((__name__, self.__class__.__name__)))
self._parameters = parameters
if not checkpoint_file:
self._file = tempfile.NamedTemporaryFile('wb', prefix='rigor-checkpoint-', delete=False)
self.filename = self._file.name
else:
self._file = checkpoint_file
self.filename = checkpoint_file.name
self._delete = delete_on_success
self._write_header()
self._logger.info("Checkpoint filename is {}".format(self.filename))
def _write_header(self):
""" Writes an identifying header to the checkpoint file """
pickle.dump(time.time(), self._file, kPickleProtocol)
pickle.dump(self._parameters, self._file, kPickleProtocol)
def log(self, id, entry, flush=True):
"""
Logs a checkpoint entry to the file
:param id: The percept ID
:param entry: structured data returned from Algorithm.apply()
:param flush: whether to flush file output with each log entry (safer, but slower if processing each percept is very quick)
"""
pickle.dump((id, entry), self._file, kPickleProtocol)
if flush:
self._file.flush()
def close(self, success):
"""
Closes the checkpoint file.
:param success: whether operation finished successfully
"""
self._file.close()
if self._delete and success:
os.remove(self.filename)
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
self.close(exc_type is None)
@classmethod
def read_header(cls, checkpoint_file):
"""
Loads just the header portion of a checkpoint file.
:param checkpoint_file: file open in :py:const:`rb` mode containing a checkpoint
"""
timestamp = pickle.load(checkpoint_file)
parameters = pickle.load(checkpoint_file)
return timestamp, parameters
@classmethod
def resume(cls, old_file, new_file=None, delete_on_success=True):
"""
Resumes from an existing checkpoint file.
:param file old_file: existing open checkpoint file to resume from
:param file new_file: open new checkpoint file (must be different from the old file)
:param delete_on_success: whether to delete the new checkpoint file when closed, if successful
:return: (Checkpointer object, Checkpoint object)
"""
timestamp, parameters = cls.read_header(old_file)
checkpointer = cls(parameters, new_file, delete_on_success)
entries = list()
seen = set()
while True:
try:
id, entry = pickle.load(old_file)
seen.add(id)
entries.append(entry)
checkpointer.log(id, entry, flush=False)
except EOFError:
break
return checkpointer, Checkpoint(timestamp, parameters, seen, entries)
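# A hedged usage sketch appended for illustration (not part of the original module). The
# parameter dict and log entries are invented; only the Checkpointer/Checkpoint API defined
# above is assumed.
if __name__ == '__main__':
    # Keep the checkpoint file around so it can be resumed from afterwards
    with Checkpointer({'algorithm': 'example'}, delete_on_success=False) as checkpointer:
        for percept_id, entry in ((1, {'score': 0.9}), (2, {'score': 0.4})):
            checkpointer.log(percept_id, entry)
        saved_filename = checkpointer.filename
    # Resume later: previously seen IDs can be skipped and saved results reused
    with open(saved_filename, 'rb') as old_file:
        resumed, checkpoint = Checkpointer.resume(old_file)
        print('already processed: {0}'.format(sorted(checkpoint.seen)))
        resumed.close(success=True)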
| 2.953125
| 3
|
deep_learning_from_scratch/4_activation_function.py
|
wdxtub/deep-learning-note
| 37
|
12779004
|
<reponame>wdxtub/deep-learning-note
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# Step function
def step_function0(x):
if x > 0:
return 1
return 0
# Implementation that supports NumPy arrays
def step_function(x):
return np.array(x > 0, dtype=np.int)
# A quick test
x = np.array([-1.0, 1.0, 2.0])
print(x)
print(step_function(x))
# Plot the graph
x = np.arange(-5.0, 5.0, 0.1)
y = step_function(x)
title = 'step function'
# Sigmoid function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(0, x)
# Broadcasting
t = np.array([1.0, 2.0, 3.0])
print(1+t)
print(1/t)
# Plot the graphs
x = np.arange(-5.0, 5.0, 0.1)
y1 = sigmoid(x)
title1 = 'sigmoid'
title2 = 'relu'
y2 = relu(x)
plt.plot(x, y1, label=title1)
plt.plot(x, y, label=title, linestyle='--')
plt.plot(x, y2, label=title2)
plt.ylim(-0.1, 5.1)
plt.title('activation functions')
plt.legend()
plt.show()
| 3.390625
| 3
|
bin/grad_desc.py
|
FrankSchaust/SC2CombatPredictor
| 0
|
12779005
|
#!/usr/bin/env python3
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import keras
import numpy as np
import tensorflow as tf
from absl import app
from data import simulation_pb2
from bin.load_batch import load_batch
from bin.data_visualization import map_id_to_units_race
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d.axes3d import Axes3D
from bin.util import *
from lib.unit_constants import *
from lib.config import REPLAYS_PARSED_DIR, REPLAY_DIR, REPO_DIR, STANDARD_VERSION
def main():
learning_rates = [0.05]
beta1 = [0.9, 0.7, 0.6, 0.5]
beta2 = [0.95, 0.85, 0.75, 0.65]
epsilon = 1e-06
training_epochs = 50
trackAcc = []
trackAccs = []
trackCost = []
trackCosts = []
for learning_rate in learning_rates:
for b1 in beta1:
for b2 in beta2:
print("Run gradient descent with Learning Rate: %-6s --- Beta1: %-4s --- Beta2: %-5s" % (learning_rate, b1, b2))
trackAcc, trackCost = run_grad_desc(learning_rate, training_epochs, b1, b2, epsilon)
trackAccs.append(trackAcc)
trackCosts.append(trackCost)
create_graphs(trackAccs, trackCosts, learning_rates, training_epochs, beta1, beta2)
def run_grad_desc(learning_rate, training_epochs, b1, b2, eps):
# Graph Input
x = tf.placeholder(tf.float32, [None, 94])
y = tf.placeholder(tf.float32, [None, 3])
# initialize weight and bias
W_1 = tf.Variable(tf.truncated_normal([94, 94]))
W_2 = tf.Variable(tf.truncated_normal([94, 47]))
W_3 = tf.Variable(tf.truncated_normal([47, 3]))
# Construct Model
x_ = tf.matmul(x, W_1)
x_ = tf.matmul(x_, W_2)
logits = tf.matmul(x_, W_3)
pred = tf.nn.softmax(logits)
# minimize error using cross entropy
# cross_entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.contrib.opt.NadamOptimizer(learning_rate, b1, b2, eps).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
trackAcc = []
trackCost = []
with tf.Session() as s:
s.run(init)
xs_train, xs_test, ys_train, ys_test = load(version=['1_3d'], file_version='multiple')
# loop to train for specified number of epochs
for epoch in range(training_epochs):
_, c = s.run([optimizer, cost], feed_dict={x: xs_train, y: ys_train})
acc = s.run(accuracy, feed_dict={x: xs_test, y: ys_test})
# track accuracy to display in graph when algorithm finished
trackCost.append(c)
trackAcc.append(acc*100)
#print('Epoch:', '%04d' % (epoch+1), "completed with an accuracy of:", "{:.3f}".format(acc), "cost=", "{:.9f}".format(c))
# evaluate accuary when all training steps are completed
print ("Accuracy:", accuracy.eval({x: xs_test, y: ys_test}))
trackAcc = np.array(trackAcc)
return trackAcc, trackCost
def create_graphs(trackAcc, trackCost, learning_rate, training_epochs, b1, b2):
# create graph
fig = plt.figure(figsize=plt.figaspect(4.))
# add plot
ax = fig.add_subplot(2,1,1)
# create array that corresponds to the number of training steps as x-axis
# y-axis is the accuracy in %
a = np.arange(1, training_epochs+1)
b = np.arange(1, training_epochs+1)
ax.set_title('Test Accuracy')
i = 0
bx = fig.add_subplot(2,1,2)
bx.set_title('Cost by Epoch')
m = ''
col = ''
sign = ['.', '-', ',', 'o']
cols = ['b','g', 'y', 'r']
for lr in learning_rate:
for n in range(len(learning_rate)):
if n > 3:
m = '^'
break
if lr == learning_rate[n]:
m = sign[n]
for b_ in b1:
for j in range(len(b1)):
if j > 3:
col = 'k'+m
break
if b_ == b1[j]:
                    col = cols[j]+m
for b_2 in b2:
ax.plot(a, trackAcc[i], col, label=i)
bx.plot(b, trackCost[i], col, label=i)
i += 1
plt.show()
# function to load the csv data and construct the input array that is returned
# the input array is a vector with one entry per possible unit id
# 94 entries in total, 47 per combat party
def load(version = STANDARD_VERSION, file_version='single'):
match_arr = []
# load file(s) depending on desired input and version number
if file_version == 'multiple':
replay_log_files = []
replay_log_files = build_file_array('logs', version)
i = 0
#print('Looking over', len(replay_log_files), 'files')
while i < len(replay_log_files):
match_arr.append(read_csv(replay_log_files[i]))
i = i + 1
if file_version == 'single':
file_path = os.path.join(REPO_DIR, 'all_csv_from_version_' + version + '.csv')
match_arr = read_summed_up_csv(file_path, 250)
unit_vector_A = np.zeros(47)
unit_vector_B = np.zeros(47)
xs = []
ys = []
#print(match_arr[0], match_arr[3])
n=0
typeerror = 0
for match in match_arr:
# if str(match['winner_code']) == str(2):
# continue
        # reset the per-match unit vectors, otherwise counts accumulate across matches
        unit_vector_A = np.zeros(47)
        unit_vector_B = np.zeros(47)
        try:
for id in match['team_A']:
id = int(id.replace("'", ""))
if id == 85:
continue
if id == 9:
unit_vector_A[0] += 1
if id == 12 or id == 13 or id == 15 or id == 17:
unit_vector_A[1] += 1
if id == 104:
unit_vector_A[2] += 1
if id == 105:
unit_vector_A[3] += 1
if id == 106:
unit_vector_A[4] += 1
if id == 107:
unit_vector_A[5] += 1
if id == 108:
unit_vector_A[6] += 1
if id == 109:
unit_vector_A[7] += 1
if id == 110:
unit_vector_A[8] += 1
if id == 111:
unit_vector_A[9] += 1
if id == 112:
unit_vector_A[10] += 1
if id == 114:
unit_vector_A[11] += 1
if id == 126:
unit_vector_A[12] += 1
if id == 129:
unit_vector_A[13] += 1
if id == 289:
unit_vector_A[14] += 1
if id == 499:
unit_vector_A[15] += 1
if id == 4:
unit_vector_A[16] += 1
if id == 10:
unit_vector_A[17] += 1
if id == 73:
unit_vector_A[18] += 1
if id == 74:
unit_vector_A[19] += 1
if id == 75:
unit_vector_A[20] += 1
if id == 76:
unit_vector_A[21] += 1
if id == 77:
unit_vector_A[22] += 1
if id == 78:
unit_vector_A[23] += 1
if id == 79:
unit_vector_A[24] += 1
if id == 80:
unit_vector_A[25] += 1
if id == 82:
unit_vector_A[26] += 1
if id == 83:
unit_vector_A[27] += 1
if id == 84:
unit_vector_A[28] += 1
if id == 141:
unit_vector_A[29] += 1
if id == 311:
unit_vector_A[30] += 1
if id == 694:
unit_vector_A[31] += 1
if id == 32 or id == 33:
unit_vector_A[32] += 1
if id == 34 or id == 35:
unit_vector_A[33] += 1
if id == 45:
unit_vector_A[34] += 1
if id == 48:
unit_vector_A[35] += 1
if id == 49:
unit_vector_A[36] += 1
if id == 50:
unit_vector_A[37] += 1
if id == 51:
unit_vector_A[38] += 1
if id == 52:
unit_vector_A[39] += 1
if id == 53 or id == 484:
unit_vector_A[40] += 1
if id == 54:
unit_vector_A[41] += 1
if id == 55:
unit_vector_A[42] += 1
if id == 56:
unit_vector_A[43] += 1
if id == 57:
unit_vector_A[44] += 1
if id == 268:
unit_vector_A[45] += 1
if id == 692:
unit_vector_A[46] += 1
for id in match['team_B']:
id = int(id.replace("'", ""))
if id == 85:
continue
if id == 9:
unit_vector_B[0] += 1
if id == 12 or id == 13 or id == 15 or id == 17:
unit_vector_B[1] += 1
if id == 104:
unit_vector_B[2] += 1
if id == 105:
unit_vector_B[3] += 1
if id == 106:
unit_vector_B[4] += 1
if id == 107:
unit_vector_B[5] += 1
if id == 108:
unit_vector_B[6] += 1
if id == 109:
unit_vector_B[7] += 1
if id == 110:
unit_vector_B[8] += 1
if id == 111:
unit_vector_B[9] += 1
if id == 112:
unit_vector_B[10] += 1
if id == 114:
unit_vector_B[11] += 1
if id == 126:
unit_vector_B[12] += 1
if id == 129:
unit_vector_B[13] += 1
if id == 289:
unit_vector_B[14] += 1
if id == 499:
unit_vector_B[15] += 1
if id == 4:
unit_vector_B[16] += 1
if id == 10:
unit_vector_B[17] += 1
if id == 73:
unit_vector_B[18] += 1
if id == 74:
unit_vector_B[19] += 1
if id == 75:
unit_vector_B[20] += 1
if id == 76:
unit_vector_B[21] += 1
if id == 77:
unit_vector_B[22] += 1
if id == 78:
unit_vector_B[23] += 1
if id == 79:
unit_vector_B[24] += 1
if id == 80:
unit_vector_B[25] += 1
if id == 82:
unit_vector_B[26] += 1
if id == 83:
unit_vector_B[27] += 1
if id == 84:
unit_vector_B[28] += 1
if id == 141:
unit_vector_B[29] += 1
if id == 311:
unit_vector_B[30] += 1
if id == 694:
unit_vector_B[31] += 1
if id == 32 or id == 33:
unit_vector_B[32] += 1
if id == 34 or id == 35:
unit_vector_B[33] += 1
if id == 45:
unit_vector_B[34] += 1
if id == 48:
unit_vector_B[35] += 1
if id == 49:
unit_vector_B[36] += 1
if id == 50:
unit_vector_B[37] += 1
if id == 51:
unit_vector_B[38] += 1
if id == 52:
unit_vector_B[39] += 1
if id == 53 or id == 484:
unit_vector_B[40] += 1
if id == 54:
unit_vector_B[41] += 1
if id == 55:
unit_vector_B[42] += 1
if id == 56:
unit_vector_B[43] += 1
if id == 57:
unit_vector_B[44] += 1
if id == 268:
unit_vector_B[45] += 1
if id == 692:
unit_vector_B[46] += 1
unit_vector = np.append(unit_vector_A, unit_vector_B)
xs.append(unit_vector)
ys.append(int(match['winner_code']))
except TypeError:
print(id)
typeerror += 1
continue
except ZeroDivisionError:
continue
#print(typeerror)
#print(xs[0])
ys = keras.utils.to_categorical(ys, num_classes=3)
split = int(len(xs)*0.1)
    # make train / test split
xs_train = xs[:-split]
ys_train = ys[:-split]
xs_test = xs[-split:]
ys_test = ys[-split:]
return xs_train, xs_test, ys_train, ys_test
if __name__ == "__main__":
main()
| 2.34375
| 2
|
webhook.py
|
xelnagamex/conf_bot
| 0
|
12779006
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from http.server import HTTPServer,SimpleHTTPRequestHandler,CGIHTTPRequestHandler
from socketserver import BaseServer
import ssl
import json
import settings
# Workaround: there is no clean way to hand the worker object to the request
# handler, so it is read from settings inside the RequestHandler class
class RequestHandler(SimpleHTTPRequestHandler):
def __init__(self,
request,
client_address,
server):
self.worker = settings.worker
super(RequestHandler, self).__init__(
request=request,
client_address=client_address,
server=server)
def do_POST(self):
"""Serve a POST request."""
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
length = self.headers.get('content-length')
post_body = self.rfile.read(int(length))
msg = json.loads(post_body.decode("utf-8"))
self.worker.handleUpdate(msg)
def do_GET(self):
pass
class WebHook:
def __init__(self,
certfile,
keyfile,
address = '0.0.0.0',
port=8443,
RequestHandler=RequestHandler):
self.httpd = HTTPServer((address, port), RequestHandler)
self.httpd.socket = ssl.wrap_socket (self.httpd.socket,
certfile=certfile,
keyfile=keyfile,
server_side=True)
def serve(self):
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
pass
finally:
# Clean-up server (close socket, etc.)
self.httpd.server_close()
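# Usage sketch (illustrative values; assumes settings.worker exposes a
# handleUpdate(dict) method, as used in do_POST above):
#   hook = WebHook(certfile="cert.pem", keyfile="key.pem", port=8443)
#   hook.serve()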
| 2.4375
| 2
|
selenium_study/test_case/testwb_researchweibo.py
|
songxiaoshi/automation_case
| 0
|
12779007
|
<reponame>songxiaoshi/automation_case<gh_stars>0
# code = 'utf-8'
import unittest
import xlwt
from selenium import webdriver
from selenium.webdriver.common.by import By
import sys
from PO.weibopagelist import wbplist
# from selenium.webdriver.support.wait import WebDriverWait  # explicit wait
# from selenium.webdriver.support import expected_conditions as EC  # check whether the element is in a clickable state
import time
class researchWeibo(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_research(self):
driver = self.driver
url = 'https://s.weibo.com/'
driver.get(url)
time.sleep(5)
        # WebDriverWait(self.driver,10).until(EC.visibility_of_element_located(By.CSS_SELECTOR,'div[class="search-input"]>input[type="text"]'))
        # # act on the element as soon as it appears
        driver.find_element(By.CSS_SELECTOR, 'div[class="search-input"]>input[type="text"]').send_keys('<PASSWORD>')
# WebDriverWait(self.driver,10).until(EC.visibility_of_element_located(By.CLASS_NAME,'s-btn-b'))
driver.find_element(By.CLASS_NAME, 's-btn-b').click()
# WebDriverWait(self.driver,10).until(EC.visibility_of_element
# _located(By.XPATH,'// *[ @ id = "pl_feedlist_index"] / div[1] / div[1]'))
# pagelist = driver.find_element(By.XPATH,'// *[ @ id = "pl_feedlist_index"] / div[1] / div[1]').text
# p = driver.find_elements(By.CSS_SELECTOR,'div[action-type="feed_list_item"]')
p = driver.find_elements(*wbplist.pagelist)
print(p)
keyword = 'web自动化'
wb = xlwt.Workbook()
wt = wb.add_sheet(keyword)
wt.write(0, 0, '内容')
wt.write(0, 1, '发送人')
wt.write(0, 2, '发布时间')
wt.write(0, 3, '来源')
wt.write(0, 4, ' 收藏数')
wt.write(0, 5, '转发数')
wt.write(0, 6, '评论数')
wt.write(0, 7, '点赞数')
counter = 0
for pl in p:
counter += 1
# WebDriverWait(self.driver,10).until(EC.visibility_of_element_located(*wbplist.title))
# title = pl.find_element(By.CSS_SELECTOR, 'p[node-type="feed_list_content"]').text
            # the locators in wbplist are tuples; find_element expects two separate
            # arguments, so the tuple passed in is unpacked with * into those two values
title = pl.find_element(*wbplist.title).text
username = pl.find_element(*wbplist.username).text
times = pl.find_element(*wbplist.times).text
source = pl.find_element(*wbplist.source).text
coll = pl.find_element(*wbplist.coll).text
send = pl.find_element(*wbplist.send).text
            send_num = str(send).split("转发")[1]  # split the string on the "转发" (repost) label
if send_num == '':
send_num = 0
comment = pl.find_element(*wbplist.comment).text
comment_num = str(comment).split("评论")[1]
if comment_num == '':
comment_num = 0
like = pl.find_element(*wbplist.like).text
print(send_num, comment_num)
            wt.write(counter, 0, title)  # write the post content to the file
            wt.write(counter, 1, username)  # write the poster to the file
            wt.write(counter, 2, times)  # write the publish time to the file
            wt.write(counter, 3, source)  # write the source to the file
            wt.write(counter, 4, coll)  # write the favourite count to the file
            wt.write(counter, 5, send_num)  # write the repost count to the file
            wt.write(counter, 6, comment_num)  # write the comment count to the file
            wt.write(counter, 7, like)  # write the like count to the file
wb.save('weibo.xls')
"""
driver.find_element_by_css_selector('div[class="search-input"]>input[type="text"]').send_keys('web自动化')
driver.find_element_by_class_name('s-btn-b').click()
time.sleep(2)
#eles = driver.find_element_by_css_selector('div[faction-type="feed_list_item"]')
eles = driver.find_element_by_xpath('// *[ @ id = "pl_feedlist_index"] / div[1] / div[1]').text
"""
"""wb = xlwt.Workbook()
wt = wb.add_sheet(())
for ele in eles:
title = driver.find_element_by_css_selector('p[class="txt"]').text
username = driver.find_element_by_css_selector('a[class="name"]').text
print(driver.find_element_by_class_name())"""
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
686509196
| 2.546875
| 3
|
src/Python/801-900/867.TransposeMatrix.py
|
Peefy/PeefyLeetCode
| 2
|
12779008
|
<reponame>Peefy/PeefyLeetCode
class Solution:
def transpose(self, A):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
A[:] = map(list,zip(*A))
return A
if __name__ == '__main__':
solution = Solution()
print(solution.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
else:
pass
| 3.4375
| 3
|
src/koala/lattice.py
|
Imperial-CMTH/koala
| 0
|
12779009
|
<filename>src/koala/lattice.py
import numpy as np
import numpy.typing as npt
from dataclasses import dataclass, field
from functools import cached_property
import matplotlib.transforms
INVALID = np.iinfo(int).max
class LatticeException(Exception):
pass
@dataclass
class Plaquette:
"""Represents a single plaquette in a lattice. Not a list since plaquettes can have varying size.
    :param vertices: Indices corresponding to the vertices that border the plaquette. These are always organised to start from the lowest index and then go clockwise around the plaquette
:type vertices: np.ndarray[int] (n_sides)
    :param edges: Indices corresponding to the edges that border the plaquette. These are arranged to start from the lowest indexed vertex and progress clockwise.
:type edges: np.ndarray[int] (n_sides)
:param directions: Valued +1,-1 depending on whether the i'th edge points clockwise/anticlockwise around the plaquette
:type directions: np.ndarray[int] (n_sides)
    :param center: Coordinates of the center of the plaquette
    :type center: np.ndarray[float] (2)
:param n_sides: Number of sides to the plaquette
:type n_sides: int
:param adjacent_plaquettes: Indices of all the plaquettes that share an edge with this one, ordered in the same order as the plaquette edges
:type adjacent_plaquettes: np.ndarray[int] (n_sides)
"""
vertices: np.ndarray
edges: np.ndarray
directions: np.ndarray
center: np.ndarray
n_sides: int
adjacent_plaquettes: np.ndarray
@dataclass(frozen=True)
class Edges:
"""
Represents the list of edges in the lattice
:param indices: Indices of points connected by each edge.
:type indices: np.ndarray[int] (nedges, 2)
:param vectors: Vectors pointing along each edge
:type vectors: np.ndarray[float] (nedges, 2)
:param crossing: Tells you whether the edge crosses the boundary conditions, and if so, in which direction. One value for x-direction and one for y-direction
:type crossing: np.ndarray[int] (nedges, 2)
:param adjacent_plaquettes: Lists the indices of every plaquette that touches each edge
:type adjacent_plaquettes: np.ndarray[int] (nedges, 2)
"""
indices: np.ndarray
vectors: np.ndarray
crossing: np.ndarray
# adjacent_edges: np.ndarray TODO - add this feature
#a reference to the parent lattice, has no type because Lattice isn't defined yet
_parent: ... = field(default=None, repr=False)
@cached_property
def adjacent_plaquettes(self) -> np.ndarray:
self._parent.plaquettes #access lattice.plaquettes to make them generate
return self._parent._edges_adjacent_plaquettes
@dataclass(frozen=True)
class Vertices:
"""
Represents a list of vertices in the lattice
:param positions: List of the positions of every vertex
:type positions: np.ndarray[float] (nvertices, 2)
:param adjacent_edges: Lists the indices of every edge that connects to that vertex. Listed in clockwise order from the lowest index
:type adjacent_edges: list[np.ndarray] (nvertices, n_edges_per_vertex)
:param adjacent_plaquettes: Lists the indices of every plaquette that touches the vertex
:type adjacent_plaquettes: np.ndarray[int] (nvertices, 3)
"""
positions: np.ndarray
adjacent_edges: np.ndarray
# adjacent_vertices: np.ndarray TODO - add this feature
# a reference to the parent lattice, has no type because the Lattice class isn't defined yet
_parent: ... = field(default=None, repr=False)
@cached_property
def adjacent_plaquettes(self) -> np.ndarray:
self._parent.plaquettes #access lattice.plaquettes to make them generate
return self._parent._vertices_adjacent_plaquettes
class Lattice(object):
"""Data structure containing information about a lattice consisting of vertices in real space connected by undirected edges.
:param vertices: Data structure containing vertex positions, and the edges/plaquettes touching each vertex
:type vertices: Vertices
:param edges: Data structure containing indices of vertices comprising each edge, the spatial displacement vectors
corresponding to those edges, flags for edges which cross the system boundary in periodic Lattices, and the plaquettes
touching each edge.
:type edges: Edges
:param plaquettes: All of the polygons (aka plaquettes) comprising the lattice, specifying their constituent vertices, edges,
winding directions, and centers.
:type plaquettes: list[Plaquette]
"""
def __init__(
self,
vertices: npt.NDArray[np.floating],
edge_indices: npt.NDArray[np.integer],
edge_crossing: npt.NDArray[np.integer],
unit_cell = matplotlib.transforms.IdentityTransform(),
):
"""Constructor for Lattices
:param vertices: Spatial locations of lattice vertices
:type vertices: npt.NDArray[np.floating] Shape (nverts, 2)
:param edge_indices: Indices corresponding to the vertices which each edge connects
:type edge_indices: npt.NDArray[np.integer] Shape (nedges, 2)
:param edge_crossing: Flags describing which boundaries of the system each edge crosses in periodic boundary conditions.
Each entry in the final axis corresponds to a spatial dimension, 1(-1) denotes an edge crossing a boundary in the positive
(negative) direction along that dimension. 0 corresponds to no boundary crossing.
:type edge_crossing: npt.NDArray[np.integer] Shape (nedges, 2)
"""
# calculate the vector corresponding to each edge
edge_vectors = (vertices[edge_indices][:, 1] -
vertices[edge_indices][:, 0] + edge_crossing)
# calculate the list of edges adjacent to each vertex
vertex_adjacent_edges = _sorted_vertex_adjacent_edges(
vertices, edge_indices, edge_vectors)
self.vertices = Vertices(
positions=vertices,
adjacent_edges=vertex_adjacent_edges,
_parent = self,
)
self.edges = Edges(
indices=edge_indices,
vectors=edge_vectors,
crossing=edge_crossing,
_parent = self,
)
# some properties that count edges and vertices etc...
self.n_vertices = self.vertices.positions.shape[0]
self.n_edges = self.edges.indices.shape[0]
self.unit_cell = unit_cell
def __repr__(self):
return f"Lattice({self.n_vertices} vertices, {self.n_edges} edges)"
# find all the plaquettes
@cached_property
def plaquettes(self):
_plaquettes = _find_all_plaquettes(self)
# now add edge adjacency and point adjacency for plaquettes
def set_first_invalid(row, value):
index = np.where(row == INVALID)[0][0]
row[index] = value
# arrays that hold neighbouring plaquettes for edges and vertices
edges_plaquettes = np.full((self.n_edges, 2), INVALID)
vertices_plaquettes = np.full((self.n_vertices, 3), INVALID)
# set the values
for n,plaquette in enumerate(_plaquettes):
plaq_dir_index = (0.5*(1-plaquette.directions)).astype(int)
edges_plaquettes[plaquette.edges, plaq_dir_index] = n
x = vertices_plaquettes[plaquette.vertices]
np.apply_along_axis(set_first_invalid,1,x,n)
vertices_plaquettes[plaquette.vertices] = x
# Later when lattice.edges.adjacent_plaquettes or lattice.vertices.adjacent_plaquettes
        # are accessed, they are read from _vertices_adjacent_plaquettes and _edges_adjacent_plaquettes
self._vertices_adjacent_plaquettes = vertices_plaquettes
self._edges_adjacent_plaquettes = edges_plaquettes
# set the neighbouring plaquettes for every plaquette - stored in same order as plaquette edges
for n, plaquette in enumerate(_plaquettes):
edge_plaquettes = edges_plaquettes[plaquette.edges]
roll_vals = np.where(edge_plaquettes != n)[1]
other_plaquettes = edge_plaquettes[np.arange(len(roll_vals)), roll_vals]
_plaquettes[n].adjacent_plaquettes = other_plaquettes
return _plaquettes
@cached_property
def n_plaquettes(self):
return len(self.plaquettes)
def __eq__(self, other):
if not isinstance(other, self.__class__): return False
average_separation = 1 / np.sqrt(self.n_vertices)
return all([np.allclose(self.vertices.positions, other.vertices.positions, atol = average_separation / 100),
np.all(self.edges.indices == other.edges.indices),
np.all(self.edges.crossing == other.edges.crossing)])
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
#define a minimal representation for the lattice
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
if self.n_vertices <= np.iinfo(dtype).max:
edges = self.edges.indices.astype(dtype)
break
else:
raise ValueError("A lattice with > 2**64 vertices is just too much")
vertices = self.vertices.positions.astype(np.float32)
def check_fits(array, dtype): assert (np.iinfo(dtype).min <= np.min(array) and np.max(array) <= np.iinfo(dtype).max)
check_fits(self.edges.crossing, np.int8)
crossing = self.edges.crossing.astype(np.int8)
return vertices, edges, crossing
def __setstate__(self, state):
if isinstance(state, dict):
self.__dict__.update(state) #For backwards compatibility
else: #The new way to pickle just saves vertex positions, edge indices and edge crossing
self.__init__(*state)
def _sorted_vertex_adjacent_edges(
vertex_positions,
edge_indices,
edge_vectors):
"""Gives you an array where the i'th row contains the indices of the edges that connect to the i'th vertex.
The edges are always organised in clockwise order starting from 12:00, which will be handy later ;)
:param vertex_positions: List of the positions of every vertex
:type vertex_positions: np.ndarray[float] (nvertices, 2)
:param edge_indices: Indices of points connected by each edge.
:type edge_indices: np.ndarray[int] (nedges, 2)
:param edge_vectors: Vectors pointing along each edge
:type edge_vectors: np.ndarray[float] (nedges, 2)
:return: List containing the indices of the edges that connect to each point, ordered clockwise around the point.
:rtype: list[int] (nvertices, nedges_per_vertex)
"""
# sorts these lists to make sure that they always are ordered clockwise from 12:00 like on a clock face
vertex_adjacent_edges = []
for index in range(vertex_positions.shape[0]):
v_edges = np.nonzero(
(edge_indices[:, 0] == index) + (edge_indices[:, 1] == index))[0]
        # whether the chosen index is first or second in each edge's vertex pair
v_parities = []
for i, edge in enumerate(v_edges):
par = 1 if (edge_indices[edge][0] == index) else -1
v_parities.append(par)
v_parities = np.array(v_parities)
# find the angle that each vertex comes out at
v_vectors = edge_vectors[v_edges]*v_parities[:, None]
v_angles = np.arctan2(-v_vectors[:, 0], v_vectors[:, 1]) % (2*np.pi)
# reorder the indices
# [sorted(-v_angles).index(x) for x in -v_angles]
order = np.argsort(-v_angles)
edges_out = v_edges[order]
vertex_adjacent_edges.append(edges_out)
return vertex_adjacent_edges
def _find_plaquette(
starting_edge: int,
starting_direction: int,
l: Lattice):
"""Given a single edge, and a direction, this code finds the plaquette corresponding to starting in that
direction and only taking left turns. This means plaquettes are ordered anticlockwise - which amounts to going round each vertex clockwise.
:param starting_edge: Index of the edge where you start
:type starting_edge: int
:param starting_direction: Direction to take the first step. +1 means the same direction as the edge, -1 means opposite
:type starting_direction: int (+1 or -1)
:param l: Lattice to be searched for the plaquette
:type l: Lattice
:return: A plaquette object representing the found plaquette
:rtype: Plaquette
"""
edge_indices = l.edges.indices
vertex_adjacent_edges = l.vertices.adjacent_edges
s_dir_index = int(0.5*(1-starting_direction))
start_vertex = edge_indices[starting_edge, s_dir_index]
current_edge = starting_edge
current_vertex = start_vertex
current_direction = starting_direction
# print(current_vertex, current_edge ,edge_indices[current_edge], current_direction)
plaquette_edges = [starting_edge]
plaquette_vertices = [start_vertex]
plaquette_directions = [starting_direction]
valid_plaquette = True
while True:
current_vertex = edge_indices[current_edge][np.where(
np.roll(edge_indices[current_edge], 1) == current_vertex)[0][0]]
current_edge_choices = vertex_adjacent_edges[current_vertex]
current_edge = current_edge_choices[(np.where(current_edge_choices == current_edge)[
0][0] + 1) % current_edge_choices.shape[0]]
current_direction = 1 if np.where(
edge_indices[current_edge] == current_vertex)[0][0] == 0 else -1
# stop when you get back to where you started
if current_edge == starting_edge and current_direction == starting_direction:
break
# if you get trapped in a loop that doesn't include the start point - stop and return an exception
edge_dir_bundle = [[e,d] for e,d in zip (plaquette_edges, plaquette_directions)]
        cond = [current_edge, current_direction] in edge_dir_bundle[1:]
if cond:
# print(current_edge, current_direction, edge_dir_bundle)
raise LatticeException('plaquette finder is getting stuck. This usually happens if the lattice has self edges or other unexpected properties')
plaquette_edges.append(current_edge)
plaquette_vertices.append(current_vertex)
plaquette_directions.append(current_direction)
plaquette_edges = np.array(plaquette_edges)
plaquette_vertices = np.array(plaquette_vertices)
plaquette_directions = np.array(plaquette_directions)
# check --- not sure if this is necessary --- check
# check if the plaquette contains the same edge twice - if this is true then that edge is a bridge
# this means the plaquette is not legit!
# if len(np.unique(plaquette_edges)) != len(plaquette_edges):
# print('double_edge')
# valid_plaquette = False
# this bit checks if the loop crosses a PBC boundary once only - if so then it is one of the two edges of a system crossing strip plaquette
# which means that the system is in strip geometry. We discard the plaquette.
plaquette_crossings = plaquette_directions[:,None] *l.edges.crossing[plaquette_edges]
overall_crossings = np.sum(plaquette_crossings, axis= 0)
if np.sum(overall_crossings != [0,0]):
# then this plaquette is invalid
valid_plaquette = False
# form the points by adding the edge vectors to the first point - ignores boundary problems
plaquette_vectors = l.edges.vectors[plaquette_edges] * plaquette_directions[:,None]
plaquette_sums = np.cumsum(plaquette_vectors, 0)
points = l.vertices.positions[plaquette_vertices[0]]+plaquette_sums
plaquette_center = np.sum(points, 0) / (points.shape[0])%1
    # now we check if the plaquette is actually the boundary of the lattice - this happens when
# we are in open boundaries, do this by checking the winding number using the outer angles
# if they go the wrong way round we have an exterior plaquette
angs = np.arctan2(plaquette_vectors[:,0] ,plaquette_vectors[:,1])
rel_angs = angs - np.roll(angs,1)
ang = np.sum((rel_angs + np.pi) % (2*np.pi) - np.pi)
w_number = np.round(ang / (2*np.pi)).astype('int')
if w_number != -1:
valid_plaquette = False
n_sides = plaquette_edges.shape[0]
found_plaquette = Plaquette(vertices=plaquette_vertices,
edges=plaquette_edges, directions=plaquette_directions, center=plaquette_center, n_sides= n_sides, adjacent_plaquettes=None)
return found_plaquette, valid_plaquette
def _find_all_plaquettes(l: Lattice):
"""Construct a list of Plaquette objects, representing all of the polygons in the lattice.
:param l: Lattice whose plaquettes are to be found
:type l: Lattice
:return: List of all plaquettes in the lattice
:rtype: list[Plaquette]
"""
edge_indices = l.edges.indices
# have we already found a plaquette on that edge going in that direction
edges_fwd_backwd_remaining = np.ones_like(edge_indices)
plaquettes = []
for i in range(edge_indices.shape[0]):
# every edge touches at most two new plaquettes one going forward and one going backwards
if edges_fwd_backwd_remaining[i, 0] == 1:
plaq_obj, valid = _find_plaquette(
i, 1, l)
direction_index = (0.5*(1-plaq_obj.directions)).astype(int)
edges_fwd_backwd_remaining[plaq_obj.edges, direction_index] = 0
if valid:
plaquettes.append(plaq_obj)
if edges_fwd_backwd_remaining[i, 1] == 1:
plaq_obj, valid = _find_plaquette(
i, -1, l)
direction_index = (0.5*(1-plaq_obj.directions)).astype(int)
edges_fwd_backwd_remaining[plaq_obj.edges, direction_index] = 0
if valid:
plaquettes.append(plaq_obj)
return np.array(plaquettes, dtype = object)
def permute_vertices(l: Lattice, ordering: npt.NDArray[np.integer]) -> Lattice:
"""Create a new lattice with the vertex indices rearranged according to ordering,
such that new_l.vertices[i] = l.vertices[ordering[i]].
:param l: Original lattice to have vertices reordered
:type l: Lattice
:param ordering: Permutation of vertex ordering, i = ordering[i']
:type ordering: npt.NDArray[np.integer]
:return: New lattice object with permuted vertex indices
:rtype: Lattice
"""
original_verts = l.vertices
original_edges = l.edges
nverts = original_verts.positions.shape[0]
inverse_ordering = np.zeros((nverts,)).astype(int)
inverse_ordering[ordering] = np.arange(nverts).astype(int) # inverse_ordering[i] = i'
new_edges = Edges(
indices = inverse_ordering[original_edges.indices],
vectors = original_edges.vectors,
crossing = original_edges.crossing,
_parent = None
)
new_verts = original_verts.positions[ordering]
return Lattice(
vertices=new_verts,
edge_indices=new_edges.indices,
edge_crossing=new_edges.crossing
)
def cut_boundaries(l: Lattice, boundary_to_cut: list = [True,True]) -> Lattice:
"""Removes the x and/or y boundary edges of the lattice.
:param l: The lattice to cut.
:type l: Lattice
:param boundary_to_cut: whether to cut the x or y boundaries, defaults to [True,True]
:type boundary_to_cut: list[Bool], optional
:return: A new lattice with boundaries cut.
:rtype: Lattice
"""
vertices = l.vertices.positions
edges = l.edges.indices
crossing = l.edges.crossing
x_external = crossing[:,0] != 0
y_external = crossing[:,1] != 0
condx = 1-x_external*boundary_to_cut[0]
condy = 1-y_external*boundary_to_cut[1]
cond = condx* condy
internal_edge_ind = np.nonzero(cond)[0]
new_edges = edges[internal_edge_ind]
new_crossing = crossing[internal_edge_ind]
lattice_out = Lattice(
vertices,
new_edges,
new_crossing
)
return lattice_out
| 2.9375
| 3
|
utilities/CGI-pythons/surf_trajLL2.py
|
sinotec2/Focus-on-Air-Quality
| 0
|
12779010
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import cgi, os, sys
import cgitb
import tempfile as tf
import json
#paths
JSON='/Users/Data/cwb/e-service/surf_trj/sta_list.json'
TRJs={'forc':'/Users/Data/cwb/e-service/btraj_WRFnests/ftuv10.py','obsv':'/Users/Data/cwb/e-service/surf_trj/traj2kml.py'}
WEB='/Library/WebServer/Documents/'
CGI='/Library/WebServer/CGI-Executables/'
INI='/opt/local/bin/conda_ini'
NCL='/opt/anaconda3/envs/ncl_stable/bin/ncl '
MBLs={'obsv':'/Library/WebServer/Documents/taiwan/taiMarbleScale.ncl',\
'forc':'/Library/WebServer/Documents/taiwan/chnMarble.ncl'}
SED='/usr/bin/sed -ie'
pth=WEB+'trj_results/'
OUT='>>'+pth+'trj.out'
form = cgi.FieldStorage()
dirTJ={'b':'T','f':'F'} #back->true; foreward->false
nam = form.getvalue('AQSname')
try:
ist=int(nam)
except:
AQ=nam
else:
fn = open(JSON)
d_nstnam = json.load(fn)
AQ=d_nstnam[nam]
os.system('echo '+AQ+OUT)
OBF = form.getvalue("dirOF") #dirOF=obsv/forc
TRJ = TRJs[OBF]
MBL = MBLs[OBF]
DIR = form.getvalue("dirFB")
TF = dirTJ[DIR[0]]
num = form.getvalue("number")
dat = form.getvalue("date")
message='../../trj_results/'+DIR+AQ+dat+num+'.csv'
print 'Content-Type: text/html\n\n'
print open(CGI+'header.txt','r').read()
if os.path.isfile(WEB+message[6:]):
print """\
<p>The assigned KML file has been created and may be downloaded to your Downloads directory.</p>
<p>You may re-download by clicking this <a href="%s">link</a>, or...</p>
<p> submit the KML file at Google Maps or OpenStreet interface at the
<a href=http://172.16.58.3/Leaflet/docs/index.html>Leaflet</a>.</p>
<p> return to the previous page and redefine the trajectory.</p>
</body>
</html>
""" % (message+'.kml')
else:
fn=message.split('/')[-1]
title=fn.replace('.csv','').replace('trj','trj_')
mbl=MBL.split('/')[-1]
cmd ='cd '+pth+';'
cmd+='cp wait.png '+fn+'.png;'
cmd+='cd '+WEB+';'
cmd+= TRJ+' -t '+AQ+' -d '+dat+num+' -b '+TF+OUT+';'
cmd+='source '+INI+' ncl_stable '+OUT+';'
cmd+='cd '+pth+';'
cmd+='cp '+MBL+' .;'+SED+(' "s/TITLE/{:s}/g" '+mbl).format(title)+';'
cmd+= NCL+mbl+OUT+';'
cmd+='cp topo.png '+fn+'.png;'
os.system('echo "'+cmd+'">'+pth+'cmd.cs')
if 'uv10' in cmd:
os.system('sh '+pth+'cmd.cs & disown')
else:
os.system('sh '+pth+'cmd.cs ')
os.system('echo "OK 3!"'+OUT)
print """\
<p>Trajectory job has been submitted to the system. DO NOT RELOAD this page, please !!</p>
<p>The ncl_PNG download should start shortly (CWB WRF may take 5 min). If it doesn't, click
<a data-auto-download href="%s">here</a>.</p>
<p>Or you may download the KML and draw the plot by yourself; please click
<a data-auto-download href="%s">here</a>.</p>
<p>The KML may be posted on google map or OpenStreet interface:
<a href=http://172.16.58.3/Leaflet/docs/index.html>Leaflet</a>.</p>
</body>
</html>
""" % (message+'.png' ,message+'.kml')
sys.stdout.close()
sys.exit('fine!')
| 2.109375
| 2
|
integration_tests/module_detection.py
|
pieperm/IARC-2020
| 12
|
12779011
|
<filename>integration_tests/module_detection.py
#!/usr/bin/env python3
"""Integration test for module detection at the mast"""
import os
import sys
parent_dir = os.path.dirname(os.path.abspath(__file__))
gparent_dir = os.path.dirname(parent_dir)
ggparent_dir = os.path.dirname(gparent_dir)
gggparent_dir = os.path.dirname(ggparent_dir)
sys.path += [parent_dir, gparent_dir, ggparent_dir, gggparent_dir]
import logging
from flight_manager import FlightManager
from flight.state_settings import StateSettings
if __name__ == "__main__":
try:
state_settings: StateSettings = StateSettings()
state_settings.enable_early_laps(True)
state_settings.set_number_of_early_laps(1)
state_settings.enable_to_mast(True)
state_settings.enable_module_detection(True)
state_settings.set_run_title("Module Detection Test")
state_settings.set_run_description("Early laps, then going to the mast, then running module detection")
flight_manager: FlightManager = FlightManager(state_settings)
flight_manager.main()
except:
logging.exception("Unfixable error detected")
| 2.21875
| 2
|
kornia/contrib/__init__.py
|
lyhyl/kornia
| 0
|
12779012
|
<reponame>lyhyl/kornia
from kornia.contrib.connected_components import connected_components
from kornia.contrib.extract_patches import extract_tensor_patches, ExtractTensorPatches
__all__ = ["connected_components", "extract_tensor_patches", "ExtractTensorPatches"]
| 1.351563
| 1
|
machine-learning/diabetes.py
|
m-01101101/product-analytics
| 0
|
12779013
|
"""
The PIMA Indians dataset obtained from the UCI Machine Learning Repository
The goal is to predict whether or not a given female patient will contract diabetes
based on features such as BMI, age, and number of pregnancies
It is a binary classification problem
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import randint
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
classification_report,
confusion_matrix,
roc_curve,
roc_auc_score,
)
from sklearn.model_selection import (
train_test_split,
cross_val_score,
GridSearchCV,
RandomizedSearchCV,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
plt.style.use("ggplot")
_df = pd.read_csv("datasets/diabetes.csv")
df = _df.dropna()
X = df.drop("Outcome", axis=1).values
# X = X.reshape(-1, 8)
y = df.Outcome.values
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=42
)
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("k-NN performance")
# must always be (test, prediction)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# the support column gives the number of samples of the true response that lie in each class
#### logistic regression ####
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print("logistic regression performance")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# predict_proba returns an array with two columns: each column contains the probabilities for the respective target values.
# we choose the second column, the one with index 1,
# that is, the probabilities of the predicted labels being '1'
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
_ = plt.plot([0, 1], [0, 1], "k--")
_ = plt.plot(fpr, tpr)
_ = plt.xlabel("False Positive Rate")
_ = plt.ylabel("True Positive Rate")
_ = plt.title("ROC Curve")
plt.show()
print(f"AUC: {roc_auc_score(y_test, y_pred_prob)}")
cv_auc = cross_val_score(logreg, X, y, cv=5, scoring="roc_auc")
#### hyperparameter tuning ####
# Setup the hyperparameter grid
c_space = np.logspace(-5, 8, 15)
param_grid = {"C": c_space} # hyperparameter to tune and values to test
logreg = LogisticRegression()
logreg_cv = GridSearchCV(
logreg, param_grid, cv=5
) # instantiate the GridSearchCV object
logreg_cv.fit(X, y) # fits in place
print(
f"""Tuned Logistic Regression Parameters: {logreg_cv.best_params_}
Best score is {logreg_cv.best_score_}"""
)
#### random tuning ####
tree = DecisionTreeClassifier()
param_dist = {
"max_depth": [3, None],
"max_features": randint(1, 9),
"min_samples_leaf": randint(1, 9),
"criterion": ["gini", "entropy"],
}
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
tree_cv.fit(X, y)
print(
f"""Tuned Decision Tree Parameters: {tree_cv.best_params_}
Best score is {tree_cv.best_score_}"""
)
| 3.71875
| 4
|
modules/tests/generator.py
|
ansteh/multivariate
| 0
|
12779014
|
<gh_stars>0
import pandas as ps
import numpy as np
import os, sys
sys.path.append('../../modules/')
import generation.normal as generator
import generation.nonnormal as nonnormalGenerator
import analysis.deviation as deviation
from analysis.covariance import cov
from analysis.correlation import corr
from analysis.mean import mean
import algorithms.fleishman_multivariate as fm
from sampling.arbitrary import Sampling
from monte_carlo.arbitrary.univariate import Metropolis as UnivariateMetropolis
from algorithms.fleishman_univariate import generate_fleishman_from_collection
from algorithms.fleishman_multivariate import sample_from_matrix as fm_mv_from_matrix
def getNormalDistrubutedData():
data = ps.read_csv(os.path.join(os.path.dirname(__file__), "../resources/apple-tree.csv"), sep = ',')
matrix = data.as_matrix()
matrix = matrix.T
matrix = np.array(matrix, dtype=np.float64)
return matrix
def testNormalDistributedGenerator():
matrix = getNormalDistrubutedData()
simulated = generator.simulate(matrix, 1000000)
#print simulated
threshold = 1e-02
print deviation.conforms(mean(matrix), mean(simulated.T), threshold)
print deviation.conforms(cov(matrix), cov(simulated.T), 1e-01)
print deviation.disparity(cov(matrix), cov(simulated.T))
def getNonNormalDistrubutedData():
data = ps.read_csv(os.path.join(os.path.dirname(__file__), "../resources/WIKI-FB.csv"), sep = ',')
matrix = data.as_matrix()
matrix = matrix[:, 1:]
matrix = np.array(matrix, dtype=np.float64)
return matrix
def testNonNormalDistributedGenerator():
data = getNonNormalDistrubutedData()
simulated = nonnormalGenerator.simulate(data)
print simulated.shape
print corr(data) - corr(simulated)
return simulated
def testNormalVsIterativeGeneration():
matrix = getNormalDistrubutedData()
N = 10
normalSampled = generator.simulate(matrix, N)
iterativeSampled = nonnormalGenerator.simulate(matrix.T)
print normalSampled.shape
print iterativeSampled.shape
#print matrix, matrix[:, :2], cov(matrix[:, :2])
print cov(matrix[:, :5])
print cov(matrix)
print cov(normalSampled.T)
print cov(iterativeSampled.T)
threshold = 1
print deviation.conforms(mean(matrix), mean(normalSampled.T), threshold), deviation.conforms(mean(matrix), mean(iterativeSampled.T), threshold)
print deviation.conforms(cov(matrix), cov(normalSampled.T), 1e-01), deviation.conforms(cov(matrix), cov(iterativeSampled.T), 1e-01)
print deviation.conforms(corr(matrix), corr(normalSampled.T), threshold), deviation.conforms(corr(matrix), corr(iterativeSampled.T), threshold)
def testFleishmanGenerator():
data = getNormalDistrubutedData()
Sample = fm.sample_from_matrix(data)
print Sample
def testArbitrarySampling():
data = getNormalDistrubutedData()
sampler = Sampling(data[0])
print data[0]
print sampler.next()
print sampler.next(10)
def testUnivariateMetropolis():
data = np.random.uniform(0, 1, 10000)
metropolis = UnivariateMetropolis(data)
sampled = metropolis.sample(100)
print sampled
print np.mean(data), np.mean(sampled)
def testMultivariateFleishman():
data = getNonNormalDistrubutedData()[:, 1:].T
sampled = fm_mv_from_matrix(data, 100)
print data.shape
print cov(data)
print sampled.shape
print cov(data).shape
print cov(sampled.T).shape
print deviation.disparity(cov(data), cov(sampled.T))
def testUnivariateMetropolisWithComparison():
data = getNormalDistrubutedData()
metropolis = UnivariateMetropolis(data[0])
# print data[0]
# print metropolis.sample(10)
# data = getNonNormalDistrubutedData()
# print data.shape
# print data[:, 1].size
slicedData = getNonNormalDistrubutedData()[:, 1]
means = []
covs = []
stds = []
for i in range(100):
metropolis = UnivariateMetropolis(slicedData)
N = 100
sampled = metropolis.sample(N)
sampler = Sampling(slicedData)
simpleSampling = sampler.next(N)
fmSample = generate_fleishman_from_collection(slicedData, N)
# print 'N=', N
# print ' data metropolis simple sampling fleishman'
# print 'unique samples ', np.unique(slicedData).size, ' ', np.unique(sampled).size, ' ', np.unique(sampled).size, ' ',np.unique(fmSample).size
# print 'covariance ', cov(slicedData), cov(sampled), cov(simpleSampling), cov(fmSample)
# print 'diff covariance ', cov(slicedData)-cov(slicedData), abs(cov(slicedData)-cov(sampled)), abs(cov(slicedData)-cov(simpleSampling)), abs(cov(slicedData)-cov(fmSample))
means.append([np.mean(slicedData), np.mean(sampled), np.mean(simpleSampling), np.mean(fmSample)])
covs.append([np.cov(slicedData), np.cov(sampled), np.cov(simpleSampling), np.cov(fmSample)])
stds.append([np.std(slicedData), np.std(sampled), np.std(simpleSampling), np.std(fmSample)])
means = np.array(means)
#print means
print 'means statistics:'
print 'statistic data metropolis simple sampling fleishman'
print 'std ', np.std(means[:, 0]), ' ', np.std(means[:, 1]), ' ', np.std(means[:, 2]), ' ', np.std(means[:, 3])
print 'cov ', np.cov(means[:, 0]), ' ', np.cov(means[:, 1]), ' ', np.cov(means[:, 2]), ' ', np.cov(means[:, 3])
print 'mean ', np.mean(means[:, 0]), ' ', np.mean(means[:, 1]), ' ', np.mean(means[:, 2]), ' ', np.mean(means[:, 3])
print ''
print ''
covs = np.array(covs)
#print means
print 'covs statistics:'
print 'statistic data metropolis simple sampling fleishman'
print 'std', np.std(covs[:, 0]), ' ', np.std(covs[:, 1]), np.std(covs[:, 2]), ' ', np.std(covs[:, 3])
print 'cov', np.cov(covs[:, 0]), ' ', np.cov(covs[:, 1]), np.cov(covs[:, 2]), ' ', np.cov(covs[:, 3])
print 'mean', np.mean(covs[:, 0]), np.mean(covs[:, 1]), np.mean(covs[:, 2]), ' ', np.mean(covs[:, 3])
stds = np.array(stds)
#print means
# print 'stds statistics:'
# print 'statistic data metropolis simple sampling fleishman'
# print 'std', np.std(stds[:, 0]), ' ', np.std(stds[:, 1]), np.std(stds[:, 2]), ' ', np.std(stds[:, 3])
# print 'cov', np.cov(stds[:, 0]), ' ', np.cov(stds[:, 1]), np.cov(stds[:, 2]), ' ', np.cov(stds[:, 3])
# print 'mean', np.mean(stds[:, 0]), np.mean(stds[:, 1]), np.mean(stds[:, 2]), ' ', np.mean(stds[:, 3])
print ''
print ''
print 'statistic data metropolis simple sampling fleishman'
print 'stds statistics:'
print 'std', np.std(stds, axis=0)
print 'mean', np.mean(stds, axis=0)
| 2.453125
| 2
|
actions.py
|
Blubmin/adversarial_tower_defense
| 0
|
12779015
|
<filename>actions.py
savedStates = []
def SaveState(actionState):
# Find all states matching the given state that are already in the database
matchingStates = list(state for state in savedStates if state.boardState == actionState.boardState)
bestScore = actionState.score
if matchingStates:
# For each matching state
for state in matchingStates:
if bestScore > state.score:
savedStates.remove(state)
else:
bestScore = state.score
# Save the given state if it is better than any matching saved states
if bestScore == actionState.score:
savedStates.append(actionState)
def GetNearestState(boardState):
# Sort states based on distance
sortedStates = sorted(savedStates, key=lambda state: boardState.normalizedDistToState(state.boardState))
# Sort closest 10 states based on score
closestStates = sorted(sortedStates[0:10], key=lambda state: state.score)
# Return best scoring of 10-closest states
return closestStates[len(closestStates)-1]
def GetNearestUnitPlacementState(boardState):
pass
class ActionState:
def __init__(self, stepCount, boardState, action, score):
self.stepCount = stepCount
self.boardState = boardState
self.action = action
self.score = score
class NoAction:
def __init__(self):
self.name = "NoAction"
class PlaceTowerAction:
def __init__(self, x, y):
self.name = "PlaceTowerAction"
self.x = x
self.y = y
class PlaceUnitAction:
def __init__(self, x):
self.name = "PlaceUnitAction"
self.x = x
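# Usage sketch (illustrative; assumes a board-state object providing __eq__ and
# normalizedDistToState(other) as used above):
#   state = ActionState(stepCount=3, boardState=board, action=PlaceTowerAction(2, 5), score=12)
#   SaveState(state)               # keeps only the best-scoring entry per board state
#   best = GetNearestState(board)  # best-scoring of the 10 closest saved states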
| 3.21875
| 3
|
Section 4/44_POM/testAll.py
|
IgorPavlovski84/-Automating-Web-Testing-with-Selenium-and-Python
| 26
|
12779016
|
<gh_stars>10-100
import unittest
from selenium import webdriver
from page import HomePage
from page import AboutPage
from locators import CommonPageLocators
from locators import AboutPageLocators
class TestPyOrgBase(unittest.TestCase):
"""
TBD
"""
def setUp(self):
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('headless')
chrome_options.add_argument('window-size=1920x1080')
self.driver = webdriver.Chrome(options=chrome_options)
def tearDown(self):
self.driver.close()
class TestHome(TestPyOrgBase):
"""
TBD
"""
def setUp(self):
super().setUp()
self.home = HomePage(self.driver)
def test_TC001_py3_doc_button(self):
self.home.hover_to(CommonPageLocators.DOC)
self.home.assert_elem_text(CommonPageLocators.PY3_DOC_BUTTON, 'Python 3.x Docs')
self.home.click(CommonPageLocators.PY3_DOC_BUTTON)
assert self.driver.current_url == 'https://docs.python.org/3/'
def test_TC002_blahblah_search(self):
self.home.search_for('blahblah')
self.home.assert_elem_text(CommonPageLocators.SEARCH_RESULT_LIST, 'No results found.')
class TestAbout(TestPyOrgBase):
"""
TBD
"""
def setUp(self):
super().setUp()
self.about = AboutPage(self.driver)
def test_TC003_verify_upcoming_events_section_present(self):
self.about.assert_elem_text(AboutPageLocators.UPCOMING_EVENTS, 'Upcoming Events')
if __name__ == '__main__':
unittest.main()
| 2.53125
| 3
|
untitled1.py
|
moeinderakhshan/workshop_practice
| 0
|
12779017
|
<reponame>moeinderakhshan/workshop_practice
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 10:25:16 2021
@author: Derakhshan
"""
a=3+5
print(a)
| 1.976563
| 2
|
handlingMissingKeys.py
|
universekavish/pythonTraining
| 0
|
12779018
|
<reponame>universekavish/pythonTraining
#handling missing keys in python dictionaries
country_code = {'India' : '0091', 'Australia' : '0025', 'Nepal' : '00977'}
# 1. Using get()
# get(key, def_val)
print(country_code.get('India', 'Not Found'))
print(country_code.get('Japan', 'Not found'))
# 2. Using setdefault()
# setdefault(key, def_value)
#works like get(), but whenever the key is missing it also inserts that key with the provided def_value
country_code.setdefault('Japan', 'Not present')
print(country_code['India'])
print(country_code['Japan'])
# IMPORTANT
# 3. Using defaultdict
# defaultdict is present in collections module, so import
# defaultdict takes a function(default factory) as its argument.
# if a key is not present in the defaultdict, the default factory value is returned and displayed.
import collections
# declaring defaultdict
# sets default value 'Key not found' to absent keys
defd = collections.defaultdict(lambda : 'Key Not Found')
defd['a'] = 1
defd['b'] = 2
print('Value associated with \'a\' : ', end = '')
print(defd['a'])
print('Value associated with \'c\' : ', end = '')
print(defd['c'])
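# A related counting pattern (illustrative example, not part of the original notes):
# defaultdict(int) gives missing keys a default of 0, so counters need no setup
counts = collections.defaultdict(int)
for word in ['spam', 'eggs', 'spam']:
    counts[word] += 1          # missing keys start at 0, so += 1 just works
print(dict(counts))            # {'spam': 2, 'eggs': 1}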
| 3.921875
| 4
|
cli/src/plz/cli/show_status_operation.py
|
prodo-ai/plz
| 29
|
12779019
|
import collections
from typing import Any, Optional
from plz.cli.composition_operation import CompositionOperation, \
create_path_string_prefix
from plz.cli.configuration import Configuration
from plz.cli.log import log_info
from plz.cli.operation import on_exception_reraise
ExecutionStatus = collections.namedtuple('ExecutionStatus',
['running', 'success', 'code'])
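# ExecutionStatus bundles the fields reported by the controller for a single
# execution: whether it is still running, whether it succeeded, and its exit code.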
class ShowStatusOperation(CompositionOperation):
"""Output the status of an execution"""
@classmethod
def name(cls):
return 'status'
@classmethod
def prepare_argument_parser(cls, parser, args):
cls.maybe_add_execution_id_arg(parser, args)
def __init__(self,
configuration: Configuration,
execution_id: Optional[str] = None):
super().__init__(configuration)
self.execution_id = execution_id
@on_exception_reraise('Retrieving the status failed.')
def get_status(self, atomic_execution_id: Optional[str] = None):
if atomic_execution_id is None:
atomic_execution_id = self.get_execution_id()
status = self.controller.get_status(atomic_execution_id)
return ExecutionStatus(running=status['running'],
success=status['success'],
code=status['exit_status'])
def run_atomic(self, atomic_execution_id: str,
composition_path: [(str, Any)]):
status = self.get_status(atomic_execution_id)
string_prefix = create_path_string_prefix(composition_path)
log_info(f'{string_prefix}Status:')
print('Running:', status.running)
print('Success:', status.success)
print('Exit Status:', status.code)
| 2.21875
| 2
|
ldap_parser.py
|
frnde/ldap_errors_fixer
| 0
|
12779020
|
from ldif import LDIFParser
class ParseLDIF(LDIFParser):
def __init__(self, input_file, processing_object):
LDIFParser.__init__(self, input_file)
self.processing_object = processing_object
def handle(self,dn, entry):
self.processing_object.process_entry(dn, entry)
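# Usage sketch (illustrative; any processing object exposing a
# process_entry(dn, entry) method works):
#   with open("export.ldif", "rb") as ldif_file:
#       ParseLDIF(ldif_file, processor).parse()  # parse() invokes handle() for every entry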
| 2.359375
| 2
|
scrabble/raw_metadata_stats.py
|
jbkoh/Scrabble
| 6
|
12779021
|
import json
import pdb
from functools import reduce
from collections import OrderedDict, Counter
import random
import re
def replace_num_or_special(word):
if re.match('\d+', word):
return 'NUMBER'
elif re.match('[a-zA-Z]+', word):
return word
else:
return 'SPECIAL'
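# e.g. replace_num_or_special('3') -> 'NUMBER',
#      replace_num_or_special('AHU') -> 'AHU',
#      replace_num_or_special('-') -> 'SPECIAL'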
building = 'ebu3b'
with open('metadata/{0}_sentence_dict_justseparate.json'.format(building),
'r') as fp:
sentence_dict = json.load(fp)
srcids = list(sentence_dict.keys())
for srcid, sentence in sentence_dict.items():
sentence_dict[srcid] = list(map(replace_num_or_special, sentence))
adder = lambda x,y: x + y
total_words = set(reduce(adder, sentence_dict.values()))
word_counter = Counter(reduce(adder, sentence_dict.values()))
with open('model/{0}_word_clustering_justseparate.json'.format(building),
'r') as fp:
cluster_dict = json.load(fp)
# Learning Sample Selection
sample_srcids = set()
length_counter = lambda x: len(x[1])
ander = lambda x, y: x and y
n = 100
sample_cnt = 0
shuffle_flag = False
sorted_cluster_dict = OrderedDict(
sorted(cluster_dict.items(), key=length_counter, reverse=True))
#n = len(sorted_cluster_dict) #TODO: Remove if not working well
while len(sample_srcids) < n:
cluster_dict_items = list(sorted_cluster_dict.items())
if shuffle_flag:
random.shuffle(cluster_dict_items)
for cluster_num, srcid_list in cluster_dict_items:
valid_srcid_list = set(srcid_list)\
.intersection(set(srcids))\
.difference(set(sample_srcids))
if len(valid_srcid_list) > 0:
sample_srcids.add(\
random.choice(list(valid_srcid_list)))
if len(sample_srcids) >= n:
break
sample_sentence_dict = dict((srcid, sentence_dict[srcid])
for srcid in sample_srcids)
pdb.set_trace()
| 2.84375
| 3
|
bot/chat_types/alltypes.py
|
telegrambotdev/hamilton-bot
| 0
|
12779022
|
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
import psutil
async def help(client, msg, args):
client.select_lang(msg, "all")
await msg.reply(msg.lang["help"]["ok"])
async def start(client, msg, args):
client.select_lang(msg, "all")
await msg.reply(msg.lang["start"]["ok"])
# Choose of languages
# - Callback of response
async def setlang(client, callback, args):
msg = callback.message
client.select_lang(msg, "all")
if msg.chat.type in ("group", "supergroup"):
info = await client.get_chat_member(msg.chat.id, callback.from_user.id)
if info.status not in ("administrator", "creator"):
await client.answer_callback_query(
callback.id,
msg.lang["setlang"]["not_admin"],
show_alert=True
)
return
if not args[0] in client.langs:
await client.edit_message_text(
message_id=msg.message_id,
chat_id=msg.chat.id,
text=msg.lang["setlang"]["not_found"]
)
return
client.db.set_lang(msg.chat.id, args[0])
client.select_lang(msg, "all")
await client.edit_message_text(
message_id=msg.message_id,
chat_id=msg.chat.id,
text=msg.lang["setlang"]["ok"]
)
await client.answer_callback_query(callback.id, "Ok.")
# - Send buttons to choose a language
async def getlangs(client, msg, args):
client.select_lang(msg, "all")
text = msg.lang["setlang"]["select"] + "\n\n"
buttons = []
for lang in client.langs.keys():
buttons.append(
[
InlineKeyboardButton(
client.langs[lang]["name"]+" - "+lang,
callback_data="setlang " + lang
)
]
)
await msg.reply(text, reply_markup=InlineKeyboardMarkup(buttons))
# Channel of updates from bot
async def channel(client, msg, args):
client.select_lang(msg, "all")
if "channel" in client.conf:
await msg.reply(
msg.lang["channel"]["ok"].format(uri=client.conf["channel"])
)
# Stats of server computer
async def status(client, msg, args):
cpu = psutil.cpu_freq()
cpu_str: str = f"{int(cpu.current)}/{int(cpu.max)}MHZ ({psutil.cpu_percent()}%)"
mem = psutil.virtual_memory()
mem_str: str = f"{mem.used // 1048576}/{mem.total // 1048576}MiB"
mem_str += f" ({int((mem.used / mem.total) * 100)}%)"
disk = psutil.disk_usage(".")
disk_str: str = f"{disk.used // (2**30)}/{disk.total // (2**30)}GiB"
disk_str += f" ({int(disk.percent)}%)"
await msg.reply(
"Server status\n\n" +
f"Memory: {mem_str}\n" +
f"CPU[min={int(cpu.min)}MHZ]: {cpu_str}\n" +
f"Disk: {disk_str}"
)
| 2.484375
| 2
|
src/services/text_postprocessor/kafka/kafka_producer.py
|
dam1002/GESPRO_GESTIONVERSIONES
| 7
|
12779023
|
<reponame>dam1002/GESPRO_GESTIONVERSIONES
# Copyright (C) 2021 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# For license information on the libraries used, see LICENSE.
"""Kafka Producer."""
__version__ = '0.1.0'
import socket
from confluent_kafka import SerializingProducer
from confluent_kafka.serialization import StringSerializer
class Producer:
"""Wrapper class around :obj:`confluent_kafka.SerializerProducer`.
It includes the specific producer configuration. When
    a :obj:`Producer` is instantiated, it will return
a :obj:`confluent_kafka.SerializingProducer`.
For more information, see the official Confluent Kafka
`SerializerProducer documentation
<https://docs.confluent.io/platform/current/clients/confluent-kafka-python/#serializingproducer>`__.
"""
def __new__(cls):
# Producer configuration. Must match Stimzi/Kafka configuration.
config = {'bootstrap.servers': "jizt-cluster-kafka-bootstrap:9092",
'client.id': socket.gethostname(),
'key.serializer': StringSerializer('utf_8'),
'value.serializer': StringSerializer('utf_8')}
return SerializingProducer(config)
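# Usage sketch (illustrative topic/key/value; assumes the Strimzi bootstrap
# service configured above is reachable):
#   producer = Producer()
#   producer.produce("my-topic", key="request-id", value="payload")
#   producer.flush()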
| 2.046875
| 2
|
simsapa/assets/ui/memos_browser_window_ui.py
|
ilius/simsapa
| 0
|
12779024
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'simsapa/assets/ui/memos_browser_window.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MemosBrowserWindow(object):
def setupUi(self, MemosBrowserWindow):
MemosBrowserWindow.setObjectName("MemosBrowserWindow")
MemosBrowserWindow.resize(856, 623)
MemosBrowserWindow.setBaseSize(QtCore.QSize(800, 600))
self.central_widget = QtWidgets.QWidget(MemosBrowserWindow)
self.central_widget.setObjectName("central_widget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.central_widget)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setObjectName("main_layout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_4 = QtWidgets.QLabel(self.central_widget)
self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_4.setObjectName("label_4")
self.verticalLayout_4.addWidget(self.label_4)
self.back_input = QtWidgets.QPlainTextEdit(self.central_widget)
self.back_input.setMinimumSize(QtCore.QSize(0, 50))
self.back_input.setMaximumSize(QtCore.QSize(16777215, 50))
self.back_input.setObjectName("back_input")
self.verticalLayout_4.addWidget(self.back_input)
self.horizontalLayout.addLayout(self.verticalLayout_4)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_3 = QtWidgets.QLabel(self.central_widget)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setObjectName("label_3")
self.verticalLayout_3.addWidget(self.label_3)
self.front_input = QtWidgets.QPlainTextEdit(self.central_widget)
self.front_input.setMinimumSize(QtCore.QSize(0, 50))
self.front_input.setMaximumSize(QtCore.QSize(16777215, 50))
self.front_input.setObjectName("front_input")
self.verticalLayout_3.addWidget(self.front_input)
self.horizontalLayout.addLayout(self.verticalLayout_3)
self.main_layout.addLayout(self.horizontalLayout)
self.searchbar_layout = QtWidgets.QHBoxLayout()
self.searchbar_layout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.searchbar_layout.setObjectName("searchbar_layout")
self.search_input = QtWidgets.QLineEdit(self.central_widget)
self.search_input.setMinimumSize(QtCore.QSize(0, 35))
self.search_input.setClearButtonEnabled(True)
self.search_input.setObjectName("search_input")
self.searchbar_layout.addWidget(self.search_input)
self.search_button = QtWidgets.QPushButton(self.central_widget)
self.search_button.setMinimumSize(QtCore.QSize(0, 40))
self.search_button.setObjectName("search_button")
self.searchbar_layout.addWidget(self.search_button)
self.main_layout.addLayout(self.searchbar_layout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.memos_list = QtWidgets.QListView(self.central_widget)
self.memos_list.setMinimumSize(QtCore.QSize(0, 400))
self.memos_list.setObjectName("memos_list")
self.verticalLayout_2.addWidget(self.memos_list)
self.main_layout.addLayout(self.verticalLayout_2)
self.horizontalLayout_2.addLayout(self.main_layout)
MemosBrowserWindow.setCentralWidget(self.central_widget)
self.menubar = QtWidgets.QMenuBar(MemosBrowserWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 856, 20))
self.menubar.setObjectName("menubar")
self.menu_File = QtWidgets.QMenu(self.menubar)
self.menu_File.setObjectName("menu_File")
self.menu_Edit = QtWidgets.QMenu(self.menubar)
self.menu_Edit.setObjectName("menu_Edit")
self.menu_Windows = QtWidgets.QMenu(self.menubar)
self.menu_Windows.setObjectName("menu_Windows")
self.menu_Help = QtWidgets.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menu_Memos = QtWidgets.QMenu(self.menubar)
self.menu_Memos.setObjectName("menu_Memos")
MemosBrowserWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MemosBrowserWindow)
self.statusbar.setObjectName("statusbar")
MemosBrowserWindow.setStatusBar(self.statusbar)
self.toolBar_2 = QtWidgets.QToolBar(MemosBrowserWindow)
self.toolBar_2.setObjectName("toolBar_2")
MemosBrowserWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar_2)
self.action_Copy = QtWidgets.QAction(MemosBrowserWindow)
self.action_Copy.setObjectName("action_Copy")
self.action_Paste = QtWidgets.QAction(MemosBrowserWindow)
self.action_Paste.setObjectName("action_Paste")
self.action_Quit = QtWidgets.QAction(MemosBrowserWindow)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/close"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Quit.setIcon(icon)
self.action_Quit.setObjectName("action_Quit")
self.action_Sutta_Search = QtWidgets.QAction(MemosBrowserWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/book"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Sutta_Search.setIcon(icon1)
self.action_Sutta_Search.setObjectName("action_Sutta_Search")
self.action_Dictionary_Search = QtWidgets.QAction(MemosBrowserWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/dictionary"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Dictionary_Search.setIcon(icon2)
self.action_Dictionary_Search.setObjectName("action_Dictionary_Search")
self.action_About = QtWidgets.QAction(MemosBrowserWindow)
self.action_About.setObjectName("action_About")
self.action_Website = QtWidgets.QAction(MemosBrowserWindow)
self.action_Website.setObjectName("action_Website")
self.action_Close_Window = QtWidgets.QAction(MemosBrowserWindow)
self.action_Close_Window.setObjectName("action_Close_Window")
self.action_Open = QtWidgets.QAction(MemosBrowserWindow)
self.action_Open.setObjectName("action_Open")
self.action_Document_Reader = QtWidgets.QAction(MemosBrowserWindow)
self.action_Document_Reader.setObjectName("action_Document_Reader")
self.action_Add = QtWidgets.QAction(MemosBrowserWindow)
self.action_Add.setObjectName("action_Add")
self.action_Remove = QtWidgets.QAction(MemosBrowserWindow)
self.action_Remove.setObjectName("action_Remove")
self.action_Open_Selected = QtWidgets.QAction(MemosBrowserWindow)
self.action_Open_Selected.setObjectName("action_Open_Selected")
self.action_Library = QtWidgets.QAction(MemosBrowserWindow)
self.action_Library.setObjectName("action_Library")
self.action_Sync_to_Anki = QtWidgets.QAction(MemosBrowserWindow)
self.action_Sync_to_Anki.setObjectName("action_Sync_to_Anki")
self.action_Memos = QtWidgets.QAction(MemosBrowserWindow)
self.action_Memos.setObjectName("action_Memos")
self.action_Dictionaries_Manager = QtWidgets.QAction(MemosBrowserWindow)
self.action_Dictionaries_Manager.setObjectName("action_Dictionaries_Manager")
self.action_Clear = QtWidgets.QAction(MemosBrowserWindow)
self.action_Clear.setObjectName("action_Clear")
self.action_Links = QtWidgets.QAction(MemosBrowserWindow)
self.action_Links.setObjectName("action_Links")
self.menu_File.addAction(self.action_Open)
self.menu_File.addAction(self.action_Close_Window)
self.menu_File.addSeparator()
self.menu_File.addAction(self.action_Quit)
self.menu_Edit.addAction(self.action_Copy)
self.menu_Edit.addAction(self.action_Paste)
self.menu_Windows.addAction(self.action_Sutta_Search)
self.menu_Windows.addAction(self.action_Dictionary_Search)
self.menu_Windows.addAction(self.action_Dictionaries_Manager)
self.menu_Windows.addAction(self.action_Document_Reader)
self.menu_Windows.addAction(self.action_Library)
self.menu_Windows.addAction(self.action_Memos)
self.menu_Windows.addAction(self.action_Links)
self.menu_Help.addAction(self.action_Website)
self.menu_Help.addAction(self.action_About)
self.menu_Memos.addAction(self.action_Sync_to_Anki)
self.menubar.addAction(self.menu_File.menuAction())
self.menubar.addAction(self.menu_Edit.menuAction())
self.menubar.addAction(self.menu_Windows.menuAction())
self.menubar.addAction(self.menu_Memos.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.toolBar_2.addAction(self.action_Add)
self.toolBar_2.addAction(self.action_Clear)
self.toolBar_2.addAction(self.action_Remove)
self.toolBar_2.addAction(self.action_Sync_to_Anki)
self.retranslateUi(MemosBrowserWindow)
QtCore.QMetaObject.connectSlotsByName(MemosBrowserWindow)
def retranslateUi(self, MemosBrowserWindow):
_translate = QtCore.QCoreApplication.translate
MemosBrowserWindow.setWindowTitle(_translate("MemosBrowserWindow", "Memos - Simsapa"))
self.label_4.setText(_translate("MemosBrowserWindow", "Front"))
self.label_3.setText(_translate("MemosBrowserWindow", "Back"))
self.search_button.setText(_translate("MemosBrowserWindow", "Search"))
self.menu_File.setTitle(_translate("MemosBrowserWindow", "&File"))
self.menu_Edit.setTitle(_translate("MemosBrowserWindow", "&Edit"))
self.menu_Windows.setTitle(_translate("MemosBrowserWindow", "&Windows"))
self.menu_Help.setTitle(_translate("MemosBrowserWindow", "&Help"))
self.menu_Memos.setTitle(_translate("MemosBrowserWindow", "&Memos"))
self.toolBar_2.setWindowTitle(_translate("MemosBrowserWindow", "toolBar_2"))
self.action_Copy.setText(_translate("MemosBrowserWindow", "&Copy"))
self.action_Paste.setText(_translate("MemosBrowserWindow", "&Paste"))
self.action_Quit.setText(_translate("MemosBrowserWindow", "&Quit"))
self.action_Quit.setShortcut(_translate("MemosBrowserWindow", "Ctrl+Q"))
self.action_Sutta_Search.setText(_translate("MemosBrowserWindow", "&Sutta Search"))
self.action_Sutta_Search.setShortcut(_translate("MemosBrowserWindow", "F5"))
self.action_Dictionary_Search.setText(_translate("MemosBrowserWindow", "&Dictionary Search"))
self.action_Dictionary_Search.setShortcut(_translate("MemosBrowserWindow", "F6"))
self.action_About.setText(_translate("MemosBrowserWindow", "&About"))
self.action_Website.setText(_translate("MemosBrowserWindow", "&Website"))
self.action_Close_Window.setText(_translate("MemosBrowserWindow", "&Close Window"))
self.action_Open.setText(_translate("MemosBrowserWindow", "&Open..."))
self.action_Open.setShortcut(_translate("MemosBrowserWindow", "Ctrl+O"))
self.action_Document_Reader.setText(_translate("MemosBrowserWindow", "D&ocument Reader"))
self.action_Document_Reader.setToolTip(_translate("MemosBrowserWindow", "Document Reader"))
self.action_Document_Reader.setShortcut(_translate("MemosBrowserWindow", "F7"))
self.action_Add.setText(_translate("MemosBrowserWindow", "&Add..."))
self.action_Remove.setText(_translate("MemosBrowserWindow", "&Remove"))
self.action_Remove.setShortcut(_translate("MemosBrowserWindow", "Del"))
self.action_Open_Selected.setText(_translate("MemosBrowserWindow", "&Open Selected"))
self.action_Library.setText(_translate("MemosBrowserWindow", "&Library"))
self.action_Library.setShortcut(_translate("MemosBrowserWindow", "F8"))
self.action_Sync_to_Anki.setText(_translate("MemosBrowserWindow", "&Sync to Anki"))
self.action_Memos.setText(_translate("MemosBrowserWindow", "&Memos"))
self.action_Memos.setToolTip(_translate("MemosBrowserWindow", "Notes"))
self.action_Memos.setShortcut(_translate("MemosBrowserWindow", "F9"))
self.action_Dictionaries_Manager.setText(_translate("MemosBrowserWindow", "Dictionaries &Manager"))
self.action_Dictionaries_Manager.setShortcut(_translate("MemosBrowserWindow", "F10"))
self.action_Clear.setText(_translate("MemosBrowserWindow", "&Clear"))
self.action_Links.setText(_translate("MemosBrowserWindow", "&Links"))
self.action_Links.setShortcut(_translate("MemosBrowserWindow", "F11"))
from simsapa.assets import icons_rc
| 1.828125
| 2
|
generating_address.py
|
ttw225/IOTA_learning
| 0
|
12779025
|
from iota import Iota
from iota.crypto.addresses import AddressGenerator
seed = b'<KEY>'
# generator = AddressGenerator(seed)
generator =\
AddressGenerator(
seed=seed,
security_level=3,
)
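# Note (general IOTA background, not from the original snippet): security_level
# ranges from 1 to 3; level 3 yields the longest (most secure) signatures and
# the slowest address generation.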
# Generate a list of addresses:
# addresses = generator.get_addresses(index=0, count=5)
# NB: the documentation was wrong, use `start` instead of `index`.
addresses = generator.get_addresses(start=0, count=5)
print(addresses)
print('='*20)
# Generate a list of addresses in reverse order by passing step=-1, e.g.:
# addresses = generator.get_addresses(start=42, count=10, step=-1)
# (The active call below simply regenerates the same five forward-order addresses.)
addresses = generator.get_addresses(start=0, count=5)
print(addresses)
| 3.09375
| 3
|
pipeline/medication_stats_logit.py
|
vincent-octo/risteys
| 0
|
12779026
|
#!/usr/bin/env python3
"""
Compute drug scores related to a given endpoint.
Usage:
python3 medication_stats_logit.py \
<ENDPOINT> \ # FinnGen endpoint for which to compute associated drug scores
<PATH_FIRST_EVENTS> \ # Path to the first events file from FinnGen
<PATH_DETAILED_LONGIT> \ # Path to the detailed longitudinal file from FinnGen
        <PATH_ENDPOINT_DEFINITIONS> \ # Path to the endpoint definitions file from FinnGen
<PATH_MINIMUM_INFO> \ # Path to the minimum file from FinnGen
        <OUTPUT_DIRECTORY>          # Path to where to put the output files
    (Note: as wired up in __main__, these values are read from the environment
    variables FG_ENDPOINT, FIRST_EVENTS, DETAILED_LONGIT, ENDPOINT_DEFS,
    MINIMUM_INFO, OUTPUT_SCORES and OUTPUT_COUNTS.)
Outputs:
- <ENDPOINT>_scores.csv: CSV file with score and standard error for each drug
- <ENDPOINT>_counts.csv: CSV file which breaks down drugs by their full ATC code and counts the
  number of individuals.
"""
import csv
from os import getenv
from pathlib import Path
import pandas as pd
import numpy as np
from numpy.linalg import LinAlgError
from numpy.linalg import multi_dot as mdot
from scipy.stats import norm
from statsmodels.formula.api import logit
### from log import logger
# TODO #
# Copy-pasted the logging configuration here instead of importing it
# from log.py.
# This is because dsub will run this script in a Docker image without
# having access to the log.py file. There might be a solution to do
# this, for example by adding the log.py to the Docker image and
# moving it to the right place afterward.
import logging
level = getenv("LOG_LEVEL", logging.INFO)
logger = logging.getLogger("pipeline")
handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s %(levelname)-8s %(module)-21s %(funcName)-25s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level)
# END #
ATC_LEVEL = len('A10BA') # Use broad level of ATC classification instead of full ATC codes
# Time windows
YEAR = 1.0
MONTH = 1 / 12
WEEK = 7 / 365.25
PRE_DURATION = 1 * YEAR
PRE_EXCLUSION = 1 * MONTH
POST_DURATION = 5 * WEEK
STUDY_STARTS = 1998 # inclusive, from 1998-01-01 onward
STUDY_ENDS = 2022 # exclusive, up until 2021-12-31
STUDY_DURATION = 20 * YEAR
MIN_CASES = 15
# Prediction parameters
PRED_FEMALE = 0.5
PRED_YOB = 1960
PRED_FG_ENDPOINT_YEAR = 2021
def main(fg_endpoint, first_events, detailed_longit, endpoint_defs, minimum_info, output_scores, output_counts):
"""Compute a score for the association of a given drug to a FinnGen endpoint"""
line_buffering = 1
# File with drug scores
scores_file = open(output_scores, "x", buffering=line_buffering)
res_writer = csv.writer(scores_file)
res_writer.writerow([
"endpoint",
"drug",
"score",
"stderr",
"n_indivs",
"pvalue"
])
# Results of full-ATC drug counts
counts_file = open(output_counts, "x", buffering=line_buffering)
counts_writer = csv.writer(counts_file)
counts_writer.writerow([
"endpoint",
"drug",
"full_ATC",
"count"
])
# Load endpoint and drug data
df_logit, endpoint_def = load_data(
fg_endpoint,
first_events,
detailed_longit,
endpoint_defs,
minimum_info)
is_sex_specific = pd.notna(endpoint_def.SEX)
for drug in df_logit.ATC.unique():
data_comp_logit(df_logit, fg_endpoint, drug, is_sex_specific, res_writer, counts_writer)
scores_file.close()
counts_file.close()
def load_data(fg_endpoint, first_events, detailed_longit, endpoint_defs, minimum_info):
"""Load the data for the given endpoint and all the drug events"""
fg_endpoint_age = fg_endpoint + "_AGE"
fg_endpoint_year = fg_endpoint + "_YEAR"
# FIRST-EVENT DATA (for logit model)
logger.info("Loading endpoint data")
df_endpoint = pd.read_csv(
first_events,
usecols=[
"FINNGENID",
fg_endpoint,
fg_endpoint_age,
fg_endpoint_year
]
)
    # Rename endpoint columns to generic names for easier reference down the line
df_endpoint = df_endpoint.rename(columns={
fg_endpoint: "fg_endpoint",
fg_endpoint_age: "fg_endpoint_age",
fg_endpoint_year: "fg_endpoint_year"
})
# Select only individuals having the endpoint
df_endpoint = df_endpoint.loc[df_endpoint["fg_endpoint"] == 1, :]
# Compute approximate year of birth
df_endpoint["yob"] = df_endpoint["fg_endpoint_year"] - df_endpoint["fg_endpoint_age"]
# Keep only incident cases (individuals having the endpoint after start of study)
df_endpoint = df_endpoint[df_endpoint["fg_endpoint_year"] >= STUDY_STARTS]
# DRUG DATA
logger.info("Loading drug data")
df_drug = pd.read_csv(
detailed_longit,
usecols=["FINNGENID", "SOURCE", "EVENT_AGE", "APPROX_EVENT_DAY", "CODE1"]
)
df_drug.APPROX_EVENT_DAY = pd.to_datetime(df_drug.APPROX_EVENT_DAY) # needed for filtering based on year
df_drug = df_drug.loc[df_drug.SOURCE == "PURCH", :] # keep only drug purchase events
df_drug["ATC"] = df_drug.CODE1.str[:ATC_LEVEL]
# INFO DATA
logger.info("Loading info data")
df_info = pd.read_csv(
minimum_info,
usecols=["FINNGENID", "SEX"]
)
df_info["female"] = df_info.SEX.apply(lambda d: 1.0 if d == "female" else 0.0)
df_info = df_info.drop(columns=["SEX"])
# ENDPOINT DEFINITION
df_endpoint_defs = pd.read_csv(
endpoint_defs,
usecols=["NAME", "SEX"]
)
endpoint_def = df_endpoint_defs.loc[df_endpoint_defs.NAME == fg_endpoint, :].iloc[0]
# Merge the data into a single DataFrame
logger.info("Merging dataframes")
df_logit = df_info.merge(df_endpoint, on="FINNGENID")
df_logit = df_logit.merge(df_drug, on="FINNGENID")
return df_logit, endpoint_def
def data_comp_logit(df, fg_endpoint, drug, is_sex_specific, res_writer, counts_writer):
logger.info(f"Computing for: {fg_endpoint} / {drug}")
df_stats, n_indivs, counts = logit_controls_cases(
df,
drug,
STUDY_DURATION,
PRE_DURATION,
PRE_EXCLUSION,
POST_DURATION)
# Check that we have enough cases
(ncases, _) = df_stats[df_stats.drug == 1.0].shape
if ncases < MIN_CASES:
logger.warning(f"Not enough cases ({ncases} < {MIN_CASES}) for {fg_endpoint} / {drug}")
return
# Write the full-ATC drug counts
for full_atc, count in counts.items():
counts_writer.writerow([
fg_endpoint,
drug,
full_atc,
count
])
# Compute the score for the given endpoint / drug
try:
score, stderr = comp_score_logit(df_stats, is_sex_specific)
except LinAlgError as exc:
logger.warning(f"LinAlgError: {exc}")
else:
pvalue = 2 * norm.cdf(-abs(score / stderr))
res_writer.writerow([
fg_endpoint,
drug,
score,
stderr,
n_indivs,
pvalue
])
def logit_controls_cases(
df,
drug,
study_duration,
pre_duration,
pre_exclusion,
post_duration,
):
"""Build a table of controls and cases"""
logger.debug("Munging data into controls and cases")
df["drug"] = 0.0
# Remove some data based on study_duration
study_starts = STUDY_ENDS - study_duration
keep_data = (
(df.APPROX_EVENT_DAY.dt.year >= study_starts)
& (df["fg_endpoint_year"] >= study_starts))
df = df.loc[keep_data, :]
# Count global number of individuals having the endpoint + drug at some point in time
n_indivs = df.loc[df.ATC == drug, "FINNGENID"].unique().shape[0]
# Check events where the drug happens BEFORE the endpoint
drug_pre_endpoint = (
(df.ATC == drug)
# Pre-endpoint time-window
& (df.EVENT_AGE >= df.fg_endpoint_age - pre_exclusion - pre_duration)
& (df.EVENT_AGE <= df.fg_endpoint_age - pre_exclusion)
)
    # Check events where the drug happens AFTER the endpoint
drug_post_endpoint = (
(df.ATC == drug)
# Post-endpoint time-window
& (df.EVENT_AGE >= df.fg_endpoint_age)
& (df.EVENT_AGE <= df.fg_endpoint_age + post_duration)
)
# Define cases
cases = (~ drug_pre_endpoint) & drug_post_endpoint
df_cases = df.loc[cases, :]
df_cases.loc[:, "drug"] = 1.0
# The aggregate function doesn't matter: within each group the rows will differ by EVENT_AGE, but this column will be discarded in the model
df_cases = df_cases.groupby("FINNGENID").min()
# Count the number of individuals for each full ATC code
counts = df_cases.loc[df_cases.ATC == drug, :].groupby("CODE1").count().drug
    # Remove unnecessary columns
df_cases = df_cases.drop(columns=[
"fg_endpoint",
"fg_endpoint_age",
"EVENT_AGE",
"CODE1",
"ATC"])
df_controls = df.loc[~ cases, ["FINNGENID", "female", "yob", "fg_endpoint_year", "drug"]]
df_controls = df_controls.groupby("FINNGENID").min()
df_stats = pd.concat([df_cases, df_controls], sort=False)
return df_stats, n_indivs, counts
def comp_score_logit(df, is_sex_specific):
logger.info("Model computation score")
# Remove the sex covariate for sex-specific endpoints, otherwise
# it will fail since there will be no females or no males.
model = 'drug ~ yob + yob*yob + fg_endpoint_year + fg_endpoint_year*fg_endpoint_year'
if not is_sex_specific:
model += ' + female'
# Compute score using Logistic model, predict using fixed values
mod = logit(model, df)
res = mod.fit(disp=False) # fit() without displaying convergence messages
predict_data = pd.DataFrame({
"Intercept": [1.0],
"yob": [PRED_YOB],
"fg_endpoint_year": [PRED_FG_ENDPOINT_YEAR],
"female": [PRED_FEMALE]
})
# Force "predict_cata" and "cov_params" matrix to use same column
# order, otherwise it will to a silent bug as their values are put
# together computing the std err with "mdot" below.
col_order = res.cov_params().columns.values
predict_data = predict_data.loc[:, col_order]
# Compute the standard error of the prediction
pred = res.predict(predict_data)
pred_lin = np.log(pred / (1 - pred)) # to scale of the linear predictors
stderr = np.sqrt(mdot([predict_data, res.cov_params(), predict_data.T]))
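    # The covariance-based standard error above is on the linear-predictor
    # (logit) scale; the line below converts it to the probability scale by
    # multiplying with the derivative of the logistic function,
    # exp(x) / (1 + exp(x))**2.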
real_stderr = stderr.flatten() * (np.abs(np.exp(pred_lin)) / (1 + np.exp(pred_lin))**2)
return pred[0], real_stderr[0]
if __name__ == '__main__':
main(
fg_endpoint=getenv("FG_ENDPOINT"),
first_events=Path(getenv("FIRST_EVENTS")),
detailed_longit=Path(getenv("DETAILED_LONGIT")),
endpoint_defs=Path(getenv("ENDPOINT_DEFS")),
minimum_info=Path(getenv("MINIMUM_INFO")),
output_scores=Path(getenv("OUTPUT_SCORES")),
output_counts=Path(getenv("OUTPUT_COUNTS")),
)
| 2.4375
| 2
|
examples/optimization/ex5.py
|
mikelytaev/wave-propagation
| 15
|
12779027
|
from propagators._utils import *
from scipy.interpolate import approximate_taylor_polynomial
from scipy.interpolate import pade
def pade_propagator_coefs_m(*, pade_order, diff2, k0, dx, spe=False, alpha=0):
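    """Numerically build Pade approximation coefficients for the propagator
    exp(1j*k0*dx*(sqrt(1 + diff2(s)) - 1)): fit a Taylor polynomial first, then
    convert it to a rational approximation and return the (numerator, denominator)
    root-based coefficient pairs. A sketch used below to compare against
    pade_propagator_coefs from propagators._utils."""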
if spe:
def sqrt_1plus(x):
return 1 + x / 2
elif alpha == 0:
def sqrt_1plus(x):
return np.sqrt(1 + x)
else:
raise Exception('alpha not supported')
def propagator_func(s):
return np.exp(1j * k0 * dx * (sqrt_1plus(diff2(s)) - 1))
taylor_coefs = approximate_taylor_polynomial(propagator_func, 0, pade_order[0] + pade_order[1] + 5, 0.01)
p, q = pade(taylor_coefs, pade_order[0], pade_order[1])
pade_coefs = list(zip_longest([-1 / complex(v) for v in np.roots(p)],
[-1 / complex(v) for v in np.roots(q)],
fillvalue=0.0j))
return pade_coefs
coefs = pade_propagator_coefs(pade_order=(2, 2), diff2=lambda x: x, k0=2*cm.pi, dx=1)
coefs_m = pade_propagator_coefs_m(pade_order=(2, 2), diff2=lambda x: x, k0=2*cm.pi, dx=1)
print(coefs)
print(coefs_m)
# dx_res, dz_res, pade_order_res = optimal_params_m(max_angle_deg=3,
# max_distance_wl=100e3,
# threshold=1e-3,
# pade_order=(7, 8),
# z_order=4)
#
# print(dx_res)
# print(dz_res)
# print(pade_order_res)
| 2.46875
| 2
|
Django-Blog/blog/forms.py
|
ArsalanShahid116/Django-Blog-Application
| 1
|
12779028
|
from django import forms
from .models import Comment, Post
from django.contrib.auth import get_user_model
class EmailPostForm(forms.Form):
name = forms.CharField(max_length=25)
email = forms.EmailField()
to = forms.EmailField()
comments = forms.CharField(required=False,
widget=forms.Textarea)
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('name', 'email', 'body')
class SearchForm(forms.Form):
query = forms.CharField()
class PostForm(forms.ModelForm):
author = forms.ModelChoiceField(
widget=forms.HiddenInput,
        queryset=get_user_model().objects.all(),
disabled=True,
)
class Meta:
model = Post
fields = ('title', 'slug', 'author', 'body', 'status', 'tags')
widgets = {
'status': forms.RadioSelect(choices=Post.STATUS_CHOICES),
}
def __init__(self, *args, **kwargs):
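        # The calling view is expected to pass the current user as `author=`
        # when instantiating this form; it is popped from kwargs (and kept on
        # the form) before the parent constructor runs.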
self.user = kwargs.pop('author')
super(PostForm, self).__init__(*args, **kwargs)
| 2.296875
| 2
|
orion/core/operators/collect_wb_indicators_task.py
|
orion-search/orion-backend
| 19
|
12779029
|
"""
Collects indicators from the World Bank. Currently, we collect indicators from the following URLs:
- http://datatopics.worldbank.org/world-development-indicators/themes/economy.html#featured-indicators_1
- http://datatopics.worldbank.org/world-development-indicators/themes/states-and-markets.html#featured-indicators_1
- http://datatopics.worldbank.org/world-development-indicators/themes/global-links.html#featured-indicators_1
- http://datatopics.worldbank.org/world-development-indicators/themes/people.html#featured-indicators_1
We use pandas-datareader, a Python package that provides access to economic databases,
for this task, since it makes it straightforward to collect indicators by querying their unique codes.
Orion currently collects the following country-level indicators:
* GDP (current US$)
* Research and development expenditure (% of GDP)
* Government expenditure on education, total (% of GDP)
* Ratio of female to male labour force participation rate (%) (modelled ILO estimate)
Users can filter indicators by start and end year as well as country.
"""
from pandas_datareader import wb
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from orion.core.orms.mag_orm import (
WorldBankFemaleLaborForce,
WorldBankGovEducation,
WorldBankResearchDevelopment,
WorldBankGDP,
)
class WBIndicatorOperator(BaseOperator):
"""Fetches indicators from the World Bank."""
@apply_defaults
def __init__(
self,
db_config,
table_name,
indicator,
start_year,
end_year,
country,
*args,
**kwargs
):
super().__init__(**kwargs)
self.db_config = db_config
self.indicator = indicator
self.start_year = start_year
self.end_year = end_year
self.country = country
self.table_name = table_name
self.tables = {
"wb_gdp": WorldBankGDP,
"wb_edu_expenditure": WorldBankGovEducation,
"wb_rnd_expenditure": WorldBankResearchDevelopment,
"wb_female_workforce": WorldBankFemaleLaborForce,
}
def execute(self, context):
# Connect to postgresql db
engine = create_engine(self.db_config)
Session = sessionmaker(engine)
s = Session()
# Fetch WB Indicator
ind = wb.download(
indicator=self.indicator,
country=self.country,
start=self.start_year,
end=self.end_year,
)
# Store in DB
for (area, year), row in ind.iterrows():
s.add(
self.tables[self.table_name](
country=area, year=year, indicator=row[self.indicator]
)
)
s.commit()
| 2.765625
| 3
|
visualise/utilities/camera.py
|
snake-biscuits/bsp_tool_examples
| 0
|
12779030
|
# TODO:
# First-person and Third-person camera need update
# to receive information based on character motion
# An AI that interprets inputs into realistic camera motion would be cool
# Inputs should be typed (Cython)
#
# FIXED CAMERA with either:
# no rotation
# scripted rotation (i.e. security camera)
# player controlled rotation (i.e. PSX Era Square Enix Games)
#
# Sensitivity is changed by setting camera.sensitivity in main()
#
# Scripted Camera Motion?
#
# Per Camera FoV?
import math
from OpenGL.GL import *
from OpenGL.GLU import *
from sdl2 import *
from .vector import vec2, vec3
sensitivity = 0.25
class freecam:
"""Quake / Source Free Camera"""
def __init__(self, position, rotation, speed=0.75):
self.position = vec3(position) if position != None else vec3()
self.rotation = vec3(rotation) if rotation != None else vec3()
self.speed = speed
def update(self, mousepos, keys, dt): #diagonal?
global sensitivity
self.rotation.z = mousepos.x * sensitivity
self.rotation.x = mousepos.y * sensitivity
local_move = vec3()
local_move.x = ((SDLK_d in keys) - (SDLK_a in keys))
local_move.y = ((SDLK_w in keys) - (SDLK_s in keys))
local_move.z = ((SDLK_q in keys) - (SDLK_e in keys))
global_move = local_move.rotate(*-self.rotation)
self.position += global_move * self.speed * dt
def set(self):
glRotate(-90, 1, 0, 0)
try:
glRotate(self.rotation.x, 1, 0, 0)
except Exception as exc:
print(exc)
print(self.rotation)
glRotate(self.rotation.z, 0, 0, 1)
glTranslate(-self.position.x, -self.position.y, -self.position.z)
def __repr__(self):
pos = [round(x, 2) for x in self.position]
pos_string = str(pos)
rot = [round(x, 2) for x in self.rotation]
rot_string = str(rot)
v = round(self.speed, 2)
v_string = str(v)
return ' '.join([pos_string, rot_string, v_string])
class firstperson:
"""First Person Camera (ALL CLIENTS SHOULD HAVE ONE)"""
def __init__(self, rotation=None):
self.rotation = vec3(rotation) if rotation != None else vec3()
def update(self, mouse):
global sensitivity
self.rotation.z += mouse.x * sensitivity
self.rotation.x += mouse.y * sensitivity
def set(self, position):
## glRotate(-90, 1, 0, 0)
glRotate(self.rotation.x - 90, 1, 0, 0)
glRotate(self.rotation.z, 0, 0, 1)
glTranslate(-position.x, -position.y, -position.z)
class thirdperson:
"""Third Person Camera"""
#GDC 2014: 50 Game Camera Mistakes
#http://gdcvault.com/play/1020460/50-Camera
#https://www.youtube.com/watch?v=C7307qRmlMI
def __init__(self, position, rotation, radius, offset=(0, 0)):
self.position = vec3(position) if position != None else vec3()
self.rotation = vec3(rotation) if rotation != None else vec3()
self.radius = radius if radius != None else 0
self.offset = vec2(offset)
def update(self):
#take player, self and environment,
#adjust to more ideal camera position
#raycasts into world and path hints
#adjust all 7 axis
pass
def set(self):
glRotate(self.rotation.x, 1, 0, 0)
glRotate(self.rotation.y, 0, 1, 0)
glRotate(self.rotation.z, 0, 0, 1)
glTranslate(-self.position.x, -self.position.y, -self.position.z)
glTranslate(0, 0, -self.radius)
glTranslate(self.offset.x, self.offset.y, 0)
class fixed:
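    """Fixed camera: static position and rotation (see the FIXED CAMERA notes at the top of this module)"""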
def __init__(self, position, rotation):
self.position = vec3(position)
self.rotation = vec3(rotation)
def set(self):
glRotate(self.rotation.x - 90, 1, 0, 0)
glRotate(self.rotation.z, 0, 0, 1)
glTranslate(-self.position.x, -self.position.y, -self.position.z)
| 2.515625
| 3
|
myia/operations/macro_embed.py
|
strint/myia
| 222
|
12779031
|
"""Implementation of the 'embed' operation."""
from ..lib import Constant, SymbolicKeyInstance, macro, sensitivity_transform
@macro
async def embed(info, x):
"""Return a constant that embeds the identity of the input node."""
typ = sensitivity_transform(await x.get())
key = SymbolicKeyInstance(x.node, typ)
return Constant(key)
__operation_defaults__ = {
"name": "embed",
"registered_name": "embed",
"mapping": embed,
"python_implementation": None,
}
| 2.203125
| 2
|
python/test_vending_machine.py
|
objarni/VendingMachine-Approval-Kata
| 3
|
12779032
|
import pytest
from approvaltests import verify
from vending_machine import VendingMachine
from vending_machine_printer import VendingMachinePrinter
@pytest.fixture()
def machine():
return VendingMachine()
@pytest.fixture
def printer(machine):
return VendingMachinePrinter(machine)
@pytest.fixture
def coins():
COINS = {"penny": 1,
"nickel": 5,
"dime": 10,
"quarter": 25}
return COINS
def test_accept_coins(machine: VendingMachine, printer: VendingMachinePrinter, coins: dict):
# TODO: use the printer and approvaltests.verify instead of assertions
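    # One possible approach (hypothetical; assumes the printer renders the
    # machine state as text): build a report from the printer after each step
    # and call verify(report) at the end instead of the asserts below.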
assert machine.display == "INSERT COIN"
machine.insert_coin(coins["nickel"])
assert machine.balance == 5
assert machine.coins == [5]
assert machine.display == "5"
| 2.640625
| 3
|
variable_and_data_type/string_demo/multiline_string.py
|
pysga1996/python-basic-programming
| 0
|
12779033
|
# You can assign a multiline string to a variable by using three quotes:
a = """Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua."""
print(a)
# Or three single quotes:
a = '''Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua.'''
print(a)
# Note: in the result, the line breaks are inserted at the same position as in the code.
| 2.859375
| 3
|
api/migrations/0002_auto_20190805_1603.py
|
damianomiotek/best_transport_Poland
| 0
|
12779034
|
# Generated by Django 2.1.7 on 2019-08-05 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='extendaddress',
name='date',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='extendaddress',
name='hour',
field=models.TextField(max_length=50),
),
migrations.AlterField(
model_name='order',
name='date',
field=models.DateField(auto_now=True),
),
]
| 1.46875
| 1
|
scripts/tldr_analyze_nuggets.py
|
allenai/scitldr
| 628
|
12779035
|
"""
Some analysis of informational content of TLDR-Auth and TLDR-PR
"""
import os
import csv
from collections import Counter, defaultdict
INFILE = 'tldr_analyze_nuggets/tldr_auth_pr_gold_nuggets_2020-03-31.csv'
# Q1: How many nuggets do TLDRs contain?
# A: Interesting, both author and PR have nearly identical distributions:
# From most to least common: 3 nuggets -> 2 nuggets -> 4 nuggets -> 1 nugget -> ...
# Auth proportions: (34%) (26%) (18%) (11%)
# PR proportions: (32%) (30%) (26%) ( 9%)
author_num_nuggets_to_count = {i: 0 for i in range(0,7)}
pr_num_nuggets_to_count = {i: 0 for i in range(0,7)}
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
num_nuggets = sum(map(int, [row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif']]))
if row['auth_pr'] == 'auth_gold':
author_num_nuggets_to_count[num_nuggets] += 1
if row['auth_pr'] == 'pr_gold':
pr_num_nuggets_to_count[num_nuggets] += 1
print({k: f'{100*v/76:.2f}' for k, v in author_num_nuggets_to_count.items()})
print({k: f'{100*v/76:.2f}' for k, v in pr_num_nuggets_to_count.items()})
# Q2: What are the most common TLDR templates?
# A: Interesting, the top 2 templates (42 occurrences in total) are the same between Authors and PRs.
# a) (area_field_topic, mode_of_contrib, details_descrip)
# b) (area_field_topic, mode_of_contrib)
# After that, the next 3 start deviating a bit, but still with the same base:
# authors = (area_field_topic, mode_of_contrib, results_findings)
# (area_field_topic, problem_motivation, mode_of_contrib)
# (area_field_topic, mode_of_contrib, details_descrip, value_signif)
# pr = (area_field_topic, problem_motivation, mode_of_contrib, details_descrip)
# = (area_field_topic, details_descrip)
# = (area_field_topic, mode_of_contrib, results_findings) # same as top 3rd in Auth
author_template_to_count = Counter()
pr_template_to_count = Counter()
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
template = (row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif'])
if row['auth_pr'] == 'auth_gold':
author_template_to_count[template] += 1
if row['auth_pr'] == 'pr_gold':
pr_template_to_count[template] += 1
print(author_template_to_count.most_common())
print(pr_template_to_count.most_common())
# Q3: How often does 'area_field_topic' and 'mode_of_contrib' co-occur?
# n_auth = 48/76 = 63%
# n_pr = 54/76 = 71%
n_auth = 0
n_pr = 0
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
if row['area_field_topic'] == '1' and row['mode_of_contrib'] == '1':
if row['auth_pr'] == 'auth_gold':
n_auth += 1
if row['auth_pr'] == 'pr_gold':
n_pr += 1
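# Report the Q3 co-occurrence counts (the ratios quoted above assume 76 TLDRs
# per source).
print(n_auth, n_pr)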
# Q4: Find examples with exactly the same nuggets but different styles
#
# H1-IBSgMz
# B16yEqkCZ
# SySpa-Z0Z
# rJegl2C9K7
# HJWpQCa7z
# rkgpCoRctm
# rkxkHnA5tX
# B1e9csRcFm
# r1kj4ACp-
# Hk91SGWR-
# r1GaAjRcF7
# SkGMOi05FQ
#
pid_to_templates = defaultdict(set)
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
template = (row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif'])
pid_to_templates[row['paper_id']].add(template)
for pid, templates in pid_to_templates.items():
if len(templates) == 1:
print(pid)
| 2.765625
| 3
|
SubredditBirthdays/sb.py
|
voussoir/redd
| 444
|
12779036
|
import argparse
import bot3
import datetime
import praw3 as praw
import random
import sqlite3
import string
import subprocess
import sys
import time
import tkinter
import traceback
import types
from voussoirkit import betterhelp
from voussoirkit import mutables
from voussoirkit import operatornotify
from voussoirkit import pipeable
from voussoirkit import sqlhelpers
from voussoirkit import vlogging
log = vlogging.getLogger(__name__, 'sb')
USERAGENT = '''
/u/GoldenSights SubredditBirthdays data collection:
Gathering the creation dates of subreddits for visualization.
More at https://github.com/voussoir/reddit/tree/master/SubredditBirthdays
'''.replace('\n', ' ').strip()
LOWERBOUND_STR = '2qh0j'
LOWERBOUND_INT = 4594339
FORMAT_MEMBER = '{idstr:>5s}, {human}, {nsfw}, {name:<25s} {subscribers:>10,}'
FORMAT_MESSAGE_NEW = 'New: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers}'
FORMAT_MESSAGE_UPDATE = 'Upd: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers} ({subscriber_diff})'
RANKS_UP_TO = 20000
# For the files sorted by subscriber count, display ranks up to this many.
GOODCHARS = string.ascii_letters + string.digits + '_'
DB_INIT = '''
BEGIN;
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS subreddits(
idint INT,
idstr TEXT,
created INT,
human TEXT,
name TEXT,
nsfw INT,
subscribers INT,
jumble INT,
subreddit_type INT,
submission_type INT,
last_scanned INT
);
CREATE INDEX IF NOT EXISTS index_subreddits_idstr ON subreddits(idstr);
CREATE INDEX IF NOT EXISTS index_subreddits_name ON subreddits(name);
CREATE INDEX IF NOT EXISTS index_subreddits_created ON subreddits(created);
CREATE INDEX IF NOT EXISTS index_subreddits_subscribers ON subreddits(subscribers);
--CREATE INDEX IF NOT EXISTS index_subreddits_idint ON subreddits(idint);
--CREATE INDEX IF NOT EXISTS index_subreddits_last_scanned ON subreddits(last_scanned);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS suspicious(
idint INT,
idstr TEXT,
name TEXT,
subscribers INT,
noticed INT
);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS popular(
idstr TEXT,
last_seen INT
);
CREATE INDEX IF NOT EXISTS index_popular_idstr on popular(idstr);
CREATE INDEX IF NOT EXISTS index_popular_last_seen on popular(last_seen);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS jumble(
idstr TEXT,
last_seen INT
);
CREATE INDEX IF NOT EXISTS index_jumble_idstr on jumble(idstr);
CREATE INDEX IF NOT EXISTS index_jumble_last_seen on jumble(last_seen);
--------------------------------------------------------------------------------
COMMIT;
'''
sql = sqlite3.connect('D:\\git\\reddit\\subredditbirthdays\\sb.db')
sqlhelpers.executescript(conn=sql, script=DB_INIT)
cur = sql.cursor()
# These numbers are used for interpreting the tuples that come from SELECT
SQL_SUBREDDIT_COLUMNS = [
'idint',
'idstr',
'created',
'human',
'name',
'nsfw',
'subscribers',
'subreddit_type',
'submission_type',
'last_scanned',
]
SQL_SUSPICIOUS_COLUMNS = [
'idint',
'idstr',
'name',
'subscribers',
'noticed',
]
SQL_SUBREDDIT = {key: index for (index, key) in enumerate(SQL_SUBREDDIT_COLUMNS)}
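# e.g. row[SQL_SUBREDDIT['name']] pulls the display name out of a raw SELECT tuple.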
noinfolist = []
monthnumbers = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12',
}
SUBREDDIT_TYPE = {
'public': 0,
'restricted': 1,
'private': 2,
'archived': 3,
None: 4,
'employees_only': 5,
'gold_restricted': 6,
'gold_only': 7,
'user': 8,
}
SUBMISSION_TYPE = {
'any': 0,
'link': 1,
'self': 2,
None: 3,
}
SUBREDDIT_TYPE_REVERSE = {v: k for (k, v) in SUBREDDIT_TYPE.items()}
SUBMISSION_TYPE_REVERSE = {v: k for (k, v) in SUBMISSION_TYPE.items()}
SUBMISSION_OBJ = praw.objects.Submission
SUBREDDIT_OBJ = praw.objects.Subreddit
COMMENT_OBJ = praw.objects.Comment
r = None
def login():
global r
print('Logging in.')
r = praw.Reddit(USERAGENT)
bot3.login(r)
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
'''Converts an integer to a base36 string.'''
if not isinstance(number, (int)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
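# For example, b36(LOWERBOUND_INT) == LOWERBOUND_STR and
# b36(LOWERBOUND_STR) == LOWERBOUND_INT ('2qh0j' <-> 4594339).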
def chunklist(inputlist, chunksize):
if len(inputlist) < chunksize:
return [inputlist]
else:
outputlist = []
while len(inputlist) > 0:
outputlist.append(inputlist[:chunksize])
inputlist = inputlist[chunksize:]
return outputlist
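# For example, chunklist([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]].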
def completesweep(sleepy=0, orderby='subscribers desc', query=None):
cur = sql.cursor()
if query is None:
if orderby is None:
cur.execute('SELECT idstr FROM subreddits WHERE created > 0')
else:
cur.execute('SELECT idstr FROM subreddits WHERE created > 0 ORDER BY %s' % orderby)
elif query == 'restricted':
cur.execute('SELECT idstr FROM subreddits WHERE created > 0 AND subreddit_type != 0 ORDER BY subscribers DESC')
else:
cur.execute(query)
try:
while True:
hundred = (cur.fetchone() for x in range(100))
hundred = (row for row in hundred if row is not None)
hundred = [idstr for (idstr,) in hundred]
if len(hundred) == 0:
break
for retry in range(20):
try:
processmega(hundred, commit=False)
break
except Exception:
traceback.print_exc()
time.sleep(sleepy)
except KeyboardInterrupt:
pass
except Exception:
traceback.print_exc()
sql.commit()
def fetchgenerator(cur):
while True:
fetch = cur.fetchone()
if fetch is None:
break
yield fetch
def get_jumble_subreddits():
cur.execute('SELECT idstr FROM jumble')
fetch = [x[0] for x in cur.fetchall()]
fetch = ['\'%s\'' % x for x in fetch]
fetch = '(' + ','.join(fetch) + ')'
query = 'SELECT * FROM subreddits WHERE idstr IN %s' % fetch
cur.execute(query)
subreddits = cur.fetchall()
#subreddits = []
#for subreddit in fetch:
# cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [subreddit])
# subreddits.append(cur.fetchone())
return subreddits
def get_newest_sub():
brandnewest = list(r.get_new_subreddits(limit=1))[0]
return brandnewest.id
def get_now():
return datetime.datetime.now(datetime.timezone.utc).timestamp()
def humanize(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
def modernize(limit=None):
cur.execute('SELECT * FROM subreddits ORDER BY created DESC LIMIT 1')
finalitem = cur.fetchone()
print('Current final item:')
print(finalitem[SQL_SUBREDDIT['idstr']], finalitem[SQL_SUBREDDIT['human']], finalitem[SQL_SUBREDDIT['name']])
finalid = finalitem[SQL_SUBREDDIT['idint']]
print('Newest item:')
newestid = get_newest_sub()
print(newestid)
newestid = b36(newestid)
if limit is not None:
newestid = min(newestid, finalid+limit-1)
modernlist = [b36(x) for x in range(finalid, newestid+1)]
if len(modernlist) > 0:
processmega(modernlist, commit=False)
sql.commit()
def modernize_forever(limit=10000):
while True:
try:
modernize(limit=limit)
except Exception:
log.warning(traceback.format_exc())
time.sleep(300)
def modsfromid(subid):
if 't5_' not in subid:
subid = 't5_' + subid
subreddit = r.get_info(thing_id=subid)
mods = list(subreddit.get_moderators())
for m in mods:
print(m)
return mods
def normalize_subreddit_object(thing):
'''
Given a string, Subreddit, Submission, or Comment object, return
a Subreddit object.
'''
if isinstance(thing, SUBREDDIT_OBJ):
return thing
if isinstance(thing, str):
return r.get_subreddit(thing)
if isinstance(thing, (SUBMISSION_OBJ, COMMENT_OBJ)):
return thing.subreddit
raise ValueError('Dont know how to normalize', type(thing))
def process(
subreddit,
commit=True,
):
'''
Retrieve the API info for the subreddit and save it to the database
subreddit:
The subreddit(s) to process. Can be an individual or list of:
strings or Subreddit, Submission, or Comment objects.
'''
subreddits = []
processed_subreddits = []
if isinstance(subreddit, (tuple, list, set, types.GeneratorType)):
subreddits = iter(subreddit)
else:
subreddits = [subreddit]
for subreddit in subreddits:
subreddit = normalize_subreddit_object(subreddit)
processed_subreddits.append(subreddit)
created = subreddit.created_utc
created_human = humanize(subreddit.created_utc)
idstr = subreddit.id
is_nsfw = int(subreddit.over18 or 0)
name = subreddit.display_name
subscribers = subreddit.subscribers or 0
subreddit_type = SUBREDDIT_TYPE[subreddit.subreddit_type]
submission_type = SUBMISSION_TYPE[subreddit.submission_type]
now = int(get_now())
cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [idstr])
f = cur.fetchone()
if f is None:
message = FORMAT_MESSAGE_NEW.format(
idstr=idstr,
human=created_human,
nsfw=is_nsfw,
name=name,
subscribers=subscribers,
)
print(message)
data = {
'idint': b36(idstr),
'idstr': idstr,
'created': created,
'human': created_human,
'nsfw': is_nsfw,
'name': name,
'subscribers': subscribers,
'subreddit_type': subreddit_type,
'submission_type': submission_type,
'last_scanned': now,
}
(qmarks, bindings) = sqlhelpers.insert_filler(SQL_SUBREDDIT_COLUMNS, data)
query = 'INSERT INTO subreddits VALUES(%s)' % qmarks
cur.execute(query, bindings)
else:
old_subscribers = f[SQL_SUBREDDIT['subscribers']]
subscriber_diff = subscribers - old_subscribers
if subscribers == 0 and old_subscribers > 2 and subreddit_type != SUBREDDIT_TYPE['private']:
print('SUSPICIOUS %s' % name)
data = {
'idint': b36(idstr),
'idstr': idstr,
'name': name,
'subscribers': old_subscribers,
'noticed': int(get_now()),
}
(qmarks, bindings) = sqlhelpers.insert_filler(SQL_SUSPICIOUS_COLUMNS, data)
query = 'INSERT INTO suspicious VALUES(%s)' % qmarks
cur.execute(query, bindings)
message = FORMAT_MESSAGE_UPDATE.format(
idstr=idstr,
human=created_human,
nsfw=is_nsfw,
name=name,
subscribers=subscribers,
subscriber_diff=subscriber_diff
)
print(message)
data = {
'idstr': idstr,
'subscribers': subscribers,
'subreddit_type': subreddit_type,
'submission_type': submission_type,
'last_scanned': now,
}
(query, bindings) = sqlhelpers.update_filler(data, where_key='idstr')
query = 'UPDATE subreddits %s' % query
cur.execute(query, bindings)
#cur.execute('''
# UPDATE subreddits SET
# subscribers = @subscribers,
# subreddit_type = @subreddit_type,
# submission_type = @submission_type,
# last_scanned = @last_scanned
# WHERE idstr == @idstr
# ''', data)
processed_subreddits.append(subreddit)
if commit:
sql.commit()
return processed_subreddits
def process_input():
while True:
x = input('p> ')
try:
process(x)
except KeyboardInterrupt:
break
except Exception:
traceback.print_exc()
def processmega(srinput, isrealname=False, chunksize=100, docrash=False, commit=True):
'''
`srinput` can be a list of subreddit IDs or fullnames, or display names
if `isrealname` is also True.
isrealname:
Interpret `srinput` as a list of actual subreddit names, not IDs.
chunksize:
The number of fullnames to get from api/info at once.
docrash:
If False, ignore HTTPExceptions and keep moving forward.
'''
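    # Example (illustrative IDs): processmega(['2qh0j', '2qh0k']) fetches both
    # subreddits in a single api/info request; the 't5_' prefix is added below.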
global noinfolist
if type(srinput) == str:
srinput = srinput.replace(' ', '')
srinput = srinput.split(',')
if isrealname:
for subname in srinput:
process(subname)
return
processed_subreddits = []
remaining = len(srinput)
for x in range(len(srinput)):
if 't5_' not in srinput[x]:
srinput[x] = 't5_' + srinput[x]
srinput = chunklist(srinput, chunksize)
for subset in srinput:
try:
print(subset[0] + ' - ' + subset[-1], remaining)
subreddits = r.get_info(thing_id=subset)
try:
for sub in subreddits:
processed_subreddits.extend(process(sub, commit=commit))
except TypeError:
traceback.print_exc()
noinfolist = subset[:]
if len(noinfolist) == 1:
print('Received no info. See variable `noinfolist`')
else:
#for item in noinfolist:
# processmega([item])
pass
remaining -= len(subset)
except praw.errors.HTTPException as e:
traceback.print_exc()
print(vars(e))
if docrash:
raise
return processed_subreddits
def processrand(count, doublecheck=False, sleepy=0):
'''
    Gets random IDs between a known lower bound and the newest collection, and
    passes them into processmega().
count:
How many you want
doublecheck:
Should it reroll duplicates before running
sleepy:
Used to sleep longer than the required 2 seconds
'''
lower = LOWERBOUND_INT
cur.execute('SELECT * FROM subreddits ORDER BY idstr DESC LIMIT 1')
upper = cur.fetchone()[SQL_SUBREDDIT['idstr']]
print('<' + b36(lower) + ',', upper + '>', end=', ')
upper = b36(upper)
totalpossible = upper - lower
print(totalpossible, 'possible')
rands = set()
for x in range(count):
rand = random.randint(lower, upper)
rand = b36(rand)
if doublecheck:
while rand in rands:
rand = random.randint(lower, upper)
rand = b36(rand)
rands.add(rand)
processmega(rands)
def show():
file_all_time = open('show\\all-time.txt', 'w')
file_all_name = open('show\\all-name.txt', 'w')
file_all_subscribers = open('show\\all-subscribers.txt', 'w')
file_dirty_time = open('show\\dirty-time.txt', 'w')
file_dirty_name = open('show\\dirty-name.txt', 'w')
file_dirty_subscribers = open('show\\dirty-subscribers.txt', 'w')
file_jumble_sfw = open('show\\jumble.txt', 'w')
file_jumble_nsfw = open('show\\jumble-nsfw.txt', 'w')
file_duplicates = open('show\\duplicates.txt', 'w')
file_missing = open('show\\missing.txt', 'w')
file_stats = open('show\\statistics.txt', 'w')
file_readme = open('README.md', 'r')
cur.execute('SELECT COUNT(idstr) FROM subreddits WHERE created != 0')
itemcount_valid = cur.fetchone()[0]
itemcount_nsfw = 0
name_lengths = {}
print(itemcount_valid, 'subreddits')
print('Writing time files.')
cur.execute('SELECT * FROM subreddits WHERE created !=0 ORDER BY created ASC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
print(itemf, file=file_all_time)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_time)
itemcount_nsfw += 1
file_all_time.close()
file_dirty_time.close()
print('Writing name files and duplicates.')
previousitem = None
inprogress = False
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY LOWER(name) ASC')
for item in fetchgenerator(cur):
if previousitem is not None and item[SQL_SUBREDDIT['name']] == previousitem[SQL_SUBREDDIT['name']]:
print(memberformat(previousitem), file=file_duplicates)
inprogress = True
elif inprogress:
print(memberformat(previousitem), file=file_duplicates)
inprogress = False
previousitem = item
name_length = len(item[SQL_SUBREDDIT['name']])
name_lengths[name_length] = name_lengths.get(name_length, 0) + 1
itemf = memberformat(item)
print(itemf, file=file_all_name)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_name)
file_duplicates.close()
file_all_name.close()
file_dirty_name.close()
name_lengths = {'%02d'%k: v for (k, v) in name_lengths.items()}
print('Writing subscriber files.')
ranks = {'all': 1, 'nsfw': 1}
def write_with_rank(itemf, ranktype, filehandle):
index = ranks[ranktype]
if index <= RANKS_UP_TO:
itemf += '{:>9,}'.format(index)
print(itemf, file=filehandle)
ranks[ranktype] += 1
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY subscribers DESC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
write_with_rank(itemf, 'all', file_all_subscribers)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
write_with_rank(itemf, 'nsfw', file_dirty_subscribers)
file_all_subscribers.close()
file_dirty_subscribers.close()
print('Writing jumble.')
for item in get_jumble_subreddits():
itemf = memberformat(item)
if int(item[SQL_SUBREDDIT['nsfw']]) == 0:
print(itemf, file=file_jumble_sfw)
else:
print(itemf, file=file_jumble_nsfw)
file_jumble_sfw.close()
file_jumble_nsfw.close()
print('Writing missing.')
cur.execute('SELECT * FROM subreddits WHERE created == 0 ORDER BY idstr ASC')
for item in fetchgenerator(cur):
print(item[SQL_SUBREDDIT['idstr']], file=file_missing)
file_missing.close()
print('Writing statistics.')
headline = 'Collected {0:,} subreddits\n'.format(itemcount_valid)
statisticoutput = headline + '\n\n'
statisticoutput += ' SFW: {0:,}\n'.format(itemcount_valid - itemcount_nsfw)
statisticoutput += 'NSFW: {0:,}\n\n\n'.format(itemcount_nsfw)
statisticoutput += 'Subreddit type:\n'
subreddit_types = list(SUBREDDIT_TYPE_REVERSE.keys())
subreddit_types.sort()
subreddit_types = [SUBREDDIT_TYPE_REVERSE[k] for k in subreddit_types]
for subreddit_type in subreddit_types:
index = SUBREDDIT_TYPE[subreddit_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND subreddit_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(subreddit_type), count)
statisticoutput += '\n'
statisticoutput += 'Submission type (None means approved submitters only or inaccessible):\n'
submission_types = list(SUBMISSION_TYPE_REVERSE.keys())
submission_types.sort()
submission_types = [SUBMISSION_TYPE_REVERSE[k] for k in submission_types]
for submission_type in submission_types:
index = SUBMISSION_TYPE[submission_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND submission_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(submission_type), count)
statisticoutput += '\n\n'
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY created DESC limit 20000')
last20k = cur.fetchall()
timediff = last20k[0][SQL_SUBREDDIT['created']] - last20k[-1][SQL_SUBREDDIT['created']]
statisticoutput += 'Over the last 20,000 subreddits:\n'
statisticoutput += '%.2f subs are created each hour\n' % (20000 / (timediff/3600))
statisticoutput += '%.2f subs are created each day\n\n\n' % (20000 / (timediff/86400))
################################
# Breakdown by time period
# hour of day, day of week, day of month, month of year, month-year, year
def datetimedict(statsdict, strf):
statsdict[strf] = statsdict.get(strf, 0) + 1
hoddict = {}
dowdict = {}
domdict = {}
moydict = {}
myrdict = {}
yerdict = {}
print(' performing time breakdown')
cur.execute('SELECT * FROM subreddits WHERE created != 0')
for item in fetchgenerator(cur):
dt = datetime.datetime.utcfromtimestamp(item[SQL_SUBREDDIT['created']])
datetimedict(hoddict, dt.strftime('%H')) # 01
datetimedict(dowdict, dt.strftime('%A')) # Monday
datetimedict(domdict, dt.strftime('%d')) # 01
datetimedict(moydict, dt.strftime('%B')) # January
datetimedict(myrdict, dt.strftime('%b%Y')) # Jan2015
datetimedict(yerdict, dt.strftime('%Y')) # 2015
print(' forming columns')
plotnum = 0
mapping = [
{'label': 'hour of day', 'specialsort': None, 'dict': hoddict},
{'label': 'day of week', 'specialsort': 'day', 'dict': dowdict},
{'label': 'day of month', 'specialsort': None, 'dict': domdict},
{'label': 'month of year', 'specialsort': 'month', 'dict': moydict},
{'label': 'year', 'specialsort': None, 'dict': yerdict},
{'label': 'month-year', 'specialsort': 'monthyear', 'dict': myrdict},
{'label': 'name length', 'specialsort': None, 'dict': name_lengths},
]
for (index, collection) in enumerate(mapping):
d = collection['dict']
dkeys_primary = list(d.keys())
dkeys_primary.sort(key=d.get)
dkeys_secondary = specialsort(dkeys_primary, collection['specialsort'])
dvals = [d[x] for x in dkeys_secondary]
statisticoutput += collection['label'] + '\n'
for (keyindex, key) in enumerate(dkeys_primary):
val = d[key]
val = '{0:,}'.format(val)
spacer = 34 - (len(key) + len(val))
spacer = '.' * spacer
statisticoutput += key + spacer + val
statisticoutput += ' ' * 8
key = dkeys_secondary[keyindex]
val = d[key]
val = '{0:,}'.format(val)
spacer = 34 - (len(key) + len(val))
spacer = '.' * spacer
statisticoutput += key + spacer + val
statisticoutput += '\n'
statisticoutput += '\n'
if d is name_lengths:
upperlabel = 'Name Lengths'
else:
upperlabel = 'Subreddits created - %s' % collection['label']
plotbars(
filename=upperlabel,
upperlabel=upperlabel,
inputdata=[dkeys_secondary, dvals],
colormid='#43443a',
forcezero=True,
)
plotnum += 1
if d is myrdict:
# In addition to the total month graph, plot the last 15 months
plotbars(
filename=upperlabel + ' short',
upperlabel=upperlabel + ' short',
inputdata=[dkeys_secondary[-15:], dvals[-15:]],
colorbg='#272822',
colorfg='#000',
colormid='#43443a',
forcezero=True,
)
plotnum += 1
#
# Breakdown by time period
################################
print(statisticoutput, file=file_stats)
file_stats.close()
print('Updating Readme')
readmelines = file_readme.readlines()
file_readme.close()
readmelines[3] = '#####' + headline
readmelines[5] = '#####[Today\'s jumble](http://reddit.com/r/%s)\n' % jumble(nsfw=False)
file_readme = open('README.md', 'w')
file_readme.write(''.join(readmelines))
file_readme.close()
time.sleep(2)
subprocess.call('PNGCREATOR.bat', shell=True, cwd='spooky')
print()
def memberformat(member):
member = FORMAT_MEMBER.format(
idstr=member[SQL_SUBREDDIT['idstr']],
human=member[SQL_SUBREDDIT['human']],
nsfw=member[SQL_SUBREDDIT['nsfw']],
name=member[SQL_SUBREDDIT['name']],
subscribers=member[SQL_SUBREDDIT['subscribers']],
)
return member
def dictadding(targetdict, item):
if item not in targetdict:
targetdict[item] = 1
else:
targetdict[item] = targetdict[item] + 1
return targetdict
def specialsort(inlist, mode=None):
if mode == 'month':
return [
'January',
'February',
'March', 'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December'
]
if mode == 'day':
return [
'Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday'
]
if mode == 'monthyear':
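        # Build a sortable key for labels like 'Jan2015': replace the month
        # abbreviation with its number ('012015'), then rotate the string so
        # the year's trailing digits lead. Since every year here starts with
        # '2', sorting these keys orders the labels chronologically.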
td = {}
for item in inlist:
nitem = item
nitem = item.replace(item[:3], monthnumbers[item[:3]])
nitem = nitem[3:] + nitem[:3]
td[item] = nitem
tdkeys = list(td.keys())
#print(td)
tdkeys.sort(key=td.get)
#print(tdkeys)
return tdkeys
if mode is None:
return sorted(inlist)
def search(
query='',
casesense=False,
filterout=[],
subscribers=0,
nsfwmode=2,
doreturn=False,
sort=None,
):
'''
Search for a subreddit by name
*str query = The search query
"query" = results where "query" is in the name
"*query" = results where "query" is at the end of the name
"query*" = results where "query" is at the beginning of the name
"*query*" = results where "query" is in the middle of the name
bool casesense = is the search case sensitive
list filterout = [list, of, words] to omit from search. Follows casesense
int subscribers = minimum number of subscribers
int nsfwmode =
0 - Clean only
1 - Dirty only
2 - All
int sort = The integer representing the sql column to sort by. Defaults
to no sort.
'''
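    # Example (illustrative): search('ask*', subscribers=1000,
    # sort=SQL_SUBREDDIT['subscribers']) prints subreddits whose names start
    # with 'ask' and have more than 1,000 subscribers, most-subscribed first.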
querys = ''.join([c for c in query if c in GOODCHARS])
queryx = '%%{term}%%'.format(term=querys)
if '!' in query:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys])
return cur.fetchone()
if nsfwmode in [0, 1]:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode])
else:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers])
results = []
if casesense is False:
querys = querys.lower()
filterout = [x.lower() for x in filterout]
if '*' in query:
positional = True
front = query[-1] == '*'
back = query[0] == '*'
if front and back:
mid = True
front = False
back = False
else:
mid = False
else:
positional = False
lenq = len(querys)
for item in fetchgenerator(cur):
name = item[SQL_SUBREDDIT['name']]
if casesense is False:
name = name.lower()
if querys not in name:
#print('%s not in %s' % (querys, name))
continue
if (positional and front) and (name[:lenq] != querys):
#print('%s not front %s (%s)' % (querys, name, name[:lenq]))
continue
if (positional and back) and (name[-lenq:] != querys):
#print('%s not back %s (%s)' % (querys, name, name[-lenq:]))
continue
if (positional and mid) and (querys not in name[1:-1]):
#print('%s not mid %s (%s)' % (querys, name, name[1:-1]))
continue
if any(filters in name for filters in filterout):
#print('%s not filter %s' % (querys, name))
continue
results.append(item)
if len(results) == 0:
if doreturn:
return []
else:
return
if sort is not None:
is_numeric = isinstance(results[0][sort], int)
if is_numeric:
results.sort(key=lambda x: x[sort], reverse=True)
else:
results.sort(key=lambda x: x[sort].lower())
if doreturn is True:
return results
else:
for item in results:
print(item)
def findwrong():
cur.execute('SELECT * FROM subreddits WHERE name != ?', ['?'])
fetch = cur.fetchall()
fetch.sort(key=lambda x: x[SQL_SUBREDDIT['idstr']])
#sorted by ID
fetch = fetch[25:]
pos = 0
wrongs = []
while pos < len(fetch)-5:
if fetch[pos][1] > fetch[pos+1][1]:
wrongs.append(str(fetch[pos-1]))
wrongs.append(str(fetch[pos]))
wrongs.append(str(fetch[pos+1]) + "\n")
pos += 1
for wrong in wrongs:
print(wrong)
def processjumble(count, nsfw=False):
for x in range(count):
sub = r.get_random_subreddit(nsfw=nsfw)
process(sub, commit=False)
last_seen = int(get_now())
cur.execute('SELECT * FROM jumble WHERE idstr == ?', [sub.id])
if cur.fetchone() is None:
cur.execute('INSERT INTO jumble VALUES(?, ?)', [sub.id, last_seen])
else:
cur.execute(
'UPDATE jumble SET last_seen = ? WHERE idstr == ?',
[sub.id, last_seen]
)
sql.commit()
def processpopular(count, sort='hot'):
subreddit = r.get_subreddit('popular')
if sort == 'hot':
submissions = subreddit.get_hot(limit=count)
elif sort == 'new':
submissions = subreddit.get_new(limit=count)
else:
raise ValueError(sort)
submissions = list(submissions)
subreddit_ids = list({submission.subreddit_id for submission in submissions})
subreddits = processmega(subreddit_ids, commit=False)
last_seen = int(get_now())
for subreddit in subreddits:
cur.execute('SELECT * FROM popular WHERE idstr == ?', [subreddit.id])
if cur.fetchone() is None:
cur.execute('INSERT INTO popular VALUES(?, ?)', [subreddit.id, last_seen])
else:
cur.execute(
'UPDATE popular SET last_seen = ? WHERE idstr == ?',
[last_seen, subreddit.id]
)
sql.commit()
def jumble(count=20, nsfw=False):
subreddits = get_jumble_subreddits()
if nsfw is not None:
subreddits = [x for x in subreddits if x[SQL_SUBREDDIT['nsfw']] == int(bool(nsfw))]
random.shuffle(subreddits)
subreddits = subreddits[:count]
subreddits = [f[:-1] for f in subreddits]
jumble_string = [x[SQL_SUBREDDIT['name']] for x in subreddits]
jumble_string = '+'.join(jumble_string)
output = [jumble_string, subreddits]
return jumble_string
def rounded(x, rounding=100):
return int(round(x/rounding)) * rounding
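# For example, rounded(1234) -> 1200 and rounded(1234, 10) -> 1230; note that round()
# uses banker's rounding, so rounded(1250) gives 1200 rather than 1300.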
def plotbars(
filename,
inputdata,
upperlabel='Subreddits created',
colorbg="#fff",
colorfg="#000",
colormid="#888",
forcezero=False,
):
'''
Create postscript vectors of data
filename = Name of the file without extension
inputdata = A list of two lists. First list has the x axis labels, second list
    has the y axis data. x label 14 corresponds to y datum 14, etc.
'''
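    # Hypothetical example of the expected shape (not from the original call sites):
    #   plotbars('by_month', [['Jan', 'Feb', 'Mar'], [120, 95, 140]])
    # draws three bars labelled Jan/Feb/Mar with heights 120, 95 and 140.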
print(' Printing', filename)
t=tkinter.Tk()
canvas = tkinter.Canvas(t, width=3840, height=2160, bg=colorbg)
canvas.pack()
#Y axis
canvas.create_line(430, 250, 430, 1755, width=10, fill=colorfg)
#X axis
canvas.create_line(430, 1750, 3590, 1750, width=10, fill=colorfg)
dkeys = inputdata[0]
dvals = inputdata[1]
entrycount = len(dkeys)
availablespace = 3140
availableheight= 1490
entrywidth = availablespace / entrycount
#print(dkeys, dvals, "Width:", entrywidth)
smallest = min(dvals)
bottom = int(smallest*0.75) - 5
bottom = 0 if bottom < 8 else rounded(bottom, 10)
if forcezero:
bottom = 0
largest = max(dvals)
top = int(largest + (largest / 5))
top = rounded(top, 10)
print(bottom, top)
span = top - bottom
perpixel = span / availableheight
curx = 445
cury = 1735
labelx = 420
labely = 255
#canvas.create_text(labelx, labely, text=str(top), font=("Consolas", 72), anchor="e")
labelspan = 130
canvas.create_text(175, 100, text=upperlabel, font=("Consolas", 72), anchor="w", fill=colorfg)
for x in range(12):
value = int(top -((labely - 245) * perpixel))
value = rounded(value, 10)
value = '{0:,}'.format(value)
canvas.create_text(labelx, labely, text=value, font=("Consolas", 72), anchor="e", fill=colorfg)
canvas.create_line(430, labely, 3590, labely, width=2, fill=colormid)
labely += labelspan
for entrypos in range(entrycount):
entry = dkeys[entrypos]
entryvalue = dvals[entrypos]
entryx0 = curx + 10
entryx1 = entryx0 + (entrywidth-10)
curx += entrywidth
entryy0 = cury
entryy1 = entryvalue - bottom
entryy1 = entryy1/perpixel
#entryy1 -= bottom
#entryy1 /= perpixel
entryy1 = entryy0 - entryy1
#print(perpixel, entryy1)
#print(entry, entryx0,entryy0, entryx1, entryy1)
canvas.create_rectangle(entryx0, entryy0, entryx1, entryy1, fill=colorfg, outline=colorfg)
font0x = entryx0 + (entrywidth / 2)
font0y = entryy1 - 5
font1y = 1760
entryvalue = round(entryvalue)
fontsize0 = len(str(entryvalue))
fontsize0 = round(entrywidth / fontsize0) + 3
fontsize0 = 100 if fontsize0 > 100 else fontsize0
fontsize1 = len(str(entry))
fontsize1 = round(1.5 * entrywidth / fontsize1) + 5
fontsize1 = 60 if fontsize1 > 60 else fontsize1
canvas.create_text(font0x, font0y, text=entryvalue, font=("Consolas", fontsize0), anchor="s", fill=colorfg)
canvas.create_text(font0x, font1y, text=entry, font=("Consolas", fontsize1), anchor="n", fill=colorfg)
canvas.update()
print(' Done')
canvas.postscript(file=f'spooky\\{filename}.ps', width=3840, height=2160)
t.geometry("1x1+1+1")
t.update()
t.destroy()
def _idle():
while True:
try:
modernize()
processpopular(100, 'new')
processjumble(30, nsfw=False)
processjumble(30, nsfw=True)
print('Great job!')
except Exception:
traceback.print_exc()
time.sleep(180)
# Command line #####################################################################################
DOCSTRING = '''
Subreddit Birthdays
===================
{modernize_forever}
{modernize_once}
'''
SUB_DOCSTRINGS = dict(
modernize_forever='''
modernize_forever:
Gather new subreddits forever.
''',
modernize_once='''
modernize_once:
Gather new subreddits once.
''',
)
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
NOTIFY_EVERY_LINE = mutables.Boolean(False)
@pipeable.ctrlc_return1
def modernize_once_argparse(args):
login()
modernize(limit=args.limit)
return 0
@pipeable.ctrlc_return1
def modernize_forever_argparse(args):
login()
NOTIFY_EVERY_LINE.set(True)
modernize_forever()
return 0
@operatornotify.main_decorator(subject='sb', notify_every_line=NOTIFY_EVERY_LINE)
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=DOCSTRING)
subparsers = parser.add_subparsers()
p_modernize_once = subparsers.add_parser('modernize_once', aliases=['modernize-once'])
p_modernize_once.add_argument('--limit', default=None)
p_modernize_once.set_defaults(func=modernize_once_argparse)
p_modernize_forever = subparsers.add_parser('modernize_forever', aliases=['modernize-forever'])
p_modernize_forever.set_defaults(func=modernize_forever_argparse)
return betterhelp.subparser_main(argv, parser, DOCSTRING, SUB_DOCSTRINGS)
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
| 2.265625
| 2
|
src/CiteSoftLocal.py
|
tsikes/Frhodo
| 3
|
12779037
|
<gh_stars>1-10
from __future__ import print_function
from datetime import datetime
#import yaml #assume these are not available.
#import semantic_version #assume these are not available.
import re
import sys
import os
def eprint(*args, **kwargs):#Print to stderr
print(*args, file=sys.stderr, **kwargs)
citations_dict = {}
checkpoint_log_filename = "CiteSoftwareCheckpointsLog.txt"
consolidated_log_filename = "CiteSoftwareConsolidatedLog.txt"
validate_on_fly = True#Flag. If set to true, argument names will be checked in real time, and invalid argument names will result in a printed warning to the user
valid_optional_fields = ["version", "cite", "author", "doi", "url", "encoding", "misc"]
valid_required_fields = ['timestamp', 'unique_id', 'software_name']
#The module_call_cite function is intended to be used as a decorator.
#It is similar to the example "decorator_maker_with_arguments" at https://www.datacamp.com/community/tutorials/decorators-python
#To find the example, search for "decorator_maker_with_arguments" at the above link.
#function "inner" below is named 'decorator' in the above link and 'wrapper' below is named 'wrapper' in the above link.
def module_call_cite(unique_id, software_name, write_immediately=False, **add_args):
#the unique_id and the software_name are the only truly required args.
#Optional args are: ["version", "cite", "author", "doi", "url", "encoding", "misc"]
#Every arg must be a string.
def inner(func):
def wrapper(*args, **kwargs):
add_citation(unique_id, software_name, write_immediately, **add_args)
result = func(*args, **kwargs)
return result
return wrapper
return inner
#The after_call_compile_checkpoints_log function is intended to be used as a decorator.
#It is similar to the example "decorator_maker_with_arguments" at https://www.datacamp.com/community/tutorials/decorators-python
#To find the example, search for "decorator_maker_with_arguments" at the above link.
#function "inner" below is named 'decorator' in the above link and 'wrapper' below is named 'wrapper' in the above link.
def after_call_compile_checkpoints_log(file_path="", empty_checkpoints=True):
def inner(func):
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
compile_checkpoints_log(file_path=file_path, empty_checkpoints=empty_checkpoints)
return result
return wrapper
return inner
#The after_call_compile_consolidated_log function is intended to be used as a decorator.
#It is similar to the example "decorator_maker_with_arguments" at https://www.datacamp.com/community/tutorials/decorators-python
#To find the example, search for "decorator_maker_with_arguments" at the above link.
#function "inner" below is named 'decorator' in the above link and 'wrapper' below is named 'wrapper' in the above link.
def after_call_compile_consolidated_log(file_path="", compile_checkpoints=True):
def inner(func):
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
compile_consolidated_log(file_path=file_path, compile_checkpoints=compile_checkpoints)
return result
return wrapper
return inner
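#A minimal usage sketch (module and function names here are hypothetical), following
#the decorator descriptions above:
#
#    @module_call_cite(unique_id="https://example.org/my_tool", software_name="MyTool",
#                      version="1.0", author="Jane Doe")
#    def simulate():
#        pass
#
#    @after_call_compile_checkpoints_log()
#    def main():
#        simulate()
#
#Calling main() would record the citation when simulate() runs and then append it to
#CiteSoftwareCheckpointsLog.txt once main() returns.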
#The import_cite function is intended to be used at the top of a sofware module.
def import_cite(unique_id, software_name, write_immediately=False, **kwargs):
add_citation(unique_id, software_name, write_immediately, **kwargs)
#The add_citation function is the method which actually adds a citation.
def add_citation(unique_id, software_name, write_immediately=False, **kwargs):
new_entry = {'unique_id' : unique_id, 'software_name' : software_name, 'timestamp' : get_timestamp()}
for key in kwargs:
if validate_on_fly:
            if key not in valid_optional_fields:
eprint("Warning, " + key + " is not an officially supported argument name. Use of alternative argument names is strongly discouraged.")
if type(kwargs[key]) is not list:#Make sure single optional args are wrapped in a list
kwargs[key] = [kwargs[key]]
new_entry[key] = kwargs[key]
if unique_id in citations_dict:#Check for duplicate entries(e.g. from calling the same function twice)
citations_dict[unique_id] = compare_same_id(citations_dict[unique_id], new_entry)
else:
citations_dict[unique_id] = new_entry
if write_immediately == True:
compile_checkpoints_log()
def compile_checkpoints_log(file_path="", empty_checkpoints=True):
with open(file_path + checkpoint_log_filename, 'a') as file:
write_dict_to_output(file, citations_dict)
if empty_checkpoints==True:
citations_dict.clear()
def compile_consolidated_log(file_path="", compile_checkpoints=True):
if compile_checkpoints == True:
compile_checkpoints_log()
print("Warning: CiteSoftLocal cannot make a consolidated log. Citations have been exported to CiteSoftwareCheckpointsLog.txt")
# consolidated_dict = {}
# if consolidated_log_filename in os.listdir(): #check if the file exists already.
# consolidated_log_exists = True
# else:
# consolidated_log_exists = False
# if consolidated_log_exists == True: #can only read file if it exists.
# with open(file_path + consolidated_log_filename, "r") as file:
# yaml_file_contents = yaml.safe_load_all(file)
# for yaml_document in yaml_file_contents:
# if yaml_document != None: #This is for 'blank' documents of "---" with nothing after that symbol.
# for citation_entry in yaml_document:
# id = citation_entry["unique_id"]
# if id in consolidated_dict:
# consolidated_dict[id] = compare_same_id(consolidated_dict[id], citation_entry)
# else:
# consolidated_dict[id] = citation_entry
# if checkpoint_log_filename in os.listdir(): #check if the file exists already.
# checkpoint_log_exists = True
# else:
# checkpoint_log_exists = False
# if checkpoint_log_exists == True: #can only read file if it exists.
# with open(checkpoint_log_filename, 'r') as file:
# yaml_file_contents = yaml.safe_load_all(file)
# for yaml_document in yaml_file_contents:
# if yaml_document != None: #This is for 'blank' documents of "---" with nothing after that symbol.
# for citation_entry in yaml_document:
# id = citation_entry["unique_id"]
# if id in consolidated_dict:
# consolidated_dict[id] = compare_same_id(consolidated_dict[id], citation_entry)
# else:
# consolidated_dict[id] = citation_entry
# with open(consolidated_log_filename, 'w') as file:
# write_dict_to_output(file, consolidated_dict)
#Takes a dictionary, converts it to CiteSoft-compatible YAML, and writes it to file
def write_dict_to_output(file, dictionary):
file.write('---\n')
for key,dict in dictionary.items():
file.write('-\n')
for s in valid_required_fields:
file.write(' ' + s + ': >-\n')
file.write(' '*2 + dict[s] + '\n')
for subkey in dict:
if subkey not in valid_required_fields:
file.write(' ' + subkey + ':\n')
if type(dict[subkey]) is list:
for i in dict[subkey]:
file.write(' '*2 + '- >-\n')
file.write(' '*3 + i + '\n')
else:
file.write(' '*2 + '- >-\n')
file.write(' '*3 + dict[subkey] + '\n')
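#For illustration, each entry written above is a small YAML-style document of roughly
#this shape (exact indentation depends on the literals used in the function, and the
#values here are hypothetical):
#    ---
#    -
#     timestamp: >-
#      2023-01-01T12:00:00
#     unique_id: >-
#      https://example.org/my_tool
#     software_name: >-
#      MyTool
#     version:
#      - >-
#       1.0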
#Helper Functions
#Returns a string of the current time in the ISO 8601 format (YYYY-MM-DDThh:mm:ss).
def get_timestamp():
now = datetime.now()
timestamp = now.strftime("%Y-%m-%dT%H:%M:%S")
return timestamp
#Compares two entries
#Returns : The entry which should be kept
def compare_same_id(old_entry, new_entry):
return new_entry #CiteSoftLocal will not do comparisons. It will just return the new_entry.
# old_has_version = "version" in old_entry
# new_has_version = "version" in new_entry
# if old_has_version and new_has_version:#If both entries have a version, compare them return and the return the greater(newer) version
# old_ver_str = str(old_entry["version"][0])
# new_ver_str = str(new_entry["version"][0])
# #Initialize variables, assume strings are valid unless parsing fails
# old_ver_semver_valid = True
# new_ver_semver_valid = True
# decimal_regex_str = "^[0-9]+\.[0-9]+$"#Match string with decimal point enclosed by at least one number on either side
# if re.match(decimal_regex_str, old_ver_str):
# old_ver_str += '.0'#To ensure semantic version parser handles a decimal value correctly
# if re.match(decimal_regex_str, new_ver_str):
# new_ver_str += '.0'#To ensure semantic version parser handles a decimal value correctly
# try:
# old_sv = semantic_version.Version(old_ver_str)
# except ValueError:
# old_ver_semver_valid = False
# try:
# new_sv = semantic_version.Version(new_ver_str)
# except:
# new_ver_semver_valid = False
# if old_ver_semver_valid and new_ver_semver_valid:#If both entries have a valid SemVer version, keep the older one only if it's greater. Else, keep the newer one.
# if old_sv > new_sv:
# return old_entry
# else:
# return new_entry
# elif old_ver_semver_valid:#If only the old entry has a valid SemVer version, keep it
# return old_entry
# elif new_ver_semver_valid:#If only the new entry has a valid SemVer version, keep it
# return new_entry
# else:
# #Version comparison failed, use alphanumeric comparison
# if old_ver_str > new_ver_str:
# return old_entry
# else:
# return new_entry
# elif old_has_version and not new_has_version:#If old entry has a version and the new entry doesn't, the entry with a version takes precedence
# return old_entry
# elif not old_has_version and new_has_version:#Likewise, if new entry has a version and the old entry doesn't, the entry with a version takes precedence
# return new_entry
# else:#If neither entry has a version, save the new entry
# return new_entry
| 2.625
| 3
|
src/cleantxt/__main__.py
|
jemiaymen/cleantxt
| 0
|
12779038
|
from cleantxt import text
from tqdm import tqdm
import argparse
import os
def rule(s):
try:
k, v = map(str, s.split(','))
return k, v
except:
raise argparse.ArgumentTypeError("Escape Rule must be key,value ")
def main():
parser = argparse.ArgumentParser(
        prog="cleantxt - cleaning text from noise, command line interface",
description="Arguments for cleantxt to clean document from noise (cleantxt)",
usage=""" cleantxt --doc=[path_to_doc]
--out=[path_out_file]
--f=[0]
--t=[100]
--do_lower=True
--white_space=True
--punctuation=True
--duplicated_chars=True
--alpha_num=True
--accent=True
--escape key,value ə,a œ,oe""",
allow_abbrev=False
)
parser.add_argument(
"--doc",
type=str,
        help="path of the document to clean",
required=True
)
parser.add_argument(
"--out",
default="out.txt",
type=str,
help="path of clean document (default out.txt)",
required=False
)
parser.add_argument(
"--f",
default=0,
type=int,
help="index of starting document (default 0)",
required=False
)
parser.add_argument(
"--t",
default=None,
type=int,
        help="index of the end of the document (default None, meaning the end of the document)",
required=False
)
parser.add_argument(
"--escape",
default=False,
type=rule,
help="Custom escape rules list with tuple k,v space k1,v1 ...",
required=False,
nargs='+'
)
parser.add_argument(
"--do_lower",
default=True,
type=bool,
help="Lower case all text (default True)",
required=False
)
parser.add_argument(
"--white_space",
default=True,
type=bool,
        help="Escape more than one space (default True)",
required=False
)
parser.add_argument(
"--punctuation",
default=False,
type=bool,
help="Escape punctuation (default False)",
required=False
)
parser.add_argument(
"--duplicated_chars",
default=False,
type=bool,
        help="Escape chars duplicated more than two times (default False)",
required=False
)
parser.add_argument(
"--alpha_num",
default=True,
type=bool,
help="Escape non alpha numeric chars (default True)",
required=False
)
parser.add_argument(
"--accent",
default=False,
type=bool,
help="Escape accents (default False)",
required=False
)
args = parser.parse_args()
if args.t:
if args.f > args.t:
            raise Exception("--f must be lower than --t")
if not os.path.exists(args.doc):
raise FileNotFoundError(
'document not exist : {}'.format(args.doc)
)
if os.path.splitext(args.doc)[1] not in ['.txt', '.tab']:
raise Exception(
            'file not accepted, please choose a (txt) or (tab) file'
)
file = open(args.doc, mode='r', encoding='utf8')
data = file.readlines()
file.close()
if args.t:
data_process = data[args.f:args.t]
else:
data_process = data
if args.escape:
escape = args.escape
else:
escape = None
with open(args.out, mode='w+', encoding='utf8') as out_file:
for x in tqdm(data_process, desc='clean document with cleantxt cli'):
out_file.write(
text.clean_text(
x,
whitespace=args.white_space,
punctuation=args.punctuation,
duplicated=args.duplicated_chars,
alphnum=args.alpha_num,
accent=args.accent,
others=escape
) + '\n'
)
if __name__ == '__main__':
main()
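# Example invocation (hypothetical file names), mirroring the usage string above:
#   python -m cleantxt --doc=corpus.txt --out=clean.txt --do_lower=True --escape ə,a œ,oe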
| 2.984375
| 3
|
Metodos_numericos/MinimosQuadrados/MinimosQuadrados.py
|
iOsnaaente/Faculdade_ECA-UFSM
| 0
|
12779039
|
<filename>Metodos_numericos/MinimosQuadrados/MinimosQuadrados.py<gh_stars>0
import matplotlib.pyplot as plt
from random import randint
from sympy import symbols
import numpy as np
from math import *
# First we define the symbolic variable that will be read: f(x) = x
x = symbols('x')
# Least squares function
def minimos(x, y, fun, val):
    q = len(fun)
    # Build A: A[i][j] = sum_k fun[i](x[k]) * fun[j](x[k])
    A = np.zeros((q, q), dtype='float')
    for i in range(q):
        for j in range(q):
            A[i][j] = 0
            for k in range(len(x)):
                A[i][j] = A[i][j] + fun[i](x[k])*fun[j](x[k])
    # Build b: b[i] = sum_j y[j] * fun[i](x[j])
    b = np.zeros(q, dtype='float')
    for i in range(q):
        b[i] = 0
        for j in range(len(x)):
            b[i] = b[i] + y[j]*fun[i](x[j])
    # Solve the normal equations A.a = b (np.linalg.solve is preferable to forming
    # inv(A); note that inv(A)*b would be an elementwise product, not a matrix solve)
    a = np.linalg.solve(A, b)
    # Evaluate the fitted function at val: yt = sum_i a[i]*fun[i](val)
    yt = 0
    for i in range(q):
        yt = yt + a[i]*fun[i](val)
    # Return the fitted coefficients and the fitted value at val
    return a, yt
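# The routine above solves the classic least-squares normal equations: minimising
#   S(a) = sum_k ( sum_i a_i*f_i(x_k) - y_k )**2
# leads to A.a = b with A[i][j] = sum_k f_i(x_k)*f_j(x_k) and b[i] = sum_k y_k*f_i(x_k).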
if __name__ == "__main__":
# INSTRUÇÕES
print(' #########################################################')
print(' ### EXEMPLO DE ENTRADA: ###')
print(' ### x**2 + 5*x - 4 ou cos(x)* x**2 ###')
print(' #########################################################\n')
'''
# Entrada da função escolhida para o método
print("Dê entrada com funções F1(x) , F2(x) , ... , Fn(x):")
nomes = input('\n').split('/')
funcoes = [lambda x: eval(i) for i in range(len(nomes))]
'''
funcao1 = 'x**2'
funcao2 = 'x'
funcao3 = '1'
nomes = [funcao1, funcao2, funcao3]
nomes = [ nomes[i].replace('**','^') for i in range(len(nomes))]
    # Apply the input value of x to the functions
f1 = lambda x: eval(funcao1)
f2 = lambda x: eval(funcao2)
f3 = lambda x: eval(funcao3)
funcoes = [f1,f2,f3]
#x = map(float, input("Entre com o vetor de X: ").split(' '))
#y = map(float, input("Entre com o vetor de Y: ").slipt(' '))
x = '-1; -0.75; -0.6; -0.5; -0.3; 0; 0.2; 0.4; 0.5; 0.7; 1'
y = '2.05; 1.153; 0.45; 0.40; 0.5; 0; 0.2; 0.6; 0.512; 1.2; 2.05'
x = (x.split(';'))
y = (y.split(';'))
x = [float(x[i]) for i in range(len(x))]
y = [float(y[i]) for i in range(len(y))]
    #xt = input("\nEnter a value of X: ")
    xt = 0.6
    # coef holds the fitted coefficients for [x**2, x, 1]; p is the fitted value at xt
    coef, p = minimos(x, y, funcoes, xt)
    print(p)
    plt.ylabel('f(x)')
    plt.xlabel('x')
    plt.title('Least squares method')
    xfuncMMQ = []
    yfuncMMQ = []
    for fx in np.linspace(round(x[0]), round(x[-1]), round((abs(x[0])+abs(x[-1])*100))):
        xfuncMMQ.append(fx)
        yfuncMMQ.append(coef[0]*fx**2 + coef[1]*fx + coef[2])
    plt.plot(x, y, 'o', label='Given points')
    plt.plot(xfuncMMQ, yfuncMMQ, '-', label='Fitted function %5.4fx² %5.4fx %5.4f' % (coef[0], coef[1], coef[2]))
plt.legend(fancybox = True)
plt.show()
| 3.515625
| 4
|
src/model.py
|
sarveshwar22/BISAG_Weather_Forecasting
| 0
|
12779040
|
import utils as util
import tensorflow as tf
import numpy as np
def forecast_model(series, time,forecastDays):
split_time=2555
time_train=time[:split_time]
x_train=series[:split_time]
split_time_test=3285
time_valid=time[split_time:split_time_test]
x_valid=series[split_time:split_time_test]
time_test=time[split_time_test:]
x_test=series[split_time_test:]
window_size=30
batch_size=32
shuffle_buffer_size=1000
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
train_set = util.windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)
valid_set=util.windowed_dataset(x_valid,window_size,batch_size,shuffle_buffer_size)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv1D(filters=60, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
tf.keras.layers.LSTM(60, return_sequences=True),
tf.keras.layers.LSTM(60, return_sequences=True),
tf.keras.layers.Dense(30, activation="relu"),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 400)
])
optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set,validation_data=(valid_set),epochs=5)
rnn_forecast = util.model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
mae=tf.keras.metrics.mean_absolute_error(x_test, rnn_forecast[:365]).numpy()
accuracy=100-mae
return (accuracy,mae,rnn_forecast[:forecastDays])
| 2.546875
| 3
|
magpylib/_src/obj_classes/class_BaseExcitations.py
|
OrtnerMichael/magPyLib
| 0
|
12779041
|
"""BaseHomMag class code
DOCSTRINGS V4 READY
"""
from magpylib._src.input_checks import check_format_input_scalar
from magpylib._src.input_checks import check_format_input_vector
class BaseHomMag:
"""provides the magnetization attribute for homogeneously magnetized magnets"""
def __init__(self, magnetization):
self.magnetization = magnetization
@property
def magnetization(self):
"""Object magnetization attribute getter and setter."""
return self._magnetization
@magnetization.setter
def magnetization(self, mag):
"""Set magnetization vector, array_like, shape (3,), unit [mT]."""
self._magnetization = check_format_input_vector(
mag,
dims=(1,),
shape_m1=3,
sig_name="magnetization",
sig_type="array_like (list, tuple, ndarray) with shape (3,)",
allow_None=True,
)
class BaseCurrent:
"""provides scalar current attribute"""
def __init__(self, current):
self.current = current
@property
def current(self):
"""Object current attribute getter and setter."""
return self._current
@current.setter
def current(self, current):
"""Set current value, scalar, unit [A]."""
# input type and init check
self._current = check_format_input_scalar(
current,
sig_name="current",
sig_type="`None` or a number (int, float)",
allow_None=True,
)
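# Illustrative only (not part of the original module): a magnet class mixing in these
# bases would be configured e.g. as  magnet.magnetization = (0, 0, 1000)  # [mT]
# or  loop.current = 1.5  # [A]; the setters above validate shape and type.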
| 2.640625
| 3
|
Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/03_formen_zeichnen/03_formen_zeichnen.py
|
Apop85/Scripts
| 0
|
12779042
|
<gh_stars>0
# 03_formen_zeichnen.py
# This exercise is about drawing shapes with the ImageDraw module
from PIL import Image, ImageDraw
import os
os.chdir(os.path.dirname(__file__))
target_file='.\\drawed_image.png'
if os.path.exists(target_file):
os.remove(target_file)
new_image=Image.new('RGBA', (200,200), 'white')
# Create the Draw object
draw=ImageDraw.Draw(new_image)
# The "fill" and "outline" parameters are optional; if they are omitted, ImageDraw uses white for them
# Draw points
point_coordinates=[(160,10),(160,30),(160,50),(160,70),(160,90)]
draw.point(point_coordinates, fill='black')
# Draw lines
line_coordinates=[(10,10),(10,60),(60,60)]
draw.line(line_coordinates, fill='black', width=5)
# Draw rectangles using a box tuple (left, top, right, bottom)
square_props=(100,100,150,150)
draw.rectangle(square_props, fill='red', outline='green')
# Draw ellipses using a box tuple
ellipse_props=(50,150,100,200)
draw.ellipse(ellipse_props, fill='blue', outline='magenta')
# Draw polygons
polygon_props=[(10,180), (30,170), (45,150), (25,145), (15,160)]
draw.polygon(polygon_props, fill='black')
for i in range(110, 200, 10):
line_coordinates=[(0,i),(i-100,200)]
draw.line(line_coordinates, fill='red', width=2)
new_image.save(target_file)
| 2.671875
| 3
|
experimentor/models/experiments/exceptions.py
|
aquilesC/experimentor
| 4
|
12779043
|
<reponame>aquilesC/experimentor
# ##############################################################################
# Copyright (c) 2021 <NAME>, Dispertech B.V. #
# exceptions.py is part of experimentor #
# This file is released under an MIT license. #
# See LICENSE.MD for more information. #
# ##############################################################################
from experimentor.models.exceptions import ModelException
class ExperimentException(ModelException):
pass
| 1.601563
| 2
|
server/main/views/importer.py
|
zhwycsz/edd
| 1
|
12779044
|
# coding: utf-8
"""
Views handling the legacy import to EDD.
"""
import json
import logging
import uuid
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.views import generic
from requests import codes
from edd.notify.backend import RedisBroker
from .. import models as edd_models
from .. import tasks
from ..importer import parser, table
from .study import StudyObjectMixin
logger = logging.getLogger(__name__)
# /study/<study_id>/import/
class ImportTableView(StudyObjectMixin, generic.DetailView):
def delete(self, request, *args, **kwargs):
study = self.object = self.get_object()
if not study.user_can_write(request.user):
# TODO: uncovered code
return HttpResponse(status=codes.forbidden)
# END uncovered code
# Note: we validate the input UUID to avoid exposing the capability to delete any
# arbitrary cache entry from redis. As a stopgap, we'll allow any authenticated user to
# delete the temporary cache for the import. we should revisit this when re-casting
# imports as REST resources. Low risk ATM for a user to delete someone else's WIP import,
# since they'd have to both catch it before it's processed AND have its UUID.
import_id = request.body.decode("utf-8")
try:
uuid.UUID(import_id)
except ValueError:
return HttpResponse(
f'Invalid import id "{import_id}"', status=codes.bad_request
)
try:
broker = table.ImportBroker()
broker.clear_pages(import_id)
return HttpResponse(status=codes.ok)
# TODO: uncovered code
except Exception as e:
logger.exception(f"Import delete failed: {e}")
# return error synchronously so it can be displayed right away in context.
# no need for a separate notification here
messages.error(request, str(e))
# END uncovered code
def get(self, request, *args, **kwargs):
# TODO: uncovered code
study = self.object = self.get_object()
user_can_write = study.user_can_write(request.user)
# FIXME protocol display on import page should be an autocomplete
protocols = edd_models.Protocol.objects.order_by("name")
return render(
request,
"main/import.html",
context={
"study": study,
"protocols": protocols,
"writable": user_can_write,
"import_id": uuid.uuid4(),
"page_size_limit": settings.EDD_IMPORT_PAGE_SIZE,
"page_count_limit": settings.EDD_IMPORT_PAGE_LIMIT,
},
)
# END uncovered code
def _parse_payload(self, request):
# init storage for task and parse request body
broker = table.ImportBroker()
payload = json.loads(request.body)
# check requested import parameters are acceptable
import_id = payload["importId"]
series = payload["series"]
pages = payload["totalPages"]
broker.check_bounds(import_id, series, pages)
# store the series of points for the task to read later
count = broker.add_page(import_id, json.dumps(series))
# only on the first page, store the import context
if payload["page"] == 1:
del payload["series"]
# include an update record from the original request
update = edd_models.Update.load_request_update(request)
payload.update(update_id=update.id)
broker.set_context(import_id, json.dumps(payload))
return import_id, count == pages
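    # For reference, a hypothetical request body parsed by _parse_payload above looks
    # roughly like {"importId": "<uuid4>", "page": 1, "totalPages": 2, "series": [...]};
    # only the first page stores the import context, later pages just append series data.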
def post(self, request, *args, **kwargs):
study = self.object = self.get_object()
try:
import_id, done = self._parse_payload(request)
if done:
# once all pages are parsed, submit task and send notification
logger.debug(f"Submitting Celery task for import {import_id}")
result = tasks.import_table_task.delay(
study.pk, request.user.pk, import_id
)
RedisBroker(request.user).notify(
_(
"Data is submitted for import. You may continue to use EDD, "
"another message will appear once the import is complete."
),
uuid=result.id,
)
return JsonResponse(data={}, status=codes.accepted)
# TODO: uncovered code
except table.ImportTooLargeException as e:
return HttpResponse(str(e), status=codes.request_entity_too_large)
except table.ImportBoundsException as e:
return HttpResponse(str(e), status=codes.bad_request)
except table.ImportException as e:
return HttpResponse(str(e), status=codes.server_error)
except RuntimeError as e:
logger.exception(f"Data import failed: {e}")
# return error synchronously so it can be displayed right away in context.
# no need for a separate notification here
messages.error(request, e)
# END uncovered
# /utilities/parsefile/
# To reach this function, files are sent from the client by the Utl.FileDropZone class (in Utl.ts).
def utilities_parse_import_file(request):
"""
Attempt to process posted data as either a TSV or CSV file or Excel spreadsheet and extract a
table of data automatically.
"""
file = request.FILES.get("file")
import_mode = request.POST.get("import_mode", parser.ImportModeFlags.STANDARD)
parse_fn = parser.find_parser(import_mode, file.content_type)
if parse_fn:
try:
result = parse_fn(file)
return JsonResponse(
{"file_type": result.file_type, "file_data": result.parsed_data}
)
# TODO: uncovered code
except Exception as e:
logger.exception(f"Import file parse failed: {e}")
return JsonResponse({"python_error": str(e)}, status=codes.server_error)
# END uncovered
return JsonResponse(
{
"python_error": _(
"The uploaded file could not be interpreted as either an Excel "
"spreadsheet or an XML file. Please check that the contents are "
"formatted correctly. (Word documents are not allowed!)"
)
},
status=codes.server_error,
)
| 2.0625
| 2
|
Tty except.py
|
dimagela29/Python-POO
| 0
|
12779045
|
# -*- coding: utf-8 -*-
"""Try except.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/14WiGGPNcZvzQap4fhgTkWTKv3ybjJnvc
"""
try:
a = "Curso python Orientado a objetos"
print(a)
except NameError as erro:
print('Erro do desenvolvedor, fale com ele')
except(IndexError, KeyError) as erro:
print('Erro de indice ou chave')
except Exception as erro:
print('Ocorreu um erro inesperado')
else:
print('Seu código foi executado com sucesso')
print(a)
finally:
print('Finalmente')
print('Bora continuar')
| 2.96875
| 3
|
yardstick/vTC/apexlake/tests/api_test.py
|
alexnemes/yardstick_enc
| 1
|
12779046
|
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import mock
import os
import experimental_framework.common as common
from experimental_framework import APEX_LAKE_ROOT
from experimental_framework.api import FrameworkApi
from experimental_framework.benchmarking_unit import BenchmarkingUnit
import experimental_framework.benchmarks.\
instantiation_validation_benchmark as iv
from six.moves import map
from six.moves import range
class DummyBenchmarkingUnit(BenchmarkingUnit):
def __init__(self):
BenchmarkingUnit.__init__(self)
@staticmethod
def get_available_test_cases():
return ['BenchA', 'BenchB']
@staticmethod
def get_required_benchmarks(required_benchmarks):
common.BASE_DIR = "base_dir/"
return [iv.InstantiationValidationBenchmark('benchmark', dict())]
class DummyBenchmarkingUnit2(BenchmarkingUnit):
counter_init = 0
counter_finalize = 0
counter_run = 0
def __init__(self, base_heat_template, credentials,
heat_template_parameters, iterations, test_cases):
        DummyBenchmarkingUnit2.counter_init = 0
        DummyBenchmarkingUnit2.counter_finalize = 0
        DummyBenchmarkingUnit2.counter_run = 0
def initialize(self):
DummyBenchmarkingUnit2.counter_init += 1
def run_benchmarks(self):
DummyBenchmarkingUnit2.counter_run += 1
def finalize(self):
DummyBenchmarkingUnit2.counter_finalize += 1
class TestGeneratesTemplate(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch('experimental_framework.common.init')
def test_init_for_success(self, mock_init):
FrameworkApi.init()
mock_init.assert_called_once_with(api=True)
# @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
# 'get_available_test_cases',
# side_effect=DummyBenchmarkingUnit.get_available_test_cases)
# def test_get_available_test_cases_for_success(self, mock_bench):
# expected = ['BenchA', 'BenchB']
# output = FrameworkApi.get_available_test_cases()
# self.assertEqual(expected, output)
@mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
'get_required_benchmarks',
side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
def test_get_test_case_features_for_success(self, mock_get_req_bench):
expected = dict()
expected['description'] = 'Instantiation Validation Benchmark'
expected['parameters'] = [
iv.THROUGHPUT,
iv.VLAN_SENDER,
iv.VLAN_RECEIVER]
expected['allowed_values'] = dict()
expected['allowed_values'][iv.THROUGHPUT] = \
list(map(str, list(range(0, 100))))
expected['allowed_values'][iv.VLAN_SENDER] = \
list(map(str, list(range(-1, 4096))))
expected['allowed_values'][iv.VLAN_RECEIVER] = \
list(map(str, list(range(-1, 4096))))
expected['default_values'] = dict()
expected['default_values'][iv.THROUGHPUT] = '1'
expected['default_values'][iv.VLAN_SENDER] = '-1'
expected['default_values'][iv.VLAN_RECEIVER] = '-1'
test_case = 'instantiation_validation_benchmark.' \
'InstantiationValidationBenchmark'
output = FrameworkApi.get_test_case_features(test_case)
self.assertEqual(expected, output)
def test__get_test_case_features__for_failure(self):
self.assertRaises(
ValueError, FrameworkApi.get_test_case_features, 111)
@mock.patch('experimental_framework.common.init')
@mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.common.get_credentials')
@mock.patch('experimental_framework.heat_template_generation.'
'generates_templates')
@mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit',
side_effect=DummyBenchmarkingUnit2)
def test_execute_framework_for_success(self, mock_b_unit, mock_heat,
mock_credentials, mock_log,
mock_common_init):
common.TEMPLATE_DIR = os.path.join(APEX_LAKE_ROOT,
'tests/data/generated_templates/')
test_cases = dict()
iterations = 1
heat_template = 'VTC_base_single_vm_wait.tmp'
heat_template_parameters = dict()
deployment_configuration = ''
openstack_credentials = dict()
openstack_credentials['ip_controller'] = ''
openstack_credentials['heat_url'] = ''
openstack_credentials['user'] = ''
openstack_credentials['password'] = ''
openstack_credentials['auth_uri'] = ''
openstack_credentials['project'] = ''
FrameworkApi.execute_framework(
test_cases, iterations, heat_template,
heat_template_parameters, deployment_configuration,
openstack_credentials)
| 1.960938
| 2
|
main.py
|
zThorn/Chip-8-Emulator
| 0
|
12779047
|
import pyglet
from emulator import emulator
def start(dt):
pyglet.clock.schedule_interval(emulator.main, 1/1000)
#need this for pyglet
def update(dt):
if emulator.cpu.opcode != 0x1210:
emulator.cpu.cycle()
else:
pyglet.clock.unschedule(update)
pyglet.clock.schedule_once(start, 3)
if __name__ == '__main__':
template = pyglet.gl.Config(double_buffer=True)
emulator = emulator(640, 320, config=template, caption="Chip-8 emulator")
emulator.loadROM('IBM.ch8')
pyglet.clock.schedule(update)
pyglet.app.run()
| 2.96875
| 3
|
lightconvpoint/utils/functional.py
|
valeoai/POCO
| 13
|
12779048
|
<filename>lightconvpoint/utils/functional.py<gh_stars>10-100
import torch
def batch_gather(input, dim, index):
index_shape = list(index.shape)
input_shape = list(input.shape)
views = [input.shape[0]] + [
1 if i != dim else -1 for i in range(1, len(input.shape))
]
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
output = torch.gather(input, dim, index)
# compute final shape
output_shape = input_shape[0:dim] + index_shape[1:] + input_shape[dim+1:]
return output.reshape(output_shape)
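# Shape sketch (hedged example, not from the original repo): with input of shape
# (B, N, C), dim=1 and index of shape (B, K), batch_gather returns a tensor of shape
# (B, K, C), i.e. K rows gathered per batch element along dimension 1.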
| 2.265625
| 2
|
clone/admin.py
|
gamersdestiny/SB-Admin-clone
| 0
|
12779049
|
from django.contrib import admin
import clone.models as mod
admin.site.register(mod.lineChart)
admin.site.register(mod.donutChart)
| 1.328125
| 1
|
code/traditional/TCA/TCA.py
|
lw0517/transferlearning
| 3
|
12779050
|
# encoding=utf-8
"""
Created on 21:29 2018/11/12
@author: <NAME>
"""
import numpy as np
import scipy.io
import scipy.linalg
import sklearn.metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
def kernel(ker, X1, X2, gamma):
K = None
if not ker or ker == 'primal':
K = X1
elif ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T, np.asarray(X2).T)
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, np.asarray(X2).T, gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, None, gamma)
return K
class TCA:
def __init__(self, kernel_type='primal', dim=30, lamb=1, gamma=1):
'''
Init func
:param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
:param dim: dimension after transfer
:param lamb: lambda value in equation
:param gamma: kernel bandwidth for rbf kernel
'''
self.kernel_type = kernel_type
self.dim = dim
self.lamb = lamb
self.gamma = gamma
def fit(self, Xs, Xt):
'''
Transform Xs and Xt
:param Xs: ns * n_feature, source feature
:param Xt: nt * n_feature, target feature
:return: Xs_new and Xt_new after TCA
'''
X = np.hstack((Xs.T, Xt.T))
X /= np.linalg.norm(X, axis=0)
m, n = X.shape
ns, nt = len(Xs), len(Xt)
e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
M = e * e.T
M = M / np.linalg.norm(M, 'fro')
H = np.eye(n) - 1 / n * np.ones((n, n))
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
Z = np.dot(A.T, K)
Z /= np.linalg.norm(Z, axis=0)
Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
return Xs_new, Xt_new
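    # In matrix form, fit() above solves the TCA generalized eigenproblem
    #   (K M K^T + lamb*I) A = (K H K^T) A diag(w)
    # where M encodes the source/target MMD and H is the centering matrix; the
    # eigenvectors with the smallest eigenvalues form the projection A.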
def fit_predict(self, Xs, Ys, Xt, Yt):
'''
Transform Xs and Xt, then make predictions on target using 1NN
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt: nt * n_feature, target feature
:param Yt: nt * 1, target label
:return: Accuracy and predicted_labels on the target domain
'''
Xs_new, Xt_new = self.fit(Xs, Xt)
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
y_pred = clf.predict(Xt_new)
acc = sklearn.metrics.accuracy_score(Yt, y_pred)
return acc, y_pred
def fit_new(self, Xs, Xt, Xt2):
'''
Map Xt2 to the latent space created from Xt and Xs
:param Xs : ns * n_feature, source feature
:param Xt : nt * n_feature, target feature
:param Xt2: n_s, n_feature, target feature to be mapped
:return: Xt2_new, mapped Xt2 with projection created by Xs and Xt
'''
# Computing projection matrix A from Xs an Xt
X = np.hstack((Xs.T, Xt.T))
X /= np.linalg.norm(X, axis=0)
m, n = X.shape
ns, nt = len(Xs), len(Xt)
e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
M = e * e.T
M = M / np.linalg.norm(M, 'fro')
H = np.eye(n) - 1 / n * np.ones((n, n))
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
# Compute kernel with Xt2 as target and X as source
Xt2 = Xt2.T
K = kernel(self.kernel_type, X1 = Xt2, X2 = X, gamma=self.gamma)
# New target features
Xt2_new = K @ A
return Xt2_new
def fit_predict_new(self, Xt, Xs, Ys, Xt2, Yt2):
'''
Transfrom Xt and Xs, get Xs_new
Transform Xt2 with projection matrix created by Xs and Xt, get Xt2_new
Make predictions on Xt2_new using classifier trained on Xs_new
:param Xt: ns * n_feature, target feature
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt2: nt * n_feature, new target feature
:param Yt2: nt * 1, new target label
:return: Accuracy and predicted_labels on the target domain
'''
Xs_new, _ = self.fit(Xs, Xt)
Xt2_new = self.fit_new(Xs, Xt, Xt2)
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
y_pred = clf.predict(Xt2_new)
acc = sklearn.metrics.accuracy_score(Yt2, y_pred)
return acc, y_pred
if __name__ == '__main__':
domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat']
for i in [1]:
for j in [2]:
if i != j:
src, tar = 'data/' + domains[i], 'data/' + domains[j]
src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
Xs, Ys, Xt, Yt = src_domain['feas'], src_domain['labels'], tar_domain['feas'], tar_domain['labels']
# Split target data
Xt1, Xt2, Yt1, Yt2 = train_test_split(Xt, Yt, train_size=50, stratify=Yt, random_state=42)
# Create latent space and evaluate using Xs and Xt1
tca = TCA(kernel_type='linear', dim=30, lamb=1, gamma=1)
acc1, ypre1 = tca.fit_predict(Xs, Ys, Xt1, Yt1)
# Project and evaluate Xt2 existing projection matrix and classifier
acc2, ypre2 = tca.fit_predict_new(Xt1, Xs, Ys, Xt2, Yt2)
print(f'Accuracy of mapped source and target1 data : {acc1:.3f}') #0.800
print(f'Accuracy of mapped target2 data : {acc2:.3f}') #0.706
| 2.5
| 2
|