Dataset schema (29 fields per record; each record below is pipe-delimited in this order):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2–616)
content_id: string (length 40)
detected_licenses: list (length 0–69)
license_type: string (2 classes)
repo_name: string (length 5–118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4–63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k–686M, nullable)
star_events_count: int64 (0–209k)
fork_events_count: int64 (0–110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2–10.3M)
extension: string (246 classes)
content: string (length 2–10.3M)
authors: list (length 1)
author_id: string (length 0–212)
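As a rough, hypothetical illustration of how one pipe-delimited record maps onto the schema above, here is a minimal sketch of a single record held in pandas. The column subset, the pandas loading step, and the truncated `content` value are assumptions for illustration only; the field values mirror the first row of this dump.

```python
# Hypothetical sketch: one record of the schema above, represented as a pandas DataFrame.
# The truncated "content" string and the chosen column subset are illustrative assumptions.
import pandas as pd

record = {
    "blob_id": "a71aa97fcfae548e30320b7b1ecfaf7083324ede",
    "path": "/cruft/_commands/utils/generate.py",
    "detected_licenses": ["MIT"],
    "license_type": "permissive",
    "repo_name": "juhuebner/cruft",
    "branch_name": "refs/heads/master",
    "visit_date": pd.Timestamp("2023-03-24T10:07:30.682818"),
    "star_events_count": 1,
    "fork_events_count": 1,
    "src_encoding": "UTF-8",
    "language": "Python",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 4262,
    "extension": "py",
    "content": "import os\n...",  # full source text of the file lives here
    "authors": ["noreply@github.com"],
    "author_id": "juhuebner.noreply@github.com",
}

df = pd.DataFrame([record])
print(df[["repo_name", "path", "language", "length_bytes"]])
```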
a71aa97fcfae548e30320b7b1ecfaf7083324ede
|
bd17496302ed843e56c40c78f6e41bd3379309d6
|
/cruft/_commands/utils/generate.py
|
3e955567998125f725f0fbdb7afee16e430510df
|
[
"MIT"
] |
permissive
|
juhuebner/cruft
|
9720962fb07b77298e2f92c384d06d646521141d
|
074b0a3e60381cf5c08f2c12c1a422f5c432d191
|
refs/heads/master
| 2023-03-24T10:07:30.682818
| 2021-02-04T13:32:43
| 2021-02-04T13:32:43
| 335,247,032
| 1
| 1
|
MIT
| 2021-02-12T10:15:36
| 2021-02-02T10:14:39
|
Python
|
UTF-8
|
Python
| false
| false
| 4,262
|
py
|
import os
from pathlib import Path
from shutil import move, rmtree
from tempfile import TemporaryDirectory
from typing import Optional, Set
from cookiecutter.generate import generate_files
from git import Repo
from .cookiecutter import CookiecutterContext, generate_cookiecutter_context
from .cruft import CruftState
try:
import toml # type: ignore
except ImportError: # pragma: no cover
toml = None # type: ignore
def cookiecutter_template(
output_dir: Path,
repo: Repo,
cruft_state: CruftState,
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
checkout: Optional[str] = None,
deleted_paths: Optional[Set[Path]] = None,
update_deleted_paths: bool = False,
) -> CookiecutterContext:
"""Generate a clean cookiecutter template in output_dir."""
if deleted_paths is None:
deleted_paths = set()
pyproject_file = project_dir / "pyproject.toml"
commit = checkout or repo.remotes.origin.refs["HEAD"]
repo.head.reset(commit=commit, working_tree=True)
context = _generate_output(cruft_state, Path(repo.working_dir), cookiecutter_input, output_dir)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
if update_deleted_paths:
deleted_paths.update(_get_deleted_files(output_dir, project_dir))
# We now remove skipped and deleted paths from the project
_remove_paths(output_dir, skip_paths | deleted_paths)
return context
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, project_dir: Path, cookiecutter_input: bool, output_dir: Path
) -> CookiecutterContext:
inner_dir = project_dir / (cruft_state.get("directory") or "")
new_context = generate_cookiecutter_context(
cruft_state["template"],
inner_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
# This generates the cookiecutter template.
# Unfortunately, cookiecutter doesn't let us output the template in an
# arbitrary directory. It insists on creating the initial project directory.
# Therefore we have to move the directory content to the expected output_dir.
# See https://github.com/cookiecutter/cookiecutter/pull/907
output_dir.mkdir(parents=True, exist_ok=True)
with TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
# Kindly ask cookiecutter to generate the template
template_dir = generate_files(
repo_dir=inner_dir, context=new_context, overwrite_if_exists=True, output_dir=tmpdir
)
template_dir = Path(template_dir)
# Move the template content to the output directory
for name in os.listdir(template_dir):
move(str(template_dir / name), str(output_dir))
return new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(root: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
path = root / path_to_remove
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
|
[
"noreply@github.com"
] |
juhuebner.noreply@github.com
|
8b63e5294856d883658c03c47ecbc8dd9c494857
|
b2c5fb38def287fa50222b46f2d645b4245a06f5
|
/pplan/src/pplan/makegraph_edgetime_equilateral.py
|
893e8f1db69b17d39edc2af518538eb8c71d525c
|
[] |
no_license
|
jonbinney/informative_path_planning
|
1e228aa719f16b8d1d121e86a19fc60b1defc22b
|
6d5e82ef9760ed994b597f2903e7cc77c195e466
|
refs/heads/master
| 2016-09-06T03:51:30.575364
| 2013-08-01T23:12:44
| 2013-08-01T23:12:44
| 7,231,493
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,240
|
py
|
import numpy as np
from scipy import linalg
import ppas
def add_point(points, p):
'''
Add p to points, and return the index for p
'''
for ii in range(len(points)):
# hack alert! because of precision, sometimes points that should
# be the same are actually slightly different, so we use an ugly
# hack to get around this
if linalg.norm(p - points[ii]) < 1e-10:
return ii
# point not in list yet, add it at the end
points.append(p)
return len(points) - 1
def makegraph_edgetime_equilateral(roi_properties, graph_properties):
'''
Construct a graph where all edges have equal lengths
'''
spatial_points = []
nrows, ncols = graph_properties['shape']
edge_distance = graph_properties['edge_distance']
row_spacing = ppas.roms.meters_to_degrees(edge_distance, 0.0)[0]
col_spacing = ppas.roms.meters_to_degrees(
0.0, np.cos(np.pi/6.)*edge_distance)[1]
for col_i in range(ncols):
lon = col_i * col_spacing + roi_properties['lon0']
if col_i % 2 == 1:
offset = 0.5 * row_spacing
nrows_thiscol = nrows - 1
else:
offset = 0.0
nrows_thiscol = nrows
for row_i in range(nrows_thiscol):
lat = offset + row_spacing * row_i + roi_properties['lat0']
spatial_points.append(np.array((lat, lon)))
spatial_points = np.array(spatial_points)
# make the graph
nodes = range(len(spatial_points))
G = ppas.graph.Graph(nodes, 'discrete_time')
G.node_points = spatial_points
starttime = ppas.datetime_to_seconds(roi_properties['starttime'])
points = []
for v_i in nodes:
sp_i = spatial_points[v_i]
for v_j in nodes:
sp_j = spatial_points[v_j]
meters_delta = ppas.roms.degrees_to_meters(
sp_i[0] - sp_j[0], sp_i[1] - sp_j[1])
distance = linalg.norm(meters_delta)
if distance <= edge_distance * 1.01 and v_i != v_j: # fudge factor...
length_dict = {}
sample_dict = {}
for t in graph_properties['time_list']:
length_dict[t] = graph_properties['edge_len']
samples = set()
ppe = graph_properties['ppe']
# technically each sample should be at a different time, but
# then we would end up with more points (due to points being
# at different times depending on which direction the edge
# is traversed)
t_s = 0.5 * t + 0.5 * (t + graph_properties['edge_len'])
for r in np.linspace(0., 1., ppe):
sp = (1 - r)*sp_i + r*sp_j
depth = roi_properties['depth']
p = np.array((sp[0], sp[1], depth, t_s))
p_ii = add_point(points, p)
samples.add(p_ii)
sample_dict[t] = samples
e = ppas.graph.DiscreteTimeEdge(v_i, v_j, length_dict, sample_dict)
G.add_edge(e)
return G, np.array(points)
|
[
"jbinney@bih.willowgarage.com"
] |
jbinney@bih.willowgarage.com
|
2973b9afb0d60bb23ad9537dfdd7c01cf45442e8
|
e01bb802ba807b5bce32bd3d3a060186515405fc
|
/celebA_preproc.py
|
ef0a5f3226b80f70adf9b25562d999446d10be16
|
[] |
no_license
|
MonicaVillanueva/Image2Image_Translation
|
005676197ea1db7e2babc1abdc9829a2b52d4375
|
c7cad94c2f7497f0147e26a6e4e8553bfeefb55b
|
refs/heads/master
| 2021-09-05T15:19:05.513243
| 2018-01-29T07:27:51
| 2018-01-29T07:27:51
| 113,840,715
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
from PIL import Image
import os
import matplotlib.pyplot as plt
import scipy.misc as sci
import numpy as np
# Constants
celeba_path = os.getcwd() + '\celebA\img_align_celeba'
out_path = 'celebA\processed'
attr_path = os.getcwd() + '\celebA\Anno\list_attr_celeba.txt'
BLACK = 8
BLOND = 9
BROWN = 11
MALE = 20
YOUNG = 39
attrs = [BLACK, BLOND, BROWN, MALE, YOUNG]
## Preprocess images in database
attr = open(attr_path, 'r')
attr.readline()
attr.readline() # disregard first two lines
celeba_path = celeba_path.decode('iso8859_15')
# out_path = out_path.decode('iso8859_15')
for filename in os.listdir(celeba_path):
filename = filename.encode('ascii','ignore')
# Open
img = Image.open(os.path.join(celeba_path, filename))
# img = sci.imread(os.path.join(celeba_path, filename))
# plt.figure()
# plt.imshow(img)
# We crop the initial 178x218 size images to 178x178
img = img.crop((0, 20, 178, 198))
# img = img[20:198, 0:178, :]
# plt.figure()
# plt.imshow(img)
# Then resize them as 128x128
img = img.resize((128, 128), Image.ANTIALIAS)
# img = sci.imresize(img, (128,128))
# plt.figure()
# plt.imshow(img)
# We construct seven domains using the following attributes: hair color (black, blond, brown), gender (male/female), and age (young/old).
line = attr.readline()
splits = line.split()
labels = splits[1:]
new_labels = []
for idx, value in enumerate(labels):
if idx in attrs:
if int(value) == 1:
new_labels.append(1)
else:
new_labels.append(0)
labels.append(new_labels)
# Save modified images
pic_path = filename.split('.')[0] + '_' + str(new_labels) + '.jpg'
# pic_path = os.path.join(out_path, pic_path)
img.save(pic_path)
break
|
[
"monicavi@kth.se"
] |
monicavi@kth.se
|
21506168c06a23ad11428c0c4c3d152ac7118125
|
509e9d64744f720392fda2b978d783f985c60824
|
/python2.7/site-packages/numpy/ma/core.py
|
7e1ea2b54b893c2252e95a7cda30ca172bc9932b
|
[] |
no_license
|
theideasmith/Instant-OpenCV-FFMPEG
|
0560598fba630ded533b4e6c111c61c9b0b7502b
|
234e359af245b4832b3e7ade6070e91c81b65de0
|
refs/heads/master
| 2021-01-11T20:39:33.443984
| 2017-01-16T23:09:46
| 2017-01-16T23:09:46
| 79,162,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:e83576d33a9a1a0dd7ddd47d954efbf285527a2a66b9a5f7c6a62368b7fb46f2
size 247843
|
[
"aclscientist@gmail.com"
] |
aclscientist@gmail.com
|
e9aa8307ab7e17962e321ed2057fac07d37bdaf0
|
e97451f42a5bb5834baf581dcbbcf273eb6562ff
|
/[052] Permuted multiples/main.py
|
8c2761dd6a3711d0073ff5c96961bd38885794f7
|
[] |
no_license
|
m-tkach/ProjectEuler
|
561d13137a8e5e9e63f028e0dd3abd57aa788b9e
|
ee118e0889fa0c48662f62b42708c2009ddbc4ce
|
refs/heads/master
| 2020-04-15T01:54:24.682833
| 2015-04-13T20:23:44
| 2015-04-13T20:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
MAX_MULTIPLIER = 6
def to_int(l):
ans = 0
for x in l:
ans = ans * 10 + x
return ans
def to_sort_list(i):
if i == 0:
return [0]
ret = []
while i > 0:
ret.append(i % 10)
i //= 10
ret.sort()
return ret
def next(v):
v += 1
max_multi_v = v * MAX_MULTIPLIER
len_v = len(str(v))
if len_v != len(str(max_multi_v)):
v = int('1' + ('0' * len_v))
return v
def check(val):
checked = to_int(to_sort_list(val))
for multiplier in range(2, MAX_MULTIPLIER+1):
x = val * multiplier
if checked != to_int(to_sort_list(x)):
return False
return True
def calc():
i = 10
while True:
if (check(i)):
return i
i = next(i)
return None
print(calc())
|
[
"m.tkach@samsung.com"
] |
m.tkach@samsung.com
|
615815d159d11c1a11dd487d30418a377ac10f96
|
6501237d5e687ba1ad51f8f134f998fb2c25b205
|
/models/transformer.py
|
9ef76ae139ff364596d6f3467ce1b4ed432e326f
|
[] |
no_license
|
jdegange/multilingual_nmt
|
e29cbb86de1556060a7ec009de0a7e463ca120bc
|
4c5cc5a5b7986ae621d4399ed0fc7ca998bc9c1f
|
refs/heads/master
| 2020-04-09T06:36:05.338816
| 2018-09-11T04:09:29
| 2018-09-11T04:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,786
|
py
|
# encoding: utf-8
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import math
import scipy.stats as stats
import utils
import search_strategy
import preprocess
from expert_utils import PadRemover
cudnn.benchmark = True
def input_like(tensor, val=0):
"""
Use clone() + fill_() to make sure that a tensor ends up on the right
device at runtime.
"""
return tensor.clone().fill_(val)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=np.float32):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean
and standard deviation, except that values whose magnitude is more
than 2 standard deviations from the mean are dropped and re-picked.
API from: https://www.tensorflow.org/api_docs/python/tf/truncated_normal
"""
lower = -2 * stddev + mean
upper = 2 * stddev + mean
X = stats.truncnorm((lower - mean) / stddev,
(upper - mean) / stddev,
loc=mean,
scale=stddev)
values = X.rvs(size=shape)
return torch.from_numpy(values.astype(dtype))
class ScaledEmbedding(nn.Embedding):
"""
Embedding layer that initialises its weights from a truncated
normal distribution whose standard deviation is the inverse
square root of the embedding dimension.
"""
def reset_parameters(self):
"""
Initialize parameters using Truncated Normal Initializer (default in Tensorflow)
"""
# Initialize the embedding parameters (Default)
# This works well too
# self.embed_word.weight.data.uniform_(-3. / self.num_embeddings,
# 3. / self.num_embeddings)
self.weight.data = truncated_normal(shape=(self.num_embeddings,
self.embedding_dim),
stddev=1.0 / math.sqrt(self.embedding_dim))
if self.padding_idx is not None:
self.weight.data[self.padding_idx].fill_(0)
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
def sentence_block_embed(embed, x):
"""Computes sentence-level embedding representation from word-ids.
:param embed: nn.Embedding() Module
:param x: Tensor of batched word-ids
:return: Tensor of shape (batchsize, dimension, sentence_length)
"""
batch, length = x.shape
_, units = embed.weight.size()
e = embed(x)
assert (e.size() == (batch, length, units))
return e
def seq_func(func, x, reconstruct_shape=True, pad_remover=None):
"""Change implicitly function's input x from ndim=3 to ndim=2
:param func: function to be applied to input x
:param x: Tensor of batched sentence level word features
:param reconstruct_shape: boolean, if the output needs to be
of the same shape as input x
:return: Tensor of shape (batchsize, dimension, sentence_length)
or (batchsize x sentence_length, dimension)
"""
batch, length, units = x.shape
e = x.view(batch * length, units)
if pad_remover:
e = pad_remover.remove(e)
e = func(e)
if pad_remover:
e = pad_remover.restore(e)
if not reconstruct_shape:
return e
out_units = e.shape[1]
e = e.view(batch, length, out_units)
assert (e.shape == (batch, length, out_units))
return e
class LayerNormSent(LayerNorm):
"""Position-wise layer-normalization layer for array of shape
(batchsize, dimension, sentence_length)."""
def __init__(self, n_units, eps=1e-3):
super(LayerNormSent, self).__init__(n_units, eps=eps)
def forward(self, x):
y = seq_func(super(LayerNormSent, self).forward, x)
return y
class LinearSent(nn.Module):
"""Position-wise Linear Layer for sentence block. array of shape
(batchsize, dimension, sentence_length)."""
def __init__(self, input_dim, output_dim, bias=True):
super(LinearSent, self).__init__()
self.L = nn.Linear(input_dim, output_dim, bias=bias)
# self.L.weight.data.uniform_(-3. / input_dim, 3. / input_dim)
# Using Xavier Initialization
# self.L.weight.data.uniform_(-math.sqrt(6.0 / (input_dim + output_dim)),
# math.sqrt(6.0 / (input_dim + output_dim)))
# LeCun Initialization
self.L.weight.data.uniform_(-math.sqrt(3.0 / input_dim),
math.sqrt(3.0 / input_dim))
if bias:
self.L.bias.data.fill_(0.)
self.output_dim = output_dim
def forward(self, x, pad_remover=None):
output = seq_func(self.L, x, pad_remover=pad_remover)
return output
class MultiHeadAttention(nn.Module):
"""Multi-Head Attention Layer for Sentence Blocks.
For computational efficiency, dot-product to calculate
query-key scores is performed in all the heads together.
Positional Attention is introduced in
"Non-Autoregressive Neural Machine Translation"
(https://arxiv.org/abs/1711.02281)
"""
def __init__(self, n_units, multi_heads=8, attention_dropout=0.1,
pos_attn=False):
super(MultiHeadAttention, self).__init__()
self.W_Q = nn.Linear(n_units,
n_units,
bias=False)
self.W_K = nn.Linear(n_units,
n_units,
bias=False)
self.W_V = nn.Linear(n_units,
n_units,
bias=False)
self.finishing_linear_layer = nn.Linear(n_units,
n_units,
bias=False)
self.h = multi_heads
self.pos_attn = pos_attn
self.scale_score = 1. / (n_units // multi_heads) ** 0.5
self.dropout = nn.Dropout(attention_dropout)
def forward(self, x, z=None, mask=None):
h = self.h
Q = self.W_Q(x)
if not self.pos_attn:
if z is None:
K, V = self.W_K(x), self.W_V(x)
else:
K, V = self.W_K(z), self.W_V(z)
else:
K, V = self.W_K(x), self.W_V(z)
batch, n_querys, n_units = Q.shape
_, n_keys, _ = K.shape
# Calculate attention scores with mask for zero-padded areas
# Perform multi-head attention using pseudo batching all together
# at once for efficiency
Q = torch.cat(torch.chunk(Q, h, dim=2), dim=0)
K = torch.cat(torch.chunk(K, h, dim=2), dim=0)
V = torch.cat(torch.chunk(V, h, dim=2), dim=0)
assert (Q.shape == (batch * h, n_querys, n_units // h))
assert (K.shape == (batch * h, n_keys, n_units // h))
assert (V.shape == (batch * h, n_keys, n_units // h))
mask = torch.cat([mask] * h, dim=0)
Q.mul_(self.scale_score)
batch_A = torch.bmm(Q, K.transpose(1, 2).contiguous())
# batch_A = batch_A.masked_fill(1. - mask, -np.inf) # Works in v0.4
batch_A = batch_A.masked_fill(mask == 0, -1e18)
batch_A = F.softmax(batch_A, dim=2)
# Replaces 'NaN' with zeros and other values with the original ones
batch_A = batch_A.masked_fill(batch_A != batch_A, 0.)
assert (batch_A.shape == (batch * h, n_querys, n_keys))
# Attention Dropout
batch_A = self.dropout(batch_A)
# Calculate Weighted Sum
C = torch.bmm(batch_A, V)
assert (C.shape == (batch * h, n_querys, n_units // h))
# Joining the Multiple Heads
C = torch.cat(torch.chunk(C, h, dim=0), dim=2)
assert (C.shape == (batch, n_querys, n_units))
# Final linear layer
C = self.finishing_linear_layer(C)
return C
class FeedForwardLayer(nn.Module):
def __init__(self, n_units, n_hidden, relu_dropout=0.1):
super(FeedForwardLayer, self).__init__()
self.W_1 = nn.Linear(n_units, n_hidden)
self.act = nn.ReLU()
self.dropout = nn.Dropout(relu_dropout)
self.W_2 = nn.Linear(n_hidden, n_units)
def forward(self, e, pad_remover=None):
e = self.dropout(self.act(self.W_1(e)))
e = self.W_2(e)
return e
class EncoderLayer(nn.Module):
def __init__(self, n_units, multi_heads=8,
layer_prepostprocess_dropout=0.1, n_hidden=2048,
attention_dropout=0.1, relu_dropout=0.1):
super(EncoderLayer, self).__init__()
self.ln_1 = LayerNorm(n_units,
eps=1e-3)
self.self_attention = MultiHeadAttention(n_units,
multi_heads,
attention_dropout)
self.dropout1 = nn.Dropout(layer_prepostprocess_dropout)
self.ln_2 = LayerNorm(n_units,
eps=1e-3)
self.feed_forward = FeedForwardLayer(n_units,
n_hidden,
relu_dropout)
self.dropout2 = nn.Dropout(layer_prepostprocess_dropout)
def forward(self, e, xx_mask, pad_remover=None):
sub = self.self_attention(self.ln_1(e),
mask=xx_mask)
e = e + self.dropout1(sub)
sub = self.feed_forward(self.ln_2(e),
pad_remover=pad_remover)
e = e + self.dropout2(sub)
return e
class DecoderLayer(nn.Module):
def __init__(self, n_units, multi_heads=8,
layer_prepostprocess_dropout=0.1,
pos_attention=False, n_hidden=2048,
attention_dropout=0.1, relu_dropout=0.1):
super(DecoderLayer, self).__init__()
self.pos_attention = pos_attention
self.ln_1 = LayerNorm(n_units,
eps=1e-3)
self.self_attention = MultiHeadAttention(n_units,
multi_heads,
attention_dropout)
self.dropout1 = nn.Dropout(layer_prepostprocess_dropout)
if pos_attention:
pos_enc_block = Transformer.initialize_position_encoding(500,
n_units)
self.pos_enc_block = nn.Parameter(torch.FloatTensor(pos_enc_block),
requires_grad=False)
self.register_parameter("Position Encoding Block",
self.pos_enc_block)
self.ln_pos = LayerNorm(n_units,
eps=1e-3)
self.pos_attention = MultiHeadAttention(n_units,
multi_heads,
attention_dropout,
pos_attn=True)
self.dropout_pos = nn.Dropout(layer_prepostprocess_dropout)
self.ln_2 = LayerNorm(n_units,
eps=1e-3)
self.source_attention = MultiHeadAttention(n_units,
multi_heads,
attention_dropout)
self.dropout2 = nn.Dropout(layer_prepostprocess_dropout)
self.ln_3 = LayerNorm(n_units,
eps=1e-3)
self.feed_forward = FeedForwardLayer(n_units,
n_hidden,
relu_dropout)
self.dropout3 = nn.Dropout(layer_prepostprocess_dropout)
def forward(self, e, s, xy_mask, yy_mask, pad_remover):
batch, units, length = e.shape
sub = self.self_attention(self.ln_1(e),
mask=yy_mask)
e = e + self.dropout1(sub)
if self.pos_attention:
p = self.pos_enc_block[:, :length, :]
p = p.expand(batch, length, units)
sub = self.pos_attention(p,
self.ln_pos(e),
mask=yy_mask)
e = e + self.dropout_pos(sub)
sub = self.source_attention(self.ln_2(e),
s,
mask=xy_mask)
e = e + self.dropout2(sub)
sub = self.feed_forward(self.ln_3(e),
pad_remover=pad_remover)
e = e + self.dropout3(sub)
return e
class Encoder(nn.Module):
def __init__(self, n_layers, n_units, multi_heads=8,
layer_prepostprocess_dropout=0.1, n_hidden=2048,
attention_dropout=0.1, relu_dropout=0.1):
super(Encoder, self).__init__()
self.layers = torch.nn.ModuleList()
for i in range(n_layers):
layer = EncoderLayer(n_units,
multi_heads,
layer_prepostprocess_dropout,
n_hidden,
attention_dropout,
relu_dropout)
self.layers.append(layer)
self.ln = LayerNorm(n_units,
eps=1e-3)
def forward(self, e, xx_mask, pad_remover):
for layer in self.layers:
e = layer(e,
xx_mask,
pad_remover)
e = self.ln(e)
return e
class Decoder(nn.Module):
def __init__(self, n_layers, n_units, multi_heads=8,
layer_prepostprocess_dropout=0.1, pos_attention=False,
n_hidden=2048, attention_dropout=0.1,
relu_dropout=0.1):
super(Decoder, self).__init__()
self.layers = torch.nn.ModuleList()
for i in range(n_layers):
layer = DecoderLayer(n_units,
multi_heads,
layer_prepostprocess_dropout,
pos_attention,
n_hidden,
attention_dropout,
relu_dropout)
self.layers.append(layer)
self.ln = LayerNorm(n_units,
eps=1e-3)
def forward(self, e, source, xy_mask, yy_mask, pad_remover):
for layer in self.layers:
e = layer(e,
source,
xy_mask,
yy_mask,
pad_remover)
e = self.ln(e)
return e
class Transformer(nn.Module):
def __init__(self, config):
super(Transformer, self).__init__()
self.scale_emb = config.n_units ** 0.5
self.padding_idx = 0
self.embed_word = ScaledEmbedding(config.n_vocab,
config.n_units,
padding_idx=self.padding_idx)
pos_enc_block = self.initialize_position_encoding(config.max_length,
config.n_units)
self.pos_enc_block = nn.Parameter(torch.FloatTensor(pos_enc_block),
requires_grad=False)
self.register_parameter("Position Encoding Block",
self.pos_enc_block)
self.embed_dropout = nn.Dropout(config.dropout)
self.n_hidden = config.n_units * 4
self.encoder = Encoder(config.layers,
config.n_units,
config.multi_heads,
config.layer_prepostprocess_dropout,
self.n_hidden,
config.attention_dropout,
config.relu_dropout)
self.decoder = Decoder(config.layers,
config.n_units,
config.multi_heads,
config.layer_prepostprocess_dropout,
config.pos_attention,
self.n_hidden,
config.attention_dropout,
config.relu_dropout)
self.use_pad_remover = config.use_pad_remover
if config.embed_position:
self.embed_pos = nn.Embedding(config.max_length,
config.n_units,
padding_idx=0)
if config.tied:
self.affine = self.tied_linear
self.affine_bias = nn.Parameter(torch.Tensor(config.n_vocab))
stdv = 1. / math.sqrt(config.n_units)
self.affine_bias.data.uniform_(-stdv, stdv)
else:
self.affine = nn.Linear(config.n_units,
config.n_vocab,
bias=True)
self.n_target_vocab = config.n_vocab
self.dropout = config.dropout
self.label_smoothing = config.label_smoothing
assert (0.0 <= self.label_smoothing <= 1.0)
if self.label_smoothing > 0:
# When label smoothing is turned on,
# KL-divergence between q_{smoothed ground truth prob.}(w)
# and p_{prob. computed by model}(w) is minimized.
# If label smoothing value is set to zero, the loss
# is equivalent to NLLLoss or CrossEntropyLoss.
# All non-true labels are uniformly set to low-confidence.
self.criterion = nn.KLDivLoss(size_average=False,
reduce=True)
one_hot = torch.randn(1, config.n_vocab)
one_hot.fill_(self.label_smoothing / (config.n_vocab - 2))
one_hot[0][self.padding_idx] = 0
self.register_buffer('one_hot', one_hot)
else:
weight = torch.ones(config.n_vocab)
weight[self.padding_idx] = 0
self.criterion = nn.NLLLoss(weight,
size_average=False)
self.confidence = 1.0 - self.label_smoothing
@staticmethod
def initialize_position_encoding(length, emb_dim):
channels = emb_dim
position = np.arange(length, dtype='f')
num_timescales = channels // 2
log_timescale_increment = (np.log(10000. / 1.) / (float(num_timescales) - 1))
inv_timescales = 1. * np.exp(np.arange(num_timescales).astype('f') * -log_timescale_increment)
scaled_time = np.expand_dims(position, 1) * np.expand_dims(inv_timescales, 0)
signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
signal = np.reshape(signal, [1, length, channels])
return signal
def make_input_embedding(self, embed, block):
batch, length = block.shape
emb_block = sentence_block_embed(embed, block) * self.scale_emb
emb_block += self.pos_enc_block[:, :length, :]
if hasattr(self, 'embed_pos'):
emb_block += sentence_block_embed(self.embed_pos,
np.broadcast_to(np.arange(length).astype('i')[None, :],
block.shape))
emb_block = self.embed_dropout(emb_block)
return emb_block
def make_attention_mask(self, source_block, target_block):
mask = (target_block[:, None, :] >= 1) * \
(source_block[:, :, None] >= 1)
# (batch, source_length, target_length)
return mask
def make_history_mask(self, block):
batch, length = block.shape
arange = np.arange(length)
history_mask = (arange[None,] <= arange[:, None])[None,]
history_mask = np.broadcast_to(history_mask,
(batch, length, length))
history_mask = history_mask.astype(np.int32)
history_mask = Variable(torch.ByteTensor(history_mask).type(utils.BYTE_TYPE),
requires_grad=False)
return history_mask
def tied_linear(self, h):
return F.linear(h, self.embed_word.weight, self.affine_bias)
def output(self, h):
return self.affine(h)
def output_and_loss(self, h_block, t_block):
batch, length, units = h_block.shape
# shape : (batch * sequence_length, num_classes)
logits_flat = seq_func(self.affine,
h_block,
reconstruct_shape=False)
# shape : (batch * sequence_length, num_classes)
log_probs_flat = F.log_softmax(logits_flat,
dim=-1)
rebatch, _ = logits_flat.shape
concat_t_block = t_block.view(rebatch)
weights = (concat_t_block >= 1).float()
n_correct, n_total = utils.accuracy(logits_flat.data,
concat_t_block.data,
ignore_index=0)
if self.confidence < 1:
tdata = concat_t_block.data
mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
tmp_ = self.one_hot.repeat(concat_t_block.size(0), 1)
tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
if mask.dim() > 0 and mask.numel() > 0:
tmp_.index_fill_(0, mask, 0)
concat_t_block = Variable(tmp_, requires_grad=False)
loss = self.criterion(log_probs_flat,
concat_t_block)
loss = loss.sum() / (weights.sum() + 1e-13)
stats = utils.Statistics(loss=loss.data.cpu() * n_total,
n_correct=n_correct,
n_words=n_total)
return loss, stats
def forward(self, x_block, y_in_block, y_out_block, get_prediction=False,
z_blocks=None):
batch, x_length = x_block.shape
batch, y_length = y_in_block.shape
if z_blocks is None:
ex_block = self.make_input_embedding(self.embed_word,
x_block)
xx_mask = self.make_attention_mask(x_block,
x_block)
xpad_obj = None
if self.use_pad_remover:
xpad_obj = PadRemover(x_block >= preprocess.Vocab_Pad.PAD)
# Encode Sources
z_blocks = self.encoder(ex_block,
xx_mask,
xpad_obj)
# (batch, n_units, x_length)
ey_block = self.make_input_embedding(self.embed_word,
y_in_block)
# Make Masks
xy_mask = self.make_attention_mask(y_in_block,
x_block)
yy_mask = self.make_attention_mask(y_in_block,
y_in_block)
yy_mask *= self.make_history_mask(y_in_block)
# Create PadRemover objects
ypad_obj = None
if self.use_pad_remover:
ypad_obj = PadRemover(y_in_block >= preprocess.Vocab_Pad.PAD)
# Encode Targets with Sources (Decode without Output)
h_block = self.decoder(ey_block,
z_blocks,
xy_mask,
yy_mask,
ypad_obj)
# (batch, n_units, y_length)
if get_prediction:
return self.output(h_block[:, -1, :]), z_blocks
else:
return self.output_and_loss(h_block,
y_out_block)
def translate(self, x_block, max_length=50, beam=5, alpha=0.6):
if beam > 1:
obj = search_strategy.BeamSearch(beam_size=beam,
max_len=max_length,
alpha=alpha)
id_list, score = obj.generate_output(self,
x_block)
return id_list
else:
obj = search_strategy.GreedySearch(max_len=max_length)
id_list = obj.generate_output(self,
x_block)
return id_list
|
[
"devendra.singh@petuum.com"
] |
devendra.singh@petuum.com
|
624fece2eb26804725675757255cafe640ad30a5
|
d69b7c615390e56cdb2cf0a29c7dfcd1f034098f
|
/main.py
|
f32b536be72ca27aff4cf19901d3c29ae823755a
|
[] |
no_license
|
islamuzkg/tip-calculator-start
|
aa535bee503af4f011d78ca6fe829c1968fb18f7
|
1acea524cf30a6ad49defa0958d6733d1de7bbee
|
refs/heads/master
| 2023-03-18T11:12:16.948903
| 2021-03-10T13:57:26
| 2021-03-10T13:57:26
| 346,373,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
#If the bill was $150.00, split between 5 people, with 12% tip.
#Each person should pay (150.00 / 5) * 1.12 = 33.6
#Format the result to 2 decimal places = 33.60
#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪
#HINT 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal
#HINT 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python
print("Welcome to the tip calculator")
bill = input("What was the total bill! ")
bill_as_float = float(bill)
current_bill = (f"Your current bill is {bill_as_float}")
print(current_bill)
tip = input("What percentage tip would you like to leave? 10, 12 or 15? ")
tip_as_int = int(tip)
tip_percent = tip_as_int / 100
how_many_people = int(input("How many people to split the bill?"))
total_tip_amount = bill_as_float * tip_percent
total_bill = total_tip_amount + bill_as_float
total_bill_as_rounded = round(total_bill, 2)
each_person = bill_as_float / how_many_people * (1 + tip_percent)
print(tip_percent)
each_person_as_rounded = "{:.2f}".format(each_person)
message = f" Total bill is {total_bill_as_rounded}, you are giving {tip_as_int} percent tips, between {how_many_people} people each person will pay {each_person_as_rounded}"
print(message)
|
[
"Islamuzkg@gmail.com"
] |
Islamuzkg@gmail.com
|
6065cbd309d83c85efd3a6015055736c8a10ecce
|
fe6bc15f12bfa3e73cf6c9e26ed454d574beb394
|
/bioinformatics_stronghold/Intro_Alternative_Splicing/solution.py
|
1dc7c7a9932f84a3ec9a65ebf22356d9004e6b77
|
[] |
no_license
|
therealcooperpark/Rosalind-Project
|
af55aadcd9839b75fc487f671bbb199edd3e36f4
|
b5291e97b3eb3b48b6979d98341fa6dd84440936
|
refs/heads/master
| 2022-12-08T04:43:15.414723
| 2020-08-27T23:10:42
| 2020-08-27T23:10:42
| 290,542,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#! /usr/bin/env python3
'''
Calculate all combinations given the sample size and number of items
'''
from scipy.special import comb
def main():
N = int(input("Enter N:\n"))
M = int(input("Enter M:\n"))
total_combs = 0
for rng in range(M, N + 1):
total_combs += comb(N, rng, exact = True)
print( total_combs % 1000000 )
if __name__ == "__main__":
main()
|
[
"therealcooperpark@gmail.com"
] |
therealcooperpark@gmail.com
|
77c6fe22238d3f0e4658663f756f998f20e9bd7f
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_COURSES/beginning-python-3ed/Chapter15/listing15-1.py
|
d02df92eb18d79c48f0b3c2bce82b4df5ef63834
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from urllib.request import urlopen
import re
p = re.compile('<a href="(/jobs/\\d+)/">(.*?)</a>')
text = urlopen("http://python.org/jobs").read().decode()
for url, name in p.findall(text):
print("{} ({})".format(name, url))
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
622bdf44dac3fc1f32f7e9b161111e0c859cf790
|
7e9ef90658a02fa8c840fdb51350f95b0695dedb
|
/data_conversions/raw_for_broken.py
|
2a2386ff4582d30d1c30d162079a9ae20ac8a30e
|
[] |
no_license
|
bartekkroczek/FANET_Analysis
|
2da16c4cbe86dc4ceba30d2a3e36af56ad96d441
|
fb50aefb5781e804501e80ade46e9cb17256e976
|
refs/heads/master
| 2022-05-05T07:11:40.964940
| 2022-04-24T08:10:51
| 2022-04-24T08:10:51
| 123,772,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 15:50:41 2017
@author: bkroczek
"""
import pandas as pd
from tqdm import tqdm
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def RepresentsFloat(s):
try:
float(s)
return True
except ValueError:
return False
files = ['12MALE21.asc', '14FEMALE19.asc', '62FEMALE39.asc', '83MALE27.asc', '130MALE18.asc','142FEMALE29.asc', '165FEMALE20.asc']
# /home/bkroczek/Dropbox/Data/FAN_ET/Badanie P/2017-05-06_Badanie_P/BadanieP_FAN_ET/Dane trackingowe/asc_to_fix
#%%
with tqdm(total = len(files)) as pbar:
for f_name in files:
blk_no = 0
pbar.set_postfix(file = f_name)
pbar.update(1)
f = open(f_name, 'r').readlines()
res = [['time', "ps", "block"]]
for line in f:
if line.startswith('START'):
blk_no += 1
line = line.split()
if line:
if RepresentsInt(line[0]) and blk_no != 0 and RepresentsFloat(line[-2]):
res.append([int(line[0]), float(line[-2]), blk_no])
pd.DataFrame(res).to_csv(f_name.split('.')[0] + '_raw.csv', index=False)
|
[
"bkroczek@localhost.localdomain"
] |
bkroczek@localhost.localdomain
|
9c7cfab4afca4c9a6855488b3e76722f0015b0c3
|
342b29790c7e2910e74bc4be32e55c1de02b9319
|
/app/core/tests/test_admin.py
|
2691ac0bfb4ba01d09cb86083011fca847611c08
|
[] |
no_license
|
yieniggu/demo-isw-django-docker
|
c6f8233286dfbf952f41e6c01c431f5b5e9cec19
|
e6fe9556386f5a6db74e8c6b08d86377faae7cce
|
refs/heads/master
| 2022-11-09T00:36:53.479639
| 2020-06-23T21:30:48
| 2020-06-23T21:30:48
| 272,803,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTest(TestCase):
def setUp(self):
"""Configuracion previa a la ejecucion de pruebas"""
#Crear cliente
self.client = Client()
# Create an admin user
self.admin_user = get_user_model().objects.create_superuser(
email = 'admin@admin.com',
password = 'admin123'
)
# Log the test client in as the admin we just created
self.client.force_login(self.admin_user)
# Create a regular user
self.user = get_user_model().objects.create_user(
email = 'user@user.com',
password = 'user123',
name = 'Usuario basico de prueba'
)
def test_user_listed(self):
"""Prueba que los usuarios se listan en el panel de admin"""
url = reverse('admin:core_user_changelist')
#print(url)
# GET request to the url
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Prueba que la pagina para editar usuarios carga correctamente"""
url = reverse('admin:core_user_change', args=[self.user.id])
#print(url)
res = self.client.get(url)
print(res)
# Verify that the returned status code is OK (200)
self.assertEqual(res.status_code, 200)
def test_user_create_page(self):
"""Prueba que la pagina para crear usuarios carga correctamente"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
[
"bastianandresxb@gmail.com"
] |
bastianandresxb@gmail.com
|
aef6f8c929de67ad8c8d4fcd75e47e015a721192
|
a0467b04977f8db115faa50f7cb28c0943ff5616
|
/wpcolab/mobigame/urls.py
|
8f9c104b28edf392dbaaf2b2875a7187b0665728
|
[] |
no_license
|
squirell4/Wits_Praekelt_CoLab
|
013b82d66f1b7becf26bcf7027e1d26c7f16fe26
|
96cceb9cb0fe7ff5a6142010f607857f8156d2c9
|
refs/heads/master
| 2021-01-23T15:56:53.631776
| 2011-11-22T14:07:19
| 2011-11-22T14:07:19
| 2,761,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from mobigame import views
from django.conf.urls.defaults import patterns, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^login/', views.login, name='login'),
url(r'^signout/', views.signout, name='signout'),
url(r'^scores/', views.scores, name='scores'),
url(r'^play/', views.play, name='play'),
url(r'^api/v1/', views.api_v1, name='apiv1'),
)
|
[
"hodgestar@gmail.com"
] |
hodgestar@gmail.com
|
d62497d39ddd24227f946321f0e613b8b8239ff0
|
b57768feba5db90ee1ac65244fbfd351cdf2aca4
|
/Python/TwitterStream2.py
|
2eb74a724b53efcfaed9c52d6c9109a17fe09147
|
[] |
no_license
|
abhiking-agrawal/StreamTwitterDataNodeJS
|
2c895587b46638d1ad9e8764552b6982bd66bbd0
|
637e78930d391fb22433f623acf4ff0d15111755
|
refs/heads/master
| 2020-03-20T22:23:19.630564
| 2018-06-26T20:40:42
| 2018-06-26T20:40:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
#Variables that contain the user credentials to access the Twitter API
consumer_key = 'HTNc6rm4tX63VYQKWQCV40TFm'
consumer_secret = 'bAWS75kNkDMKrWvGvzoGVYrRdjrtrZk7BaPwTBcBzdpfdHr0Il'
access_token = '969603656080330753-ucYDl5fW5aJmzT2AJIuR6wtKaa8Jf7A'
access_token_secret = 'HrI9XHNBqtQ0xxouDPqeY6OOOzLav1moLk1BgP7l29an9'
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
print(data)
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
#This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
#This line filters the Twitter stream to capture tweets matching the tracked keyword '#women'
stream.filter(track=['#women'])
|
[
"s530670@nwmissouri.edu"
] |
s530670@nwmissouri.edu
|
9cde266c8bb9da6c3adb3d65bb550bd61cef22f5
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/equiLeader_20200827165802.py
|
f08526aa75586e4953dfdca2be33a359af6602d4
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
def equi(A):
# return the number of equi leaders in the sequence
# first find the leader of the whole sequence
# an equi leader is an index such that when the sequence is split into two
# parts at that index, both parts have the same leader
# 0,1,2,3,4,5
store = {}
leader = -1
count = 0
for i in A:
if i in store:
store[i] +=1
else:
store[i] = 1
for i in store:
if store[i] > (len(A) // 2):
leader = i
i = 0
while i < len(A)-1:
# print(A[i+1])
print(A[:i+1])
i+=1
equi([4, 4, 2, 5, 3, 4, 4, 4] )
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
7e2e5990f25827e91fe635536bdbc26870b2c772
|
8a15ab8556221f998430219dcae25fc08b6d0eac
|
/src/aster/exporter/run.py
|
4b3ad95d46916958b8e3f6ac169ac7ec390603c5
|
[] |
no_license
|
chris010970/geology
|
47d32984e8daca0eb3740f41a141599674e4ae97
|
25cec20d7d73a18f3ae1e0edfb0bae9a3cfbde87
|
refs/heads/master
| 2020-12-26T18:20:58.989757
| 2020-02-10T08:49:16
| 2020-02-10T08:49:16
| 237,592,768
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
import os
import sys
import argparse
from exporter import Exporter
sys.path.append( os.path.join( os.path.dirname( sys.path[0]), '../utility' ) )
from fs import getFileList
def getSceneList(args):
"""
Placeholder
"""
# assume single scene - else collect list
scenes = [ args.scene ]
if args.batch is True:
scenes = getFileList( args.scene, 'AST*.hdf' )
return scenes
def parseArguments(args=None):
"""
Placeholder
"""
# parse command line arguments
parser = argparse.ArgumentParser(description='aster l1t exporter')
parser.add_argument('scene', action="store")
# batch processing
parser.set_defaults(batch=False)
parser.add_argument('--batch',
help='batch',
dest='batch', action='store_true' )
return parser.parse_args(args)
def main():
"""
Placeholder
"""
# parse arguments
args = parseArguments()
obj = Exporter()
# for each scene
scenes = getSceneList( args )
for scene in scenes:
# export hdf sub-datasets to geotiff
obj.process( scene )
return
# execute main
if __name__ == '__main__':
main()
|
[
"c.r.williams0109@gmail.com"
] |
c.r.williams0109@gmail.com
|
a0be6281101cdbdb3abe6903e3a925c9759910ed
|
787eee171019dfac7a4866c3f6461207a11de174
|
/EthanAndElisaRampDataAnalysis.py
|
90c3d93b824169c72144c87f7ba4c896a375d8de
|
[] |
no_license
|
davidzane/FindDaBadPixels
|
84f7081b7eb58d9633d547f9af83720bdd93aaca
|
e5de02bc78cf6ade5d336cc4cb1a2d839b9b5783
|
refs/heads/master
| 2020-06-18T10:03:35.067281
| 2019-07-19T23:11:58
| 2019-07-19T23:11:58
| 196,264,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
#Built using Andres's code, for getting curve-fitting coefficients and comparing them for data analysis
#%% import stuff
import numpy as np
import numpy.ma as ma
import sys
sys.path.append("/Users/mrpoopoopants/desktop/cryonirsp/cnpipeline-summer/cnpipeline/lib")
import EthansToolbox as ct
from EthansCnDataCube import CnDataCube
import naming
import os
import matplotlib.pyplot as plt
import math
#%% Let's make the data cube!
a = CnDataCube("/Users/mrpoopoopants/desktop/TestFitsImages",
"slow-tripple-SiIX-t0-00020-*.fits",
"pizzabox")
a.read_ramp(naming.SLOW_CAMERA_MODE, verbose=True)
a.invert_16bit_data()
print("this is the current shape of the data:", a.data.shape)
#what is dead beef?
#%% Now let's try the subarray
a.get_subarray([4, 4],2040 ,2040)
print("this is the current shape of the data:", a.subarray.shape)
#%% Now try reshape subarray data from 3D to 2D so we can do stuff with it
a.reshape_data("down")
print("this is the current shape of the data:", a.subarray.shape)
#%% now run a for loop that loops 10 times, each with different slicings. Coef array files will be saved.
for element in range(1,11):
#%% Now slice the data
sliced_subarray = a.subarray[::element]
#print("this is the current shape of the data (after slicing):", sliced_subarray.shape)
#%% Okay, time to make a threshold to take out bad pixels (I think)
threshold = 27000
order = 2
nrPixels = sliced_subarray.shape[1]
thresholdMask = np.zeros(sliced_subarray.shape)
#print("this is the shape of the threshold mask:", thresholdMask.shape)
fitColumnIndex = []
if a.nrRamps == 1:
fitColumnMask = np.zeros((order+1, sliced_subarray.shape[1]))
firstNan = np.zeros((sliced_subarray.shape[1]))
howMany = np.zeros((a.nrRamps, order+1)) # what does this do?
for i in range(a.nrRamps):
if a.nrRamps ==1:
thresholdMask, firstNan = ct.old_get_threshold_mask(sliced_subarray, threshold)
tmp, fitColumnMask = ct.check_mask_vs_fitorder(firstNan, order)
#print(tmp[0].shape, tmp[0].dtype)
for j in range(order+1):
howMany[i,j] = len(tmp[j])
fitColumnIndex.append(tmp)
print("how many?", howMany, "\n")
#%% Now finally, some curve fitting (I think)
dataTime = np.arange(sliced_subarray.shape[0])*.502 + 0.502
if a.nrRamps == 1:
coef = ct.masked_polyfit(dataTime, sliced_subarray, order, thresholdMask, firstNan)
print("the bias", ma.median(coef[-1]))
#print("the shape of the coef is:", coef.shape)
#%% And now save the sliced_subarray as an outfile
dir = "/Users/mrpoopoopants/desktop/cryonirsp/cnpipeline-summer/cnpipeline/"
coef.dump(os.path.join(dir,"elisaslice"+str(element)))
print("file made")
# # Now get the median percent error
# data_everyNDR = np.load('/Users/mrpoopoopants/desktop/cryonirsp/cnpipeline-summer/cnpipeline/elisaslice1')
# data_slicedNDR = np.load('/Users/mrpoopoopants/desktop/cryonirsp/cnpipeline-summer/cnpipeline/elisaslice'+str(element))
# linear_coef_allNDR = data_everyNDR[1]
# linear_coef_sliceoNDR = data_slicedNDR[1]
# percent_error_multiplier = a.data.shape[0] / sliced_subarray.shape[0]
# #print("percent error multiplier", percent_error_multiplier)
# percent_error = 100 * ( percent_error_multiplier * linear_coef_allNDR - linear_coef_sliceoNDR ) / (percent_error_multiplier * linear_coef_allNDR)
# print("median percent error for", element,"is", ma.median(percent_error))
#%% Now make histograms to see bad pixels!!!!!!!!!!!!!!!!
#Load the coef array that we are comparing against
coef_allNDR = np.load('/Users/mrpoopoopants/desktop/cryonirsp/cnpipeline-summer/cnpipeline/elisaslice1')
#loop through to load each slicing and make a histogram for each
for item in range (2, 11):
coef_sliceoNDR = np.load('/Users/mrpoopoopants/desktop/cryonirsp/cnpipeline-summer/cnpipeline/elisaslice'+str(item))
#select the coeffeciant for the linear term
linear_coef_allNDR = coef_allNDR[-2]
linear_coef_sliceoNDR = coef_sliceoNDR[-2]
#calculate the percent error
percent_error = ( 100 * ( linear_coef_sliceoNDR -
(item * linear_coef_allNDR) ) /
(item * linear_coef_allNDR) )
#calculate the median value of the percent error array
median_percent_error = ma.median(percent_error)
print("the median percent off of",item,"slice coef from no slice coef is:", median_percent_error)
plt.hist(percent_error, 1000, density=True, range=(-60, 60))
plt.title(str(item))
plt.savefig(os.path.join(dir, 'histogram'+str(item)+'.png'))
plt.show()
#%%
|
[
"noreply@github.com"
] |
davidzane.noreply@github.com
|
e91ed5ee381345d4cb599c6557e1542b7a721666
|
30d1c962ae880fa962ce0a730926d7c71efd5783
|
/compare/models.py
|
c7990718286dd034f7533772023d2cb7e7191c8d
|
[] |
no_license
|
fortable1999/covermash
|
184c642b4d6598fbdbacfee761f5c6f5d4612398
|
7059092182c2afd92c86a8feca6cc16bee3fb3c6
|
refs/heads/master
| 2021-01-20T10:06:31.134610
| 2014-08-28T16:21:13
| 2014-08-28T16:21:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
"""
compare admin page generated by template
template author: Meng Zhao fortable1999@gmail.com
"""
from django.db import models
from django.core.urlresolvers import reverse_lazy as reverse
from django.utils.translation import ugettext as _
class Compare(models.Model):
"""
compare template model
"""
dmm_id1 = models.CharField(
_("dmm id 1"),
max_length=20)
dmm_id2 = models.CharField(
_("dmm id 2"),
max_length=20)
dmm_count1 = models.IntegerField(
_('dmm count 1'), default=0)
dmm_count2 = models.IntegerField(
_('dmm count 2'), default=0)
created_datetime = models.DateTimeField(
_("created datetime"),
auto_now_add=True)
updated_datetime = models.DateTimeField(
_("updated datetime"),
auto_now=True)
def get_absolute_url(self):
"""
return detail page url
"""
return reverse('compare', kwargs={'id1': self.dmm_id1, 'id2':self.dmm_id2})
def get_img1(self):
print('0000')
return "http://pics.dmm.co.jp/digital/video/%s/%spl.jpg" % (self.dmm_id1,self.dmm_id1)
def get_img2(self):
return "http://pics.dmm.co.jp/digital/video/%s/%spl.jpg" % (self.dmm_id2,self.dmm_id2)
def __unicode__(self):
"""
<CAUTION>
this will be used to represent a model object.
it should never be longer than 200 characters when you use the admin!
"""
return "<Compare Object>"
class Meta:
"""
<Usually used meta options>
"""
ordering = (
"created_datetime",
)
get_latest_by = "created_datetime"
|
[
"fortable1999@gmail.com"
] |
fortable1999@gmail.com
|
84fd4c105e8f3c76cc7e7faf646ced0c9eee9175
|
0c4db2e5ef3c1632585dcc8848aba036589fd038
|
/biblioteca/apps/libro/migrations/0001_initial.py
|
cfbb5fce35d20ddb0148a9f74466783fc0f7ed83
|
[] |
no_license
|
jonwilm/django2.0
|
1c358e8b7eb65f3da55702e29ec01ca8e2b69400
|
5bb2c8268ec405da5c2bd97e42379c4f9360b0cc
|
refs/heads/master
| 2023-04-28T10:48:07.218411
| 2019-11-28T21:04:40
| 2019-11-28T21:04:40
| 223,832,456
| 0
| 0
| null | 2023-04-21T20:41:26
| 2019-11-25T00:41:06
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
# Generated by Django 2.0.6 on 2019-11-21 03:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Autor',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=200)),
('apellidos', models.CharField(max_length=220)),
('nacionalidad', models.CharField(max_length=100)),
('descripcion', models.TextField()),
],
),
]
|
[
"jparedes84@gmail.com"
] |
jparedes84@gmail.com
|
37808558e8dda82d4d47379d05d2dc1ca3cf04cc
|
9bd361a1b0f1c067d58c0be57e5f679ccad18e59
|
/algorithms/226. Invert Binary Tree.py
|
34645cfeadc0cb05909ee4f5670f95c1e6ffa832
|
[] |
no_license
|
yyassif/codinginterviewquestions
|
1c672dee69df5e4fa31195cede31a37ce0e46430
|
870215d4b4ed73426eedefd7d2d08e0f540dc426
|
refs/heads/master
| 2022-08-13T19:22:55.730273
| 2020-05-18T21:19:42
| 2020-05-18T21:19:42
| 265,077,410
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
#https://leetcode.com/problems/invert-binary-tree/discuss/360867/Python3-recursively-and-iteratively
#Recursively:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if root == None:
return None
r = self.invertTree(root.right)
l = self.invertTree(root.left)
root.right = l
root.left = r
return root
#Iteratively BFS
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if root == None:
return None
queue = deque([root])
while queue:
cur = queue.popleft()
temp = cur.right
cur.right = cur.left
cur.left = temp
if cur.left:
queue.append(cur.left)
if cur.right:
queue.append(cur.right)
return root
|
[
"noreply@github.com"
] |
yyassif.noreply@github.com
|
31f5579b5a676a2c1bf975a6aa551896448d172b
|
3b7cbaad8d6048f35f60c923616b1cfb2462ea33
|
/Python/convert_gatk.py
|
b6dd452b95a82ababc971642eff1e058093a1a56
|
[] |
no_license
|
tianxiahuihui/bioinformatics
|
14192499825e9dfeaec65cc7273c02239cc7de0c
|
28f263dcdcdc32b214289b73ffdd8b8acf1e4432
|
refs/heads/master
| 2020-05-21T19:55:31.348679
| 2020-04-08T09:03:54
| 2020-04-08T09:03:54
| 64,446,379
| 0
| 0
| null | 2019-09-07T08:36:06
| 2016-07-29T03:11:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,196
|
py
|
import sys, getopt, os
def parse_ped_file(ped_file):
ped_reader = open(ped_file)
lines = ped_reader.readlines()
trio = ['', '', '']
for line in lines:
record = line.split()
if(record[2] == '0' and record[3] == '0' and record[4] == '1'):
trio[0] = record[1]
if(record[2] == '0' and record[3] == '0' and record[4] == '2'):
trio[1] = record[1]
if(record[2] != '0' and record[3] != '0'):
trio[2] = record[1]
ped_reader.close()
return trio
def str_wrap_by(start_str, end_str, line):
start = line.find(start_str)
if start >= 0:
start += len(start_str)
end = line.find(end_str)
if end >= 0:
return line[start:end].strip()
def get_family_id(ped_file):
ped_reader = open(ped_file)
first_line = ped_reader.readline()
ped_reader.close()
return first_line.split()[0]
def get_gt(genotype):
return genotype.split(':')[0]
def get_dp(genotype):
return genotype.split(':')[2]
def dp_passed(f_dp, m_dp, o_dp, min_dp):
if(f_dp >= min_dp and m_dp >= min_dp and o_dp >= min_dp):
return True
def is_denovo(f_gt, m_gt, o_gt):
if((f_gt == '0/0' or f_gt== '0|0') and (m_gt == '0/0' or m_gt=='0|0') and (o_gt == '0/1' or o_gt=='0|1' or o_gt=='1|0')):
return True
def parse(vcf_file, ped_file, min_dp):
trio = parse_ped_file(ped_file)
family_id = get_family_id(ped_file)
vcf_reader = open(vcf_file)
lines = vcf_reader.readlines()
samples = ['', '', '']
for line in lines:
if(line.startswith('#CHROM')):
record = line.split()
samples[0] = record[9]
samples[1] = record[10]
samples[2] = record[11]
if(not line.startswith('#') and 'DP' in line.split()[8]):
record = line.split()
genotypes = dict()
genotypes[samples[0]] = record[9]
genotypes[samples[1]] = record[10]
genotypes[samples[2]] = record[11]
f_gt = get_gt(genotypes[trio[0]])
m_gt = get_gt(genotypes[trio[1]])
o_gt = get_gt(genotypes[trio[2]])
if(is_denovo(f_gt, m_gt, o_gt)):
f_dp = get_dp(genotypes[trio[0]])
m_dp = get_dp(genotypes[trio[1]])
o_dp = get_dp(genotypes[trio[2]])
if(not f_dp=='.'and not m_dp=='.' and not o_dp=='.'and dp_passed(int(f_dp), int(m_dp), int(o_dp), int(min_dp))):
print(family_id + ',' + record[0].strip() + ',' + record[1].strip())
vcf_reader.close()
def usage():
print('Usage: ' + sys.argv[0] + ' [OPTIONS]')
print('OPTIONS:')
print('-p, --ped\t<FILE>\tpedigree file (required)')
print('-v, --vcf\t<FILE>\tgatk output vcf file (required)')
print('-d, --depth\t<INT>\tmin read depth (optional, default 10)')
print('-h, --help\t\thelp information (optional)')
def isValidated(vcf_path, ped_path, min_dp):
is_passed = True
if (not os.path.isfile(vcf_path)):
print('##ERROR: The gatk output vcf file is not correctly specified!')
is_passed = False
if (not os.path.isfile(ped_path)):
print('##ERROR: The pedigree file is not correctly specified!')
is_passed = False
if (not (min_dp[0] == '-' and min_dp[1:] or min_dp).isdigit()):
print('##ERROR: The min_dp must be an integer!')
is_passed = False
return is_passed
def main():
try:
opts, args= getopt.getopt(sys.argv[1:], 'p:v:d:h', ['ped=', 'vcf=', 'depth=', 'help'])
except getopt.GetoptError:
usage()
sys.exit()
ped_path = ''
vcf_path = ''
min_dp = ''
for op, value in opts:
if op in ('-p', '--ped'):
ped_path = value
elif op in ('-v', '--vcf'):
vcf_path = value
elif op in ('-d', '--depth'):
min_dp = value
elif op in ('-h', '--help'):
usage()
sys.exit()
if(min_dp == ''):
min_dp = '10'
if(isValidated(vcf_path, ped_path, min_dp)):
parse(vcf_path, ped_path, min_dp)
else:
usage()
sys.exit()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
tianxiahuihui.noreply@github.com
|
f2111da069473d4ca652cbc2deee1b5dbbd69258
|
69f239a861d3b9e196161df01279c07a64ee84f6
|
/Kyi-Win-individual-project/Codes/final_cuda_Adagrad.py
|
1d44e77ecca69b36e9d2dcfd13479fa5e1dde130
|
[
"MIT"
] |
permissive
|
mfarmer11/Final-Project-Group9
|
d168e3e52ff0dabe147f9e6182faefb362c5f5b6
|
dd1336157e69dd237d557e12d157cd152331f99d
|
refs/heads/master
| 2020-04-09T03:42:15.433953
| 2018-12-05T00:18:26
| 2018-12-05T00:18:26
| 159,993,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,750
|
py
|
# coding: utf-8
import torchvision.transforms as tt
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import time
import torch.nn as nn
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from PIL import ImageFile
import itertools
torch.manual_seed(1122)
from torchvision import models
ImageFile.LOAD_TRUNCATED_IMAGES = True
#------------------------------------------------------------------------------------
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
#------------------------------------------------------------------------------------
transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
batch_size = 64
data_path = 'Dataset/Train/'
train_dataset = torchvision.datasets.ImageFolder(
root=data_path,
    transform=transform
)
train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
batch_size= batch_size,
shuffle=True, num_workers=4
)
data_path = 'Dataset/Test/'
test_dataset = torchvision.datasets.ImageFolder(
root=data_path,
    transform=transform
)
test_loader = torch.utils.data.DataLoader( dataset = test_dataset,
batch_size= batch_size,
shuffle=False, num_workers=4
)
classes =('bags', 'dresses', 'footwear', 'outerwear', 'skirts', 'tops')
train_iter = iter(train_loader)
print(type(train_iter))
images, labels = next(train_iter)
print('images shape on batch size = {}'.format(images.size()))
print('labels shape on batch size = {}'.format(labels.size()))
# In[3]:
# grid = torchvision.utils.make_grid(images)
# plt.imshow(grid.numpy().transpose((1, 2, 0)))
# plt.axis('off')
# plt.title(labels.numpy());
# plt.show()
num_epochs = 5
learning_rate = 0.01
# CNN Model (2 conv layer)
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer3 = nn.Sequential(
nn.Conv2d(16, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU())
self.layer4 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer5 = nn.Sequential(
nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
self.fc = nn.Linear(14 * 14 * 32, 1000)
self.fc1 = nn.Linear(1000, 6)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
out = self.fc1(out)
return out
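# Shape bookkeeping for the fully connected layer above (illustrative check):
# 224 -> 112 (layer1 pool) -> 56 (layer2 pool) -> 56 (layer3, no pool) -> 28 (layer4 pool)
# -> 14 (layer5 pool), with 32 output channels, hence the 14 * 14 * 32 input features of self.fc.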
cnn = CNN()
cnn.cuda()
# -----------------------------------------------------------------------------------
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adagrad(cnn.parameters(), lr=learning_rate)
# -----------------------------------------------------------------------------------
# Train the Model
start_time = time.time()
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images).to(device)
labels = Variable(labels).to(device)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = cnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
% (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.item()))
# -----------------------------------------------------------------------------------
# Test the Model
cnn.eval() # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images).to(device)
outputs = cnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
    correct += (predicted == labels.to(device)).sum().item()
end_time = time.time()
print(str(end_time-start_time) + " seconds")
# -----------------------------------------------------------------------------------
print('Test Accuracy of the model on the test images: %d %%' % (100 * correct / total))
# -----------------------------------------------------------------------------------
# Save the Trained Model
torch.save(cnn.state_dict(), 'cnn.pkl')
#
class_correct = list(0. for i in range(6))
class_total = list(0. for i in range(6))
true_label = []
predicted_label = []
for data in test_loader:
images, labels = data
images = Variable(images).to(device)
outputs = cnn(images)
_, predicted = torch.max(outputs.data, 1)
# mine
y_proba, predicted = torch.max(outputs.data, 1)
# end mine
labels = labels.cpu().numpy()
c = (predicted.cpu().numpy() == labels)
#Add true labels
true_label.extend(labels)
#Add predicted labels
predicted_label.extend(predicted.cpu().numpy())
    for i in range(len(labels)):
label = labels[i]
class_correct[label] += c[i]
class_total[label] += 1
# --------------------------------------------------------------------------------------------
for i in range(6):
print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
# --------------------------------------------------------------------------------------------
#cnf_matrix = confusion_matrix(labels.cpu(), predicted.cpu())
#Change later
cnf_matrix = confusion_matrix(true_label,predicted_label)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print('Normalized confusion matrix')
    else:
        print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
# Compute confusion matrix
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure(figsize=(15, 10))
plot_confusion_matrix(cnf_matrix, classes=classes,
title='Confusion matrix')
plt.show()
target_names = classes
print(classification_report(true_label,predicted_label, target_names=target_names))
#
# # The accuracy rate
# print('Accuracy Rate')
# accuracy_rate = np.sum(np.diagonal(cnf_matrix)) / np.sum(cnf_matrix)
# print(accuracy_rate)
#
# print()
# # The misclassifcation rate
# print('Miscalculation Rate')
# print(1 - (np.sum(np.diagonal(cnf_matrix)) / np.sum(cnf_matrix)))
#ROC and AUC
from sklearn.metrics import roc_curve, auc
#change the class to stop at 5
from sklearn.preprocessing import label_binarize
y_label = label_binarize(true_label, classes=[0, 1, 2, 3, 4, 5])
y_predict = label_binarize(predicted_label, classes=[0, 1, 2, 3, 4, 5])
n_classes = len(classes)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_label[:, i], y_predict[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
#colors = cycle(['blue', 'red', 'green'])
print(roc_auc)
plt.figure(figsize=(15,10))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i],
label='ROC curve of class {0} ----- (area = {1:0.2f})'
''.format(classes[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for multi-class data')
plt.legend()
plt.show()
# Test the model
# In[18]:
size = 4
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=size,
shuffle=False, num_workers=4
)
# In[19]:
dataiter = iter(test_loader)
images, labels = next(dataiter)
# print images (imshow is not defined in this script, so unnormalize and plot the grid directly)
grid = torchvision.utils.make_grid(images) / 2 + 0.5  # undo Normalize(mean=0.5, std=0.5)
plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(size)))
plt.show()
# In[20]:
images = Variable(images).to(device)
outputs = cnn(images)
# In[21]:
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(size)))
|
[
"manoahf@gwu.edu"
] |
manoahf@gwu.edu
|
0209d99d4efce92a99a77e26d7e5efd07b850cd9
|
00271b9a8cb7531d81427bcdda386b71e836a804
|
/GUI/APE_GUI_Node.py
|
93abb36808248aaffdb3861b5144f8cefc3df345
|
[] |
no_license
|
machinekoder/APE
|
c2d99dad90943cbf8be4a7440262b5070765fd11
|
23084d53ac3e34826c8ae5044502375aca45b93f
|
refs/heads/master
| 2020-06-14T15:08:51.935881
| 2019-07-02T13:29:48
| 2019-07-02T13:29:48
| 195,036,751
| 0
| 0
| null | 2019-07-03T11:01:49
| 2019-07-03T11:01:49
| null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
from zmqNode import zmqNode
import threading
import APE
from multiprocessing import Process
class APE_GUI_Node():
def __init__(self):
self.node = zmqNode()
self.node.target = self
def startNode(self, address):
self.node.logfile = 'GUINode.txt'
self.node.connect('APE', address)
self.node.start_listening()
def setValue(self, app_address, value):
kwargs = {'command': 'setValue', 'information': {'infoAddress': app_address, 'value': value}}
message = {'subject': 'target', 'action': 'CMD_Apparatus', 'kwargs': kwargs}
self.node.send('APE', message)
def getValue(self, app_address, local_method, local_args='', local_kwargs=''):
#Build expected reply
ereply = {}
ereply['subject'] = 'target'
ereply['action'] = local_method # This is a string not a method!
if local_args != '':
ereply['args'] = local_args
if local_kwargs != '':
ereply['kwargs'] = local_kwargs
# Build primary message
kwargs = {'command': 'getValue', 'information': {'infoAddress': app_address}}
message = {'subject': 'target', 'action': 'CMD_Apparatus', 'kwargs': kwargs, 'ereply': ereply}
self.node.send('APE', message)
def test_print(self, message):
print(str(message))
if __name__ == '__main__':
address = "tcp://127.0.0.1:5562"
proc_APE = Process(target=APE.StartAPE, args=(address,))
proc_APE.start()
banana = APE_GUI_Node()
banana.startNode(address)
banana.getValue(['information', 'calibrationfile'], 'test_print', local_args=['e_reply'])
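    # setValue is used the same way, e.g. (illustrative address path and value):
    # banana.setValue(['information', 'calibrationfile'], 'new_calibration.json')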
|
[
"43216607+jhardin4@users.noreply.github.com"
] |
43216607+jhardin4@users.noreply.github.com
|
02cefe093965de7477f472d83242cdc96428f342
|
27666aa90c6b735c1142a18147f700494c454649
|
/personal_finance/hooks.py
|
717082a8fde9c7705ddcfb61cfb2f0b908a9903d
|
[
"MIT"
] |
permissive
|
hpema/personal_finance
|
f4c739665b1f000cea8d9347df6269e230490106
|
33b2e329cadcf8e34bc0206090d94887d572bcf3
|
refs/heads/master
| 2020-12-24T18:50:51.705934
| 2016-05-05T18:31:47
| 2016-05-05T18:31:47
| 58,152,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
app_name = "personal_finance"
app_title = "Personal Finance"
app_publisher = "Hemant Pema"
app_description = "Application to manage your personal finance"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "hemant@pema.co.za"
app_version = "0.0.1"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/personal_finance/css/personal_finance.css"
# app_include_js = "/assets/personal_finance/js/personal_finance.js"
# include js, css files in header of web template
# web_include_css = "/assets/personal_finance/css/personal_finance.css"
# web_include_js = "/assets/personal_finance/js/personal_finance.js"
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "personal_finance.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "personal_finance.install.before_install"
# after_install = "personal_finance.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "personal_finance.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "personal_finance.tasks.all"
# ],
# "daily": [
# "personal_finance.tasks.daily"
# ],
# "hourly": [
# "personal_finance.tasks.hourly"
# ],
# "weekly": [
# "personal_finance.tasks.weekly"
#     ],
# "monthly": [
# "personal_finance.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "personal_finance.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "personal_finance.event.get_events"
# }
|
[
"hemant@pema.co.za"
] |
hemant@pema.co.za
|
0f138a3cbe085ec7985adaa99bf4cc6bb5ae4c96
|
ef7d8cb635d346a3f13c69b65915f318fab2a5b5
|
/pamap2/sn/sn_conv_oe_n.py
|
55057a4ac829b837b6583f92a5d4959937af107b
|
[] |
no_license
|
anjanaw/Deep-Metric-Learning-for-HAR
|
f7dc1cb6305238dc2252f0349060f164a57f9cfd
|
54822089c6938cc4c44a2c6bee97ba48ee3bf7ea
|
refs/heads/master
| 2023-02-15T17:20:54.927945
| 2021-01-15T12:08:37
| 2021-01-15T12:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,114
|
py
|
import numpy as np
import random
import heapq
from sklearn.metrics.pairwise import cosine_similarity
from keras.models import Model
from keras.layers import Dense, Input, Lambda, Conv1D, MaxPooling1D, Flatten
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import read
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(2)
num_test_classes = 2
mini_batch_size = 200
batch_size = 60
steps_per_epoch = mini_batch_size
feature_length = read.dct_length * 3 * 3
epochs = 10
k_shot = 5
k = 3
def get_neighbours(instance, dataset, n):
return np.argsort(np.linalg.norm(dataset - instance, axis=1))[:n]
def get_accuracy(test_labels, predictions):
correct = 0
for j in range(len(test_labels)):
if test_labels[j] == predictions[j]:
correct += 1
return (correct / float(len(test_labels))) * 100.0
# Define Euclidean distance function
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))
# Define the shape of the output of Euclidean distance
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
# Define the contrastive loss function (as from Hadsell et al [1].)
def contrastive_loss(y_true, y_pred):
margin = 15
    square_pred = K.square(y_pred)
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
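# Plain-NumPy sketch of the same loss for intuition (illustrative, not used by the training code):
# genuine pairs (y_true = 1) are penalised by d**2, impostor pairs (y_true = 0) by
# max(margin - d, 0)**2, so impostors separated by more than the margin cost nothing.
def _contrastive_loss_np(y_true, d, margin=15):
    return np.mean(y_true * d ** 2 + (1 - y_true) * np.maximum(margin - d, 0) ** 2)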
def create_pairs(x, digit_indices, num_classes):
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
for d in range(num_classes):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, num_classes)
dn = (d + inc) % num_classes
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
return np.array(pairs), np.array(labels)
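# Note on create_pairs above: for each anchor it appends one positive pair (two consecutive
# samples of the same class) followed by one negative pair (a sample from a randomly shifted
# class), which is why the labels list is extended with [1, 0] on every iteration.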
def build_conv_model():
_input = Input(shape=(feature_length, 1))
x = Conv1D(12, kernel_size=3, activation='relu')(_input)
x = MaxPooling1D(pool_size=2)(x)
x = BatchNormalization()(x)
x = Flatten()(x)
x = Dense(1200, activation='relu')(x)
return Model(inputs=_input, outputs=x, name='embedding')
feature_data = read.read()
test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())
for test_id in test_ids:
for _int in range(5):
test_labels_indices = np.random.choice(len(all_labels), num_test_classes, False)
test_labels = [a for ii, a in enumerate(all_labels) if ii in test_labels_indices]
print(test_labels)
train_labels = [a for ii, a in enumerate(all_labels) if ii not in test_labels_indices]
print(train_labels)
_train_data, _test_data = read.split(feature_data, test_id)
_train_data = read.remove_class(_train_data, test_labels)
_support_data, _test_data = read.support_set_split(_test_data, k_shot)
_train_data, _train_labels = read.flatten(_train_data)
_support_data, _support_labels = read.flatten(_support_data)
_train_data = np.array(_train_data)
        _train_data = np.expand_dims(_train_data, -1)  # add the channel axis expected by Conv1D
_support_data = np.array(_support_data)
        _support_data = np.expand_dims(_support_data, -1)
_train_labels = np.array(_train_labels)
_support_labels = np.array(_support_labels)
base_network = build_conv_model()
input_a = Input(shape=(feature_length, 1))
input_b = Input(shape=(feature_length, 1))
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([processed_a, processed_b])
        model = Model(inputs=[input_a, input_b], outputs=distance)
model.compile(loss=contrastive_loss, optimizer='adam')
for x in range(epochs):
digit_indices = [np.where(_train_labels == i)[0] for i in train_labels]
x_pairs, y_pairs = create_pairs(_train_data, digit_indices, len(train_labels))
model.fit([x_pairs[:, 0], x_pairs[:, 1]], y_pairs, verbose=1, batch_size=batch_size, epochs=1)
_support_preds = base_network.predict(_support_data)
for _l in list(_test_data[test_id].keys()):
_test_label_data = _test_data[test_id][_l]
_test_labels = [_l for i in range(len(_test_label_data))]
_test_label_data = np.array(_test_label_data)
            _test_label_data = np.expand_dims(_test_label_data, -1)
_test_labels = np.array(_test_labels)
_test_preds = base_network.predict(_test_label_data)
acc = read.cos_knn(k, _test_preds, _test_labels, _support_preds, _support_labels)
result = 'sn_conv, 3nn,' + str(num_test_classes) + ',' + str(test_id) + ',' + ','.join([str(t) for t in test_labels]) + ',' + str(_l) + ',' + str(acc)
read.write_data('sn_conv_oe_n.csv', result)
|
[
"shyameniw@gmail.com"
] |
shyameniw@gmail.com
|
d52b59f1f40718023ecf4eeaa487a668f48e8c6a
|
3be1ddf42236a1b33ec74ed3bfdd0f8918513733
|
/coding-challenges/week07/day01/Q3.Twice.py
|
2a78bdb85cd56fb61bca2832de08a7d3ae9cbc3a
|
[] |
no_license
|
aabhishek-chaurasia-au17/MyCoding_Challenge
|
84ef926b550b3f511f1c642fe35f4303c8abb949
|
419d02ad8740a2c00403fd30c661074266d2ba8f
|
refs/heads/main
| 2023-08-29T09:52:36.796504
| 2021-11-07T07:32:09
| 2021-11-07T07:32:09
| 359,842,173
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
"""
Given an integer array , every element is repeated TWICE , except one
element , Find that element ?
Input : - [1 , 2 , 1, 2 ,4 , 3 ,4 ,3]
Output: - 3
Explanation : HINT : - Use XOR operator ;
"""
def findSingle( ar, n):
res = ar[0]
# Do XOR of all elements and return
for i in range(1,n):
res = res ^ ar[i]
return res
# Driver code
ar = [2, 3, 5, 4, 5, 3, 4]
# arra = int(input("Enter numbers in array format:"))
print("Element occurring once is", findSingle(ar, len(ar)))
#Time Complexity O(n)
#Space Complexity: O(1)
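# Equivalent one-liner using functools.reduce (illustrative alternative, same O(n) time):
from functools import reduce
print("Check with reduce:", reduce(lambda a, b: a ^ b, ar))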
|
[
"abhishekc838@gmail.com"
] |
abhishekc838@gmail.com
|
f866b0fc0f1edf6197ab857fd8bb5ae40d07d0bb
|
cf4bbf762ffca3bf9632b90ad367bd8cad867b3a
|
/Machine Learning A-Z/Part 3 - Classification/Section 16 - Support Vector Machine (SVM)/SVM.py
|
6861fbe49f79389f0a6b4b72fdb50373db435fe1
|
[] |
no_license
|
schnebly/Machine-Learning
|
b57601cb4e361a6191466a6a522610968c02dfc8
|
c3f6a179aa6182a49540be359df58d3b3a7148bd
|
refs/heads/master
| 2021-05-15T07:55:42.435282
| 2018-03-01T20:54:10
| 2018-03-01T20:54:10
| 108,628,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
#James Schnebly
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear')
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
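# Quick sanity check (illustrative, not part of the original walkthrough): the accuracy
# recovered from the 2x2 confusion matrix equals the share of correct test predictions.
print('Accuracy from confusion matrix: %.2f' % (float(cm[0, 0] + cm[1, 1]) / cm.sum()))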
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"noreply@github.com"
] |
schnebly.noreply@github.com
|
316052d06f6e1b68b3bbb4333879346f07326e91
|
f12f9c1455b1134bfe1b444dfb9bab40849ab433
|
/opencv/pycv_tutorial/drawing.py
|
0141cd424ab4d70847e3996e03abeed3c890f61f
|
[
"MIT"
] |
permissive
|
whaison/PyIntroduction
|
b4444d9c6e0fa6d0afd1abed8e259ea60e47a3e2
|
433142b25de36552867b209649b17113ca2e11c6
|
refs/heads/master
| 2021-01-11T15:56:48.331293
| 2016-11-15T07:28:19
| 2016-11-15T07:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
# -*- coding: utf-8 -*-
## @package pycv_tutorial.drawing
#
# Input/output and GUI: drawing with OpenCV
# @author tody
# @date 2016/06/26
import cv2
import numpy as np
# Create an empty image to draw on
def emptyImage():
img = np.zeros((512, 512, 3), np.uint8)
return img
# Draw a straight line
def drawLine(img, pt1, pt2, color, width):
img = cv2.line(img, pt1, pt2, color, width, cv2.LINE_AA)
# Draw a circle
def drawCircle(img, center, radius, color, width):
img = cv2.circle(img, center, radius, color, width, cv2.LINE_AA)
# Draw an ellipse
def drawElipse(img, center, axes, angle, startAngle, endAngle, color, width):
img = cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, width, cv2.LINE_AA)
# Draw a rectangle
def drawRectangle(img, pt1, pt2, color, width):
img = cv2.rectangle(img, pt1, pt2, color, width, cv2.LINE_AA)
# Draw polylines
def drawPolylines(img, pts, isClosed, color, width):
img = cv2.polylines(img, pts, isClosed, color, width, cv2.LINE_AA)
font_types = [cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN, cv2.FONT_HERSHEY_COMPLEX,
cv2.FONT_HERSHEY_COMPLEX_SMALL, cv2.FONT_HERSHEY_DUPLEX,
cv2.FONT_HERSHEY_SCRIPT_COMPLEX, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_ITALIC]
# Draw text
def drawText(img, text, org, fontFace, fontScale, color, width):
img = cv2.putText(img, text, org, fontFace, fontScale, color, width, cv2.LINE_AA)
def drawingDemo():
img = emptyImage()
    # Draw a line with thickness 2
drawLine(img, (10, 10), (200, 200), (0, 0, 255), 2)
    # Passing -1 as the width draws a filled shape
drawCircle(img, (300, 100), 80, (0, 255, 0), -1)
    # Draw both the filled interior and the outline
drawRectangle(img, (10, 210), (210, 350), (100, 100, 0), -1)
drawRectangle(img, (10, 210), (210, 350), (255, 0, 0), 3)
    # Draw an ellipse
drawElipse(img, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)
    # Draw a polygon
pts = np.array([[(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
drawPolylines(img, pts, True, (255, 100, 100), 5)
    # Draw the text
drawText(img, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)
cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
cv2.imshow('DrawingDemo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
drawingDemo()
|
[
"tody411@gmail.com"
] |
tody411@gmail.com
|
20a736f400abdec53fc7cbf139a8ce66474b7350
|
4feb1e08190ec89c85e4bdc2f185743508432c77
|
/Files/PyQt Widgets/07_Checkbox.py
|
fb02ab7954a6f5d8fc03cb2f6addaa61e4b0dd53
|
[] |
no_license
|
franksalas/pyqtDev
|
cb1ddb358b1f45220855b4495427f6183b373348
|
017b86933a6a82c6a666bb74849c1e2713509959
|
refs/heads/master
| 2021-01-10T11:44:37.742673
| 2016-04-05T21:42:15
| 2016-04-05T21:42:15
| 55,462,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
app = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle('Hello PyQT')
window.setWindowIcon(QIcon('pyqt.png'))
window.setGeometry(50,50,600,400)
checkbox = QCheckBox(window, text='PyQt')
checkbox.move(200,200)
checkbox.setChecked(True) # bool = True or False
checkbox2 = QCheckBox(window, text ='kivy')
checkbox2.move(200,250)
window.show()
app.exec_()
|
[
"Frank.salas@gmail.com"
] |
Frank.salas@gmail.com
|
ed58daa6f322d35c56fc1a469e630dd245d478a2
|
a46ce15cb0b86ee9d04fdc89848cc25171fdbf57
|
/src/class/predict_datas.py
|
962efce84695cdb7d271aa10f5dc9b6ceeba10e9
|
[] |
no_license
|
Luning644182206/trec
|
d429a0ce1c27a6d56d8a1994ecb9e9f522f2eb16
|
618edd0451f789c51e4383b403f4e767e2f3797d
|
refs/heads/master
| 2020-03-22T10:41:21.492132
| 2018-09-05T02:56:25
| 2018-09-05T02:56:25
| 139,920,244
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,891
|
py
|
# coding=utf8
# Created on Aug 16, 2018
# @author: luning644182206@emails.bjut.edu.cn
import csv
import os
import re
from nltk.corpus import stopwords
from sklearn.externals import joblib
import numpy as np
from sklearn import metrics
label = {
'GoodsServices': 'Request-GoodsServices',
'SearchAndRescue': 'Request-SearchAndRescue',
'InformationWanted': 'Request-InformationWanted',
'Volunteer': 'CallToAction-Volunteer',
'FundRaising': 'CallToAction-FundRaising',
'Donations': 'CallToAction-Donations',
'MovePeople': 'CallToAction-MovePeople',
'FirstPartyObservation': 'Report-FirstPartyObservation',
'ThirdPartyObservation': 'Report-ThirdPartyObservation',
'Weather': 'Report-Weather',
'EmergingThreats': 'Report-EmergingThreats',
'SignificantEventChange': 'Report-SignificantEventChange',
'MultimediaShare': 'Report-MultimediaShare',
'ServiceAvailable': 'Report-ServiceAvailable',
'Factoid': 'Report-Factoid',
'Official': 'Report-Official',
'CleanUp': 'Report-CleanUp',
'Hashtags': 'Report-Hashtags',
'PastNews': 'Other-PastNews',
'ContinuingNews': 'Other-ContinuingNews',
'Advice': 'Other-Advice',
'Sentiment': 'Other-Sentiment',
'Discussion': 'Other-Discussion',
'Irrelevant': 'Other-Irrelevant',
'Unknown': 'Other-Unknown',
'KnownAlready': 'Other-KnownAlready'
}
'''
clearIn: strip punctuation and stop words from a text string
Input: text - the raw string
Output: the cleaned string
others: none
'''
def clearIn(text):
interpunctions = [';', '_', '’', '…', 'rt', 'via', '-', '[', ']', '(', ')', '"', ':', "'", '.', ',', '?', '//', '/', '{', '}', '!', '&', '\r', '\t', '\f']
text = text.lower()
text = text.strip(' ')
text = ' '.join([word for word in text.strip().split() if word not in stopwords.words("english")])
for inter in interpunctions:
        if ((inter != '\r') and (inter != '\t') and (inter != '\f')):
text = text.replace(inter, '')
else:
text = text.replace(inter, ' ')
return text
# # 读取测试数据
# def readDatas(path):
# testDatas = []
# testOption = []
# # 打开文件
# with open(path, 'r') as csvfile:
# reader = csv.reader(csvfile)
# for row in reader:
# content = row[5].strip() + ' ' + row[6].strip()
# # content = row[3].strip() + ' ' + row[4].strip()
# replace = ' '
# urlRE = r'(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?'
# content = re.sub(urlRE, replace, str(content))
# content = clearIn(content)
# testOption.append(label[row[2]])
# testDatas.append(content)
# return testDatas,testOption
# read the training set (ontology files)
def readDatas(paths):
testDatas1 = []
testDatas2 = []
for path in paths:
fileName = path.split('/')[3]
nameSplit = fileName.split('_')
source = nameSplit[0]
fatherLabel = nameSplit[1]
sonLabel = '_'.join(nameSplit[2:-1])
counter = 0
        # open the file
with open(path, 'r') as csvfile:
reader = csv.reader(csvfile)
datas = []
for row in reader:
if (len(row) > 0):
content = ''
counter += 1
if (source != 'twitterNews'):
content = clearIn(row[2].strip())
else:
content = row[1].strip() + ' ' + row[2].strip()
replace = ' '
urlRE = r'(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?'
content = re.sub(urlRE, replace, str(content))
content = clearIn(content)
dataPush = [content, fatherLabel]
# dataPush = [content, sonLabel]
datas.append(dataPush)
splitNum = int(counter/2)
# trainingDatas += datas[:splitNum]
testDatas1 += datas[:splitNum]
testDatas2 += datas[splitNum:]
return testDatas2
def splitOptionAndData(datas):
data = []
label = []
for oneData in datas:
data.append(oneData[0])
label.append(oneData[1])
return data,label
# main entry point
if __name__ == '__main__':
# path = '../data/training_data/training_data.csv'
# # 读取预测数据
# testDatas, testOption = readDatas(path)
# =======
path = '../data/news/'
# path = '../data/training_data/training1/'
allFile = os.listdir(path)
filePaths = []
for fileName in allFile:
if (fileName != '.DS_Store'):
name = path + fileName
filePaths.append(name)
testDatas = readDatas(filePaths)
testContent, testOption = splitOptionAndData(testDatas)
# ========
    # load the locally saved classification model
textClf = joblib.load('../model/ontology_train_model.m')
predicted = textClf.predict(testContent)
predictedProba = textClf.predict_proba(testContent)
    # tally how many predictions are correct / incorrect and the accuracy
counterLabel = {
'Request-GoodsServices': [0, 0, 0, 0, 0, 0],
'Request-SearchAndRescue': [0, 0, 0, 0, 0, 0],
'Request-InformationWanted': [0, 0, 0, 0, 0, 0],
'CallToAction-Volunteer': [0, 0, 0, 0, 0, 0],
'CallToAction-FundRaising': [0, 0, 0, 0, 0, 0],
'CallToAction-Donations': [0, 0, 0, 0, 0, 0],
'CallToAction-MovePeople': [0, 0, 0, 0, 0, 0],
'Report-FirstPartyObservation': [0, 0, 0, 0, 0, 0],
'Report-ThirdPartyObservation': [0, 0, 0, 0, 0, 0],
'Report-Weather': [0, 0, 0, 0, 0, 0],
'Report-EmergingThreats': [0, 0, 0, 0, 0, 0],
'Report-SignificantEventChange': [0, 0, 0, 0, 0, 0],
'Report-MultimediaShare': [0, 0, 0, 0, 0, 0],
'Report-ServiceAvailable': [0, 0, 0, 0, 0, 0],
'Report-Factoid': [0, 0, 0, 0, 0, 0],
'Report-Official': [0, 0, 0, 0, 0, 0],
'Report-CleanUp': [0, 0, 0, 0, 0, 0],
'Report-Hashtags': [0, 0, 0, 0, 0, 0],
'Other-PastNews': [0, 0, 0, 0, 0, 0],
'Other-ContinuingNews': [0, 0, 0, 0, 0, 0],
'Other-Advice': [0, 0, 0, 0, 0, 0],
'Other-Sentiment': [0, 0, 0, 0, 0, 0],
'Other-Discussion': [0, 0, 0, 0, 0, 0],
'Other-Irrelevant': [0, 0, 0, 0, 0, 0],
'Other-Unknown': [0, 0, 0, 0, 0, 0],
'Other-KnownAlready': [0, 0, 0, 0, 0, 0]
}
file = open('predicted.txt', 'ab+')
    # analyse the results
sumRight = 0
allDataNum = 0
for index, element in enumerate(predicted):
        # optional manual filtering on the prediction probability
# pro = round(max(predictedProba[index]),2)
# print(pro)
        if (testOption[index] == element):
            # prediction matches the true label: a true positive for this class
            counterLabel[element][0] += 1
            sumRight += 1
        else:
            # prediction differs from the true label:
            # the predicted class gains a sample that does not belong to it (a false positive)
            counterLabel[element][1] += 1
            # and the true class loses one of its samples to another class (a false negative)
            counterLabel[testOption[index]][2] += 1
allDataNum += 1
sumTP = 0
sumFN = 0
sumTN = 0
for item in counterLabel:
        # [3]: correctly classified samples that belong to the other classes
counterLabel[item][3] = sumRight - counterLabel[item][0]
if ((counterLabel[item][1] + counterLabel[item][0]) != 0):
            # precision
counterLabel[item][4] = counterLabel[item][0]/(counterLabel[item][1] + counterLabel[item][0])
if ((counterLabel[item][1] + counterLabel[item][2]) != 0):
            # recall
counterLabel[item][5] = counterLabel[item][0]/(counterLabel[item][0] + counterLabel[item][2])
sumTP += counterLabel[item][0]
sumFN += counterLabel[item][1]
sumTN += counterLabel[item][2]
# data = item + ' ' + str(counterLabel[item][0]) + ' ' + str(counterLabel[item][1]) + ' ' + str(counterLabel[item][2]) + ' ' + str(counterLabel[item][3]) + ' ' + str(counterLabel[item][4]) + ' ' + str(counterLabel[item][5]) + '\n'
data = item + str(counterLabel[item][4]) + ' ' + str(counterLabel[item][5]) + '\n'
file.write(data.encode('utf-8'))
print(sumTP, sumFN, sumTN)
    # store the detailed results
file.write('details\n'.encode('utf-8'))
# for index, element in enumerate(predicted):
# data = testOption[index] + ' ' + element + '\n'
# file.write(data.encode('utf-8'))
file.close()
# print ('SVC',np.mean(predicted == testOption))
microP = sumTP/(sumTP + sumFN)
microR = sumTP/allDataNum
microF = 2 * microP * microR/(microP + microR)
print ('microP', microP)
print ('microR', microR)
    print ('micro-averaged F1', microF)
print (set(predicted))
    # confusion matrix: a value of 1 at (true=2, predicted=3) means one sample with true label 2 was predicted as 3
    # print (metrics.confusion_matrix(testOption, predicted)) # confusion matrix
|
[
"luning04@baidu.com"
] |
luning04@baidu.com
|
8e0cd76c2ddd913712e3ba1fe50c64da72ba6d4f
|
1a911748c1f320340dcd32fe31950dbd82abddb3
|
/utils/vision_utils.py
|
cb9e13d2ea35dacdc9c5df14427ab13cfe121444
|
[] |
no_license
|
icebox365/RE-OCR
|
8157618dd387fe21ae73483711fb705e8355c067
|
30707591d18671c006b6b887cc55fb3cd0e2fc11
|
refs/heads/master
| 2021-04-09T11:10:08.682805
| 2018-03-16T08:40:52
| 2018-03-16T08:40:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,513
|
py
|
import base64
import json
import math
import cv2
import numpy as np
import requests
from PIL import Image, ExifTags
from logger import *
def load_image(image_path):
try:
image = Image.open(image_path)
orientation = None
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
exif = dict(image._getexif().items())
if exif[orientation] == 3:
image = image.rotate(180, expand=True)
elif exif[orientation] == 6:
image = image.rotate(270, expand=True)
elif exif[orientation] == 8:
image = image.rotate(90, expand=True)
cv_img = np.array(image)
cv_img = cv_img[:, :, ::-1].copy()
return cv_img
except (AttributeError, KeyError, IndexError):
cv_img = cv2.imread(image_path)
return cv_img
def rect_orientation(anno):
points = anno['boundingBox']['vertices']
cen_x = .0
cen_y = .0
for i in range(4):
if 'x' not in points[i].keys():
points[i]['x'] = 0
if 'y' not in points[i].keys():
points[i]['y'] = 0
cen_x += points[i]['x']
cen_y += points[i]['y']
cen_x /= 4
cen_y /= 4
x0 = points[0]['x']
y0 = points[0]['y']
if x0 < cen_x:
if y0 < cen_y:
return ORIENTATION_NORMAL
else:
return ORIENTATION_270_DEGREE
else:
if y0 < cen_y:
return ORIENTATION_90_DEGREE
else:
return ORIENTATION_180_DEGREE
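# Note on rect_orientation above: the Vision API lists bounding-box vertices starting from the
# word's own top-left corner, so the position of vertex 0 relative to the box centre tells
# whether the page is upright or rotated by 90, 180 or 270 degrees.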
def correlate_orientation(anno, orientation, img_width, img_height):
points = anno['boundingBox']['vertices']
for i in range(4):
point = points[i]
if 'x' not in point.keys():
point['x'] = 0
if 'y' not in point.keys():
point['y'] = 0
if orientation == ORIENTATION_NORMAL:
new_x = point['x']
new_y = point['y']
elif orientation == ORIENTATION_270_DEGREE:
new_x = img_height - point['y']
new_y = point['x']
elif orientation == ORIENTATION_90_DEGREE:
new_x = point['y']
new_y = img_width - point['x']
elif orientation == ORIENTATION_180_DEGREE:
new_x = img_width - point['x']
new_y = img_height - point['y']
points[i]['x'] = new_x
points[i]['y'] = new_y
def make_request(cv_img, feature_types):
request_list = []
# Read the image and convert to the base string to send as json
h, w = cv_img.shape[:2]
gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
_ratio = math.sqrt(float(MAXIMUM_SIZE) / float(h * w))
gray = cv2.resize(gray, (int(w * _ratio), int(h * _ratio)))
resz_img = cv2.resize(cv_img, (int(w * _ratio), int(h * _ratio)))
_quality = 100
content_obj = {'content': base64.b64encode(
cv2.imencode('.jpg', gray, [cv2.IMWRITE_JPEG_QUALITY, _quality])[1].tostring()).decode('UTF-8')}
feature_obj = []
for feature_type in feature_types:
feature_obj.append({'type': feature_type})
context_obj = {"languageHints": ['en']}
request_list.append(
{'image': content_obj,
'features': feature_obj,
'imageContext': context_obj
}
)
return json.dumps({'requests': request_list}).encode(), resz_img
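# Note on the resize in make_request: _ratio = sqrt(MAXIMUM_SIZE / (h * w)) scales both sides by
# the same factor, so the resized image holds roughly MAXIMUM_SIZE pixels while keeping its
# aspect ratio before being JPEG-encoded and base64-wrapped for the API request.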
class VisionUtils:
def __init__(self, show_result=True):
self.endpoint_url = ENDPOINT_URL
self.api_key = API_KEY
self.show_result = show_result
def __get_response(self, json_data):
try:
response = requests.post(
url=self.endpoint_url,
data=json_data,
params={'key': self.api_key},
headers={'Content-Type': 'application/json'})
# print(response)
ret_json = json.loads(response.text)
return ret_json['responses'][0]
except Exception as e:
log_print("\t\texcept: {}".format(e))
return None
def __get_orientation(self, annos):
oris = [0, 0, 0, 0]
for anno in annos:
ori = rect_orientation(anno=anno)
oris[ori] += 1
if self.show_result:
log_print(" {}".format(oris))
return oris.index(max(oris))
def detect_text(self, path, idx, proc_queue):
try:
img = load_image(path)
            request_body, resz_img = make_request(cv_img=img, feature_types=['DOCUMENT_TEXT_DETECTION', 'TEXT_DETECTION',
                                                                             'LABEL_DETECTION'])
            response = self.__get_response(request_body)
img = resz_img
if response is None:
result = None
else:
_flag = False
for i in range(5):
if response['labelAnnotations'][i]['description'] != 'text':
_flag = True
break
if not _flag:
ret_label = response['labelAnnotations'][0]
result = {'id': idx,
'annotations': None,
'label': ret_label,
'orientation': None,
'image': img}
log_print("\t Not proper Invoice Document{}".format(ret_label))
else:
annos = []
document = response['fullTextAnnotation']
for page in document['pages']:
for block in page['blocks']:
for paragraph in block['paragraphs']:
for word in paragraph['words']:
text = ""
for symbol in word['symbols']:
text += symbol['text']
if type(text) is not str:
text = text.encode("utf-8")
anno = {
'boundingBox': word['boundingBox'],
'text': text
}
annos.append(anno)
# recognize the orientation
ori = self.__get_orientation(annos=annos)
height, width = img.shape[:2]
if ori != ORIENTATION_NORMAL:
img = cv2.rotate(img, rotateCode=ori)
for anno in annos:
correlate_orientation(anno=anno, orientation=ori, img_width=width, img_height=height)
if self.show_result: # display the line rect
pt0 = anno['boundingBox']['vertices'][0]
pt1 = anno['boundingBox']['vertices'][2]
cv2.line(img, (pt0['x'], pt0['y']), (pt1['x'], pt1['y']), (0, 255, 0), 1)
result = {'id': idx,
'annotations': annos,
'label': 'text',
'orientation': ori,
'image': img}
proc_queue.put(result, True, 1)
except Exception as e:
log_print("\t exception :" + str(e))
pass
|
[
"ice.box@email.com"
] |
ice.box@email.com
|
fd1b1cf74dbd869f6021d7d713f9dc3417f28f25
|
f3c7e2c147447ce62e5583765d6881c3d64d56e1
|
/jingdong/jingdong/spiders/jdspider.py
|
97500ebef1219169ecde54983479b8dcffd5b967
|
[] |
no_license
|
YNZ-ying/scrapyjingdong
|
3d4bb789724c4a87782821779cfed4330a17dc9c
|
0f22a4fe9b701661c29a44b97a37329a7d7bb4b0
|
refs/heads/master
| 2020-04-29T20:56:00.461158
| 2019-03-19T01:23:40
| 2019-03-19T01:23:40
| 176,397,560
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import re
import requests
import json
from jingdong.items import JingdongItem
class JdspiderSpider(scrapy.Spider):
name = 'jdspider'
allowed_domains = ['jd.com']
start_urls = ['https://search.jd.com/Search?keyword=python']
    def start_requests(self):  # override the default start_requests method
        # JD exposes only 100 result pages for "python" goods. A normal request returns just the
        # 30 statically rendered items of a page; the other 30 are loaded dynamically. The
        # "scrolling" URL below fetches one half of a page per request, so each request yields
        # 30 items while the number of page indices doubles (hence range(0, 201)).
for page in range(0,201):
url = 'https://search.jd.com/Search?keyword=python&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&stock=1&page={}&click=0&scrolling=y'.format(page)
yield scrapy.Request(url=url,callback=self.goods)
    def goods(self, response):  # collect the URL of every product on the search results page
book_urls = response.xpath("//div[@class='p-name']/a/@href").getall()
bookurls = []
for book_url in book_urls:
a = "https:"
if a in book_url:
bookurl = book_url
else:
bookurl = "https:" + str(book_url)
bookurls.append(bookurl)
for bookurl in bookurls:
yield scrapy.Request(url=bookurl, callback=self.get_page, meta={"bookurl": bookurl})
    def get_page(self, response):  # parse an individual product page
        # product name
bookname = response.xpath("//title/text()").get().split(r"(")[0]
        # author
author = response.xpath("//div[@class='p-author']/a/text()").get()
        # product id
bookid = re.findall("https://item.jd.com/(.*?).html",str(response))
bookid = "".join(bookid)
        # fetch the price via the price JSON endpoint
price = self.get_book_price(bookid)
        # fetch the review count via the comment JSON endpoint
commentcount = self.get_commentcount(bookid)
        # publisher
putlish = response.xpath("//div[@class='p-parameter']//li/@title").get()
item = JingdongItem()
item["bookname"] = bookname
item["author"] = author
item["price"] = price
item["commentcount"] = commentcount
item["putlish"] = putlish
item["bookurl"] = response.meta["bookurl"]
yield item
    def get_book_price(self, id):
        # fetch the product price
url = "https://p.3.cn/prices/mgets?skuIds=J_" + id
response = requests.get(url)
js = json.loads(response.text)
price = js[0]["p"]
return price
    def get_commentcount(self, id):
        # fetch the product review count
url = "https://club.jd.com/comment/productCommentSummaries.action?referenceIds=" + id
response = requests.get(url)
js = json.loads(response.text)
commentcount = js["CommentsCount"][0]["CommentCountStr"]
return commentcount
# https://search.jd.com/Search?keyword =python &enc = utf - 8 & qrst=1& rt = 1 & stop = 1 & vt = 2 & wq = python & page = 1
# https://search.jd.com/s_new.php?keyword = python & enc = utf - 8 & qrst = 1 & rt = 1 & stop = 1 & vt = 2 & wq = python & page = 3
|
[
"44636518+YNZ-ying@users.noreply.github.com"
] |
44636518+YNZ-ying@users.noreply.github.com
|
2df4bb235e74bddda5a598651e549633c9efd2e1
|
cb4aefac26b2a0e9bc755057073e8964c1cda094
|
/AdaBoost_Project2/AdaBoost.py
|
ace404c51efbb3ee0e3ac0b003db16955608d5ab
|
[] |
no_license
|
xj260098061/Machine-Learning-in-Action-Python3
|
7644666aec766a1dd6ccb96f49a99d819950d129
|
17a2613b63d5430f55e0c62f1eca86310485da22
|
refs/heads/master
| 2021-08-09T01:18:22.479560
| 2020-12-08T13:53:48
| 2020-12-08T13:53:48
| 229,924,707
| 0
| 0
| null | 2019-12-24T10:52:18
| 2019-12-24T10:52:17
| null |
UTF-8
|
Python
| false
| false
| 7,105
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 26 20:14:03 2018
@author: wzy
"""
import numpy as np
import matplotlib.pyplot as plt
"""
函数说明:加载文件
Parameters:
fileName - 文件名
Returns:
dataMat - 数据矩阵
labelMat - 数据标签
Modify:
2018-07-26
"""
def loadDataSet(fileName):
# 特征个数
numFeat = len((open(fileName).readline().split('\t')))
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = []
curLine = line.strip().split('\t')
for i in range(numFeat - 1):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat, labelMat
"""
函数说明:单层决策树分类函数
Parameters:
dataMatrix - 数据矩阵
dimen - 第dimen列,也就是第几个特征
threshVal - 阈值
threshIneq - 标志
Returns:
retArray - 分类结果
Modify:
2018-07-26
"""
def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
# 初始化retArray为全1列向量
retArray = np.ones((np.shape(dataMatrix)[0], 1))
if threshIneq == 'lt':
# 如果小于阈值则赋值为-1
retArray[dataMatrix[:, dimen] <= threshVal] = -1.0
else:
# 如果大于阈值则赋值为-1
retArray[dataMatrix[:, dimen] > threshVal] = -1.0
return retArray
"""
函数说明:找到数据集上最佳的单层决策树
Parameters:
dataArr - 数据矩阵
classLabels - 数据标签
D - 样本权重,每个样本权重相等 1/n
Returns:
bestStump - 最佳单层决策树信息
minError - 最小误差
bestClassEst - 最佳的分类结果
Modify:
2018-07-26
"""
def buildStump(dataArr, classLabels, D):
# 输入数据转为矩阵(5, 2)
dataMatrix = np.mat(dataArr)
# 将标签矩阵进行转置(5, 1)
labelMat = np.mat(classLabels).T
# m=5, n=2
m, n = np.shape(dataMatrix)
numSteps = 10.0
bestStump = {}
# (5, 1)全零列矩阵
bestClasEst = np.mat(np.zeros((m, 1)))
# 最小误差初始化为正无穷大inf
minError = float('inf')
# 遍历所有特征
for i in range(n):
# 找到(每列)特征中的最小值和最大值
rangeMin = dataMatrix[:, i].min()
rangeMax = dataMatrix[:, i].max()
# 计算步长
stepSize = (rangeMax - rangeMin) / numSteps
for j in range(-1, int(numSteps) + 1):
# 大于和小于的情况均遍历,lt:Less than gt:greater than
for inequal in ['lt', 'gt']:
# 计算阈值
threshVal = (rangeMin + float(j) * stepSize)
# 计算分类结果
predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)
# 初始化误差矩阵
errArr = np.mat(np.ones((m, 1)))
# 分类正确的,赋值为0
errArr[predictedVals == labelMat] = 0
# 计算误差
weightedError = D.T * errArr
print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
# 找到误差最小的分类方式
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump, minError, bestClasEst
"""
函数说明:使用AdaBoost进行优化
Parameters:
dataArr - 数据矩阵
classLabels - 数据标签
numIt - 最大迭代次数
Returns:
weakClassArr - 存储单层决策树的list
aggClassEsc - 训练的label
Modify:
2018-07-26
"""
def adaBoostTrainDS(dataArr, classLabels, numIt=60):
weakClassArr = []
# 获取数据集的行数
m = np.shape(dataArr)[0]
# 样本权重,每个样本权重相等,即1/n
D = np.mat(np.ones((m, 1)) / m)
# 初始化为全零列
aggClassEst = np.mat(np.zeros((m, 1)))
# 迭代
for i in range(numIt):
# 构建单层决策树
bestStump, error, classEst = buildStump(dataArr, classLabels, D)
# print("D:", D.T)
        # weak learner weight: alpha = 0.5 * ln((1 - error) / error);
        # max(error, 1e-16) keeps the denominator from becoming zero
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        # store the weak learner weight
        bestStump['alpha'] = alpha
        # store the decision stump
        weakClassArr.append(bestStump)
# 打印最佳分类结果
# print("classEst: ", classEst.T)
# 计算e的指数项
expon = np.multiply(-1 * alpha * np.mat(classLabels).T, classEst)
# 计算递推公式的分子
D = np.multiply(D, np.exp(expon))
# 根据样本权重公式,更新样本权重
D = D / D.sum()
# 计算AdaBoost误差,当误差为0的时候,退出循环
# 以下为错误率累计计算
aggClassEst += alpha * classEst
# print("aggClassEst: ", aggClassEst.T)
# 计算误差
aggErrors = np.multiply(np.sign(aggClassEst) != np.mat(classLabels).T, np.ones((m, 1)))
errorRate = aggErrors.sum() / m
# print("total error:", errorRate)
if errorRate == 0.0:
# 误差为0退出循环
break
return weakClassArr, aggClassEst
"""
函数说明:AdaBoost分类函数
Parameters:
datToClass - 待分类样例
classifierArr - 训练好的分类器
Returns:
分类结果
Modify:
2018-07-26
"""
def adaClassify(datToClass, classifierArr):
dataMatrix = np.mat(datToClass)
m = np.shape(dataMatrix)[0]
aggClassEst = np.mat(np.zeros((m, 1)))
for i in range(len(classifierArr)):
# 遍历所有分类器进行分类
classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'], classifierArr[i]['thresh'], classifierArr[i]['ineq'])
aggClassEst += classifierArr[i]['alpha'] * classEst
# print(aggClassEst)
return np.sign(aggClassEst)
if __name__ == '__main__':
dataArr, LabelArr = loadDataSet('horseColicTraining2.txt')
weakClassArr, aggClassEst = adaBoostTrainDS(dataArr, LabelArr)
testArr, testLabelArr = loadDataSet('horseColicTest2.txt')
print(weakClassArr)
predictions = adaClassify(dataArr, weakClassArr)
errArr = np.mat(np.ones((len(dataArr), 1)))
    print('Training set error rate: %.3f%%' % float(errArr[predictions != np.mat(LabelArr).T].sum() / len(dataArr) * 100))
predictions = adaClassify(testArr, weakClassArr)
errArr = np.mat(np.ones((len(testArr), 1)))
    print('Test set error rate: %.3f%%' % float(errArr[predictions != np.mat(testLabelArr).T].sum() / len(testArr) * 100))
|
[
"noreply@github.com"
] |
xj260098061.noreply@github.com
|
f2b463399f3b009736d6d97a1624e506b6eb9cc2
|
29db15f8fa63ba3be683f5728a03c0574c87e1ab
|
/examples/benchmark.py
|
9be5f75366c1e9c9a8e3946bbe8d4eb1f5871ceb
|
[
"Apache-2.0"
] |
permissive
|
gc-ss/python-lsm
|
aaf2f0f97bc2bbce82488eb5a904fe81008d18d8
|
e44ede1fcf4677085a9be374534c09f936496ba5
|
refs/heads/master
| 2023-06-13T09:07:46.533135
| 2021-07-12T14:25:59
| 2021-07-12T14:25:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,430
|
py
|
import json
import os
import struct
import tempfile
from argparse import ArgumentParser, Action
from glob import glob
from multiprocessing import cpu_count
from random import shuffle
from threading import local, RLock
from typing import Union
from pathlib import Path
import lsm
from multiprocessing.pool import ThreadPool
from tqdm import tqdm
from mimesis import Person, Address, Business, Datetime
class AppendConstAction(Action):
def __init__(self, option_strings, dest, const=None, default=None,
type=None, choices=None, required=False,
help=None, metavar=None):
assert const, "const= required"
super().__init__(option_strings, dest, const=const, nargs=0,
default=default, type=type, choices=choices,
required=required, help=help, metavar=metavar)
def __call__(self, parser, namespace, value, option_string=None):
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, list())
lst = getattr(namespace, self.dest)
lst.append(self.const)
parser = ArgumentParser()
parser.add_argument("-n", "--count", default=100000, type=int)
parser.add_argument("--pool-size", type=int, default=cpu_count())
parser.add_argument(
"--clear",
help="Keep existent database before writing",
action="store_true",
)
parser.add_argument(
"--path",
default=os.path.join(tempfile.gettempdir(), "lsm-compressed"),
)
parser.add_argument("--run-sequentially", action="store_true")
group = parser.add_argument_group("cases")
group.add_argument(
"--case-all",
dest="cases",
const="all",
action=AppendConstAction,
)
group.add_argument(
"--case-lz4",
dest="cases",
const="lz4",
action=AppendConstAction,
)
group.add_argument(
"--case-zstd",
dest="cases",
const="zstd",
action=AppendConstAction,
)
group.add_argument(
"--case-raw",
dest="cases",
const="raw",
action=AppendConstAction,
)
group = parser.add_argument_group("benchmarks")
group.add_argument(
"--bench-all",
dest="benchmarks",
const="all",
action=AppendConstAction,
)
group.add_argument(
"--bench-insert",
dest="benchmarks",
const="insert",
action=AppendConstAction,
)
group.add_argument(
"--bench-select-seq",
dest="benchmarks",
const="select-seq",
action=AppendConstAction,
)
group.add_argument(
"--bench-select-rnd",
dest="benchmarks",
const="select-rnd",
action=AppendConstAction,
)
group.add_argument(
"--bench-copy-seq",
dest="benchmarks",
const="copy-seq",
action=AppendConstAction,
)
person_generator = Person('en')
address_generator = Address('en')
business_generator = Business('en')
datetime_generator = Datetime('en')
class Cases:
@classmethod
def lz4(cls, path):
return [
path + ".lsm.lz4",
dict(multiple_processes=False, compress="lz4")
]
@classmethod
def zstd(cls, path):
return [
path + ".lsm.zst",
dict(multiple_processes=False, compress="zstd")
]
@classmethod
def raw(cls, path):
return [
path + ".lsm",
dict(multiple_processes=False)
]
def get_key(idx) -> Union[bytes, str]:
return struct.pack("I", idx)
def get_value(idx) -> Union[bytes, str]:
return json.dumps({
"id": idx,
"person": {
"full_name": person_generator.full_name(),
"email": person_generator.email(domains=[
'gmail.com',
'hotmail.com',
'yandex.ru',
'mail.ru',
]),
"phone": person_generator.telephone(mask='+7(9##)-###-####'),
"avatar": person_generator.avatar(),
"language": person_generator.language(),
},
"gps": {
"lat": address_generator.latitude(),
"lon": address_generator.longitude(),
},
"address": {
"city": address_generator.city(),
"country": address_generator.country_code(),
"address": address_generator.address(),
"zip": address_generator.zip_code(),
"region": address_generator.region(),
},
"business": {
"company": business_generator.company(),
"type": business_generator.company_type(),
"copyright": business_generator.copyright(),
"currency": business_generator.currency_symbol(),
},
"registration": {
"join_date": datetime_generator.datetime().isoformat(),
}
}).encode()
DATA_HEADER = struct.Struct("!I")
def gen_data(path, n):
with open(path, "a+b") as fp:
fp.seek(0)
head = fp.read(DATA_HEADER.size)
if len(head) == DATA_HEADER.size and DATA_HEADER.unpack(head)[0] == n:
print("Using previously generated file. Skipping")
return
fp.truncate(0)
fp.flush()
fp.write(b"\x00" * DATA_HEADER.size)
for i in tqdm(range(n), total=n, desc="Generate data"):
value = get_value(i)
fp.write(DATA_HEADER.pack(len(value)))
fp.write(value)
fp.seek(0)
os.pwrite(fp.fileno(), DATA_HEADER.pack(n), 0)
fp.flush()
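# Illustrative reader for the file written by gen_data above: a 4-byte big-endian record count,
# then for each record a 4-byte big-endian length followed by that many bytes of JSON.
def iter_records(path):
    with open(path, "rb") as fp:
        total = DATA_HEADER.unpack(fp.read(DATA_HEADER.size))[0]
        for _ in range(total):
            size = DATA_HEADER.unpack(fp.read(DATA_HEADER.size))[0]
            yield json.loads(fp.read(size))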
def fill_db(path, *, pool_size, data_file, **kwargs):
print("Opening:", path, "with", kwargs)
with ThreadPool(pool_size) as pool, \
lsm.LSM(path, **kwargs) as db, \
open(data_file, "rb") as fp:
n = DATA_HEADER.unpack(fp.read(DATA_HEADER.size))[0]
read_lock = RLock()
def insert(i):
with read_lock:
line = fp.read(
DATA_HEADER.unpack(
fp.read(DATA_HEADER.size)
)[0]
)
db[get_key(i)] = line
for _ in tqdm(
pool.imap_unordered(insert, range(n)),
desc=f"insert {kwargs.get('compress', 'none')}", total=n
):
pass
db.work(complete=True)
def select_thread_pool(path, *, keys_iter, keys_total, pool_size, **kwargs):
tls = local()
db_pool = set()
print("Opening:", path, "with", kwargs)
with ThreadPool(pool_size) as pool:
def select(k):
if not hasattr(tls, 'db'):
tls.db = lsm.LSM(path, readonly=True, **kwargs)
tls.db.open()
db_pool.add(tls.db)
_ = tls.db[get_key(k)]
return 0
for _ in tqdm(
pool.imap_unordered(select, keys_iter),
desc=f"select {kwargs.get('compress', 'none')}",
total=keys_total
):
pass
for conn in db_pool:
conn.close()
def copy_seq(path, **kwargs):
print("Opening:", path, "with", kwargs)
with tempfile.TemporaryDirectory() as dest:
dest = lsm.LSM(os.path.join(dest, "lsm-copy"), **kwargs)
src = lsm.LSM(path, readonly=True, **kwargs)
with src, dest:
for key, value in tqdm(src.items(), total=len(src.keys())):
dest[key] = value
def run_parallel(func, cases):
with ThreadPool(len(cases)) as pool:
for _ in pool.imap_unordered(func, cases):
pass
def run_sequentially(func, cases):
for case in cases:
func(case)
def main():
arguments = parser.parse_args()
run_insert = False
run_select_seq = False
run_select_rnd = False
run_copy_seq = False
if not arguments.benchmarks:
run_insert = True
run_select_seq = True
run_select_rnd = True
run_copy_seq = True
else:
if "insert" in arguments.benchmarks:
run_insert = True
if "select-seq" in arguments.benchmarks:
run_select_seq = True
if "select-rnd" in arguments.benchmarks:
run_select_rnd = True
if "copy-seq" in arguments.benchmarks:
run_copy_seq = True
if "all" in arguments.benchmarks:
run_insert = True
run_select_seq = True
run_select_rnd = True
run_copy_seq = True
if not arguments.cases or "all" in arguments.cases:
cases = [
Cases.zstd(arguments.path),
Cases.lz4(arguments.path),
Cases.raw(arguments.path),
]
else:
cases = []
if "zstd" in arguments.cases:
cases.append(Cases.zstd(arguments.path))
if "lz4" in arguments.cases:
cases.append(Cases.lz4(arguments.path))
if "raw" in arguments.cases:
cases.append(Cases.raw(arguments.path))
if arguments.run_sequentially:
run = run_sequentially
else:
run = run_parallel
if arguments.clear:
for file_name in glob(arguments.path + ".*"):
print("Removing: ", file_name)
os.remove(file_name)
data_path = Path(arguments.path).parent / "data.json"
def fill_job(item):
path, kwargs = item
return fill_db(
path,
pool_size=arguments.pool_size,
data_file=data_path,
**kwargs
)
if run_insert:
print("Prepare data")
gen_data(data_path, arguments.count)
print("Filling DB")
run(fill_job, cases)
def select_job(item):
path, kwargs = item
return select_thread_pool(
path,
pool_size=arguments.pool_size,
keys_iter=range(arguments.count),
keys_total=arguments.count,
**kwargs
)
if run_select_seq:
print("Select all keys sequentially")
run(select_job, cases)
def select_random_job(item):
path, kwargs = item
keys = list(range(arguments.count))
shuffle(keys)
return select_thread_pool(
path,
pool_size=arguments.pool_size,
keys_iter=iter(keys),
keys_total=arguments.count,
**kwargs
)
if run_select_rnd:
print("Select all keys random")
run(select_random_job, cases)
def copy_seq_job(item):
path, kwargs = item
return copy_seq(path, **kwargs)
if run_copy_seq:
print("Copy database")
run(copy_seq_job, cases)
if __name__ == '__main__':
main()
input()
|
[
"me@mosquito.su"
] |
me@mosquito.su
|
f7ec5455f6feef4864826a255008ce732cbf0b1e
|
c0770e3350c79c63b56a8038a1a22078c53e46de
|
/scraper/views.py
|
fa63cd41198adba0a83797f510cc82f4d176f9f0
|
[] |
no_license
|
iamksm/mdundoscraper
|
2c6f43d43326452637a8724eb5d51ff587ece3d5
|
fcba6a77bc36ed62bb7e1ddad6ae94064f27a913
|
refs/heads/main
| 2023-05-28T13:57:35.185158
| 2021-06-07T08:29:12
| 2021-06-07T08:29:12
| 373,785,249
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from .forms import ArtistForm, ArtistPrefix
from .scraper import find_songs, list_artists
def indexDF(request): # indexDF - Index Django Form
"""
The Main Django Form Page
Creates 2 Forms using Django Forms
Form gets input as an artist name
and displays songs they are involved in
Form2 gets input to list artists with names
starting with the prefix provided
"""
if request.method == "POST":
form = ArtistForm(request.POST)
form2 = ArtistPrefix(request.POST)
if form.is_valid():
the_songs = find_songs(str(form.cleaned_data["artist_name"]))
if the_songs == []:
return HttpResponse(
f'No results for {form.cleaned_data["artist_name"]}'
)
artistname = str(form.cleaned_data["artist_name"])
return JsonResponse(
{f"Search Result for {artistname}": the_songs}, safe=False
)
elif form2.is_valid():
names = list_artists(form2.cleaned_data["name_prefix"])
the_prefix = str(form2.cleaned_data["name_prefix"])
if names == []:
return HttpResponse(
f'No results for "{form2.cleaned_data["name_prefix"]}"'
)
return JsonResponse(
{f"Artsits name starting with {the_prefix}": names}, safe=False
)
else:
return HttpResponse("Please input some valid data")
else:
form = ArtistForm()
form2 = ArtistPrefix()
return render(request, "djangoforms.html", {"form": form, "form2": form2})
def indexHF(request): # indexHF - index HTML Form
"""
The Main HTML Forms Landing page
Just like in indexDF
this view produces 2 forms that are coded into the HTML file
Basically has the same functionality as in indexDF
"""
return render(request, "htmlforms.html")
def HFresultsAN(request): # HFresultsAN - HTML Form results Artist Name
"""
This View gets the artist name from the IndexHF artist name form
and returns the result in JSON Format
"""
name = request.POST["artist_name"]
the_songs = find_songs(str(name))
if the_songs != []:
return JsonResponse({f"Search Resulst for {name}": the_songs}, safe=False)
else:
return HttpResponse("Please Input a valid artist name")
def HFresultsAP(request): # HFresultsAP - HTML Form results Artist Prefix
"""
This View gets the prefix from the IndexHF prefix form
and returns the result in JSON Format
"""
prefix = request.POST["artist_prefix"]
names = list_artists(prefix)
if names != []:
return JsonResponse({f"Artsits Name starting with {prefix}": names}, safe=False)
else:
return HttpResponse("Please input a valid prefix")
|
[
"kossam.ouma@healthcloud.co.ke"
] |
kossam.ouma@healthcloud.co.ke
|
9edd0a4b85f54aeb166db6a8ce12ab39e5b12ff1
|
14ce83026e0b6342b8efc3a443f097acb40f737f
|
/treinamento_yale.py
|
f419b92b8f2d59d0252d4d166301b19c0e62a9bb
|
[] |
no_license
|
Theus-Simons/Projeto-Integrador
|
7f10770f93e487d8c6869ca250f47fa12395ada1
|
69d006e0d2bf2d85f23ef20b97ed99027c7dff95
|
refs/heads/master
| 2021-05-19T14:15:16.301062
| 2020-04-02T18:44:49
| 2020-04-02T18:44:49
| 251,752,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import cv2
import os
import numpy as np
from PIL import Image
eigenface = cv2.face.EigenFaceRecognizer_create(40, 8000)
fisherface = cv2.face.FisherFaceRecognizer_create(3, 2000)
lbph = cv2.face.LBPHFaceRecognizer_create(2, 2, 7, 7, 50)
def getImagemComId():
caminhos = [os.path.join('yalefaces/treinamento', f) for f in os.listdir('yalefaces/treinamento')]
faces = []
ids = []
for caminhoImagem in caminhos:
imagemFace = Image.open(caminhoImagem).convert('L')
imagemNP = np.array(imagemFace, 'uint8')
id = int(os.path.split(caminhoImagem)[1].split(".")[0].replace("subject", ""))
ids.append(id)
faces.append(imagemNP)
return np.array(ids), faces
ids, faces = getImagemComId()
print("Treinando...")
eigenface.train(faces, ids)
eigenface.write('classificadores/classificadorEigenYale.yml')
fisherface.train(faces, ids)
fisherface.write('classificadores/classificadorFisherYale.yml')
lbph.train(faces, ids)
lbph.write('classificadores/classificadorLBPHYale.yml')
print("Treinamento realizado")
|
[
"nino.schmidt10@gmail.com"
] |
nino.schmidt10@gmail.com
|
89691ae5e0b50fad2f7b8edcd5d74695601db8de
|
8954988f7c9aa9dd9ae984b5f214d45d981b612b
|
/_temp/GED.py
|
9ef5151453404b9ed3e2c1e17842c6b44f53e9e5
|
[] |
no_license
|
MoamerEncsConcordiaCa/GED_python
|
5494c7f0523d938b9832ee155d71c0dca5d3e1f6
|
7ad9f96e4d7b2ca6b1b91ec2386b4fa93ceacd67
|
refs/heads/master
| 2021-01-10T22:11:57.151730
| 2016-11-19T06:48:14
| 2016-11-19T06:48:14
| 42,900,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,111
|
py
|
'''
Created on Apr 2, 2014
@author: mo_amer
'''
import networkx as nx
#from costCommon import *
from costWordGraphs import *
def mapNodeToNode(Node1,Node2):
return [Node1, Node2]
def addToMapS(mapNew, maps):
allMaps = []
allMaps.extend(maps)
allMaps.append(mapNew)
return allMaps
# def bigCNodes(cMap):
# a = 0.2
# Tn = 1
# Te = 0.5
#
# if cMap[0] == 'E' or cMap[1] == 'E':
# return a* Tn
# else:
# return a*(abs(cMap[0] - cMap[1]))
#
# def bigCEdges(edgesMaped):
# a = 0.2
# Tn = 1
# Te = 0.5
#
# if edgesMaped[0] == 'E' or edgesMaped[1] == 'E':
# return (1-a)* Te
# else:
#
# if len(edgesMaped[0][0]) == 3 and len(edgesMaped[1][0] )== 3:
# dist1 = edgesMaped[0][0][2]['weight']
# dist2 = edgesMaped[1][0][2]['weight']
#
# else:
# dist1 = 0
# dist2 = 0
# return (1-a)*(abs(dist1 - dist2))
#
def EPC(cPath, G1, G2):
cost = 0;
G1Nodes = G1.nodes()
G2Nodes = G2.nodes()
G1NodesData = G1.nodes(data = True)
G2NodesData = G2.nodes(data = True)
G1Degree = nx.degree(G1).values()
G2Degree = nx.degree(G2).values()
for cMap in cPath:
if cMap[0] == 'E' and cMap[1] == 'E':
continue
if cMap[0] == 'E':
#insertion
cost = cost + bigCNodes(cMap)
i = G2Nodes.index(cMap[1])
cost = cost + G2Degree[i] * bigCEdges(['E','E']) / 2
elif cMap[1] == 'E':
# deletion
cost = cost + bigCNodes(cMap)
i = G1Nodes.index(cMap[0])
cost = cost + G1Degree[i] * bigCEdges(['E','E']) /2
#print 'f'
else:
i = G1Nodes.index(cMap[0])
j = G2Nodes.index(cMap[1])
nodeMap = (G1NodesData[i], G2NodesData[j])
cost = cost + bigCNodes(nodeMap)
# substitution for an edge is when both nodes are mapped
# first node of edge is in cMap which is here
# it needs to just test the second node of each adjacent edge
insertedEdges = []
deletedEdges = []
substitutedEdges = []
insertedEdges.extend(list(G2.edges(cMap[1])))
deletedEdges.extend(list(G1.edges(cMap[0])))
#G1SourceNeighbors = G1.neighbors(cMap[0])
#G2TargetNeighbors = G2.neighbors(cMap[1])
# to see which edges are substituted. Remove them form insert and
# delete list
U1 = cMap[0]
V1 = cMap[1]
G1UEdgesData = G1.edges(U1, data = True)
G2VEdgesData = G2.edges(V1, data = True)
G1UEdges = G1.edges(U1)
G2VEdges = G2.edges(V1)
for i in range(0, len(G1UEdges)):
for j in range(0, len(G2VEdges)):
p = G1UEdges[i]
q = G2VEdges[j]
U2 = p[0]
if U2 == cMap[0]:
U2 = p[1]
V2 = q[0]
if V2 == cMap[1]:
V2 = q[1]
if [U2, V2] in cPath:
deletedEdges.remove((U1, U2))
insertedEdges.remove((V1, V2))
substitutedEdges.append((G1UEdgesData[i], G2VEdgesData[j]))
# for G1U in G1SourceNeighbors:
# for G2V in G2TargetNeighbors:
#
# if [G1U, G2V] in cPath:
#
# substitutedEdges.append([(cMap[0], G1U),(cMap[1], G2V)])
#
# deletedEdges.remove((cMap[0], G1U))
# #deletedEdges.remove((G1U, cMap[0]))
#
# insertedEdges.remove((cMap[1],G2V))
# #insertedEdges.remove([G2V,cMap[1]])
#
# print 'substitude edge : '
# print substitutedEdges
for substEdge in substitutedEdges:
# to do : this line returnes all edges must be only the substituted edges
# edge1 = G1.edges((substEdge[0][0],substEdge[0][1]), data = True)
# edge2 = G2.edges((substEdge[1][0],substEdge[1][1]), data = True)
#
# cost = cost + bigCEdges([edge1, edge2])
cost += bigCEdges(substEdge) / 2
for deleEdges in deletedEdges:
cost = cost + bigCEdges([deleEdges, 'E']) / 2
for insEdge in insertedEdges:
cost = cost + bigCEdges(['E', insEdge]) /2
return cost
# def hurstic(cPath, G1, G2):
# '''
# hurstic is buggy so we are using the trivial case 0 now
#
# '''
#
# cost = 0
# return cost
#
# G1NodesRemained = []
# G2NodesRemained = []
# G1NodesRemained.extend(G1.nodes())
# G2NodesRemained.extend(G2.nodes())
#
# for cMap in cPath:
#
# if cMap[0] != 'E':
# G1NodesRemained.remove(cMap[0])
# if cMap[1] != 'E':
# G2NodesRemained.remove(cMap[1])
# minVal = []
#
# for u in G1NodesRemained:
# for v in G2NodesRemained:
# cVal1 = bigCNodes([u, v])
# cVal2 = bigCNodes([u, 'E']) + bigCNodes(['E', v])
# cVal = min(cVal1, cVal2)
# if minVal == []:
# minVal = cVal
#
# if cVal < minVal:
# minVal = cVal
#
# #substitution with minVal of all
# if minVal == []:
# minVal = 0
# cost = cost + minVal* min(len(G1NodesRemained), len(G2NodesRemained))
# cost = cost + max(0, len(G1NodesRemained) - len(G2NodesRemained)) * bigCNodes([u, 'E'])
# cost = cost + max(0, len(G2NodesRemained) - len(G1NodesRemained)) * bigCNodes(['E', v])
#
# ######################## For Edges
# G1EdgesRemained = []
# G2EdgesRemained = []
# for u in G1NodesRemained:
# G1EdgesRemained.extend(G1.edges(u, True))
#
# for v in G2NodesRemained:
# G2EdgesRemained.extend(G2.edges(v, True))
#
# minVal = []
# for u in G1EdgesRemained:
# for v in G2EdgesRemained:
# #cVal1 = bigCEdges([u, v])
# cVal2 = bigCEdges([u, 'E']) + bigCEdges(['E', v])
# cVal = min(cVal1, cVal2)
# if minVal == []:
# minVal = cVal
#
# if cVal < minVal:
# minVal = cVal
#
# #substitution with minVal of all
# if minVal == []:
# minVal = 0
# cost = cost + minVal* min(len(G1EdgesRemained), len(G2EdgesRemained))
# cost = cost + max(0, len(G1EdgesRemained) - len(G2EdgesRemained)) * bigCEdges([u, 'E'])
# cost = cost + max(0, len(G1EdgesRemained) - len(G2EdgesRemained)) * bigCEdges(['E', v])
#
#
#
#
# #return cost
# return 0
def argMin(openSet, G1, G2):
if len(openSet) == 0:
return []
minIndex = -1
minCost = 0
for cPath in openSet:
if minIndex == -1 :
minCost = EPC(cPath, G1, G2) + 0 #hurstic(cPath, G1, G2)
minIndex = openSet.index(cPath)
continue
cCost = EPC(cPath, G1, G2) + 0 #hurstic(cPath, G1, G2)
if minCost > cCost:
minCost = cCost
minIndex = openSet.index(cPath)
if minIndex == -1:
return []
#print minCost
#print openSet[minIndex]
return openSet[minIndex]
def isCompletePath(pMin, G1Nodes, G2Nodes):
    '''
    A path is complete if it contains every node of both G1 and G2,
    i.e. all nodes are mapped.
    '''
G1Mapped = []
G2Mapped = []
for m in pMin:
if m[0] != 'E':
G1Mapped.append(m[0])
if m[1] != 'E':
G2Mapped.append(m[1])
for n in G1Nodes:
if not(n in G1Mapped):
return False
for n in G2Nodes:
if not(n in G2Mapped):
return False
return True
def GED(G1, G2):
    '''
    Elements of openSet are paths, e.g. p_i = [[1, 3], [2, 2], [3, 'E']],
    i.e. node 1 is mapped to 3, node 2 to 2, and node 3 to the empty node.
    openSet is a list of such paths: openSet = [p_1, p_2, ..., p_n].
    '''
G1Nodes = G1.nodes()
G2Nodes = G2.nodes()
openSet = []
for i in range(0, G2.number_of_nodes()):
openSet.append(addToMapS(mapNodeToNode(G1Nodes[0], G2Nodes[i]), []))
openSet.append(addToMapS(mapNodeToNode(G1Nodes[0], 'E'),[]))
#print openSet
pMin = []
while True:
pMin = argMin(openSet, G1, G2)
# return first element by now
if isCompletePath(pMin, G1Nodes, G2Nodes):
#print 'found complete map'
#print pMin
break
else:
k = len(pMin)
openSet.remove(pMin)
if k < len(G1Nodes):
pNew = addToMapS(mapNodeToNode(G1Nodes[k], 'E'), pMin)
openSet.append(pNew)
G2Mapped = []
for m in pMin:
if m[1] != 'E':
G2Mapped.append(m[1])
G2UnMapped = []
G2UnMapped.extend(G2Nodes)
for m in G2Mapped:
G2UnMapped.remove(m)
for m in G2UnMapped:
pNew = addToMapS(mapNodeToNode(G1Nodes[k], m), pMin)
openSet.append(pNew)
else:
G2Mapped = []
for m in pMin:
if m[1] != 'E':
G2Mapped.append(m[1])
pNew = []
pNew.extend(pMin)
G2UnMapped = []
G2UnMapped.extend(G2Nodes)
for m in G2Mapped:
G2UnMapped.remove(m)
for m in G2UnMapped:
pNew = addToMapS(mapNodeToNode('E', m), pNew)
openSet.append(pNew)
#print 'a new complete map'
#print pNew
#print '##################'
pMin = argMin(openSet, G1, G2)
costMin = EPC(pMin, G1, G2)
mapSubstitude ={}
mapInsert = {}
mapDelete ={}
for p in pMin:
if p[0] != 'E' and p[1] !='E':
mapSubstitude[p[0]] = p[1]
elif p[0] =='E' and p[1] != 'E':
mapInsert[p[1]] = 'E'
elif p[0] != 'E' and p[1] == 'E':
mapDelete[p[0]] = 'E'
    # fix paths before sending them
    return [costMin, [mapSubstitude, mapDelete, mapInsert]]
# print 'GED finished'
return [costMin, pMin]
#for p in openSet:
#print p
#print EPC(p, G1, G2)
return pMin
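# --- Illustrative example (not part of the original module) ---
# How a partial edit path in openSet grows, assuming G1 has nodes 1 and 2 and G2 has nodes 3 and 4:
#
# p0 = addToMapS(mapNodeToNode(1, 3), [])      # [[1, 3]]            -> node 1 substituted by node 3
# p1 = addToMapS(mapNodeToNode(2, 'E'), p0)    # [[1, 3], [2, 'E']]  -> node 2 deleted
# p2 = addToMapS(mapNodeToNode('E', 4), p1)    # [[1, 3], [2, 'E'], ['E', 4]] -> node 4 inserted
# openSet = [p0, p1, p2]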
|
[
"mo_amer@encs.concordia.ca"
] |
mo_amer@encs.concordia.ca
|
6ecc78f6d086a53dde0ae983405f8ee8e2f545c7
|
2aaf0ae5d269ddbfcafd5a97eb69b3f8e6f33848
|
/pandas_test_mines.py
|
9fc3543ec14b128b28448f5511968fc853f48230
|
[] |
no_license
|
vshumanov/random
|
e992d60b05d857565db8502271082dece7c61e0b
|
69c5133467f7015863a653342308c034bfe628ed
|
refs/heads/master
| 2021-10-25T09:39:56.398000
| 2019-04-03T14:58:29
| 2019-04-03T14:58:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# -*- coding: utf-8 -*-
file = 'datasets/rocks_vs_mines.csv'
import pandas as pd
data = pd.read_csv(file,header=None,prefix='V')
# print(data.head())
# print(data.tail())
print('Data Summary')
print('-' * 15)
# print(data.describe())
dataRow2 = data.iloc[1, 0:60]
print(dataRow2.mean())
mean = 0.0
nEl = len(dataRow2)
for i in range(nEl):
    mean += dataRow2[i] / nEl
print(mean)
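# --- Illustrative note (not part of the original script) ---
# The manual loop above recomputes what pandas already provides, so the two values agree:
#
# assert abs(data.iloc[1, 0:60].mean() - mean) < 1e-9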
|
[
"vl.shumanov@gmail.com"
] |
vl.shumanov@gmail.com
|
b41c95a9b28983c9ed8e4ba2abdaeea6f64bebb8
|
16ec9ff55483dfd3b01e39d24c51ffd4f7eb76ed
|
/drl/openai/cartpole.py
|
a6c15792d7d48ffc5d09c7ce5b4577c5b1f889e6
|
[] |
no_license
|
msiegenthaler/deep-reinforced-learning-course
|
e3d7c3bf35bee279d4046470c6c6e0fb3da45dc5
|
ca185748151d4836d328998a92b08138527ee600
|
refs/heads/master
| 2020-04-21T13:03:27.305322
| 2019-03-19T18:28:19
| 2019-03-19T18:28:19
| 169,585,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
from torch import Tensor
from drl.deepq.game import Action
from drl.openai.game_openai import OpenAIFrameGame
import torchvision.transforms as T
class CartPoleVisual(OpenAIFrameGame):
actions = [Action('left', 0, 0),
Action('right', 1, 1)]
def __init__(self, x: int, y: int, t: int):
self.transform = T.Compose([T.ToPILImage(), T.Resize((y, x)), T.Grayscale(), T.ToTensor()])
super().__init__('CartPole-v0', t)
@property
def name(self) -> str:
return 'cardpole'
def _get_frame(self, env_state) -> Tensor:
image = self.transform(self._get_raw_frame()[330:660, 0:1200, :])
return image.squeeze(0)
|
[
"mario.siegenthaler@linkyard.ch"
] |
mario.siegenthaler@linkyard.ch
|
41c637bc21bc87c0fcd0cdd0a30c6b647570568f
|
5757f1767ba1c7bface68914a45992c77cca4e85
|
/decodeqr.py
|
12f3d7cde2728946101fb598c78a1971c11b90d0
|
[] |
no_license
|
MaybeSHAH/store-image-flaskapp
|
fa88e67131ca27b6426b9b436e46cafe57d2485a
|
66ac8d15080a0943fb28df9c619163e27bafdae5
|
refs/heads/master
| 2023-03-11T07:12:54.681416
| 2021-02-24T14:54:15
| 2021-02-24T14:54:15
| 343,741,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
import cv2
# initialize the camera
cap = cv2.VideoCapture(0)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
while True:
_, img = cap.read()
# detect and decode
data, bbox, _ = detector.detectAndDecode(img)
# check if there is a QRCode in the image
if bbox is not None:
# display the image with lines
for i in range(len(bbox)):
# draw all lines
cv2.line(img, tuple(bbox[i][0]), tuple(bbox[(i+1) % len(bbox)][0]), color=(255, 0, 0), thickness=2)
if data:
print("[+] QR Code detected, data:", data)
# display the result
cv2.imshow("img", img)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
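# --- Illustrative sketch (not part of the original script) ---
# The same detector also decodes a single still image (hypothetical file name):
#
# detector = cv2.QRCodeDetector()
# img = cv2.imread("qr_sample.png")
# data, bbox, _ = detector.detectAndDecode(img)
# if data:
#     print("[+] QR Code detected, data:", data)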
|
[
"liyakatshaikh07@gmail.com"
] |
liyakatshaikh07@gmail.com
|
8730fe40d6afee5489efb681abbd8a1558376fde
|
fa9bae32c203323dfb345d9a415d4eaecb27a931
|
/300. Longest Increasing Subsequence.py
|
b82de73f335d402df481d244806faaeea8693e58
|
[] |
no_license
|
IUIUN/The-Best-Time-Is-Now
|
48a0c2e9d449aa2f4b6e565868a227b6d555bf29
|
fab660f98bd36715d1ee613c4de5c7fd2b69369e
|
refs/heads/master
| 2020-09-14T12:06:24.074973
| 2020-02-15T06:55:08
| 2020-02-15T06:55:08
| 223,123,743
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
if not nums:
return 0
dp = [1 for _ in range(len(nums))]
for i in range(len(nums)):
for j in range(i):
if nums[i] > nums[j] and dp[i] < dp[j] + 1:
dp[i] = dp[j] + 1
return max(dp)
# ---- Alternative O(n log n) solution (Python 2: uses xrange / sys.maxint) ----
class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums: return 0
n = len(nums)
dp, g = [1] * n, [sys.maxint] * (n + 2)
for i in xrange(n):
k = self.lower_bound(1,n+1,nums[i],g)
dp[i] = k
g[k] = nums[i]
return max(dp)
def lower_bound(self,L, R, x,g):
while L < R:
mid = (L + R) >> 1
if g[mid] < x:
L = mid + 1
else:
R = mid
return L
# ---- The same Python 2 O(n log n) solution repeated ----
class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums: return 0
n = len(nums)
dp, g = [1] * n, [sys.maxint] * (n + 2)
for i in xrange(n):
k = self.lower_bound(1,n+1,nums[i],g)
dp[i] = k
g[k] = nums[i]
return max(dp)
def lower_bound(self,L, R, x,g):
while L < R:
mid = (L + R) >> 1
if g[mid] < x:
L = mid + 1
else:
R = mid
return L
# ----
|
[
"liuyijun0621@hotmail.com"
] |
liuyijun0621@hotmail.com
|
4d52c8a3c4fec04aa406993409e869ae52c90037
|
1a95919667484ee14dd6e10a066d3f692a3e3e41
|
/src/webchart/test/migrations/0001_initial.py
|
31a2da26d4afcf527e157fbe7a4b7728ad6374f7
|
[] |
no_license
|
Dmaner/HKDATA
|
0f020592973519158cec4563a8c4a731c10df191
|
6d31c092a5f7c10cb8ac6f57530c8039076643bf
|
refs/heads/master
| 2020-08-27T09:46:00.358207
| 2019-11-26T14:56:01
| 2019-11-26T14:56:01
| 217,322,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
# Generated by Django 2.0.4 on 2019-11-07 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='WORD',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word_text', models.CharField(max_length=100)),
('times', models.IntegerField()),
],
),
]
|
[
"2663515256@qq.com"
] |
2663515256@qq.com
|
515a1a3aa905f76ef6b4c1b87ae05f03d8e35b6d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02803/s686310385.py
|
3a37e7ca42661e163c159312bf8de6178b6bd0a3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
import sys
h, w = map(int, input().split())
C = [list(input()) for i in range(h)]
ans = 0
for i in range(h):
for j in range(w):
visited = [[0 for i in range(w)] for i in range(h)]
data = []
if C[i][j] == '.':
data.append([i, j])
visited[i][j] = 1
dy_dx = [[1,0], [0,1], [-1,0], [0,-1]]
while len(data) > 0:
now = data.pop(0)
for k in range(4):
y = now[0] + dy_dx[k][0]
x = now[1] + dy_dx[k][1]
if 0 <= y and y < h and 0 <= x and x < w:
if C[y][x] != '#' and visited[y][x] == 0:
                            # count the number of moves
visited[y][x] = visited[now[0]][now[1]] + 1
data.append([y, x])
for l in range(h):
for m in range(w):
ans = max(ans, visited[l][m])
print(ans - 1)
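# --- Illustrative example (not part of the original solution) ---
# For this input the farthest pair of mutually reachable '.' cells is 4 moves apart,
# so the program prints 4:
#
#   3 3
#   ...
#   .#.
#   ...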
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c19f30d455d48c06b2f777ebb53f33baa357faf8
|
1ee8f4707c9025ffe93152e91c2486cf1b332dd0
|
/Code/site-packages/docplex/mp/sttck.py
|
133637369b745831d2d7fd11d858a9d626131b68
|
[] |
no_license
|
qfizik/BEP
|
7f05795dd7f7a4cf796aae2808eedbc426472eb9
|
f2848e3121e976540fb10171fdfbc6670dd28459
|
refs/heads/master
| 2023-02-02T03:54:55.452530
| 2020-12-18T16:02:46
| 2020-12-18T16:02:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
# gendoc: ignore
from docplex.mp.utils import is_number
import math
class StaticTypeChecker(object):
@staticmethod
def typecheck_as_power(mdl, e, power):
# INTERNAL: checks <power> is 0,1,2
if power < 0 or power > 2:
mdl.fatal("Cannot raise {0!s} to the power {1}. A variable's exponent must be 0, 1 or 2.", e, power)
@staticmethod
def cannot_be_used_as_denominator_error(mdl, denominator, numerator):
mdl.fatal("{1!s} / {0!s} : operation not supported, only numbers can be denominators", denominator, numerator)
@classmethod
def typecheck_as_denominator(cls, mdl, denominator, numerator):
if not is_number(denominator):
cls.cannot_be_used_as_denominator_error(mdl, denominator, numerator)
else:
float_e = float(denominator)
if 0 == float_e:
mdl.fatal("Zero divide on {0!s}", numerator)
@classmethod
def typecheck_discrete_expression(cls, logger, expr, msg):
if not expr.is_discrete():
logger.fatal('{0}, expression: ({1!s}) is not discrete', msg, expr)
@classmethod
def typecheck_discrete_constraint(cls, logger, ct, msg):
if not ct.is_discrete():
logger.fatal('{0}, {1!s} is not discrete', msg, ct)
@classmethod
def typecheck_added_constraint(cls, mdl, ct):
if not ct.has_valid_index():
mdl.fatal("Constraint: {0!s} has not been added to any model".format(ct))
elif mdl is not ct.model:
mdl.fatal("Constraint: {0!s} belongs to a different model".format(ct))
@classmethod
def mul_quad_lin_error(cls, logger, f1, f2):
logger.fatal(
"Cannot multiply {0!s} by {1!s}, some terms would have degree >= 3. Maximum polynomial degree is 2.",
f1, f2)
@classmethod
def typecheck_callable(cls, logger, arg, msg):
if not callable(arg):
logger.fatal(msg)
@classmethod
def typecheck_num_nan_inf(cls, logger, arg, caller=None):
# check for a "real" number, not a NaN, not infinity
caller_string = "{0}: ".format(caller) if caller is not None else ""
if not is_number(arg):
logger.fatal("{0}Expecting number, got: {1!r}", caller_string, arg)
elif math.isnan(arg):
logger.fatal("{0}NaN value detected", caller_string)
elif math.isinf(arg):
logger.fatal("{0}Infinite value detected", caller_string)
|
[
"martijnswenne@hotmail.com"
] |
martijnswenne@hotmail.com
|
f9ecbc5c0152d0cce3a931c93ed51505e3581f35
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/third_party/blink/tools/blinkpy/bindings/print_idl_diff.py
|
fd3cb868b5ac75e97baa0645c5fbfdcbc196a0d2
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 14,275
|
py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print a diff generated by generate_idl_diff.py.
Before printing, sort the diff in the alphabetical order or the order of
diffing tags.
Usage: print_idl_diff.py diff_file.json order
diff.json:
Output of generate_idl_diff.py. The json file contains a dictionary
that represents a diff between two different Chromium versions. The
structure of the dictionary is like below.
order:
Specify how to sort. Either by "ALPHABET" or "TAG".
"""
from collections import OrderedDict
import sys
from blinkpy.bindings.generate_idl_diff import load_json_file
from blinkpy.bindings.generate_idl_diff import EXTATTRIBUTES_AND_MEMBER_TYPES
from blinkpy.bindings.generate_idl_diff import DIFF_TAG
from blinkpy.bindings.generate_idl_diff import DIFF_TAG_ADDED
from blinkpy.bindings.generate_idl_diff import DIFF_TAG_DELETED
# pylint: disable=W0105
"""Refer to the explanation of generate_idl_diff.py's input files.
The difference between the input structure of generate_idl_diff.py and
that of print_diff.py is whether diffing tags are included or not.
{'Interface': {
'diff_tag': 'deleted'
'ExtAttributes': [{'Name': '...'
'diff_tag': 'deleted'},
...,
],
'Consts': [{'Type': '...',
'Name': '...',
'Value': '...'
'diff_tag': 'deleted'},
...,
],
'Attributes': [{'Type': '...',
'Name': '...',
'ExtAttributes':[{'Name': '...'},
...,
]
'diff_tag': 'deleted'},
...,
],
'Operations': [{'Type': '...',
'Name': '...',
'ExtAttributes':[{'Name': '...'},
...,
],
'Arguments': [{'Type': '...',
'Name': '...'},
...,
]
'diff_tag': 'deleted'},
...,
],
'Name': '...'
},
{
'ExtAttributes': [{'Name': '...'},
...,
],
'Consts': [{'Type': '...',
'Name': '...',
'Value': '...'
'diff_tag': 'added'},
...,
],
'Attributes': [{'Type': '...',
'Name': '...',
'ExtAttributes':[{'Name': '...'},
...,
]},
...,
],
'Operations': [{'Type': '...',
'Name': '...',
'ExtAttributes':[{'Name': '...'},
...,
],
'Arguments': [{'Type': '...',
'Name': '...'},
...,
]
'diff_tag': 'deleted'},
...,
],
'Name': '...'
},
...,
}
"""
class Colorize(object):
"""This class outputs a colored text to sys.stdout.
TODO(bashi): This class doesn't work on Windows. Provide a way to suppress
escape sequences.
"""
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
COLORS = (BLACK, RED, GREEN, YELLOW)
def __init__(self, out):
self.out = out
def reset_color(self):
"""Reset text's color to default."""
self.out.write('\033[0m')
def change_color(self, color):
"""Change text's color by specifing arguments.
Args:
color: A new color to change. It should be one of |COLORS|.
"""
if color in self.COLORS:
self.out.write('\033[' + str(color) + 'm')
else:
raise Exception('Unsupported color.')
def writeln(self, string):
"""Print text with a line-break."""
self.out.write(string + '\n')
def write(self, string):
"""Print text without a line-break."""
self.out.write(string)
def sort_member_types(interface):
"""Sort the members in the order of EXTATTRIBUTES_AND_MEMBER_TYPES.
Args:
interface: An "interface" object
Returns:
A sorted "interface" object
"""
sorted_interface = OrderedDict()
for member_type in EXTATTRIBUTES_AND_MEMBER_TYPES:
sorted_interface[member_type] = interface.get(member_type)
sorted_interface[DIFF_TAG] = interface.get(DIFF_TAG)
return sorted_interface
def group_by_tag(interface_or_member_list):
"""Group members of |interface_or_member_list| by tags.
Args:
interface_or_member_list: A list of interface names or a list of "members"
Returns:
        A tuple of (removed, added, unspecified) where
removed: A list of removed members
added: A list of added members
unspecified: A list of other members
"""
removed = []
added = []
unspecified = []
for interface_or_member in interface_or_member_list:
if DIFF_TAG in interface_or_member:
if interface_or_member[DIFF_TAG] == DIFF_TAG_DELETED:
removed.append(interface_or_member)
elif interface_or_member[DIFF_TAG] == DIFF_TAG_ADDED:
added.append(interface_or_member)
else:
unspecified.append(interface_or_member)
return (removed, added, unspecified)
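# Illustrative example (not in the original source): given members tagged by
# generate_idl_diff.py, group_by_tag() splits them into three buckets:
#
# members = [{'Name': 'a', DIFF_TAG: DIFF_TAG_DELETED},
#            {'Name': 'b', DIFF_TAG: DIFF_TAG_ADDED},
#            {'Name': 'c'}]
# removed, added, unspecified = group_by_tag(members)
# # removed -> [{'Name': 'a', ...}], added -> [{'Name': 'b', ...}], unspecified -> [{'Name': 'c'}]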
def sort_interface_names_by_tags(interfaces):
"""Sort interface names as follows.
[names of deleted "interface"s
-> names of added "interface"s
-> names of other "interface"s]
Args:
interfaces: "interface" objects.
Returns:
A list of sorted interface names
"""
interface_list = interfaces.values()
removed, added, unspecified = group_by_tag(interface_list)
# pylint: disable=W0110
removed = map(lambda interface: interface['Name'], removed)
# pylint: disable=W0110
added = map(lambda interface: interface['Name'], added)
# pylint: disable=W0110
unspecified = map(lambda interface: interface['Name'], unspecified)
sorted_interface_names = removed + added + unspecified
return sorted_interface_names
def sort_members_by_tags(interface):
"""Sort members of a given interface in the order of diffing tags.
Args:
An "interface" object
Returns:
A sorted "interface" object
"""
sorted_interface = OrderedDict()
if DIFF_TAG in interface:
return interface
for member_type in EXTATTRIBUTES_AND_MEMBER_TYPES:
member_list = interface[member_type]
removed, added, unspecified = group_by_tag(member_list)
sorted_interface[member_type] = removed + added + unspecified
return sorted_interface
def sort_diff_by_tags(interfaces):
"""Sort an "interfaces" object in the order of diffing tags.
Args:
An "interfaces" object loaded by load_json_data().
Returns:
A sorted "interfaces" object
"""
sorted_interfaces = OrderedDict()
sorted_interface_names = sort_interface_names_by_tags(interfaces)
for interface_name in sorted_interface_names:
interface = sort_members_by_tags(interfaces[interface_name])
sorted_interfaces[interface_name] = sort_member_types(interface)
return sorted_interfaces
def sort_members_in_alphabetical_order(interface):
"""Sort a "members" object in the alphabetical order.
Args:
An "interface" object
Returns:
A sorted "interface" object
"""
sorted_interface = OrderedDict()
for member_type in EXTATTRIBUTES_AND_MEMBER_TYPES:
sorted_members = sorted(
interface[member_type], key=lambda member: member['Name'])
sorted_interface[member_type] = sorted_members
return sorted_interface
def sort_diff_in_alphabetical_order(interfaces):
"""Sort an "interfaces" object in the alphabetical order.
Args:
An "interfaces" object.
Returns:
A sorted "interfaces" object
"""
sorted_interfaces = OrderedDict()
for interface_name in sorted(interfaces.keys()):
interface = interfaces[interface_name]
sorted_interface = sort_members_in_alphabetical_order(interface)
sorted_interface[DIFF_TAG] = interface.get(DIFF_TAG)
sorted_interfaces[interface_name] = sorted_interface
return sorted_interfaces
def print_member_with_color(member, out):
"""Print the "member" with a colored text. '+' is added to an added
"member". '-' is added to a removed "member".
Args:
member: A "member" object
"""
if DIFF_TAG in member:
if member[DIFF_TAG] == DIFF_TAG_DELETED:
out.change_color(Colorize.RED)
out.write('- ')
elif member[DIFF_TAG] == DIFF_TAG_ADDED:
out.change_color(Colorize.GREEN)
out.write('+ ')
else:
out.change_color(Colorize.BLACK)
out.write(' ')
def print_extattributes(extattributes, out):
"""Print extattributes in an "interface" object.
Args:
A list of "ExtAttributes" in the "interface" object
"""
for extattribute in extattributes:
out.write(' ')
print_member_with_color(extattribute, out)
out.writeln(extattribute['Name'])
def print_consts(consts, out):
"""Print consts in an "interface" object.
Args:
A list of "Consts" of the "interface" object
"""
for const in consts:
out.write(' ')
print_member_with_color(const, out)
out.write(str(const['Type']))
out.write(' ')
out.write(const['Name'])
out.write(' ')
out.writeln(const['Value'])
def print_items(items, callback, out):
"""Calls |callback| for each item in |items|, printing commas between
|callback| calls.
Args:
items: extattributes or arguments
"""
count = 0
for item in items:
callback(item)
count += 1
if count < len(items):
out.write(', ')
def print_extattributes_in_member(extattributes, out):
"""Print extattributes in a "member" object.
Args:
A list of "ExtAttributes" in the "member" object
"""
def callback(extattribute):
out.write(extattribute['Name'])
out.write('[')
print_items(extattributes, callback, out)
out.write(']')
def print_attributes(attributes, out):
"""Print attributes in an "interface" object.
Args:
A list of "Attributes" in the "interface" object
"""
for attribute in attributes:
out.write(' ')
print_member_with_color(attribute, out)
if attribute['ExtAttributes']:
print_extattributes_in_member(attribute['ExtAttributes'], out)
out.write(str(attribute['Type']))
out.write(' ')
out.writeln(attribute['Name'])
def print_arguments(arguments, out):
"""Print arguments in a "members" object named "Operations".
Args: A list of "Arguments"
"""
def callback(argument):
out.write(argument['Name'])
out.write('(')
print_items(arguments, callback, out)
out.writeln(')')
def print_operations(operations, out):
"""Print operations in a "member" object.
Args:
A list of "Operations"
"""
for operation in operations:
out.write(' ')
print_member_with_color(operation, out)
if operation['ExtAttributes']:
print_extattributes_in_member(operation['ExtAttributes'], out)
out.write(str(operation['Type']))
out.write(' ')
if operation['Arguments']:
out.write(operation['Name'])
print_arguments(operation['Arguments'], out)
else:
out.writeln(operation['Name'])
def print_diff(diff, out):
"""Print the diff on a shell.
Args:
A sorted diff
"""
for interface_name, interface in diff.iteritems():
print_member_with_color(interface, out)
out.change_color(Colorize.YELLOW)
out.write('[[')
out.write(interface_name)
out.writeln(']]')
out.reset_color()
for member_name, member in interface.iteritems():
if member_name == 'ExtAttributes':
out.writeln('ExtAttributes')
print_extattributes(member, out)
elif member_name == 'Consts':
out.writeln(' Consts')
print_consts(member, out)
elif member_name == 'Attributes':
out.writeln(' Attributes')
print_attributes(member, out)
elif member_name == 'Operations':
out.writeln(' Operations')
print_operations(member, out)
out.reset_color()
def print_usage():
"""Show usage."""
sys.stdout.write(
'Usage: print_diff.py <diff_file.json> <"TAG"|"ALPHABET">\n')
def main(argv):
if len(argv) != 2:
print_usage()
exit(1)
json_data = argv[0]
order = argv[1]
diff = load_json_file(json_data)
if order == 'TAG':
sort_func = sort_diff_by_tags
elif order == 'ALPHABET':
sort_func = sort_diff_in_alphabetical_order
else:
print_usage()
exit(1)
sorted_diff = sort_func(diff)
out = Colorize(sys.stdout)
print_diff(sorted_diff, out)
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
b92d585af88133a85043e6133717faed799afdb6
|
ac282aae4170238122ac8df8dce7a00b77153fe6
|
/sdk/python/pulumi_kubernetes/coordination/v1beta1/outputs.py
|
2462eb82dc850a328a571b9f112a8a21e6b6b928
|
[
"Apache-2.0"
] |
permissive
|
vizv/pulumi-kubernetes
|
1b85a90477c2f7b0c0309b372239ca074fab65b6
|
80d6793e07e833b7f2b6b678a2d5c447caa99fcc
|
refs/heads/master
| 2023-07-12T03:00:11.964743
| 2021-08-19T23:45:40
| 2021-08-19T23:45:40
| 398,131,961
| 0
| 0
|
Apache-2.0
| 2021-08-20T02:28:07
| 2021-08-20T02:28:06
| null |
UTF-8
|
Python
| false
| false
| 8,072
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ... import meta as _meta
__all__ = [
'Lease',
'LeaseSpec',
]
@pulumi.output_type
class Lease(dict):
"""
Lease defines a lease concept.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "apiVersion":
suggest = "api_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in Lease. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
Lease.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
Lease.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
api_version: Optional[str] = None,
kind: Optional[str] = None,
metadata: Optional['_meta.v1.outputs.ObjectMeta'] = None,
spec: Optional['outputs.LeaseSpec'] = None):
"""
Lease defines a lease concept.
:param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param '_meta.v1.ObjectMetaArgs' metadata: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param 'LeaseSpecArgs' spec: Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'coordination.k8s.io/v1beta1')
if kind is not None:
pulumi.set(__self__, "kind", 'Lease')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[str]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def metadata(self) -> Optional['_meta.v1.outputs.ObjectMeta']:
"""
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def spec(self) -> Optional['outputs.LeaseSpec']:
"""
Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "spec")
@pulumi.output_type
class LeaseSpec(dict):
"""
LeaseSpec is a specification of a Lease.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acquireTime":
suggest = "acquire_time"
elif key == "holderIdentity":
suggest = "holder_identity"
elif key == "leaseDurationSeconds":
suggest = "lease_duration_seconds"
elif key == "leaseTransitions":
suggest = "lease_transitions"
elif key == "renewTime":
suggest = "renew_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LeaseSpec. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LeaseSpec.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LeaseSpec.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acquire_time: Optional[str] = None,
holder_identity: Optional[str] = None,
lease_duration_seconds: Optional[int] = None,
lease_transitions: Optional[int] = None,
renew_time: Optional[str] = None):
"""
LeaseSpec is a specification of a Lease.
:param str acquire_time: acquireTime is a time when the current lease was acquired.
:param str holder_identity: holderIdentity contains the identity of the holder of a current lease.
        :param int lease_duration_seconds: leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed RenewTime.
:param int lease_transitions: leaseTransitions is the number of transitions of a lease between holders.
:param str renew_time: renewTime is a time when the current holder of a lease has last updated the lease.
"""
if acquire_time is not None:
pulumi.set(__self__, "acquire_time", acquire_time)
if holder_identity is not None:
pulumi.set(__self__, "holder_identity", holder_identity)
if lease_duration_seconds is not None:
pulumi.set(__self__, "lease_duration_seconds", lease_duration_seconds)
if lease_transitions is not None:
pulumi.set(__self__, "lease_transitions", lease_transitions)
if renew_time is not None:
pulumi.set(__self__, "renew_time", renew_time)
@property
@pulumi.getter(name="acquireTime")
def acquire_time(self) -> Optional[str]:
"""
acquireTime is a time when the current lease was acquired.
"""
return pulumi.get(self, "acquire_time")
@property
@pulumi.getter(name="holderIdentity")
def holder_identity(self) -> Optional[str]:
"""
holderIdentity contains the identity of the holder of a current lease.
"""
return pulumi.get(self, "holder_identity")
@property
@pulumi.getter(name="leaseDurationSeconds")
def lease_duration_seconds(self) -> Optional[int]:
"""
        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed RenewTime.
"""
return pulumi.get(self, "lease_duration_seconds")
@property
@pulumi.getter(name="leaseTransitions")
def lease_transitions(self) -> Optional[int]:
"""
leaseTransitions is the number of transitions of a lease between holders.
"""
return pulumi.get(self, "lease_transitions")
@property
@pulumi.getter(name="renewTime")
def renew_time(self) -> Optional[str]:
"""
renewTime is a time when the current holder of a lease has last updated the lease.
"""
return pulumi.get(self, "renew_time")
|
[
"noreply@github.com"
] |
vizv.noreply@github.com
|
3a71a6954b18540b5826f16931684603f424880a
|
37e552e107dcd193b278eea58f51107ab75b2232
|
/Lesson1/test1.py
|
516a2ff5a7d5954e84d161b46790ea5587d48bf1
|
[] |
no_license
|
IrenSpecter/LearnProject
|
6aded99e3ed1b617045f558b193a032cdd4fda7a
|
8308b5b393c511f368592d311d8dad50ac57cbf3
|
refs/heads/master
| 2020-03-21T09:24:26.314197
| 2018-07-12T20:53:56
| 2018-07-12T20:53:56
| 138,398,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
user={
    'Andrew': {'city': 'Москва', 'temp': '25', 'wind': 'east'},
    'Max': {'city': 'Spb', 'temp': '20', 'wind': 'north'},
    'Alex': {'city': 'Amsterdam', 'temp': '22', 'wind': 'north-west'}
}
name = input('Enter your name: ')
# if name=='Alex':
# print(user['Alex'])
# elif name=='Max':
# print(user['Max'])
# elif name=='Andrew':
# print(user['Andrew'])
# else:
# print('not available')
# print(name)
# user.index('Andrew')
# user.get('Max', 'not available')
# user.get('ALex','not available')
print(user.get(name, 'not available'))
|
[
"dubrovina.iri@gmail.com"
] |
dubrovina.iri@gmail.com
|
c33cdfd37fc9fdf05fe22f55f1193cc6370acf98
|
25f31909afa432e49b0a77fc469cd9d6e6d72d70
|
/lab assingnments/ex1.py
|
2aa0a52c9fcdfd0104d3512bac7ca89a72d4604d
|
[] |
no_license
|
VenkySVR/Python-DataStructures
|
7d13e044b705fd232db9f0997981ee11f9fb88ad
|
bac0e1697f4da00b93c9fc879f027ddb7371e252
|
refs/heads/master
| 2021-04-19T19:52:05.154734
| 2020-05-09T10:50:57
| 2020-05-09T10:50:57
| 249,631,604
| 1
| 0
| null | 2020-05-06T17:14:54
| 2020-03-24T06:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
#problem1
#create a variable savings. this is a comment line in python
mySavings=550
print(mySavings)
#print out savings
#problem2
#Create a variable savings
mySavings=550
#Create a variable balance
Balance=1.1
#Calculate result
result=100*1.1**7
#Print out result
print(result)
#problem3
#Create a variable others
others="compound interest"
#Create a variable profitable
profitable=False
print(profitable)
a=int(profitable)
print(a)
profitable=False*True
print(profitable)
#problem 5
savings=550
balance=1.1
others="compound interest"
#Assign product of balance and mySavings to first_year_balance
first_year_balance = others * mySavings  # repeats the string `others` 550 times, since mySavings == 550
#Print the type of first_year_balance
print(type(first_year_balance))
#Assign sum of others and others to double others
double_value_others = others + others  # the + operator concatenates the two strings
print(double_value_others)
#problem 6
#Definition of savings and result
savings=100
result=100*1.1**7
#Fix the printout
print("My account had $"+str(mySavings)+" to begin with and now have $"+str(balance)+" Awesome!")
#Definition of pi_string
pi_string="3.1415926"
#Converting pi_string into float: pi_float
pi_float=float(pi_string)
# """
# output
# 550
# 194.87171000000012
# False
# 0
# 0
# <class 'str'>
# compound interestcompound interest
# My account had $550 to begin with and now have $1.1 Awesome!
# """
|
[
"venky.s.vr13@gmail.com"
] |
venky.s.vr13@gmail.com
|
5441774bb7af2ab794278518d094012e4d0d7bf2
|
db28c236fe2d68a0b2d44663f11b56bb3ac963c0
|
/tests/attr/test_shapley.py
|
4bf8248caa5eb9c463f07593c99a29ba024fcd90
|
[
"BSD-3-Clause"
] |
permissive
|
kolvia/captum
|
30e26b959a04a8e4b790df0ef228d49606c52c8a
|
4b6280296dbc740df09afdc592b74e27e0b7ce88
|
refs/heads/master
| 2020-12-27T22:20:23.103700
| 2020-02-18T19:46:25
| 2020-02-18T19:46:25
| 238,082,789
| 0
| 0
|
BSD-3-Clause
| 2020-02-03T23:22:42
| 2020-02-03T23:22:41
| null |
UTF-8
|
Python
| false
| false
| 10,349
|
py
|
#!/usr/bin/env python3
from typing import Any, Callable, Optional, Tuple, Union
import unittest
import torch
from torch import Tensor
from captum.attr._core.shapley_value import ShapleyValueSampling
from captum.attr._utils.typing import TensorOrTupleOfTensors
from .helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from .helpers.utils import assertTensorTuplesAlmostEqual, BaseTest
class Test(BaseTest):
def test_simple_shapley_sampling(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._shapley_test_assert(
net,
inp,
[76.66666, 196.66666, 116.66666],
perturbations_per_eval=(1, 2, 3),
n_samples=250,
)
def test_simple_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._shapley_test_assert(
net,
inp,
[275.0, 275.0, 115.0],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._shapley_test_assert(
net,
inp,
[248.0, 248.0, 104.0],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_shapley_sampling(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]])
self._shapley_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=200,
)
def test_multi_sample_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._shapley_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_shapley_sampling_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._shapley_test_assert(
net, (inp1, inp2, inp3), expected, additional_input=(1,), n_samples=200,
)
def test_multi_input_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
# Remaining tests are for cases where forward function returns a scalar
# per batch, as either a float, integer, 0d tensor or 1d tensor.
def test_single_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp))
)
def test_single_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def test_multi_sample_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_multi_sample_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp))
)
def test_multi_sample_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_multi_sample_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def test_multi_inp_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def test_multi_inp_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_mutli_inp_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def _single_input_one_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _single_input_multi_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[629.0, 629.0, 251.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _multi_input_batch_scalar_shapley_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]],
[[306.6666, 3850.6666, 410.6666]],
[[306.6666, 3850.6666, 410.6666]],
)
self._shapley_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=700,
)
def _shapley_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensors,
expected_attr,
feature_mask: Optional[TensorOrTupleOfTensors] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: Optional[
Union[Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]
] = None,
target: Optional[int] = 0,
n_samples: int = 100,
delta: float = 1.0,
) -> None:
for batch_size in perturbations_per_eval:
shapley_samp = ShapleyValueSampling(model)
attributions = shapley_samp.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if __name__ == "__main__":
unittest.main()
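# --- Illustrative sketch (not part of the test suite) ---
# The pattern these tests exercise, outside of a test harness:
#
# net = BasicModel_MultiLayer()
# sv = ShapleyValueSampling(net)
# attributions = sv.attribute(torch.tensor([[20.0, 50.0, 30.0]]), target=0, n_samples=200)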
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
4cb63b631290bc2f13d74431375d4cdb27a24618
|
eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd
|
/tests/components/mqtt/test_device_trigger.py
|
972b0678ed276b84265f91a5dcfc00732b056518
|
[
"Apache-2.0"
] |
permissive
|
JeffLIrion/home-assistant
|
53966b81b5d5816679f12fc761f79e8777c738d6
|
8f4ec89be6c2505d8a59eee44de335abe308ac9f
|
refs/heads/dev
| 2023-08-22T09:42:02.399277
| 2022-02-16T01:26:13
| 2022-02-16T01:26:13
| 136,679,169
| 5
| 2
|
Apache-2.0
| 2023-09-13T06:59:25
| 2018-06-09T00:58:35
|
Python
|
UTF-8
|
Python
| false
| false
| 43,657
|
py
|
"""The tests for MQTT device triggers."""
import json
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.mqtt import _LOGGER, DOMAIN, debug_info
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.trigger import async_initialize_triggers
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_fire_mqtt_message,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg, mqtt_mock):
"""Test we get the expected triggers from a discovered mqtt device."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla",
"type": "button_short_press",
"subtype": "button_1",
},
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers)
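# Illustrative note (not part of the original test): data1 above is the MQTT discovery
# payload; as a JSON document it reads:
#
# {
#   "automation_type": "trigger",
#   "device": {"identifiers": ["0AFFD2"]},
#   "payload": "short_press",
#   "topic": "foobar/triggers/button1",
#   "type": "button_short_press",
#   "subtype": "button_1"
# }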
async def test_get_unknown_triggers(hass, device_reg, entity_reg, mqtt_mock):
"""Test we don't get unknown triggers."""
# Discover a sensor (without device triggers)
data1 = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, [])
async def test_get_non_existing_triggers(hass, device_reg, entity_reg, mqtt_mock):
"""Test getting non existing triggers."""
# Discover a sensor (without device triggers)
data1 = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, [])
@pytest.mark.no_fail_on_log_exception
async def test_discover_bad_triggers(hass, device_reg, entity_reg, mqtt_mock):
"""Test bad discovery message."""
# Test sending bad data
data0 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payloads": "short_press",'
' "topics": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data0)
await hass.async_block_till_done()
assert device_reg.async_get_device({("mqtt", "0AFFD2")}) is None
# Test sending correct data
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla",
"type": "button_short_press",
"subtype": "button_1",
},
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers)
async def test_update_remove_triggers(hass, device_reg, entity_reg, mqtt_mock):
"""Test triggers can be updated and removed."""
config1 = {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD2"]},
"payload": "short_press",
"topic": "foobar/triggers/button1",
"type": "button_short_press",
"subtype": "button_1",
}
config1["some_future_option_1"] = "future_option_1"
data1 = json.dumps(config1)
config2 = {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD2"]},
"payload": "short_press",
"topic": "foobar/triggers/button1",
"type": "button_short_press",
"subtype": "button_2",
}
config2["topic"] = "foobar/tag_scanned2"
data2 = json.dumps(config2)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
expected_triggers1 = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla",
"type": "button_short_press",
"subtype": "button_1",
},
]
expected_triggers2 = [dict(expected_triggers1[0])]
expected_triggers2[0]["subtype"] = "button_2"
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers1)
# Update trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data2)
await hass.async_block_till_done()
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers2)
# Remove trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", "")
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is None
async def test_if_fires_on_mqtt_message(hass, device_reg, calls, mqtt_mock):
"""Test triggers firing."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
data2 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "long_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_long_press",'
' "subtype": "button_2" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla2",
"type": "button_1",
"subtype": "button_long_press",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("long_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "short_press"
# Fake long press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "long_press")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "long_press"
async def test_if_fires_on_mqtt_message_template(hass, device_reg, calls, mqtt_mock):
"""Test triggers firing."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
" \"payload\": \"{{ 'foo_press'|regex_replace('foo', 'short') }}\","
' "topic": "foobar/triggers/button{{ sqrt(16)|round }}",'
' "type": "button_short_press",'
' "subtype": "button_1",'
' "value_template": "{{ value_json.button }}"}'
)
data2 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
" \"payload\": \"{{ 'foo_press'|regex_replace('foo', 'long') }}\","
' "topic": "foobar/triggers/button{{ sqrt(16)|round }}",'
' "type": "button_long_press",'
' "subtype": "button_2",'
' "value_template": "{{ value_json.button }}"}'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla2",
"type": "button_1",
"subtype": "button_long_press",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("long_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button4", '{"button":"short_press"}')
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "short_press"
# Fake long press.
async_fire_mqtt_message(hass, "foobar/triggers/button4", '{"button":"long_press"}')
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "long_press"
async def test_if_fires_on_mqtt_message_late_discover(
hass, device_reg, calls, mqtt_mock
):
"""Test triggers firing of MQTT device triggers discovered after setup."""
data0 = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
data2 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "long_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_long_press",'
' "subtype": "button_2" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla0/config", data0)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla2",
"type": "button_1",
"subtype": "button_long_press",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("long_press")},
},
},
]
},
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
await hass.async_block_till_done()
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "short_press"
# Fake long press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "long_press")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "long_press"
async def test_if_fires_on_mqtt_message_after_update(
hass, device_reg, calls, mqtt_mock
):
"""Test triggers firing after update."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
data2 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "topic": "foobar/triggers/buttonOne",'
' "type": "button_long_press",'
' "subtype": "button_2" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "")
await hass.async_block_till_done()
assert len(calls) == 1
# Update the trigger with different topic
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data2)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/triggers/button1", "")
await hass.async_block_till_done()
assert len(calls) == 1
async_fire_mqtt_message(hass, "foobar/triggers/buttonOne", "")
await hass.async_block_till_done()
assert len(calls) == 2
# Update the trigger with same topic
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data2)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/triggers/button1", "")
await hass.async_block_till_done()
assert len(calls) == 2
async_fire_mqtt_message(hass, "foobar/triggers/buttonOne", "")
await hass.async_block_till_done()
assert len(calls) == 3
async def test_no_resubscribe_same_topic(hass, device_reg, mqtt_mock):
"""Test subscription to topics without change."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
call_count = mqtt_mock.async_subscribe.call_count
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
assert mqtt_mock.async_subscribe.call_count == call_count
async def test_not_fires_on_mqtt_message_after_remove_by_mqtt(
hass, device_reg, calls, mqtt_mock
):
"""Test triggers not firing after removal."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
# Remove the trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
# Rediscover the trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_not_fires_on_mqtt_message_after_remove_from_registry(
hass, device_reg, calls, mqtt_mock
):
"""Test triggers not firing after removal."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
# Remove the device
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attach_remove(hass, device_reg, mqtt_mock):
"""Test attach and removal of trigger."""
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
calls = []
def callback(trigger):
calls.append(trigger["trigger"]["payload"])
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0] == "short_press"
# Remove the trigger
remove()
await hass.async_block_till_done()
# Verify the triggers are no longer active
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attach_remove_late(hass, device_reg, mqtt_mock):
"""Test attach and removal of trigger ."""
data0 = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla0/config", data0)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
calls = []
def callback(trigger):
calls.append(trigger["trigger"]["payload"])
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
# Fake short press.
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0] == "short_press"
# Remove the trigger
remove()
await hass.async_block_till_done()
# Verify the triggers are no longer active
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attach_remove_late2(hass, device_reg, mqtt_mock):
"""Test attach and removal of trigger ."""
data0 = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
data1 = (
'{ "automation_type":"trigger",'
' "device":{"identifiers":["0AFFD2"]},'
' "payload": "short_press",'
' "topic": "foobar/triggers/button1",'
' "type": "button_short_press",'
' "subtype": "button_1" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla0/config", data0)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
calls = []
def callback(trigger):
calls.append(trigger["trigger"]["payload"])
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "bla1",
"type": "button_short_press",
"subtype": "button_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Remove the trigger
remove()
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
# Verify the triggers are no longer active
async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT device registry integration."""
registry = dr.async_get(hass)
data = json.dumps(
{
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
)
assert device is not None
assert device.connections == {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT device registry integration."""
registry = dr.async_get(hass)
data = json.dumps(
{
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {
"identifiers": ["helloworld"],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
registry = dr.async_get(hass)
config = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {
"identifiers": ["helloworld"],
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.name == "Milk"
async def test_cleanup_trigger(hass, device_reg, entity_reg, mqtt_mock):
"""Test trigger discovery topic is cleaned when device is removed from registry."""
config = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert triggers[0]["type"] == "foo"
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is None
# Verify retained discovery topic has been cleared
mqtt_mock.async_publish.assert_called_once_with(
"homeassistant/device_automation/bla/config", "", 0, True
)
async def test_cleanup_device(hass, device_reg, entity_reg, mqtt_mock):
"""Test removal from device registry when trigger is removed."""
config = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert triggers[0]["type"] == "foo"
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is None
async def test_cleanup_device_several_triggers(hass, device_reg, entity_reg, mqtt_mock):
"""Test removal from device registry when the last trigger is removed."""
config1 = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
config2 = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo2",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
data1 = json.dumps(config1)
data2 = json.dumps(config2)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert len(triggers) == 2
assert triggers[0]["type"] == "foo"
assert triggers[1]["type"] == "foo2"
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
await hass.async_block_till_done()
# Verify device registry entry is not cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert len(triggers) == 1
assert triggers[0]["type"] == "foo2"
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is None
async def test_cleanup_device_with_entity1(hass, device_reg, entity_reg, mqtt_mock):
"""Test removal from device registry for device with entity.
Trigger removed first, then entity.
"""
config1 = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
config2 = {
"name": "test_binary_sensor",
"state_topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
"unique_id": "veryunique",
}
data1 = json.dumps(config1)
data2 = json.dumps(config2)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", data2)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert len(triggers) == 3 # 2 binary_sensor triggers + device trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
await hass.async_block_till_done()
# Verify device registry entry is not cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert len(triggers) == 2 # 2 binary_sensor triggers
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is None
async def test_cleanup_device_with_entity2(hass, device_reg, entity_reg, mqtt_mock):
"""Test removal from device registry for device with entity.
Entity removed first, then trigger.
"""
config1 = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
config2 = {
"name": "test_binary_sensor",
"state_topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
"unique_id": "veryunique",
}
data1 = json.dumps(config1)
data2 = json.dumps(config2)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", data2)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert len(triggers) == 3 # 2 binary_sensor triggers + device trigger
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", "")
await hass.async_block_till_done()
# Verify device registry entry is not cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is not None
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert len(triggers) == 1 # device trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
assert device_entry is None
async def test_trigger_debug_info(hass, mqtt_mock):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
registry = dr.async_get(hass)
config1 = {
"platform": "mqtt",
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
config2 = {
"platform": "mqtt",
"automation_type": "trigger",
"topic": "test-topic2",
"type": "foo",
"subtype": "bar",
"device": {
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
},
}
data = json.dumps(config1)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data)
data = json.dumps(config2)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data)
await hass.async_block_till_done()
device = registry.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
)
assert device is not None
debug_info_data = debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 2
topic_map = {
"homeassistant/device_automation/bla1/config": config1,
"homeassistant/device_automation/bla2/config": config2,
}
assert (
topic_map[debug_info_data["triggers"][0]["discovery_data"]["topic"]]
!= topic_map[debug_info_data["triggers"][1]["discovery_data"]["topic"]]
)
assert (
debug_info_data["triggers"][0]["discovery_data"]["payload"]
== topic_map[debug_info_data["triggers"][0]["discovery_data"]["topic"]]
)
assert (
debug_info_data["triggers"][1]["discovery_data"]["payload"]
== topic_map[debug_info_data["triggers"][1]["discovery_data"]["topic"]]
)
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
await hass.async_block_till_done()
debug_info_data = debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 1
assert (
debug_info_data["triggers"][0]["discovery_data"]["topic"]
== "homeassistant/device_automation/bla2/config"
)
assert debug_info_data["triggers"][0]["discovery_data"]["payload"] == config2
# ----- /com/muhaitian/chapter_eleven/ElevenInstance.py (MuhaitianJose/PythonBasicTutorial, no license) -----
value_a = set([1, 2, 3, 4])
value_b = set([2, 3, 4, 5])
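# The original file ends with just these two set definitions; as a hedged
# illustration (not part of the tutorial file), these are the basic set
# operations such a chapter typically exercises:
print(value_a | value_b)   # union -> {1, 2, 3, 4, 5}
print(value_a & value_b)   # intersection -> {2, 3, 4}
print(value_a - value_b)   # difference -> {1}
print(value_a ^ value_b)   # symmetric difference -> {1, 5}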
# ----- /blog/views.py (AsmenKoc/my-first-blog, no license) -----
from django.shortcuts import render
def post_list(request):
return render(request, 'blog/post_list.html')
# Create your views here.
# ----- /examples/scripts/ct_astra_3d_tv_admm.py (lanl/scico, BSD-3-Clause) -----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SCICO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
r"""
3D TV-Regularized Sparse-View CT Reconstruction
===============================================
This example demonstrates solution of a sparse-view, 3D CT
reconstruction problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 3D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import functional, linop, loss, metric, plot
from scico.examples import create_tangle_phantom
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image and projector.
"""
Nx = 128
Ny = 256
Nz = 64
tangle = create_tangle_phantom(Nx, Ny, Nz)
tangle = jax.device_put(tangle)
n_projection = 10 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(
tangle.shape, [1.0, 1.0], [Nz, max(Nx, Ny)], angles
) # Radon transform operator
y = A @ tangle # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=tangle.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = A.T(y)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
tangle_recon = solver.x
print(
"TV Restruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon))
)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(7, 5))
plot.imview(tangle[32], title="Ground truth (central slice)", cbar=None, fig=fig, ax=ax[0])
plot.imview(
tangle_recon[32],
title="TV Reconstruction (central slice)\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon)),
fig=fig,
ax=ax[1],
)
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
input("\nWaiting for input to close figures and exit")
# ----- /manage.py (likun-github/boat-back, no license) -----
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weixin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
# ----- /qn 56.py (Prashant414/python, no license) -----
# **********
# **********
# **********
# **********
for i in range (4):
for j in range (1,11):
print("*",end="")
print()
# ----- /storage/utils/random_number.py (kierodeveloper/registro_kiero, no license) -----
# Random generation of string IDs.
import random
import string
from datetime import datetime, timedelta
# Define a function for a random string ID with a size parameter.
def ran_gen(size, chars=string.ascii_uppercase + string.digits):
generateID = ''.join(random.choice(chars) for x in range(size))
return generateID
def ran_gen_with_date(size, chars=string.ascii_uppercase + string.digits ):
generateID = ''.join(random.choice(chars) for x in range(size))
date = datetime.now().strftime('%Y%m%d')
reference_code = '{0}-{1}-KIERO-CC'.format(date,str(generateID))
return reference_code
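# Hypothetical usage of the helpers above (added for illustration; not part of
# the original module). The second call yields an identifier of the form
# 'YYYYMMDD-XXXXXXXX-KIERO-CC'.
if __name__ == "__main__":
    print(ran_gen(8))
    print(ran_gen_with_date(8))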
# ----- /computer_science/data_structures/graph/graph.py (paaqwazi/algorithms, MIT) -----
class Vertex:
def __init__(self, key):
self.key = key
self.connected_to = {}
def add_neighbor(self, neighbor, weight):
self.connected_to[neighbor] = weight
class Graph:
def __init__(self):
self.vertices_list = {}
self.number_of_vertices = 0
def add_vertex(self, key):
self.vertices_list[key] = Vertex(key)
self.number_of_vertices += 1
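# Hedged usage sketch (not in the original file): the Graph class has no add_edge
# helper, so edges are attached through the Vertex objects directly.
if __name__ == "__main__":
    g = Graph()
    for key in ("a", "b", "c"):
        g.add_vertex(key)
    g.vertices_list["a"].add_neighbor("b", 3)
    g.vertices_list["b"].add_neighbor("c", 1)
    print(g.number_of_vertices)               # 3
    print(g.vertices_list["a"].connected_to)  # {'b': 3}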
# ----- /server.py (butlerwilson/python-machine-learning-api-service, no license) -----
from flask import Flask
from flask import request
from flask import jsonify
import joblib
import numpy as np
MODEL_PATH = "./model/rfc.model"
model = joblib.load(MODEL_PATH)
app = Flask(__name__)
@app.route('/', methods=["POST"])
def predict():
if not request.json or "feature" not in request.json:
response = {
"errno": 1,
"errmsg": "No feature!!!",
"label": "Unknown"
}
else:
try:
feature = request.json["feature"]
label = model.predict(np.array([feature]))
response = {
"errno": 0,
"errmsg": "predict success.",
"label": int(label[0])
}
except Exception as e:
response = {
"errno": 2,
"errmsg": str(e),
"label": "Unknown"
}
return jsonify(response)
if __name__ == '__main__':
app.run()
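# A hypothetical client call for the service above (feature vector, host and
# port are assumptions for illustration, not taken from the original repository):
#
#   import requests
#   resp = requests.post("http://127.0.0.1:5000/",
#                        json={"feature": [5.1, 3.5, 1.4, 0.2]})
#   print(resp.json())  # e.g. {"errno": 0, "errmsg": "predict success.", "label": 0}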
# ----- /manage.py (coderfender/Travello, no license) -----
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Travello.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
# ----- /sched/adaptdl_sched/config.py (rohitpandey13/adaptdl, Apache-2.0) -----
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
ADAPTDL_PH_LABEL = 'adaptdl/placeholder'
def allowed_taints(taints):
if not taints:
return True
return (len(taints) == 1 and taints[0].key == "petuum.com/nodegroup" and
taints[0].value == "adaptdl")
def get_namespace():
# for code running outside of AdaptDL
if not os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/namespace"): # noqa: E501
return "default"
with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace") as f:
return f.read()
def get_image():
return os.environ["ADAPTDL_IMAGE"]
def get_adaptdl_deployment():
return os.environ["ADAPTDL_SCHED_DEPLOYMENT"]
def get_supervisor_url():
return os.environ["ADAPTDL_SUPERVISOR_URL"]
def get_supervisor_port():
return os.getenv("ADAPTDL_SUPERVISOR_SERVICE_PORT", 8080)
def get_storage_subpath():
return os.environ["ADAPTDL_STORAGE_SUBPATH"]
def get_job_default_resources():
val = os.getenv("ADAPTDL_JOB_DEFAULT_RESOURCES")
return json.loads(val) if val is not None else None
# ----- /scripts/scenario_2/03-kmeans.py (StevenGolovkine/fcubt, MIT) -----
import multiprocessing
import numpy as np
import pickle
import sys
import time
from FDApy.representation.functional_data import DenseFunctionalData
from FDApy.clustering.fcubt import Node, FCUBT
from joblib import Parallel, delayed
from skfda import FDataGrid
from skfda.ml.clustering import KMeans
from sklearn.metrics import adjusted_rand_score
NUM_CORES = multiprocessing.cpu_count()
def analyze_data(idx):
argvals = np.loadtxt('./data/argvals.csv')
values_A = np.loadtxt(f'./data/scenario_2_{idx}_A_smooth.csv',
delimiter=',')
values_B = np.loadtxt(f'./data/scenario_2_{idx}_B_smooth.csv',
delimiter=',')
labels = np.loadtxt('./data/labels.csv')
data_fd_A = DenseFunctionalData({'input_dim_0': argvals}, values_A)
data_fd_B = DenseFunctionalData({'input_dim_0': argvals}, values_B)
data_fd_A_smooth = data_fd_A.smooth(points=0.5, neighborhood=6)
data_fd_B_smooth = data_fd_B.smooth(points=0.5, neighborhood=6)
data_matrix = np.stack([data_fd_A_smooth.values, data_fd_B_smooth.values], axis=-1)
sample_points = data_fd_A_smooth.argvals['input_dim_0']
fdata = FDataGrid(data_matrix, sample_points)
results_file = {}
for n_clus in np.arange(2, 9, 1):
kmeans = KMeans(n_clus)
final_labels = kmeans.fit_predict(fdata)
ARI = adjusted_rand_score(labels, final_labels)
results_file[n_clus] = ARI
return results_file
def analyze_data_derivative(idx):
argvals = np.loadtxt('./data/argvals.csv')
values_A = np.loadtxt(f'./data/scenario_2_{idx}_A_smooth.csv',
delimiter=',')
values_B = np.loadtxt(f'./data/scenario_2_{idx}_B_smooth.csv',
delimiter=',')
labels = np.loadtxt('./data/labels.csv')
data_fd_A = DenseFunctionalData({'input_dim_0': argvals}, values_A)
data_fd_B = DenseFunctionalData({'input_dim_0': argvals}, values_B)
data_fd_A_smooth = data_fd_A.smooth(points=0.5, neighborhood=6)
data_fd_B_smooth = data_fd_B.smooth(points=0.5, neighborhood=6)
data_matrix = np.stack([data_fd_A_smooth.values, data_fd_B_smooth.values],
axis=-1)
sample_points = data_fd_A_smooth.argvals['input_dim_0']
fdata = FDataGrid(data_matrix, sample_points)
fdata_derivative = fdata.derivative(order=1)
results_file = {}
for n_clus in np.arange(2, 9, 1):
kmeans = KMeans(n_clus)
final_labels = kmeans.fit_predict(fdata_derivative)
ARI = adjusted_rand_score(labels, final_labels)
results_file[n_clus] = ARI
return results_file
def main():
inputs = range(500)
start = time.time()
results = Parallel(n_jobs=NUM_CORES)(delayed(analyze_data)(i)
for i in inputs)
print(f'{time.time() - start}')
file = open("./results_kmeans.pkl", "wb")
pickle.dump(results, file)
file.close()
start = time.time()
results = Parallel(n_jobs=NUM_CORES)(delayed(analyze_data_derivative)(i)
for i in inputs)
print(f'{time.time() - start}')
file = open("./results_kmeans_derivatives.pkl", "wb")
pickle.dump(results, file)
file.close()
if __name__ == '__main__':
main()
# ----- /homeassistant/components/arcam_fmj/device_trigger.py (Adminiuga/home-assistant, Apache-2.0) -----
"""Provides device automations for Arcam FMJ Receiver control."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, Event, HassJob, HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.trigger import TriggerActionType, TriggerInfo
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, EVENT_TURN_ON
TRIGGER_TYPES = {"turn_on"}
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(
hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
"""List device triggers for Arcam FMJ Receiver control devices."""
registry = entity_registry.async_get(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain == "media_player":
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_on",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: TriggerActionType,
trigger_info: TriggerInfo,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
trigger_data = trigger_info["trigger_data"]
job = HassJob(action)
if config[CONF_TYPE] == "turn_on":
entity_id = config[CONF_ENTITY_ID]
@callback
def _handle_event(event: Event):
if event.data[ATTR_ENTITY_ID] == entity_id:
hass.async_run_hass_job(
job,
{
"trigger": {
**trigger_data, # type: ignore[arg-type] # https://github.com/python/mypy/issues/9117
**config,
"description": f"{DOMAIN} - {entity_id}",
}
},
event.context,
)
return hass.bus.async_listen(EVENT_TURN_ON, _handle_event)
return lambda: None
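# A hypothetical automation entry (shown as a comment; device_id and entity_id
# are placeholders, not taken from the integration docs) that would match the
# trigger produced by async_get_triggers above:
#
#   trigger:
#     platform: device
#     domain: arcam_fmj
#     device_id: abcdef0123456789abcdef0123456789
#     entity_id: media_player.arcam_fmj
#     type: turn_on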
# ----- /Chapter05/cars.py (d1rtyst4r/archivetempLearningPythonGPDVWA, no license) -----
# IF, True and False
cars = ['bmw', 'audi', 'toyota', 'subaru']
for car in cars:
if car == 'bmw':
print(car.upper())
else:
print(car.title())
car = 'Audi'
print(car == 'audi')
print(car.lower() == 'audi')
# ----- /xlsxwriter/test/comparison/test_chart_column02.py (elessarelfstone/XlsxWriter, BSD-2-Clause-Views) -----
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_column02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
chart.axis_ids = [49388544, 69387008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
# ----- /Easy_inOut_AutoParking/scripts/controller_right.py (hajeonghan/wecar, no license) -----
#!/usr/bin/env python
# echo_server.py
#-*- coding:utf-8 -*-
import rospy
from sensor_msgs.msg import LaserScan,PointCloud
from std_msgs.msg import Float64
from vesc_msgs.msg import VescStateStamped
from math import cos,sin,pi
from geometry_msgs.msg import Point32
class right_controller:
def __init__(self):
# rospy.init_node('simple_controller',anonymous=True)
rospy.Subscriber("/scan",LaserScan,self.laser_callback)
self.motor_pub = rospy.Publisher('commands/motor/speed',Float64,queue_size=1)
self.servo_pub = rospy.Publisher('commands/servo/position',Float64,queue_size=1)
self.pcd_pub = rospy.Publisher('laser2pcd',PointCloud,queue_size=1)
self.control=True
self.laser_data=0
self.stop=0
self.start = 0
print("control start!")
while not rospy.is_shutdown():
rospy.spin()
def laser_callback(self,msg):
pcd = PointCloud()
motor_msg = Float64()
servo_value = Float64()
pcd.header.frame_id = msg.header.frame_id
angle = 0
servo_value = 0.555
for r in msg.ranges:
tmp_point= Point32()
tmp_point.x = r*cos(angle)
tmp_point.y = r*sin(angle)
angle = angle + (1.0/180*pi)
if r<2 :
pcd.points.append(tmp_point)
right=0
left=0
front=0
ready=0
point_get = []
for pd in range(0, 360):
# Range so big
if str(msg.ranges[pd]) == 'inf':
point_get.append(float(0.0))
else:
point_get.append(float(msg.ranges[pd]))
# #right
if point_get[89]>point_get[90] and point_get[89]>point_get[91]:
right=point_get[89]
elif point_get[90]>point_get[89] and point_get[90]>point_get[91]:
right=point_get[90]
elif point_get[91]>point_get[89] and point_get[91]>point_get[90]:
right=point_get[91]
# left
if point_get[269]>point_get[270] and point_get[269]>point_get[271]:
left=point_get[269]
elif point_get[270]>point_get[269] and point_get[270]>point_get[271]:
left=point_get[270]
elif point_get[271]>point_get[269] and point_get[271]>point_get[270]:
        left=point_get[271]
# front
if point_get[179]>point_get[180] and point_get[179]>point_get[181]:
front=point_get[179]
elif point_get[180]>point_get[179] and point_get[180]>point_get[181]:
front=point_get[180]
elif point_get[181]>point_get[179] and point_get[181]>point_get[180]:
        front=point_get[181]
print("right: ", right)
print("left: ", left)
print("front: ",front)
if right == 0 and left == 0 and front==0:
motor_msg.data = 0
print("00000000000")
# go ahead
if self.start == 0:
if 0 < front < 1.0:
self.start =1
else:
servo_value=0.5555
motor_msg.data=1000
# right
elif self.start == 1:
if 1.2 < left < 1.3:
servo_value = 0.555
motor_msg.data = 0
print("ok")
self.control=False
rospy.signal_shutdown("reason")
else:
print("right!!!!!!!!!!!")
servo_value = 0.99
motor_msg.data = 1000
self.motor_pub.publish(motor_msg)
self.servo_pub.publish(servo_value)
self.pcd_pub.publish(pcd)
# if __name__ == '__main__':
# try:
# test_track = simple_controller()
# except rospy.ROSInterruptException:
# pass
# ----- /LeetCode/Math/Unique Binary Search Trees/6047198844.py (DKU-STUDY/Algorithm, no license) -----
class Solution:
def numTrees(self, n: int) -> int:
memo = [0]*(n+1)
memo[0] = memo[1] = 1
for i in range(2, n+1):
for j in range(i):
memo[i] += memo[j]*memo[i-j-1]
return memo[n]
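# Sanity check added for illustration (not part of the original submission):
# numTrees(n) is the n-th Catalan number, so numTrees(3) == 5 and numTrees(4) == 14.
if __name__ == "__main__":
    solution = Solution()
    assert solution.numTrees(3) == 5
    assert solution.numTrees(4) == 14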
# ----- /2. Using Python to Interact with the Operating System/Week 4/Data Streams/streams.py (nikolayninov/Google, no license) -----
#!/usr/bin/python3
data = input("This will come from STDIN: ")
print("Now we write it to STDOUT: " + data)
print("Now we generate an error to STDERR: " + data + 1)
|
[
"nikininov1@gmail.com"
] |
nikininov1@gmail.com
|
793b2e5a024fc539bd31302e8dc1454beb432387
|
6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff
|
/sagemaker-pipeline-compare-model-versions/evaluate.py
|
7961a7861c292fa262341b056359d45a950321af
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aws/amazon-sagemaker-examples
|
8359afe544e873662bda5b8d2b07399c437213c9
|
43dae4b28531cde167598f104f582168b0a4141f
|
refs/heads/main
| 2023-08-26T04:42:52.342776
| 2023-08-25T14:37:19
| 2023-08-25T14:37:19
| 107,937,815
| 4,797
| 3,519
|
Apache-2.0
| 2023-09-14T19:47:03
| 2017-10-23T05:55:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
"""Evaluation script for measuring model accuracy."""
import json
import logging
import pathlib
import pickle
import tarfile
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
confusion_matrix,
roc_curve,
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == "__main__":
model_path = "/opt/ml/processing/model/model.tar.gz"
with tarfile.open(model_path) as tar:
tar.extractall(path="..")
logger.debug("Loading xgboost model.")
model = pickle.load(open("xgboost-model", "rb"))
logger.debug("Loading test input data.")
test_path = "/opt/ml/processing/test/test.csv"
df = pd.read_csv(test_path, header=None)
logger.debug("Reading test data.")
y_test = df.iloc[:, 0].to_numpy()
df.drop(df.columns[0], axis=1, inplace=True)
X_test = xgboost.DMatrix(df.values)
logger.info("Performing predictions against test data.")
prediction_probabilities = model.predict(X_test)
predictions = np.round(prediction_probabilities)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
accuracy = accuracy_score(y_test, predictions)
conf_matrix = confusion_matrix(y_test, predictions)
fpr, tpr, _ = roc_curve(y_test, prediction_probabilities)
logger.debug("Accuracy: {}".format(accuracy))
logger.debug("Precision: {}".format(precision))
logger.debug("Recall: {}".format(recall))
logger.debug("Confusion matrix: {}".format(conf_matrix))
# Available metrics to add to model: https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html
report_dict = {
"binary_classification_metrics": {
"accuracy": {"value": accuracy, "standard_deviation": "NaN"},
"precision": {"value": precision, "standard_deviation": "NaN"},
"recall": {"value": recall, "standard_deviation": "NaN"},
"confusion_matrix": {
"0": {"0": int(conf_matrix[0][0]), "1": int(conf_matrix[0][1])},
"1": {"0": int(conf_matrix[1][0]), "1": int(conf_matrix[1][1])},
},
"receiver_operating_characteristic_curve": {
"false_positive_rates": list(fpr),
"true_positive_rates": list(tpr),
},
},
}
output_dir = "/opt/ml/processing/evaluation"
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
evaluation_path = f"{output_dir}/evaluation.json"
with open(evaluation_path, "w") as f:
f.write(json.dumps(report_dict))
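As a hedged follow-up sketch (my addition; the path and field names are taken from the script above, nothing else is assumed), the report written to evaluation.json can be read back and inspected like this:
# Sketch only: reads the report that the script above writes out.
import json

with open("/opt/ml/processing/evaluation/evaluation.json") as f:
    report = json.load(f)

metrics = report["binary_classification_metrics"]
print("accuracy:", metrics["accuracy"]["value"])
print("precision:", metrics["precision"]["value"])
print("recall:", metrics["recall"]["value"])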
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|
2619773e6b9abc0cb10f16574e1453baf9e0b51f
|
8f988f4fd1044b10c11039a0e38e65980e7af28b
|
/service/comment.py
|
3c80ec41993b6026ce98890edd7e1d3589fcd323
|
[] |
no_license
|
hljyunxi/personal_site
|
fbaa276a90c3e42ec7e7a715525e648b80575b15
|
31cd67712e6fdff134578bf4b18c6d3e7b884973
|
refs/heads/master
| 2021-01-19T06:26:43.109865
| 2012-12-24T07:18:59
| 2012-12-24T07:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
import datetime
from google.appengine.ext import ndb
from google.appengine.api import users
from model import *
from singleton import Singleton
from base_service import BaseService
class CommentService(Singleton, BaseService):
def get_comments(self, entity_key_string):
key = ndb.Key(urlsafe = entity_key_string)
comments = Comment.query(ancestor = key).fetch()
return comments
def delete_comment(self, comment_key_string):
key = ndb.Key(urlsafe = comment_key_string)
entity_key = key.parent()
key.delete()
self.update_comment_num(entity_key)
def add_comment(self, account, entity_key_string, content):
# TODO: Check anonymouse status for user on question.
# anonymouse user couldn't comment, except the question author or answer author.
parent_key = ndb.Key(urlsafe = entity_key_string)
comment = Comment(
parent = parent_key,
author = account.key,
real_author = account.key,
content = content
)
comment.put()
self.update_comment_num(parent_key)
return comment
def update_comment_num(self, entity_key):
count = Comment.query(ancestor = entity_key).count()
entity = entity_key.get()
entity.comment_num = count
entity.put()
# def get_question_comments(self, question_id):
# question = Question.get_by_id(int(question_id))
# return self.get_entity_comments(question)
#
# def delete_answer_comment(self, question_id, comment_id):
# question = Question.get_by_id(int(question_id))
# self.delete_entity_comment(question, comment_id)
#
# def add_answer_comment(self, account, question_id, content):
# question = Question.get_by_id(int(question_id))
# self.add_entity_comment(account, question, content)
#
#
# def get_answer_comments(self, question_id, answer_id):
# question = Question.get_by_id(int(question_id))
# answer = Answer.get_by_id(parent = question.key, id = int(answer_id))
# return self.get_entity_comments(answer)
#
# def delete_answer_comment(self, question_id, answer_id, comment_id):
# question = Question.get_by_id(int(question_id))
# answer = Answer.get_by_id(parent = question.key, id = int(answer_id))
# self.delete_entity_comment(answer, comment_id)
#
# def add_answer_comment(self, account, question_id, answer_id, content):
# question = Question.get_by_id(int(question_id))
# answer = Answer.get_by_id(parent = question.key, id = int(answer_id))
# self.add_entity_comment(account, answer, content)
#
#
#
|
[
"matin0728@gmail.com"
] |
matin0728@gmail.com
|
79c22466fafa880444dd06055e1e1df8d366216b
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/함수기본값_20200705103750.py
|
59bb53b6f9122637051f3596b1582ea96f8c3536
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279
| 2021-02-25T12:02:04
| 2021-02-25T12:02:04
| 342,230,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
# def profile(name, age, main_lang):
# print("이름 : {0}\t나이 : {1}\t주 사용 언어: {2}"
# .format(name, age, main_lang))
# profile("유재석", 20, "파이썬")
# profile("김태호", 25, "자바")
# Same school, same grade, same class, same lesson
# age is given a default too: a parameter without a default may not follow parameters with defaults
def profile(name="dd", age=0, main_lang="파이썬"):
print("이름 : {0}\t나이 : {1}\t주 사용 언어: {2}"
.format(name, age, main_lang))
profile("")
profile("김태호")
|
[
"sangha0719@gmail.com"
] |
sangha0719@gmail.com
|
d34aeef1be122ce5b6a7a2bc4f5c489acabb65b3
|
1e0a2ec012905f5c7441701dd32c983b5bb6cd93
|
/code/rnn_python/testing_all_data.py
|
e44d6d3b307e99ef15277be1e9eec2cbfd45108b
|
[] |
no_license
|
benkha/RRN-Weather
|
55740001837e1f2cb690b1cda14e06e7c5bdc5c1
|
dd0f273d94b849282438b21532fbbc4fbe523cc8
|
refs/heads/master
| 2021-01-24T03:43:01.392832
| 2018-04-27T19:53:42
| 2018-04-27T19:53:42
| 122,902,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,932
|
py
|
from rnn import *
from parse_data import *
out_path = './data/models_summary/'
HIDDEN_LAYERS = 10
PAST_DAYS = 7
TESTING_PERCENTAGE = 0.1
n_epochs = 300
normalization = 'zscore'
n_in = 18
n_out = 2
print("Hidden Layers ", HIDDEN_LAYERS)
print("PAST_DAYS ", PAST_DAYS)
print("TESTING_PERCENTAGE ", TESTING_PERCENTAGE)
print("Epochs Num ", n_epochs)
print("Normalization ", normalization)
Experiment_KEY = 'Hidden_'+str(HIDDEN_LAYERS)+'_DAYS_'+str(PAST_DAYS)+'_nepoch_'+str(n_epochs)+'_normalization_'+str(normalization)+'_features_'+str(n_in)
print(Experiment_KEY)
cluster_subset=[1,5,11,19]
airports_clusters = get_clusters()
tst_indexes = get_test_indexes()
airports, airports_data = parse_data(normalization=normalization)
real_test_data=get_real_test_target(tst_indexes)
airports_clusters_data = {}
airports_all_data = {'features_trn': [], 'targets_trn': [], 'features_tst': [], 'targets_tst': [],'targets_tst_real':[]}
for air_id in airports:
cluster_id = airports_clusters[air_id]
if cluster_id in cluster_subset:
data = []
for rec in airports_data[air_id]:
data.append(rec)
features_trn, targets_trn, features_tst, targets_tst ,targets_tst_real = divide_to_sequence(data, tst_indexes, feature_num=n_in,
PAST_DAYS=PAST_DAYS,real_test_data=real_test_data)
if airports_clusters_data.get(cluster_id, 0) == 0:
airports_clusters_data[cluster_id] = {}
airports_clusters_data[cluster_id]['features_trn'] = []
airports_clusters_data[cluster_id]['targets_trn'] = []
airports_clusters_data[cluster_id]['features_tst'] = []
airports_clusters_data[cluster_id]['targets_tst'] = []
airports_clusters_data[cluster_id]['targets_tst_real'] = []
for rec in features_trn:
airports_clusters_data[cluster_id]['features_trn'].append(rec)
airports_all_data['features_trn'].append(rec)
for rec in targets_trn:
airports_clusters_data[cluster_id]['targets_trn'].append(rec)
airports_all_data['targets_trn'].append(rec)
for rec in features_tst:
airports_clusters_data[cluster_id]['features_tst'].append(rec)
airports_all_data['features_tst'].append(rec)
for rec in targets_tst:
airports_clusters_data[cluster_id]['targets_tst'].append(rec)
airports_all_data['targets_tst'].append(rec)
for rec in targets_tst_real:
airports_clusters_data[cluster_id]['targets_tst_real'].append(rec)
airports_all_data['targets_tst_real'].append(rec)
features_trn = airports_all_data['features_trn']
targets_trn = airports_all_data['targets_trn']
features_tst = airports_all_data['features_tst']
targets_tst = airports_all_data['targets_tst']
targets_tst_real = airports_all_data['targets_tst_real']
model = MetaRNN()
model.load(out_path + "/all_data/" + Experiment_KEY + '_obj.save')
counter = 0
err_mintmp = 0
err_maxtmp = 0
if (len(features_tst) > 0):
for one_seq in features_tst:
guess = model.predict(one_seq)
from denrmalization import *
if normalization == 'minmax':
prediction = minmax(guess[-1][0], temp_max_min, temp_max_max)
else:
prediction = zscore(guess[-1][0], temp_max_mean, temp_max_sigma)
real = float(targets_tst_real[counter][0])
err_maxtmp += abs(prediction-real)
if normalization == 'minmax':
prediction = minmax(guess[-1][1], temp_min_min, temp_min_max)
else:
prediction = zscore(guess[-1][1], temp_min_mean, temp_min_sigma)
real = float(targets_tst_real[counter][1])
err_mintmp += abs(prediction - real)
counter += 1
all_error_min = err_mintmp / counter
all_error_max = err_maxtmp / counter
print(all_error_min)
print(all_error_max)
|
[
"ben.kha@outlook.com"
] |
ben.kha@outlook.com
|
2f72d31f342afd2fb69bd53b974ec2df152958ac
|
5233ad777ffe38c9d090db3e50a048f5ae40efeb
|
/adomicilio/urls.py
|
4c8d99810c9af31bbd657835ad29dae4b21b1913
|
[
"MIT"
] |
permissive
|
EmaSMach/adomicilio
|
410b97c29453112bea941da5b7103f82c496862a
|
1b4124735c267623ab2d85bfc2ece201cb3962a0
|
refs/heads/master
| 2023-07-11T02:27:48.418810
| 2021-08-17T15:36:01
| 2021-08-17T15:36:01
| 293,381,445
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
"""adomicilio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('apps.home.urls')),
path('cuentas/', include('apps.cuentas.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"davidemanuelsandoval@gmail.com"
] |
davidemanuelsandoval@gmail.com
|
e95f53ad8929e7ea33f03c001ca8cc88127d3825
|
cc73545251d247a2937b8c8c8b083d2afcb45f51
|
/ML-In-Action/Supervised-Learning-Algorithms/decision-trees/trees.py
|
b4a7d82cfe67a31eeab425f32b4177c494d3b30e
|
[] |
no_license
|
rustydigg918/cracking-the-data-science-interview
|
be5b83ecdde10ef14699fe9aa461512cc51bb51c
|
c2f3e21065885cb7bf81e38a9dc5d7be1785bf39
|
refs/heads/master
| 2020-06-20T04:11:51.055877
| 2019-07-13T02:24:01
| 2019-07-13T02:24:01
| 196,987,607
| 1
| 0
| null | 2019-07-15T11:42:45
| 2019-07-15T11:42:45
| null |
UTF-8
|
Python
| false
| false
| 3,850
|
py
|
from math import log
import operator
def createDataSet():
dataSet = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
#change to discrete values
return dataSet, labels
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
# the the number of unique elements and their occurance
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(prob,2) #log base 2
return shannonEnt
def splitDataSet(dataSet, axis, value):
# Create separate list
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis] #chop out axis used for splitting
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1 #the last column is used for the labels
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0; bestFeature = -1
for i in range(numFeatures): #iterate over all the features
featList = [example[i] for example in dataSet]#create a list of all the examples of this feature
uniqueVals = set(featList) #get a set of unique values
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy #calculate the info gain; ie reduction in entropy
if (infoGain > bestInfoGain): #compare this to the best gain so far
bestInfoGain = infoGain #if better than current best, set to best
bestFeature = i
return bestFeature #returns an integer
def majorityCnt(classList):
classCount={}
for vote in classList:
if vote not in classCount.keys(): classCount[vote] = 0
classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)  # items() for Python 3
return sortedClassCount[0][0]
def createTree(dataSet,labels):
classList = [example[-1] for example in dataSet]
# stop splitting when all of the classes are equal
if classList.count(classList[0]) == len(classList):
return classList[0]
# stop splitting when there are no more features in dataSet
if len(dataSet[0]) == 1:
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
# Get list of unique values
del(labels[bestFeat])
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
# copy all of labels, so trees don't mess up existing labels
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),subLabels)
return myTree
def classify(inputTree, featLabels, testVec):
    firstStr = list(inputTree.keys())[0]  # keys() is a view in Python 3, so it is not indexable directly
secondDict = inputTree[firstStr]
# Translate label string to index
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else: classLabel = secondDict[key]
return classLabel
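A minimal usage sketch for the functions above (my addition, using only the toy data set defined in this file). Note that createTree deletes entries from its labels argument, so a copy is passed and the original list is kept for classify.
# Illustrative usage only
if __name__ == '__main__':
    myDat, labels = createDataSet()
    tree = createTree(myDat, labels[:])    # pass a copy; createTree consumes labels
    print(tree)                            # expected: {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    print(classify(tree, labels, [1, 0]))  # expected: 'no'
    print(classify(tree, labels, [1, 1]))  # expected: 'yes'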
|
[
"le_j6@denison.edu"
] |
le_j6@denison.edu
|
3d13591f1686e2388bab57bd82896252e3ae5ad1
|
1dcd99bb96d9c51b2b561e7c2e54615cf2bc5ced
|
/Question/sw_expert/D1/2070.py
|
ad4fe43c458e966ab5707e6cd08c70e0610bd6f7
|
[] |
no_license
|
dongsik93/HomeStudy
|
62bbcad93be49ed396fe9d50e840f921bb751d4e
|
3a28ff8c0b522a546ea2ed07c939f49bac3699c7
|
refs/heads/master
| 2020-04-15T02:12:36.615830
| 2019-04-05T14:31:10
| 2019-04-05T14:31:10
| 164,306,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
T = int(input())
for i in range(T):
num = input().split()
for j in range(1,len(num)):
if(num[j-1] == num[j]):
a = "="
elif(num[j-1] < num[j]):
a = "<"
else:
a = ">"
print(f"#{i+1} {a}")
|
[
"ehdtlr9376@naver.com"
] |
ehdtlr9376@naver.com
|
46a7dc7d39d0004436658a782bd0079b40b4fe48
|
69b96f181557286c1cbddbd1502848d6cc744a7a
|
/Python/GTK/main.py
|
0850a8e92eca187283c5dff00c1ceb03796a7e92
|
[] |
no_license
|
WeepingDogel/drafts
|
1bfe2af41da17529ddefd6b810fdc7fe1a889620
|
f011af6ad31b7e3becc213f98999312160f786d1
|
refs/heads/main
| 2023-07-03T11:25:27.023304
| 2021-08-07T08:30:39
| 2021-08-07T08:30:39
| 376,811,396
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
'''
A minimal GTK example program
'''
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
class Windows(Gtk.Window):
    def __init__(self):
Gtk.Window.__init__(self, title="Hello World")
self.button = Gtk.Button(label="Click Here")
self.button.connect("clicked", self.on_button_clicked)
self.add(self.button)
def on_button_clicked(self, widget):
print("Hello World")
win = Windows()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
|
[
"weepingdogel@gmail.com"
] |
weepingdogel@gmail.com
|
3d989d910f6713c152f986aca7c81af9d1b9ca65
|
38f469b99d7768b4b640dd53bc5d28583287aa54
|
/lib/optimize/soft_assign.py
|
56dbd3484cdd2e030d2ec54140c117d428173660
|
[] |
no_license
|
neumannjan/charging-station-siting-sizing
|
32a865feaf8739c0235b4eaa45f1e72084d5b9aa
|
7d50935599cbaed0f75a0a04322c2140096d0e32
|
refs/heads/main
| 2023-07-18T07:16:58.821613
| 2021-09-04T14:06:03
| 2021-09-04T14:08:34
| 369,623,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,001
|
py
|
import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import gurobipy as gp
import numpy as np
import pandas as pd
from gurobipy import GRB
from .. import simulation as sim
from .. import utils
from ..budget import BudgetDist, ListBudgetDist, DictBudgetDist
from .hard_assign import optimize_hard_assign
from .. import traffic as trf
from . import utils as optutils
def codify(vals):
vals = np.array(vals)
vals_map, codes = np.unique(vals, return_inverse=True)
codes = np.reshape(codes, vals.shape)
return np.array(vals_map), codes
def idx_groups(log: pd.DataFrame, additional_cols=[]):
# creates an array of indices, sorted by unique element
# sorts records array so all unique elements are together
log_sorted = log[additional_cols].reset_index()
log_sorted.sort_values(
[log_sorted.columns[0], *additional_cols], inplace=True)
idx_sort = np.array(log_sorted.index)
sorted_vals = np.array(log_sorted[log_sorted.columns[0]])
# returns the unique values, the index of the first occurrence of a value, and the count for each element
_, idx_start = np.unique(
sorted_vals, return_index=True)
# splits the indices into separate arrays
res = np.split(idx_sort, idx_start[1:])
return res
class _ModelBuilder:
def __init__(self):
self.model: gp.Model = None
self.log: pd.DataFrame = None
self.log_original: pd.DataFrame = None
self.max_station_distance: float = float('inf')
self.max_allowed_budget: int = None
self.first_come_first_served = True
self.station_subset_only_mode = False
self.debug = False
self.reattempts_disallowed = False
# variables
self.xs: Any = None
self.bs: Any = None
self.bs_built: Any = None
self.V_map: Any = None
self.V: Any = None
self.L_map: Any = None
self.L: Any = None
self.S: Any = None
self.st_mask: Any = None
self.A_groups: Any = None
self.objective_constraints: Dict[int, List[Any]] = dict()
self.forced_built_stations_constraints: List[Any] = []
def enter(self):
if self.model is None:
self.model = gp.Model("station_budget_soft_assign")
return self
def __enter__(self):
return self.enter()
def __exit__(self, a, b, c):
self.model.dispose()
def set_debug(self, debug: bool):
self.debug = debug
return self
def init_log(self, log: pd.DataFrame, max_allowed_budget: int):
self.log_original = log
self.log = log.copy()
self.max_allowed_budget = max_allowed_budget
if self.debug:
print("Max allowed budget:", max_allowed_budget)
return self
def init_settings(self, station_subset_only_mode: bool = False):
self.station_subset_only_mode = station_subset_only_mode
return self
def limit_station_distance(self, max_station_distance: float):
if max_station_distance < float('inf'):
if 'attempt_no' in self.log.columns:
drop_idx = self.log.loc[(self.log['attempt_no'] == 0) & (
self.log['station_distance'] > max_station_distance)].index
else:
drop_idx = self.log.loc[self.log['station_distance']
> max_station_distance].index
pre = len(self.log)
self.log.drop(drop_idx, inplace=True)
if self.debug:
print("Dropped", pre - len(self.log),
"rows due to station distance limit.")
self.max_station_distance = max_station_distance
return self
def add_variables(self):
self.V_map, self.V = codify(self.log['vehicle'])
self.L_map, self.L = codify(self.log['station'])
self.S = self.log['status'].tolist()
if self.debug:
print(f"Station mapping: 0 to {np.max(self.L)} (incl.) -",
{i: v for i, v in enumerate(self.L_map)})
self.st_mask = self.log['status'] == 1
self.A_groups = idx_groups(self.log, ['attempt_no'])
if self.debug:
for g in self.A_groups:
b = True
l = self.log.iloc[g]
l_e = self.log.iloc[g[::2]]
l_o = self.log.iloc[g[1::2]]
b = b and (l.index == l.index[0]).all()
b = b and (l_e['status'] == 1).all()
b = b and (l_o['status'] == -1).all()
b = b and (l_e.index == l_o.index).all()
if not b:
print("Error in A_groups")
utils.display(l)
if not self.station_subset_only_mode:
self.bs = self.model.addMVar(shape=len(self.L_map), vtype=GRB.INTEGER,
lb=0, ub=self.max_allowed_budget, name="b")
self.xs = self.model.addMVar(shape=len(self.log),
vtype=GRB.BINARY, name="x")
self.bs_built = self.model.addMVar(
shape=len(self.L_map), vtype=GRB.BINARY, name="bs_built")
return self
def add_common_constraints(self, first_come_first_served=True, progress=False):
self.first_come_first_served = first_come_first_served
xs_l = self.xs.tolist()
bs_l = self.bs.tolist() if not self.station_subset_only_mode else None
bs_built_l = self.bs_built.tolist()
# built binary constraints
if not self.station_subset_only_mode:
for i in range(len(self.L_map)):
self.model.addLConstr(
bs_built_l[i], GRB.LESS_EQUAL, bs_l[i], name=f"st-{self.L_map[i]}-built-lower")
self.model.addLConstr(bs_l[i], GRB.LESS_EQUAL, self.max_allowed_budget *
bs_built_l[i], name=f"st-{self.L_map[i]}-built-upper")
# arrdep constraints
for g in utils.progressify(self.A_groups, desc="Arrival/departure constraints", enabled=progress):
for i in range(0, len(g), 2):
self.model.addLConstr(xs_l[g[i]], GRB.EQUAL,
xs_l[g[i+1]], f"arrdep_xs_{g[i]}_and_{g[i+1]}")
# attempt constraints
for g in utils.progressify(self.A_groups, desc="Attempt constraints", enabled=progress):
self.model.addLConstr(
sum(self.xs[g[::2]].tolist()), GRB.LESS_EQUAL, 1, f"attempts-xs-of-{'-'.join(str(v) for v in g[::2].tolist())}_<=_1")
# self.model.addSOS(GRB.SOS_TYPE1, self.xs[g[::2]].tolist())
if self.station_subset_only_mode:
for i, l in utils.progressify(enumerate(self.L), desc="Budget constraints", total=len(self.L), enabled=progress):
self.model.addLConstr(xs_l[i], GRB.LESS_EQUAL, bs_built_l[l],
f"station-subset-only-mode-x_{i}-allowed-only-if-st-{self.L_map[l]}-built")
else:
# cumulative sums
cs = self.model.addMVar(shape=(len(self.log), len(self.L_map)),
vtype=GRB.INTEGER, name="c", lb=0, ub=self.max_allowed_budget)
cs_l = cs.tolist()
for j in range(len(self.L_map)):
self.model.addLConstr(cs_l[0][j], GRB.EQUAL, bs_l[j])
for i, l in utils.progressify(list(enumerate(self.L))[:-1], desc="Cumulative sums", total=len(self.L) - 1, enabled=progress):
for j in range(l):
self.model.addLConstr(cs_l[i+1][j], GRB.EQUAL, cs_l[i][j])
self.model.addLConstr(
cs_l[i+1][l], GRB.EQUAL, cs_l[i][l] - xs_l[i]*self.S[i])
for j in range(l+1, len(self.L_map)):
self.model.addLConstr(cs_l[i+1][j], GRB.EQUAL, cs_l[i][j])
# cumulative sum constraints
if first_come_first_served:
# signum variables
ss = self.model.addMVar(
shape=len(self.log), vtype=GRB.BINARY, name="s")
ss_l = ss.tolist()
for gg in utils.progressify(self.A_groups, desc="Cumulative sum constraints", enabled=progress):
g = gg[::2]
for i in range(0, len(g)):
loc = self.L[g[i]]
self.model.addLConstr(
ss_l[g[i]], GRB.LESS_EQUAL, sum(self.xs[g[:i+1]].tolist()), f"signum-{g[i]}-<=-group-of-xs-{'-'.join(str(v) for v in g[:i+1].tolist())}")
# self.model.addLConstr(
# xs_l[g[i]], GRB.LESS_EQUAL, ss_l[g[i]], f"x-{g[i]}<=signum-{g[i]}")
self.model.addLConstr(
ss_l[g[i]], GRB.LESS_EQUAL, cs_l[g[i]][loc], f"signum-{g[i]}-<=-cumulative-sum-at-{g[i]}-at-location-{loc}-station-{self.L_map[loc]}")
self.model.addLConstr(
cs_l[g[i]][loc], GRB.LESS_EQUAL, self.max_allowed_budget * ss_l[g[i]], f"cumulative-sum-at-{g[i]}-at-location-{loc}-station-{self.L_map[loc]}-<=-{self.max_allowed_budget}*signum")
return self
def add_disallow_reattempts_constraints(self, progress=False):
xs_l = self.xs.tolist()
bs_built_l = self.bs_built.tolist()
for g in utils.progressify(self.A_groups, desc="No reattempt constraints", enabled=progress):
g = g[::2]
for i1, i2 in zip(g[:-1], g[1:]):
self.model.addLConstr(xs_l[i2], GRB.LESS_EQUAL, 1 - bs_built_l[self.L[i1]],
f"no-reattempt-x{i2}-if-st-{self.L_map[self.L[i1]]}-of-x{i1}-built")
self.reattempts_disallowed = True
return self
def clear_objective(self, priority: int):
if priority in self.objective_constraints.keys():
for cstr in self.objective_constraints[priority]:
self.model.remove(cstr)
self.objective_constraints[priority] = []
return self
def primary_optim_coverage_for_given_budget(self, budget: int, upper_bound: Optional[int] = None):
assert not self.station_subset_only_mode, "Cannot optimize for given budget in station subset-only mode"
PRIORITY = 1
self.clear_objective(priority=PRIORITY)
obj = -1 * sum(self.xs[np.where(self.st_mask)[0]].tolist())
# -1 * to maximize
self.model.setObjectiveN(obj, index=0, priority=PRIORITY)
cstrs = []
if upper_bound is not None:
cstr = self.model.addLConstr(obj, GRB.GREATER_EQUAL, -1 * upper_bound,
f"primary-optim-coverage-for-given-budget__known-upper-bound-{upper_bound}")
cstrs.append(cstr)
cstr = self.model.addLConstr(
sum(self.bs.tolist()), GRB.LESS_EQUAL, budget, f"primary-optim-coverage-for-given-budget__sum-bs-<=-{budget}")
cstrs.append(cstr)
self.objective_constraints[PRIORITY] = cstrs
return self
def primary_optim_budget_for_full_coverage(self, progress=False):
PRIORITY = 1
self.clear_objective(priority=PRIORITY)
self.model.setObjectiveN(
sum(self.bs_built.tolist() if self.station_subset_only_mode else self.bs.tolist()), index=0, priority=PRIORITY)
cstrs = []
# attempt constraints
for g in utils.progressify(self.A_groups, desc="Attempt constraints", enabled=progress):
g = g[::2]
# model.addLConstr(sum(xs[g].tolist()), GRB.LESS_EQUAL, 1, "m2_attempts")
cstr = self.model.addLConstr(
sum(self.xs[g].tolist()), GRB.EQUAL, 1, f"primary-optim-budget-for-full-coverage__xs-of-{'-'.join(str(v) for v in g.tolist())}==1")
            cstrs.append(cstr)
self.objective_constraints[PRIORITY] = cstrs
return self
def secondary_optim_distance_sum(self):
PRIORITY = 0
self.clear_objective(priority=PRIORITY)
dists = self.log['station_distance'][self.st_mask].copy()
dists /= dists.sum()
obj = sum(
x * d for x, d in zip(self.xs[np.where(self.st_mask)[0]].tolist(), dists.tolist()))
self.model.setObjectiveN(obj, index=1, priority=PRIORITY)
return self
def secondary_optim_min_stations(self):
assert not self.station_subset_only_mode, "Secondary min number of stations objective is redundant in station subset-only mode"
PRIORITY = 0
self.model.setObjectiveN(
sum(self.bs_built.tolist()), index=1, priority=PRIORITY)
def secondary_optim_closest_to_dist(self, budget_distribution):
assert not self.station_subset_only_mode, "Cannot add secondary distance objective in station subset-only mode"
bdist = [budget_distribution[l] for l in self.L_map]
PRIORITY = 0
self.clear_objective(priority=PRIORITY)
cstrs = []
bdiff = self.model.addMVar(
shape=len(self.L_map), vtype=GRB.INTEGER, lb=-self.max_allowed_budget, ub=self.max_allowed_budget, name="budget-diff")
bdiff_l = bdiff.tolist()
cstrs.append(bdiff)
bdiff_abs = self.model.addMVar(
shape=len(self.L_map), vtype=GRB.INTEGER, lb=0, ub=self.max_allowed_budget, name="budget-diff-abs")
bdiff_abs_l = bdiff_abs.tolist()
cstrs.append(bdiff_abs)
bs_l = self.bs.tolist()
for i in range(len(self.L_map)):
cstr = self.model.addConstr(
bdiff_l[i] == bs_l[i] - bdist[i], f"secondary-optim-closest-to-dist__diff-of-{self.L_map[i]}")
cstrs.append(cstr)
cstr = self.model.addConstr(bdiff_abs_l[i] == gp.abs_(
bdiff_l[i]), f"secondary-optim-closest-to-dist__diff-abs-of-{self.L_map[i]}")
cstrs.append(cstr)
self.model.setObjectiveN(sum(bdiff_abs_l), index=1, priority=PRIORITY)
self.objective_constraints[PRIORITY] = cstrs
def force_build_stations(self, budget_distribution, only_on_max_distance_violation=False):
assert not self.station_subset_only_mode, "Cannot force build stations in station subset-only mode"
for cstr in self.forced_built_stations_constraints:
self.model.remove(cstr)
self.forced_built_stations_constraints = []
if budget_distribution is not None:
sts = [not not budget_distribution[l] for l in self.L_map]
bs_l = self.bs.tolist()
if not only_on_max_distance_violation:
for i in range(len(sts)):
if sts[i]:
cstr = self.model.addLConstr(
bs_l[i], GRB.GREATER_EQUAL, 1, f"station-{self.L_map[i]}-nonzero")
self.forced_built_stations_constraints.append(cstr)
else:
xs_l = self.xs.tolist()
stations_first = self.log.query('attempt_no == 0 and status == 1')[
'station'].reindex(self.log.index).to_numpy()
dist_mask = (self.log['station_distance']
> self.max_station_distance).to_numpy()
st_mask = self.st_mask.to_numpy()
for i in range(len(sts)):
if not sts[i]:
continue
ords = np.where(
(stations_first == self.L_map[i]) & dist_mask)[0]
for j in ords:
cstr = self.model.addLConstr(
bs_l[i], GRB.GREATER_EQUAL, xs_l[j], f"station-{self.L_map[i]}-nonzero-if-iloc-{j}")
self.forced_built_stations_constraints.append(cstr)
return self
def add_starter_solution(self, budget_distribution):
assert not self.station_subset_only_mode, "Adding starter solution not yet supported in station subset-only mode"
bs_start = [budget_distribution[l] for l in self.L_map]
self.model.setAttr(GRB.Attr.Start, self.bs.tolist(), bs_start)
if not self.reattempts_disallowed:
xs_start = sim.simulate(sim.PreprocessedLog.from_dataframe(self.log),
budget_distribution,
max_station=self.log_original['station'].max(),
max_station_distance=self.max_station_distance)
xs_start = (xs_start.reindex(self.log.index, fill_value=-1)
== self.log['station']).tolist()
self.model.setAttr(GRB.Attr.Start, self.xs.tolist(), xs_start)
return self
def check_solution_feasibility(self, budget_distribution):
bs_start = [budget_distribution[l] for l in self.L_map]
if self.station_subset_only_mode:
bs_built_l = self.bs_built.tolist()
for i, (b_var, b) in enumerate(zip(bs_built_l, bs_start)):
built_flag = 1 if b >= 1 else 0
self.model.addLConstr(
b_var, GRB.EQUAL, built_flag, f"b-built-{i}=={b}")
else:
bs_l = self.bs.tolist()
for i, (b_var, b) in enumerate(zip(bs_l, bs_start)):
self.model.addLConstr(b_var, GRB.EQUAL, b, f"b-{i}=={b}")
if not self.reattempts_disallowed:
xs_start = sim.simulate(sim.PreprocessedLog.from_dataframe(self.log),
budget_distribution,
max_station=self.log_original['station'].max(),
max_station_distance=self.max_station_distance)
xs_start = (xs_start.reindex(self.log.index, fill_value=-1)
== self.log['station']).tolist()
xs_l = self.xs.tolist()
for i, (x_var, x) in enumerate(zip(xs_l, xs_start)):
self.model.addLConstr(x_var, GRB.EQUAL, x, f"x-{i}=={x}")
def optimize(self, return_as_solution=True, return_simulation=False, compute_iis=False):
self.model.setParam(GRB.Param.MIPFocus, 2)
#self.model.setParam(GRB.Param.ConcurrentMIP, 2)
self.model.setParam(GRB.Param.NodeMethod, 1)
self.bs_built.setAttr(GRB.Attr.BranchPriority, 2)
if self.bs is not None:
self.bs.setAttr(GRB.Attr.BranchPriority, 1)
self.model.update()
print()
self.model.optimize()
print()
if self.model.getAttr(GRB.Attr.Status) == GRB.INFEASIBLE:
print("Infeasible!")
if compute_iis:
self.model.computeIIS()
for v in self.model.getVars():
iis = dict(
lb=v.getAttr(GRB.Attr.IISLB),
ub=v.getAttr(GRB.Attr.IISUB),
)
if np.any(list(iis.values())):
print("Variable", v.getAttr(GRB.Attr.VarName), "LB =", v.getAttr(
GRB.Attr.LB), "UB =", v.getAttr(GRB.Attr.UB))
for v in self.model.getConstrs():
iis = dict(
constr=v.getAttr(GRB.Attr.IISConstr)
)
if np.any(list(iis.values())):
print("Constraint", v.getAttr(GRB.Attr.ConstrName))
return
max_station = self.log_original['station'].max()
xs_opt = self.xs.X > 0.5
xs_log_full = self.log.iloc[xs_opt]
xs_log = xs_log_full\
.query('status == 1')['station']\
.reindex(self.log_original.index.drop_duplicates(), fill_value=-1)
if self.station_subset_only_mode:
dist_opt = ListBudgetDist([0] * (max_station + 1))
for l, b in enumerate(self.bs_built.X.astype(int)):
if b >= 0.99:
dist_opt[self.L_map[l]] = min(
self.max_allowed_budget, 1)
else:
bs_opt = self.bs.X.astype(int)
dist_opt = ListBudgetDist([0] * (max_station + 1))
for l, b in enumerate(bs_opt):
dist_opt[self.L_map[l]] = b
if self.first_come_first_served and not self.station_subset_only_mode:
sim_res = sim.simulate(
sim.PreprocessedLog.from_dataframe(self.log_original),
dist_opt, max_station=max_station,
max_station_distance=self.max_station_distance)
assert (sim_res == xs_log).all()
if return_as_solution:
result = optutils.Solution(
budget=sum(dist_opt),
distribution=dist_opt,
objective=sim.get_satisfied_charging_requests(xs_log))
else:
result = dist_opt
if return_simulation:
return result, xs_log
return result
def close(self):
self.model.dispose()
@dataclass(init=True, repr=True)
class ModelSpec:
log: Union[pd.DataFrame, trf.TrafficSpec]
budget_upper_limit: int
max_station_distance: float
opt_coverage_total_budget: Optional[int] = None
opt_budget_for_full_coverage: bool = False
secondary_opt_distance_sum: bool = False
secondary_opt_min_stations: bool = False
secondary_optim_closest_to_distribution: Optional[Union[List[int],
BudgetDist]] = None
force_build_stations_distribution: Optional[Union[List[int],
BudgetDist]] = None
force_build_stations_on_max_distance_violation_only: bool = False
station_subset_only_mode: bool = False
starter_solution_distribution: Optional[Union[List[int],
BudgetDist]] = None
test_starter_solution: bool = False
common_constraints: bool = True
first_come_first_served: bool = True
disallow_reattempts: bool = False
debug: bool = False
progress: bool = False
def __post_init__(self):
assert (
self.opt_coverage_total_budget is None) == self.opt_budget_for_full_coverage
assert self.opt_budget_for_full_coverage is None or self.budget_upper_limit >= self.opt_budget_for_full_coverage
assert self.test_starter_solution == False or self.starter_solution_distribution != None
nsecondaries = sum([int(b) for b in [
self.secondary_opt_distance_sum,
self.secondary_opt_min_stations,
self.secondary_optim_closest_to_distribution is not None,
]])
assert nsecondaries <= 1
assert not self.station_subset_only_mode or (
(self.test_starter_solution or self.starter_solution_distribution == None) and
self.opt_budget_for_full_coverage and
(self.secondary_opt_distance_sum or nsecondaries == 0) and
self.force_build_stations_distribution == None)
def build_model(spec: ModelSpec,
traffic_full: Optional[pd.DataFrame] = None,
charging_stations: Optional[pd.DataFrame] = None,
station_distances_mtx: Optional[List[List[float]]] = None) -> _ModelBuilder:
mb = _ModelBuilder()
mb.enter()
mb.set_debug(spec.debug)
max_allowed_budget = spec.budget_upper_limit
if spec.starter_solution_distribution is not None:
max_allowed_budget = min(spec.budget_upper_limit, sum(
spec.starter_solution_distribution))
log: pd.DataFrame
if isinstance(spec.log, trf.TrafficSpec):
if traffic_full is None:
raise Exception(
"traffic_full must be specified if spec.log is an instance of TrafficSpec")
elif charging_stations is None:
raise Exception(
"charging_stations must be specified if spec.log is an instance of TrafficSpec")
elif station_distances_mtx is None:
raise Exception(
"station_distances_mtx must be specified if spec.log is an instance of TrafficSpec")
else:
log = sim.traffic_to_log(trf.build_traffic(
spec.log, traffic_full, charging_stations, station_distances_mtx))
elif isinstance(spec.log, pd.DataFrame):
log = spec.log
else:
raise Exception(f"Unexpected value type {type(spec.log)} of spec.log")
mb.init_log(log, max_allowed_budget=max_allowed_budget)
mb.init_settings(station_subset_only_mode=spec.station_subset_only_mode)
mb.limit_station_distance(spec.max_station_distance)
mb.add_variables()
if spec.common_constraints:
mb.add_common_constraints(
first_come_first_served=spec.first_come_first_served, progress=spec.progress)
if spec.disallow_reattempts:
mb.add_disallow_reattempts_constraints(progress=spec.progress)
if spec.opt_coverage_total_budget is not None:
mb.primary_optim_coverage_for_given_budget(
spec.opt_coverage_total_budget)
elif spec.opt_budget_for_full_coverage:
mb.primary_optim_budget_for_full_coverage(progress=spec.progress)
if spec.secondary_opt_distance_sum:
mb.secondary_optim_distance_sum()
elif spec.secondary_opt_min_stations:
mb.secondary_optim_min_stations()
elif spec.secondary_optim_closest_to_distribution is not None:
mb.secondary_optim_closest_to_dist(
list(spec.secondary_optim_closest_to_distribution))
if spec.force_build_stations_distribution is not None:
mb.force_build_stations(list(spec.force_build_stations_distribution),
spec.force_build_stations_on_max_distance_violation_only)
if spec.test_starter_solution:
mb.check_solution_feasibility(spec.starter_solution_distribution)
elif spec.starter_solution_distribution is not None:
mb.add_starter_solution(list(spec.starter_solution_distribution))
return mb
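A hedged usage sketch (my addition): the exact column layout of the simulation log is defined elsewhere in this project, so log_df below is a placeholder, but the ModelSpec fields and the build/optimize/close calls mirror the code above.
# Sketch only; log_df is assumed to be a log DataFrame with the columns this module uses
# ('vehicle', 'station', 'status', 'attempt_no', 'station_distance').
spec = ModelSpec(
    log=log_df,
    budget_upper_limit=50,
    max_station_distance=500.0,
    opt_budget_for_full_coverage=True,   # minimize budget subject to full coverage
    secondary_opt_distance_sum=True,     # tie-break on total assignment distance
    progress=True,
)
mb = build_model(spec)
solution = mb.optimize()
mb.close()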
|
[
"janneumannprg@gmail.com"
] |
janneumannprg@gmail.com
|
63f339e09d599b1fd3c00a12ce49bac8419f2116
|
2e1fd3bfea9d61d50ee7ee12ef038277e6d4d8dc
|
/QuestionParser.py
|
76750b598d493ec230e5967e2637ea65475e255d
|
[] |
no_license
|
PatrickLevy/Cetus-Chatbot
|
da74b3b0f806e2dda126d0fbd9b93a83ef167875
|
8d2a931424c756214a50eed540d9fd7027a6605b
|
refs/heads/master
| 2021-05-10T10:43:16.275124
| 2018-03-15T02:28:44
| 2018-03-15T02:28:44
| 118,391,962
| 0
| 0
| null | 2018-03-14T03:36:09
| 2018-01-22T01:45:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
#!/usr/bin/env python3
###########################################
# Question Parser Function(s)
# @input: Raw user input string
# @output: parsed string that can be fed into our AI module
###########################################
def parseInput(inputString):
# Categories
greetingWord = 0
goodbyeWord = 0
questionWord = 0
questionMark = 0
period = 0
exclamationPoint = 0
# Definitions
greetingWords = ["hi", "hello", "what's up"]
goodbyeWords = ["bye", "later", "good night"]
questionWords = ["who", "what", "where", "when", "why", "how"]
# Check for greetings
if any(word in inputString for word in greetingWords):
greetingWord = 1
# Check for goodbyes
elif any(word in inputString for word in goodbyeWords):
goodbyeWord = 1
# Check for question words
if any(word in inputString for word in questionWords):
questionWord = 1
# Check for last character punctuation
lastChar = inputString[-1:]
if lastChar == "?":
questionMark = 1
if lastChar in ["."]:
period = 1
if lastChar in ["!"]:
exclamationPoint = 1
return {
"greetingWord": greetingWord,
"goodbyeWord": goodbyeWord,
"questionWord": questionWord,
"questionMark": questionMark,
"period": period,
"exclamationPoint": exclamationPoint,
}
###########################################
# Question Parser Function(s)
# @input: Raw user input string
# @output: parsed string that can be fed into our AI module
###########################################
def QuestionParser(inputString):
# categorize input string
parsed = parseInput(inputString)
return parsed
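An illustrative call (my addition), showing the flag dictionary the parser returns:
# Example usage only
if __name__ == "__main__":
    result = QuestionParser("where is the nearest library?")
    print(result)
    # {'greetingWord': 0, 'goodbyeWord': 0, 'questionWord': 1,
    #  'questionMark': 1, 'period': 0, 'exclamationPoint': 0}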
|
[
"mrpatricklevy@gmail.com"
] |
mrpatricklevy@gmail.com
|
d1d0da5432589877aed660cf28249bdf1dba9eec
|
3744c992514a0d6ac5ae57b4b28910774d40c124
|
/courses/urls.py
|
27b48c4ecf00b11be157f2ec168b333a40bc6d9d
|
[] |
no_license
|
inno-asiimwe/treehouse-django-rest
|
96a7e67780e439f7f234349784b020ad18d545b3
|
1f4e987a1aa5bf0c79ba19ee256a133e1a84db19
|
refs/heads/master
| 2021-09-02T00:38:47.974575
| 2017-12-29T12:55:07
| 2017-12-29T12:55:07
| 115,721,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
"""courses URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
path('api/v1/courses/', include('coursesapp.urls', namespace='courses')),
]
|
[
"innocent@Innocents-MacBook-Pro.local"
] |
innocent@Innocents-MacBook-Pro.local
|
869e1b7818c587f8a141d1475c01a4f6ee1fba91
|
94d1face7f63908f4776d9656d7c509f1f3d7605
|
/gradivo/regularization.py
|
f134dd6f8b74af4eb01946df51e4a9f68839ec60
|
[] |
no_license
|
BlazZupan/uozp-zapiski
|
be7f4e988f7c404db4108619c4fe73f57d726555
|
846ba038f355111f308d654451aba9f5d1126497
|
refs/heads/master
| 2023-06-08T03:01:08.629668
| 2023-05-28T06:33:28
| 2023-05-28T06:33:28
| 97,014,240
| 11
| 16
| null | 2023-05-28T06:33:29
| 2017-07-12T13:58:26
|
TeX
|
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.linear_model import Ridge, Lasso, LinearRegression
from sklearn.metrics import r2_score
# load the data, prepare it for sklearn
data = pd.read_excel("bodyfat.xlsx")
m = data.to_numpy()
X, y = m[:, 1:], m[:, 0]
labels = data.columns.values[1:]
# normalization
# think about why it has no effect with plain (unregularized) linear regression
# and why the cross-validated accuracy changes between raw and normalized data
# when we use regularization
X = X - X.mean(axis=0)
X /= X.std(axis=0)
# selection of the model
# model_type = LinearRegression
model_type, model_name = \
[(Ridge, "Ridge Regression"), (Lasso, "Lasso Regression")][0]
# cross validation
# for the start, to figure out if we are doing well
# and to compare between normalized and raw data
cv = KFold(n_splits=10, random_state=1, shuffle=True)
model = LinearRegression()
y_hat = cross_val_predict(model, X, y, cv=cv, n_jobs=-1)
r2 = r2_score(y, y_hat)
print(f"R2 for unregularized regression: {r2:.3}")
# choice of regularization parameters
alphas = np.logspace(-4, 5, 20)
# how does the accuracy change with regularization strength?
# we cross-validate to assess the accuracy and plot
# the accuracy(regularization strength) graph
r2s = []
for alpha in alphas:
model = model_type(alpha=alpha)
y_hat = cross_val_predict(model, X, y, cv=cv, n_jobs=-1)
r2 = r2_score(y, y_hat)
r2s.append(r2)
index_max = np.argmax(r2s)
best_alpha = alphas[index_max]
print(f"Best regularization strength: {best_alpha:.2}")
fig = plt.figure()
ax = plt.gca()
ax.plot(alphas, r2s, "o-")
ax.set_xscale('log')
plt.xlabel("regularization strength")
plt.ylabel("cross validated r2")
plt.savefig("fig-accuracy-vs-regularization.pdf")
plt.clf()
# select best-ranked features for specific degree of regularization
alpha = 0.1
model = model_type(alpha=alpha)
fitted = model.fit(X, y)
coef = np.abs(fitted.coef_)
k = 5 # number of best-rank features to select
ind = np.argpartition(coef, -k)[-k:]
# compute coefficients for the regularization path
cs = []
for alpha in alphas:
model = model_type(alpha=alpha)
fitted = model.fit(X, y)
cs.append(fitted.coef_)
res = np.stack(cs)
# plot the regularization path for selected features
fig = plt.figure()
ax = plt.gca()
for i in ind:
ax.plot(alphas, res[:, i], "o-", label=labels[i])
ax.legend(loc="upper right")
ax.set_xscale('log')
plt.xlabel("regularization strength")
plt.ylabel("feature weight")
plt.savefig("fig-regularization-path.pdf")
plt.clf()
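A small follow-up sketch (my addition): the script above computes best_alpha but then uses a fixed alpha = 0.1 for the feature-selection step; refitting at the cross-validated optimum would look like this.
# Sketch only: refit at the cross-validated best regularization strength
model = model_type(alpha=best_alpha)
fitted = model.fit(X, y)
print("coefficients at best alpha:", dict(zip(labels, fitted.coef_.round(3))))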
|
[
"blaz.zupan@fri.uni-lj.si"
] |
blaz.zupan@fri.uni-lj.si
|
f510a16e93d238d949d4fc2a786cc29033b9020a
|
e32154b11d9307855bfa3a28fd5ef909e6ea7fca
|
/Detox/python/spreadLowRankSets.py
|
e2230a3d970a8f9582356069147f606b9c54d8b9
|
[
"MIT"
] |
permissive
|
cpausmit/IntelROCCS
|
c5947cb038e338f7bcdf8f3d4d203b45ae0df670
|
526672963452ed33706468ea2dc6fb63510d9090
|
refs/heads/master
| 2020-04-15T17:28:04.978006
| 2017-03-24T12:17:01
| 2017-03-24T12:17:01
| 17,555,528
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,731
|
py
|
#===================================================================================================
# C L A S S
#===================================================================================================
import sys, os, subprocess, re, smtplib, shutil, string, statistics
import phedexApi
import dbInfoHandler
class SpreadLowRankSets:
def __init__(self,dbInfoHandler):
if not os.environ.get('DETOX_DB'):
raise Exception(' FATAL -- DETOX environment not defined: source setup.sh\n')
self.DETOX_USAGE_MAX = float(os.environ['DETOX_USAGE_MAX'])
self.sitePropers = {}
self.dataPropers = {}
self.setsToSites = {}
self.dbInfoHandler = dbInfoHandler
def assignSitePropers(self,sitePropers):
self.sitePropers = sitePropers
def assignPhedexSets(self,dataPropers):
self.dataPropers = dataPropers
def assignDatasets(self):
#list all datasets ordered by ranks
#pick T1 site that have enough space, best rank sites match first
#select datasets for T1 sites, can't increase T1 site by more than 1%
siteNames = []
siteAssigned = {}
ranksAssigned = {}
for site in sorted(self.sitePropers, cmp=self.compSites):
if not site.startswith("T1_"):
continue
siteNames.append(site)
self.setsToSites[site] = []
siteAssigned[site] = 0.0
ranksAssigned[site] = []
for dset in sorted(self.dataPropers, cmp=self.compDatasets):
setSize = self.dataPropers[dset].getTrueSize()
dsetRank = self.dataPropers[dset].getGlobalRank()
if dsetRank < 90:
continue
if self.dataPropers[dset].isOnT1Site():
continue
for site in siteNames:
taken = self.sitePropers[site].spaceTaken() + siteAssigned[site] + setSize
quota = self.sitePropers[site].siteSizeGb()
available = quota*self.DETOX_USAGE_MAX - taken
siteRank = self.sitePropers[site].siteRank()
if setSize < available and (siteAssigned[site] + setSize) < quota*0.03:
siteAssigned[site] = siteAssigned[site] + setSize
ranksAssigned[site].append(dsetRank)
self.setsToSites[site].append(dset)
for site in sorted(self.setsToSites, cmp=self.compSites):
if len(ranksAssigned[site]) > 0:
print " - Subscribing to " + site + " %d TBs"%(siteAssigned[site])
print " -- average assigned rank %d"%(statistics.mean(ranksAssigned[site]))
#print self.setsToSites[site]
self.submitTransferRequest(site,self.setsToSites[site])
break
def submitTransferRequest(self,site,datasets2trans):
if len(datasets2trans) < 1:
return
phedex = phedexApi.phedexApi(logPath='./')
# compose data for deletion request
check,data = phedex.xmlData(datasets=datasets2trans,instance='prod')
if check:
print " ERROR - phedexApi.xmlData failed"
print data
sys.exit(1)
# here the request is really sent
message = 'IntelROCCS -- Automatic Transfer Request'
check,response = phedex.subscribe(node=site,data=data,comments=message,group='AnalysisOps')
if check:
print " ERROR - phedexApi.subscribe failed"
print response
sys.exit(1)
respo = response.read()
matchObj = re.search(r'"id":"(\d+)"',respo)
reqid = int(matchObj.group(1))
rdate = (re.search(r'"request_date":"(.*?)"',respo)).group(1)
rdate = rdate[:-3]
self.dbInfoHandler.logRequest(site,datasets2trans,reqid,rdate,0)
#self.submitUpdateRequest(site,reqid)
def compDatasets(self,a,b):
ra = self.dataPropers[a].getGlobalRank()
rb = self.dataPropers[b].getGlobalRank()
if ra >= rb:
return -1
else:
return 1
def compSites(self,a,b):
ra = self.sitePropers[a].siteRank()
rb = self.sitePropers[b].siteRank()
if ra >= rb:
return 1
else:
return -1
def submitUpdateRequest(self,site,reqid):
# here we brute force deletion to be approved
phedex = phedexApi.phedexApi(logPath='./')
check,response = phedex.updateRequest(decision='approve',request=reqid,node=site,instance='prod')
if check:
print " ERROR - phedexApi.updateRequest failed - reqid="+ str(reqid)
print response
del phedex
|
[
"maxi@mit.edu"
] |
maxi@mit.edu
|
416a54616a634ee1d6f0d276de0e9ec4f0b850e3
|
61aa319732d3fa7912e28f5ff7768498f8dda005
|
/src/dev/Platform.py
|
4f28db39fe3d0a476989e9d783707802c3b19110
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
TeCSAR-UNCC/gem5-SALAM
|
37f2f7198c93b4c18452550df48c1a2ab14b14fb
|
c14c39235f4e376e64dc68b81bd2447e8a47ff65
|
refs/heads/main
| 2023-06-08T22:16:25.260792
| 2023-05-31T16:43:46
| 2023-05-31T16:43:46
| 154,335,724
| 62
| 22
|
BSD-3-Clause
| 2023-05-31T16:43:48
| 2018-10-23T13:45:44
|
C++
|
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class Platform(SimObject):
type = 'Platform'
abstract = True
cxx_header = "dev/platform.hh"
cxx_class = 'gem5::Platform'
system = Param.System(Parent.any, "system")
# for platforms using device trees to set properties of CPU nodes
def annotateCpuDeviceNode(self, cpu, state):
pass
|
[
"sroger48@uncc.edu"
] |
sroger48@uncc.edu
|
86333736952dac1e7a775e01237283db71ff2ff1
|
db6c8209405269d7d3d650ae992b6435d343df2a
|
/session8/block.py
|
bf267c29d206b3d9c43202951042ec2583d517f4
|
[
"BSD-2-Clause"
] |
permissive
|
macterra/pb-exercises
|
3b53322a08ad05d96baa671a56e4fb8a15b93d8a
|
d606433aa68e2b0806d429d1f388121e10910a10
|
refs/heads/master
| 2020-04-02T11:40:11.932788
| 2018-11-08T20:57:08
| 2018-11-08T20:57:08
| 154,400,770
| 0
| 0
|
BSD-2-Clause
| 2018-10-23T21:45:22
| 2018-10-23T21:45:22
| null |
UTF-8
|
Python
| false
| false
| 10,967
|
py
|
from io import BytesIO
from unittest import TestCase
from helper import (
double_sha256,
int_to_little_endian,
little_endian_to_int,
merkle_root,
)
GENESIS_BLOCK_HASH = bytes.fromhex('000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
TESTNET_GENESIS_BLOCK_HASH = bytes.fromhex('000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943')
class Block:
def __init__(self, version, prev_block, merkle_root, timestamp, bits, nonce, tx_hashes=None):
self.version = version
self.prev_block = prev_block
self.merkle_root = merkle_root
self.timestamp = timestamp
self.bits = bits
self.nonce = nonce
self.tx_hashes = tx_hashes
self.merkle_tree = None
@classmethod
def parse(cls, s):
'''Takes a byte stream and parses a block. Returns a Block object'''
# s.read(n) will read n bytes from the stream
# version - 4 bytes, little endian, interpret as int
version = little_endian_to_int(s.read(4))
# prev_block - 32 bytes, little endian (use [::-1] to reverse)
prev_block = s.read(32)[::-1]
# merkle_root - 32 bytes, little endian (use [::-1] to reverse)
merkle_root = s.read(32)[::-1]
# timestamp - 4 bytes, little endian, interpret as int
timestamp = little_endian_to_int(s.read(4))
# bits - 4 bytes
bits = s.read(4)
# nonce - 4 bytes
nonce = s.read(4)
# initialize class
return cls(version, prev_block, merkle_root, timestamp, bits, nonce)
def serialize(self):
'''Returns the 80 byte block header'''
# version - 4 bytes, little endian
result = int_to_little_endian(self.version, 4)
# prev_block - 32 bytes, little endian
result += self.prev_block[::-1]
# merkle_root - 32 bytes, little endian
result += self.merkle_root[::-1]
# timestamp - 4 bytes, little endian
result += int_to_little_endian(self.timestamp, 4)
# bits - 4 bytes
result += self.bits
# nonce - 4 bytes
result += self.nonce
return result
def hash(self):
'''Returns the double-sha256 interpreted little endian of the block'''
# serialize
s = self.serialize()
# double-sha256
sha = double_sha256(s)
# reverse
return sha[::-1]
def bip9(self):
'''Returns whether this block is signaling readiness for BIP9'''
# BIP9 is signalled if the top 3 bits are 001
# remember version is 32 bytes so right shift 29 (>> 29) and see if
# that is 001
return self.version >> 29 == 0b001
def bip91(self):
'''Returns whether this block is signaling readiness for BIP91'''
# BIP91 is signalled if the 5th bit from the right is 1
# shift 4 bits to the right and see if the last bit is 1
return self.version >> 4 & 1 == 1
def bip141(self):
'''Returns whether this block is signaling readiness for BIP141'''
# BIP91 is signalled if the 2nd bit from the right is 1
# shift 1 bit to the right and see if the last bit is 1
return self.version >> 1 & 1 == 1
def target(self):
'''Returns the proof-of-work target based on the bits'''
# last byte is exponent
exponent = self.bits[-1]
# the first three bytes are the coefficient in little endian
coefficient = little_endian_to_int(self.bits[:-1])
# the formula is:
# coefficient * 256**(exponent-3)
return coefficient * 256**(exponent - 3)
def difficulty(self):
'''Returns the block difficulty based on the bits'''
# note difficulty is (target of lowest difficulty) / (self's target)
# lowest difficulty has bits that equal 0xffff001d
lowest = 0xffff * 256**(0x1d - 3)
return lowest / self.target()
def check_pow(self):
'''Returns whether this block satisfies proof of work'''
# get the double_sha256 of the serialization of this block
sha = double_sha256(self.serialize())
# interpret this hash as a little-endian number
proof = little_endian_to_int(sha)
# return whether this integer is less than the target
return proof < self.target()
def validate_merkle_root(self):
'''Gets the merkle root of the tx_hashes and checks that it's
the same as the merkle root of this block.
'''
# reverse all the transaction hashes (self.tx_hashes)
hashes = [h[::-1] for h in self.tx_hashes]
# get the Merkle Root
root = merkle_root(hashes)
# reverse the Merkle Root
# return whether self.merkle root is the same as
# the reverse of the calculated merkle root
return root[::-1] == self.merkle_root
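A worked example (my addition) of the bits-to-target conversion that target() implements, using the bits value that also appears in the test vectors below:
def _bits_to_target_example():
    """Illustration only: reproduces Block.target() by hand for bits = e93c0118."""
    bits = bytes.fromhex('e93c0118')
    exponent = bits[-1]                             # 0x18 = 24
    coefficient = little_endian_to_int(bits[:-1])   # 0x013ce9
    target = coefficient * 256 ** (exponent - 3)
    print(hex(target))  # matches the value asserted in BlockTest.test_target below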
class BlockTest(TestCase):
def test_parse(self):
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.version, 0x20000002)
want = bytes.fromhex('000000000000000000fd0c220a0a8c3bc5a7b487e8c8de0dfa2373b12894c38e')
self.assertEqual(block.prev_block, want)
want = bytes.fromhex('be258bfd38db61f957315c3f9e9c5e15216857398d50402d5089a8e0fc50075b')
self.assertEqual(block.merkle_root, want)
self.assertEqual(block.timestamp, 0x59a7771e)
self.assertEqual(block.bits, bytes.fromhex('e93c0118'))
self.assertEqual(block.nonce, bytes.fromhex('a4ffd71d'))
def test_serialize(self):
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.serialize(), block_raw)
def test_hash(self):
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.hash(), bytes.fromhex('0000000000000000007e9e4c586439b0cdbe13b1370bdd9435d76a644d047523'))
def test_bip9(self):
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.bip9())
block_raw = bytes.fromhex('0400000039fa821848781f027a2e6dfabbf6bda920d9ae61b63400030000000000000000ecae536a304042e3154be0e3e9a8220e5568c3433a9ab49ac4cbb74f8df8e8b0cc2acf569fb9061806652c27')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.bip9())
def test_bip91(self):
block_raw = bytes.fromhex('1200002028856ec5bca29cf76980d368b0a163a0bb81fc192951270100000000000000003288f32a2831833c31a25401c52093eb545d28157e200a64b21b3ae8f21c507401877b5935470118144dbfd1')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.bip91())
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.bip91())
def test_bip141(self):
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.bip141())
block_raw = bytes.fromhex('0000002066f09203c1cf5ef1531f24ed21b1915ae9abeb691f0d2e0100000000000000003de0976428ce56125351bae62c5b8b8c79d8297c702ea05d60feabb4ed188b59c36fa759e93c0118b74b2618')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.bip141())
def test_target(self):
block_raw = bytes.fromhex('020000208ec39428b17323fa0ddec8e887b4a7c53b8c0a0a220cfd0000000000000000005b0750fce0a889502d40508d39576821155e9c9e3f5c3157f961db38fd8b25be1e77a759e93c0118a4ffd71d')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertEqual(block.target(), 0x13ce9000000000000000000000000000000000000000000)
self.assertEqual(int(block.difficulty()), 888171856257)
def test_check_pow(self):
block_raw = bytes.fromhex('04000000fbedbbf0cfdaf278c094f187f2eb987c86a199da22bbb20400000000000000007b7697b29129648fa08b4bcd13c9d5e60abb973a1efac9c8d573c71c807c56c3d6213557faa80518c3737ec1')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertTrue(block.check_pow())
block_raw = bytes.fromhex('04000000fbedbbf0cfdaf278c094f187f2eb987c86a199da22bbb20400000000000000007b7697b29129648fa08b4bcd13c9d5e60abb973a1efac9c8d573c71c807c56c3d6213557faa80518c3737ec0')
stream = BytesIO(block_raw)
block = Block.parse(stream)
self.assertFalse(block.check_pow())
def test_validate_merkle_root(self):
hashes_hex = [
'f54cb69e5dc1bd38ee6901e4ec2007a5030e14bdd60afb4d2f3428c88eea17c1',
'c57c2d678da0a7ee8cfa058f1cf49bfcb00ae21eda966640e312b464414731c1',
'b027077c94668a84a5d0e72ac0020bae3838cb7f9ee3fa4e81d1eecf6eda91f3',
'8131a1b8ec3a815b4800b43dff6c6963c75193c4190ec946b93245a9928a233d',
'ae7d63ffcb3ae2bc0681eca0df10dda3ca36dedb9dbf49e33c5fbe33262f0910',
'61a14b1bbdcdda8a22e61036839e8b110913832efd4b086948a6a64fd5b3377d',
'fc7051c8b536ac87344c5497595d5d2ffdaba471c73fae15fe9228547ea71881',
'77386a46e26f69b3cd435aa4faac932027f58d0b7252e62fb6c9c2489887f6df',
'59cbc055ccd26a2c4c4df2770382c7fea135c56d9e75d3f758ac465f74c025b8',
'7c2bf5687f19785a61be9f46e031ba041c7f93e2b7e9212799d84ba052395195',
'08598eebd94c18b0d59ac921e9ba99e2b8ab7d9fccde7d44f2bd4d5e2e726d2e',
'f0bb99ef46b029dd6f714e4b12a7d796258c48fee57324ebdc0bbc4700753ab1',
]
hashes = [bytes.fromhex(x) for x in hashes_hex]
stream = BytesIO(bytes.fromhex('00000020fcb19f7895db08cadc9573e7915e3919fb76d59868a51d995201000000000000acbcab8bcc1af95d8d563b77d24c3d19b18f1486383d75a5085c4e86c86beed691cfa85916ca061a00000000'))
block = Block.parse(stream)
block.tx_hashes = hashes
self.assertTrue(block.validate_merkle_root())
|
[
"jaejoon@gmail.com"
] |
jaejoon@gmail.com
|
96f3ef431cdaf6d5f096a36f286a606425eb3bc6
|
42657a8b67660fe8ec7ca92f3c58df68b619f4d5
|
/test/test_general.py
|
376f0036589f8f42475ff2ce25df99efe082a51d
|
[] |
no_license
|
bellothornus/Anagrams
|
e7fcfe05d3a3a8e704839869e443ca0daf892ad9
|
870399db74d409bdfbb68aaa7d6bb626c530a3fb
|
refs/heads/master
| 2023-01-07T07:38:17.968065
| 2020-10-29T18:57:50
| 2020-10-29T18:57:50
| 309,442,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from src.code import anagrams
def test_abba():
assert anagrams("abba",["aabb","abcd","bbaa","dada"]) == ["aabb","bbaa"]
assert anagrams("racer", ["crazer","carer","racar","caers","racer"]) == ["carer","racer"]
|
[
"damia_iv@yahoo.es"
] |
damia_iv@yahoo.es
|
639f0aa1ce6e1fa972d0216bbf03855c465f84b5
|
a6c337d435a4e76e71a18729ec26ee9c47160257
|
/shop/urls.py
|
2b668837010afa3c125c5a7729e675f1823e5a9d
|
[] |
no_license
|
sanuk2424/shoppingX
|
18a6deb5f5a598ed143604a551607036e10b229a
|
50e4ad9a0709920a5df4ba20c7ed777bb5f61d5c
|
refs/heads/master
| 2023-08-17T01:33:09.916970
| 2021-09-15T12:42:51
| 2021-09-15T12:42:51
| 406,473,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name="ShopHome"),
path('about/',views.about,name="AboutUs"),
path('contact/',views.contact,name="ContactUs"),
path('tracker/',views.tracker,name="Tracker"),
path('search/',views.search,name="seach"),
path('productview/',views.productView,name="ProductView"),
path('checkout/',views.checkout,name="Checkout"),
]
|
[
"ajay@gmail.com"
] |
ajay@gmail.com
|
5cbc74b264f6b7dd2691a294cb03e2bfdde072f9
|
fec34bb1349385356cf828615d376444ce3e8628
|
/F5 grafana/grafana_bados_dashboard/data/plugins/grafana-admdb-datasource/table.py
|
1fe9dc0f5a44ac2f58690f78b996016db6620c90
|
[] |
no_license
|
alexnimo/F5
|
633085dffefc54a24aaea45ca42562fcff1b8525
|
b03cca43941652e0d25b92e6deca7335549b903a
|
refs/heads/master
| 2020-03-17T13:51:26.849342
| 2018-11-19T18:56:35
| 2018-11-19T18:56:35
| 133,647,513
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,208
|
py
|
# how to extract table data from admdb in one shot, using python code injection
s = b"""
import os,json
def chch2a(chch):
# convert 2 chars encode, to ascii char
if chch[0]=='_':
return chch[1]
else:
return chr(int(chch,16))
def fixedDecodeURIComponent(vs):
# demangles encoded vs name
return "".join([chch2a(vs[i:i+2]) for i in range(0, len(vs), 2)])
def vs_table_query(base_path='/ts/dms/dos/bados/admdb',db='default'):
# return list of virtual servers (not demangled) in db (except for 'all' component)
return [x for x in os.listdir(os.path.join(base_path,db)) if x!='_a_l_l']
def cell_table_query(base_path,db,vs_raw,metric,sRate,columns,tsfiles,ts):
#ts=(from,to)
# returns {columns:[cola,colb],rows[[a,b]]}
for tsfile in reversed(tsfiles): # from last file
try:
with open(os.path.join(base_path,db,vs_raw,metric,str(sRate),tsfile+'.txt'),'r') as f:
try:
j=json.loads(f.read()+']}')
cols = j['properties']['columns']
d=dict(zip(cols,range(len(cols))))
tcol=d['time']
if columns!="*":
d=dict([(k,d[k]) for k in columns if k in d]) #col:idx
for v in reversed(j['values']):
if v[tcol]<=ts[1] and v[tcol]>=ts[0]:
return {'columns':d.keys(),'rows':[[v[i] for i in d.values()]]}
except:
pass
except:pass
return {'columns':[], 'rows':[]}
def table_query(base_path,db,sRate,tsfiles,ts,metric_columns):
vs_raw_list = vs_table_query(base_path,db)
ret = { "columns": [{"text":"vs"}], "rows":[], "type":"table" } #
for ixrow,vs_raw in enumerate(vs_raw_list):
row = [fixedDecodeURIComponent(vs_raw)]
for mc in metric_columns:
r=cell_table_query(base_path,db,vs_raw,mc[0],sRate,mc[1],tsfiles,ts)
if ixrow==0:
ret['columns']+=[{"text":mc[0]+'.'+c} for c in r['columns']]
if len(r['rows']) and len(r['rows'][0])==len(r['columns']):
row+=r['rows'][0]
else:
row+=[0]*len(r['columns'])
ret['rows'].append(row)
return json.dumps(ret)
#print vs_table_query()
#print cell_table_query('/ts/dms/dos/bados/admdb','default','2f_C_o_m_m_o_n2f_d_a_t_a','sig.tps',1000,["v0"],['1471346688000'],(1471347709000,1471347710000))
print table_query('/ts/dms/dos/bados/admdb','default',1000,['1471354880000'],(1471355030000,1471355030000),[['sig.tps','*']])
"""
import base64
import json
import requests
from requests.auth import HTTPBasicAuth
s = base64.b64encode(s)
#s=b'cHJpbnQgW3ggZm9yIHggaW4gcmFuZ2UoMTApXQ=='
#print(s.decode('ascii'))
print (requests.post('https://10.241.108.22/mgmt/tm/util/bash', verify=False, headers = {'content-type': 'application/json'},
auth=HTTPBasicAuth('admin', 'admin'),
data=json.dumps({"command":"run","utilCmdArgs":
"-c \"echo '"+s.decode('ascii')+"' |python -c 'import base64; exec(base64.b64decode(raw_input()))' \""
})).json())
|
[
"noreply@github.com"
] |
alexnimo.noreply@github.com
|
143515a5e1393feeb57384f0ef8f3d1395fee71f
|
d17fa59dc0b03dc71787e96473ce92fefbb7b65b
|
/simulator/test/test_accuracy.py
|
83083496e07d696f8a79374563698edc244257a6
|
[] |
no_license
|
Dunes/janitor
|
517d920ba7fd4a2d9ffe410682bf93593d645b04
|
f43be1c7e0b63d579e3351a27003ae25abbf3683
|
refs/heads/master
| 2021-01-21T05:02:27.056457
| 2015-03-16T14:35:43
| 2015-03-16T14:35:43
| 17,876,786
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
"""
Created on 20 Jun 2014
@author: jack
"""
import unittest
from accuracy import quantize
from decimal import Decimal
class AccuracyTest(unittest.TestCase):
def test_quantize_with_int(self):
self.assertEqual(Decimal("0"), quantize(0))
def test_quantize_with_float(self):
self.assertEqual(Decimal("0"), quantize(0.))
def test_quantize_rounds_down(self):
self.assertEqual(Decimal("0"), quantize(0.9))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"jxh576@cs.bham.ac.uk"
] |
jxh576@cs.bham.ac.uk
|
e7abb7d013122fac077153081102b85c3a002398
|
69390b62e85ecfe81b6d2b516c5177743222f136
|
/Server/puzzle2AI.py
|
5ace175226b6fcd86ef4039765d7d27a480a0562
|
[] |
no_license
|
mmiller5/Major-Tom
|
b4da537dda20221328ada8b40d7d61133bae5bb5
|
aad68a2ce7e0907b9f8d8e0e47019abf5b662f1c
|
refs/heads/master
| 2020-03-10T23:13:16.079679
| 2018-05-03T16:35:19
| 2018-05-03T16:35:19
| 129,636,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,503
|
py
|
# Rudimentary AI for the checkers puzzle
'''
Alpha-Beta Minimax code written by Eric Clinch
https://drive.google.com/drive/folders/1lLCArvni4VB6sL6wTP3qZv-B8_lX2kxX
modified by me
'''
from puzzle2Logic import *
import random
def MaxieMoveAlphaBeta(board, depth, alpha, beta):
assert(alpha < beta)
if depth == 0:
return (None, board.heuristic(board))
else:
bestMove = None
bestScore = -100
moves = board.legalMoves("Maxie")
random.shuffle(moves)
for move in moves:
board.makeMove(move, "Maxie")
moveScore = MinnieMoveAlphaBeta(board, depth - 1, alpha, beta)[1]
board.undoMove(move, "Maxie")
if moveScore > bestScore:
bestScore = moveScore
bestMove = move
alpha = max(alpha, bestScore)
if (alpha >= beta):
return (bestMove, bestScore)
return (bestMove, bestScore)
# same as Maxie, but maximizes Minnie's score by minimizing
# the board score
def MinnieMoveAlphaBeta(board, depth, alpha, beta):
assert(alpha < beta)
if depth == 0:
return (None, board.heuristic(board))
else:
bestMove = None
bestScore = 100
moves = board.legalMoves("Minnie")
random.shuffle(moves)
for move in moves:
board.makeMove(move, "Minnie")
moveScore = MaxieMoveAlphaBeta(board, depth - 1, alpha, beta)[1]
board.undoMove(move, "Minnie")
if moveScore < bestScore:
bestScore = moveScore
bestMove = move
beta = min(beta, bestScore)
if (alpha >= beta):
return (bestMove, bestScore)
return (bestMove, bestScore)
def getMove(board, depth, alpha=-100, beta=100):
move = MinnieMoveAlphaBeta(board, depth, alpha, beta)
print(move)
return move[0]
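# Hypothetical usage sketch -- the Board constructor below is assumed to come
# from puzzle2Logic (star-imported above) and is not defined in this file:
#   board = Board()
#   best_move = getMove(board, depth=4)   # the AI's move, computed as Minnie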
'''
def MaxieMoveWithHeuristics(board, depth, alpha, beta):
if board.gameOver:
return (None, 100) if board.won(Maxie) else (None, -100)
elif depth == 0:
return (None, board.heuristic(board))
else:
bestMove = None
bestScore = -100
for move in board.legalMoves("Maxie"):
board.makeMove(move, "Maxie")
_, moveScore = MinnieMoveWithHeuristics(board, depth - 1)
board.undoMove(move, "Maxie")
if moveScore > bestScore:
bestScore = moveScore
bestMove = move
alpha = max(alpha, bestScore)
if (alpha >= beta):
return (bestMove, bestScore)
return (bestMove, bestScore)
# same as Maxie, but maximizes Minnie's score by minimizing
# the board score
def MinnieMoveWithHeuristics(board, depth, alpha, beta):
if board.gameOver:
return (None, -100) if board.won(Minnie) else (None, 100)
elif depth == 0:
return (None, board.heuristic(board))
else:
bestMove = None
bestScore = 100
for move in board.legalMoves("Minnie"):
board.makeMove(move, "Minnie")
_, moveScore = MaxieMoveWithHeuristics(board, depth - 1)
board.undoMove(move, "Minnie")
if moveScore < bestScore:
bestScore = moveScore
bestMove = move
beta = min(beta, bestScore)
if (alpha >= beta):
return (bestMove, bestScore)
return (bestMove, bestScore)
'''
|
[
"mmiller5@andrew.cmu.edu"
] |
mmiller5@andrew.cmu.edu
|
440e26cfdf14cbcc4b30092206aeda748a17c387
|
cb596dea60590ef584d03e564203b0a6db3f4fad
|
/model/TextCNN-tf2.0/corpus/__init__.py
|
fd52b058393f2c15172caea407279db2fc4a3834
|
[] |
no_license
|
yooyisi/TOOLKIT
|
79cf7e2912e34c355887bcf610933f5362873446
|
040be8ba10dac6f3e5fcdaa99231bca426f2abf6
|
refs/heads/master
| 2023-02-23T22:14:01.982336
| 2023-02-12T17:02:31
| 2023-02-12T17:02:31
| 281,157,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
# -*- coding: utf-8 -*-
import re
from itertools import islice
import sys
import os
FILE_PATH = os.path.split(os.path.realpath(__file__))[0]
PARENT_PATH = os.path.dirname(FILE_PATH) + '/..'
sys.path.append(PARENT_PATH)
train_data_file = FILE_PATH + '/train.txt'
stop_word_file = FILE_PATH + '/stop_word.txt'
import ambiguitySegmenter as myjieba
KEEP_POS = {'cont': 'CONTINENT', 'nco': 'COUNTRY', 'npr': 'PROVINCE', 'na': 'CITY', 'naf': 'CITY', 'region': 'REGION',
'nregion': 'REGION',
'tjd': 'POI', 'hs': 'COMMERCIAL_ZONE',
'mt': 'TIME',
'nf': 'AIRPORT', 'np': 'AIRLINE', 'fc': 'SEATCLASS',
'pr': 'PRICE', 'tl': 'STAY',
'hb': '酒店', 'ht': '酒店', 'star': '酒店', 'hotelbed': '酒店'}
stopword_list = list(open(stop_word_file, "r").readlines())
stopword_list = [x.strip() for x in stopword_list]
# Given a text, all we need is the list of labels
def get_data_tag_pairs():
train_data, train_corpus = [], []
with open(train_data_file, 'r') as fp:
for line in islice(fp, 0, None):
line = preProcessing(line)
train_corpus.append(line)
pairs = line.split("\t")
text = pairs[1]
if type(text) is str:
text = text.decode('utf-8')
# if u'晚' in text:
# print text
word_list, pos_list = myjieba.lcut_with_tag(text) # cut_all=True
seg_list = []
for w, pos in zip(word_list, pos_list):
if w.encode('utf-8') in stopword_list:
continue
if pos in KEEP_POS:
if len(re.findall(u'tl', pos)) > 0 and len(re.findall(u'[晚宿]', w)) > 0:
seg_list.append('酒店')
else:
seg_list.append(KEEP_POS[pos])
else:
if type(w) is not str:
w = w.encode('utf-8')
seg_list.append(w)
words_str = " ".join(seg_list)
train_data.append((words_str, pairs[0], pairs[1]))
with open(train_data_file, 'w') as fout:
train_corpus = sorted(train_corpus)
fout.write('\n'.join(train_corpus))
return train_data
# Given a text, all we need is the list of labels
def get_format_text(sentence):
text = preProcessing(sentence)
if type(text) is str:
text = text.decode('utf-8')
word_list, pos_list = myjieba.lcut_with_tag(text) # cut_all=True
seg_list = []
for w, pos in zip(word_list, pos_list):
if w.encode('utf-8') in stopword_list:
continue
if pos in KEEP_POS:
seg_list.append(KEEP_POS[pos])
else:
if type(w) is not str:
w = w.encode('utf-8')
seg_list.append(w)
words_str = " ".join(seg_list)
return words_str
def preProcessing(str):
# remove special characters
str = str.strip("\n")
str = str.replace(" ", "")
return str
|
[
"kksjtu@gmail.com"
] |
kksjtu@gmail.com
|
25ad7ef802f7d87643df8b1cbedb050c10b17906
|
dae29b8e0045d72458420d87ba27658e3ecb34e3
|
/auchan/migrations/0004_auto__add_tag.py
|
fa5988ccf7c5d6bfa659bbce4447974b79eb58c3
|
[] |
no_license
|
Boussadia/osmscraper
|
c056641986881a020431e77ab945591eba3017ef
|
809c7e0ff31272a69aef480c8405e42799b6fa54
|
refs/heads/master
| 2021-01-10T00:59:52.044984
| 2013-07-11T14:21:46
| 2013-07-11T14:21:46
| 18,377,210
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,097
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table('auchan_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
))
db.send_create_signal('auchan', ['Tag'])
# Adding M2M table for field tags on 'Product'
db.create_table('auchan_product_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('product', models.ForeignKey(orm['auchan.product'], null=False)),
('tag', models.ForeignKey(orm['auchan.tag'], null=False))
))
db.create_unique('auchan_product_tags', ['product_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table('auchan_tag')
# Removing M2M table for field tags on 'Product'
db.delete_table('auchan_product_tags')
models = {
'auchan.brand': {
'Meta': {'object_name': 'Brand'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'auchan.category': {
'Meta': {'unique_together': "(('name', 'parent_category'),)", 'object_name': 'Category'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auchan.Category']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'unique': 'True', 'null': 'True'})
},
'auchan.history': {
'Meta': {'object_name': 'History'},
'availability': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'max_length': '9999999999999999999999L', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auchan.Product']"}),
'shipping_area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auchan.ShippingArea']", 'null': 'True'}),
'unit_price': ('django.db.models.fields.FloatField', [], {})
},
'auchan.product': {
'Meta': {'object_name': 'Product'},
'avantages': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auchan.Brand']", 'null': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auchan.Category']", 'null': 'True', 'symmetrical': 'False'}),
'complement': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'conservation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'max_length': '9999999999999999999999L', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'null': 'True'}),
'ingredients': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'package_measure': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'package_quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'package_unit': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'pratique': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'unique': 'True', 'null': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auchan.Tag']", 'null': 'True', 'symmetrical': 'False'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auchan.Unit']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'null': 'True'}),
'valeur_nutritionnelle': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'auchan.promotion': {
'Meta': {'object_name': 'Promotion'},
'after': ('django.db.models.fields.FloatField', [], {}),
'availability': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'before': ('django.db.models.fields.FloatField', [], {}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auchan.Product']", 'symmetrical': 'False'}),
'end': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'max_length': '9999999999999999999999L', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'null': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'unique': 'True', 'null': 'True'}),
'shipping_area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auchan.ShippingArea']", 'null': 'True'}),
'start': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'u'", 'max_length': '1'}),
'unit_price': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '9999', 'null': 'True'})
},
'auchan.shippingarea': {
'Meta': {'object_name': 'ShippingArea'},
'city_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shipping_area': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'postal_code': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'auchan.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'auchan.unit': {
'Meta': {'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['auchan']
|
[
"ahmed.boussadia@gmail.com"
] |
ahmed.boussadia@gmail.com
|
5f771db2b579a6ea0a948b9ee7063ad7506f7d31
|
7022af77ee07a298a10a652156c4fc16e221e3c8
|
/data_reader.py
|
b4fc213c2531c275a00e428a47b2ede69703abe5
|
[] |
no_license
|
AChelikani/ML-Trader
|
01c5afb6223ae2f54746e1440dfd20d31819bc1d
|
9bfb8445fd2819e2011da5d77c705307c2ec27bc
|
refs/heads/master
| 2021-04-24T19:15:20.026998
| 2018-03-12T02:45:10
| 2018-03-12T02:45:10
| 117,623,844
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
import csv
########################################
# Data Format
# Date,Open,High,Low,Close,Volume,Name
########################################
class DataReader(object):
def __init__(self, filename):
self.data = {}
self.outputs = {}
self.read_data(filename)
def read_data(self, filename):
with open(filename) as csvfile:
reader = csv.reader(csvfile, delimiter=",")
# Skip header row
next(reader, None)
for row in reader:
try:
cleaned_row = [row[0]] + list(map(float, row[1:-1]))
cleaned_row = cleaned_row[1:]
ticker = row[-1]
if ticker not in self.data:
self.data[ticker] = [cleaned_row]
else:
self.data[ticker].append(cleaned_row)
except:
continue
def gen_outputs(self):
for stock in self.data:
outputs = []
states = self.data[stock]
for x in range(1,len(states)):
outputs.append(states[x][0])
self.data[stock] = states[:-1]
self.outputs[stock] = outputs
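# Usage sketch (illustrative only; the ticker symbol "AAPL" is an assumption):
#   dr = DataReader("data/all_stocks_1yr.csv")
#   dr.gen_outputs()                 # outputs[ticker][i] is day i+1's Open price
#   rows = dr.data.get("AAPL", [])   # each row: [Open, High, Low, Close, Volume]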
if __name__ == "__main__":
dr = DataReader("data/all_stocks_1yr.csv")
|
[
"advith.chelikani@gmail.com"
] |
advith.chelikani@gmail.com
|
6970baf5352002daeeb173171126f4ad46c02acc
|
f8d9627ee93176af9e39e3855bf4760d7f9e1c0c
|
/PR_2/F23.py
|
d4ee47b297c49812b39ab41a640ffbf267c2a1d7
|
[] |
no_license
|
LizzieGri/Python
|
2d88f9c5935d51126e62c5b6ca59f4b20a7b87f3
|
59cd938ab2021ca85f22f200a9cc24a207d84495
|
refs/heads/master
| 2023-04-30T01:15:34.601398
| 2021-05-17T10:06:47
| 2021-05-17T10:06:47
| 342,169,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
def f23(arr):
for i in range(len(arr) - 1, -1, -1):
if None in arr[i]:
del arr[i]
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][len(arr[i]) - 2] == arr[i][len(arr[i]) - 1]:
del (arr[i][len(arr[i]) - 1])
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][1] == "да":
arr[i][1] = "Да"
if arr[i][1] == "нет":
arr[i][1] = "Нет"
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][0]:
num = arr[i][0]
n = num[0] + num[1] + num[2] + num[3] + "/" + num[5] + num[6] + "/" + num[8] + num[9]
arr[i][0] = n
for i in range(len(arr)):
numb = arr[i][2]
n1 = numb[0] + numb[1] + numb[2] + "-" + numb[3] + numb[4] + "-" + numb[5] + numb[6]
arr[i][2] = n1
return arr
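# Illustration of the reformatting above (made-up sample values):
#   arr[i][0]: a date such as "2021.05.17" becomes "2021/05/17"
#   arr[i][1]: "да"/"нет" are capitalised to "Да"/"Нет"
#   arr[i][2]: a 7-digit number such as "1234567" becomes "123-45-67"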
|
[
"65723977+LizzieGri@users.noreply.github.com"
] |
65723977+LizzieGri@users.noreply.github.com
|
f53ce2413e89b7f3a72c55e74bf1388a5ab5239b
|
8de2869bf284e98de6a9b424e90da5ab361d8aac
|
/book/_build/jupyter_execute/plotly/SankeyDiagram.py
|
e15bdc1717cb497625adb13deed7141e759fc4fc
|
[] |
no_license
|
hossainlab/dataviz
|
d37081da066bd88165aba41e2a8050ee17a1b131
|
e02b38827ab363f907b8c06c8f7ffc98a6a27a8f
|
refs/heads/master
| 2023-07-20T01:42:47.144900
| 2021-08-29T10:43:15
| 2021-08-29T10:43:15
| 291,055,389
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.offline as offline
offline.init_notebook_mode(connected=True)
# ## Sankey Diagrams
# Sankey diagrams are a useful way to display movement between locations/nodes.
#
# As an example, assume a person has set up a conference in his/her office and has attendees visiting from uptown, midtown and downtown in the city. They enter the office from either the front or rear lobby and then make their way to the conference room.
# #### Create a list for all the locations/nodes
# In[2]:
locations = ['Uptown',
'Midtown',
'Downtown',
'Front Lobby',
'Rear Lobby',
'Conference Room'
]
# #### Define the Sankey diagram
# The nodes will be the locations we have defined above. The ordering of these nodes matters as they will be assigned an index which will be used to define the links in the Sankey diagram.
#
# The source and target are effectively the nodes at either end of the link. The value represents the weight of the link. So taking the first index of each list, we get (0, 3, 2) - this means 2 people went from Uptown to the Front Lobby.
# In[3]:
data = go.Sankey(node = dict(label = locations),
link = dict(source = [0, 0, 1, 1, 2, 3, 4],
target = [3, 4, 3, 4, 3, 5, 5],
value = [2, 2, 4, 1, 5, 9, 3]
)
)
# In[4]:
layout = dict(title = 'Basic Sankey Diagram',
font = dict(size = 12)
)
# #### Plot the diagram
# We observe that 2 of the attendees seem to have lost their way from the front lobby to the conference room
# In[5]:
fig = dict(data=[data],
layout=layout)
offline.iplot(fig)
# #### Using a real dataset
# This dataset tracks the movement of refugees aiming to enter Australia from their countries of origin to the refugee camps in Manus and Nauru and beyond.
#
# Download the dataset here: https://github.com/plotly/dash-app-datasets/blob/master/refugee-movement.csv
# In[6]:
import pandas as pd
data = pd.read_csv('datasets/refugee_movement.csv')
data
# #### Plot the Sankey diagram
# Format the nodes:
# * <b>pad</b> determines the amount of padding between the nodes
# * <b>thickness</b> sets the width of the node
#
# The 'Node, Label' field is meant to be a list of all the nodes, but it contains several NaN values in the dataframe. We drop those values when using it in our diagram
# In[7]:
data_trace = go.Sankey(node = dict(pad = 10,
thickness = 30,
label = data['Node, Label'].dropna(),
color = data['Color']),
link = dict(source = data['Source'],
target = data['Target'],
value = data['Value'])
)
# In[8]:
layout = dict(title = "Refugee movement through Manus and Nauru")
# In[9]:
fig = dict(data=[data_trace],
layout=layout)
offline.iplot(fig)
# In[ ]:
|
[
"work.jubayer@gmail.com"
] |
work.jubayer@gmail.com
|
d3971377eba99dc7b8add8259fff3e9af72df4b8
|
d3aa23e994e323d10390e46e504b4da9a79eab98
|
/batdata/vndData/vnd.py
|
9804a6b8ed9058ed0bdb37eea340f26a3d3a490f
|
[
"MIT"
] |
permissive
|
buiquocanh1991/batdata
|
c02bf92bce1e4852d962289d8b824762dae1ca79
|
772f7d7c1ca340f46ad8f7404099043f422de352
|
refs/heads/master
| 2020-04-23T14:29:00.900492
| 2019-02-15T11:41:18
| 2019-02-15T11:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
import datetime as dt
from ..models import CurrDataModel, HistDataModel
from .taskManagement import createTasks, runTasks
from .utils import fieldsConversion, tickersConversion, VndDate
class Vnd:
"""
Wrapper class to handle inputs and present output
"""
def __init__(self, defaultFormat="df"):
self.defaultFormat = defaultFormat
def hist(self, tickers, fields, fromDate=None, toDate=None, **overrides):
"""
Getting historical data.
:tickers: string or list of string
:fields: string or list of string
:fromDate: 'yyyymmdd' string or python datetime object, default value is 20 days prior toTime
:toDate: 'yyyymmdd' string or python datetime object, default value is today
"""
tickers = tickersConversion(tickers) # convert tickers to string
fields = fieldsConversion(fields) # convert fields to list of fields
# Handle date format and generate default date
if toDate is None:
toDate = dt.datetime.today()
toDate = VndDate(toDate)
if fromDate is None:
fromDate = toDate - dt.timedelta(days=20)
fromDate = VndDate(fromDate)
tasks = createTasks("hist", tickers, fields, fromDate, toDate, **overrides)
data = runTasks(tasks)
# TODO: implement overrides
return HistDataModel(data)
def curr(self, tickers, fields, **overrides):
"""
        Getting current data.
:tickers: string or list of string
:fields: string or list of string
"""
tickers = tickersConversion(tickers) # convert tickers to string
fields = fieldsConversion(fields) # convert fields to list of fields
tasks = createTasks("current", tickers, fields, **overrides)
data = runTasks(tasks)
# TODO: implement overrides
return CurrDataModel(data)
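# Hypothetical usage sketch (the ticker and field names below are assumptions,
# not taken from this module):
#   vnd = Vnd()
#   hist_data = vnd.hist("VNM", ["close"], fromDate="20190101")
#   curr_data = vnd.curr(["VNM", "VIC"], "price")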
|
[
"chulucninh09@gmail.com"
] |
chulucninh09@gmail.com
|
371146f44c358eebffb2be0331ba40c58ef4a1f1
|
757868e5a8243a26c7babf971aee3a01b770c2d9
|
/venv/Scripts/pip-script.py
|
4165d313f5d53c920c64b897312fdffff8ee067e
|
[] |
no_license
|
Aklmenrah/streamlit
|
fcfc33f718357507633eecf38f73148e362ed9da
|
b32f7bf6237282e5805566f90114630e46bc0d4f
|
refs/heads/master
| 2022-11-06T21:30:00.920688
| 2020-07-06T14:35:22
| 2020-07-06T14:35:22
| 273,005,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#!C:\Users\aklme\PycharmProjects\predict-schema2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"noreply@github.com"
] |
Aklmenrah.noreply@github.com
|
05597be72fcb1415ea763ef46929da51ad09d779
|
8ad8c7b4a45902893935d3d5240816b9ce0ce4ea
|
/Mission_to_Mars/scrape_mars.py
|
f69c2f999624718f2d492c054d592f36d2e8dd82
|
[] |
no_license
|
melcardenas28/Web-Scraping-HW---Mission-to-Mars
|
e4304c8e27fe0c7c899ecec2f4439d2565af60b2
|
885e54b3fc15c5373a3e27521e5de9a6e1f19671
|
refs/heads/main
| 2023-07-13T20:47:06.309834
| 2021-08-15T22:25:05
| 2021-08-15T22:25:05
| 396,160,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,962
|
py
|
from bs4 import BeautifulSoup as soup
from splinter import Browser
import pandas as pd
import datetime as dt
from webdriver_manager.chrome import ChromeDriverManager
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
def mars_news(browser):
url = 'https://redplanetscience.com/'
browser.visit(url)
browser.is_element_present_by_css("ul.item_list li.slide", wait_time=0.5)
html = browser.html
news_soup = soup(html, "html.parser")
try:
slide_elem = news_soup.select_one("div.list_text")
slide_elem.find("div", class_ = "content_title")
news_title = slide_elem.find("div", class_ = "content_title").get_text()
news_p = slide_elem.find("div", class_ = "article_teaser_body").get_text()
except AttributeError:
return None, None
return news_title, news_p
#JPL Mars Space Images - Featured Image
def featured_image(browser):
featured_image_url = 'https://spaceimages-mars.com'
browser.visit(featured_image_url)
full_image_elem = browser.find_by_tag("button")[1]
full_image_elem.click()
browser.is_element_present_by_text("more info", wait_time=1)
more_info_element = browser.find_link_by_partial_text("more info")
more_info_element.click()
# Mars Facts
# more_info_url = 'https://galaxyfacts-mars.com'
# browser.visit(more_info_url)
# table = pd.read_html(more_info_url)
# df = table[0]
# df.head()
def mars_facts():
try:
df= pd.read_html("https://galaxyfacts-mars.com")[0]
except BaseException:
return None
df.columns = ['Description', 'Mars', 'Earth']
df.set_index('Description', inplace= True)
return df.to_html(classes= "table table-striped")
# Mars Hemispheres
def hemisphere(browser):
# Visit the USGS Astrogeology Science Center Site
url = 'https://marshemispheres.com/'
browser.visit(url)
hemisphere_image_urls = []
# Get a List of All the Hemisphere
links = browser.find_by_css('a.product-item img')
# Next, loop through those links, click the link, find the sample anchor, return the href
for i in range(len(links)):
hemisphere = {}
# We have to find the elements on each loop to avoid a stale element exception
browser.find_by_css('a.product-item img')[i].click()
# Next, we find the Sample image anchor tag and extract the href
sample_elem = browser.links.find_by_text('Sample').first
hemisphere['img_url'] = sample_elem['href']
# Get Hemisphere title
hemisphere['title'] = browser.find_by_css('h2.title').text
# Append hemisphere object to list
hemisphere_image_urls.append(hemisphere)
# Finally, we navigate backwards
browser.back()
return hemisphere_image_urls
# Helper Function
def scrape_hemisphere(html_text):
hemisphere_soup = soup(html_text, "html.parser")
try:
title_element = hemisphere_soup.find("h2", class_="title").get_text()
sample_element = hemisphere_soup.find("a", text="Sample").get("href")
except AttributeError:
title_element = None
sample_element = None
hemisphere = {
"title": title_element,
"img_url": sample_element
}
return hemisphere
# Main Web Scraping Bot
def scrape_all():
# executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', executable_path = 'chromedriver', headless=True)
news_title, news_p = mars_news(browser)
img_url = featured_image(browser)
    facts = mars_facts()
hemisphere_image_urls = hemisphere(browser)
data = {
"news_title": news_title,
"news_p": news_p,
"featured_image": img_url,
"facts": table,
"hemispheres": hemisphere_image_urls,
}
browser.quit()
return data
if __name__ == "__main__":
print(scrape_all())
|
[
"mel.cardenas28@gmail.com"
] |
mel.cardenas28@gmail.com
|
de0f677968e9f4697dd7adc64171b4ecddd51055
|
dbc335168eb6fcfe682d64f0f5ee1cb947443a40
|
/scripts/train.py
|
24fd129e7dfd0c95fdcaee40fe8698fc93f6f4ff
|
[
"MIT"
] |
permissive
|
iampawansingh/Active_Learning_in_NLP
|
4bbc96925f3ab04525ffd90bc748dc12686e6dcc
|
5d1adb3241d3dd8c34fc7aea52b6798beaeb9962
|
refs/heads/main
| 2023-05-04T03:51:00.339546
| 2021-05-19T07:39:26
| 2021-05-19T07:39:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,384
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 14:23:30 2021
Script to train simpletransformer model
@author: Abinaya Mahendiran
"""
from pathlib import Path
import numpy as np
import pandas as pd
# Import necessary libraries
import torch
import torch.nn as nn
from simpletransformers.classification import ClassificationModel
from sklearn.metrics import accuracy_score
from scripts import config
from scripts.config import logger
class NewsClassification:
def __init__(self):
self.model_name = config.MODEL_NAME
self.model_type = config.MODEL_TYPE
self.train_data = pd.read_csv(Path(config.DATA_DIR, "train.csv"))
self.test_data = pd.read_csv(Path(config.DATA_DIR, "test.csv"))
self.cuda = torch.cuda.is_available()
self.model_args = config.MODEL_ARGS
self.labels = config.LABELS
def preprocess_data(self, data: object, column_name: str) -> object:
"""
Perform preprocessing on the text data
Parameters
----------
data : object
dataframe.
column_name : str
name of the column in the dataframe
Returns
-------
object
pre-processed dataframe.
"""
data.rename(columns={"Unnamed: 0": "idx"}, inplace=True)
if column_name == "text":
data[column_name] = data[column_name].str.lower()
if column_name == "label":
data[column_name] = data[column_name].apply(int) - 1
data.rename(columns={"label": "labels"}, inplace=True)
return data
def split_data(self, data: object, random_seed: int) -> (object, object):
"""
Split the dataset into train and eval
Parameters
----------
data : object
dataframe containing training data.
random_seed : int
integer to set the random seed
Returns
-------
(object, object)
train split, eval split.
"""
np.random.seed(random_seed)
train_idx = np.random.choice(
data.index, size=int(data.shape[0] * config.TEST_SPLIT), replace=False
)
valid_idx = set(data.index) - set(train_idx)
train_data = data[data.index.isin(train_idx)]
eval_data = data[data.index.isin(valid_idx)]
return (train_data, eval_data)
def train(self, train_data: object, eval_data: object) -> object:
"""
Create and train the chosen model based on the args
Parameters
----------
train_data : object
train split of the train_data.
eval_data : object
validation split of the train_data.
Returns
-------
object
model.
"""
# Create a ClassificationModel
model = ClassificationModel(
self.model_name,
self.model_type,
args=self.model_args,
use_cuda=self.cuda,
num_labels=len(self.labels) - 1,
)
# Train the model
model.train_model(train_df=train_data, eval_df=eval_data, accuracy=accuracy_score)
return model
def load_model(self, model_type: str) -> object:
"""
Load the specified model
Parameters
----------
model_type : str
path or model type to be loaded.
Returns
-------
object
model.
"""
model = ClassificationModel(
self.model_name,
model_type,
args=self.model_args,
use_cuda=self.cuda,
num_labels=len(self.labels) - 1,
)
return model
def format_output(self, predictions: object, raw_output: object) -> object:
"""
Format the output to the required format for annotation
Parameters:
----------
predictions : object
probabilities.
raw_output : object
logits.
Returns:
-------
object
Modified dataframe in the required format
"""
# Convert logits to labels
sfm = nn.Softmax(dim=1)
raw_output_tensor = torch.from_numpy(raw_output)
annotate_class_prob = sfm(raw_output_tensor)
max_prob = torch.max(annotate_class_prob, dim=1)
annotate_class_prob = annotate_class_prob.numpy()
max_prob = max_prob.values.numpy()
# Reshape the data
annotate_df_with_pred = self.test_data
probabilities = pd.DataFrame(
annotate_class_prob, columns=["prob_0", "prob_1", "prob_2", "prob_3"]
)
annotate_df_with_pred = pd.concat([annotate_df_with_pred, probabilities], axis=1)
annotate_df_with_pred["max_prob"] = max_prob
annotate_df_with_pred["label_pred"] = predictions
annotate_df_with_pred["annotated_labels"] = ""
annotate_df_with_pred["sampling_method"] = ""
return annotate_df_with_pred
def main():
"""
Run the news classification model
Returns
-------
None.
"""
# Create classification object
news_model = NewsClassification()
logger.info("News classification model instantiated")
# Preprocess and split data
data = news_model.preprocess_data(news_model.train_data, "text")
logger.info("Train data is pre-processed")
train_data, eval_data = news_model.split_data(data, config.RANDOM_SEED)
logger.info("Data is split")
# Train model
# train_model = news_model.train(train_data, eval_data)
logger.info("Model is trained")
# Load model from the best model directory
loaded_model = news_model.load_model(config.BEST_MODEL_SPEC_DIR)
logger.info("Model is loaded")
# Eval model
model_result, model_outputs, wrong_predictions = loaded_model.eval_model(
eval_data, accuracy=accuracy_score
)
logger.info("Model is evaluated")
# Prediction
news_model.test_data = news_model.preprocess_data(news_model.test_data, "text")
predictions, raw_outputs = loaded_model.predict(news_model.test_data.text.values.tolist())
logger.info("Predictions completed")
# Format output
annotate_data = news_model.format_output(predictions, raw_outputs)
annotate_data.to_csv(Path(config.DATA_DIR, "annotate_data.csv"))
if __name__ == "__main__":
main()
|
[
"abinaya.m02@mphasis.com"
] |
abinaya.m02@mphasis.com
|
a1bd9482ac29f6bb873cef722bb8e6642378cdfc
|
5749de379939a7a39abb6598f323e401a853a781
|
/covid_crowd_management/asgi.py
|
0f514f9424ce23a678cdc479f6d7e19969c1db50
|
[] |
no_license
|
Team-VSUR/COVID-19-Crowd-Management-Website
|
f109ef50dea1338b653a00f653b79b42a9bcf6e4
|
a21b31783ffd59432199c7cd2f20c424004fd385
|
refs/heads/master
| 2022-07-19T17:52:24.463903
| 2020-05-19T18:59:45
| 2020-05-19T18:59:45
| 263,388,309
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
"""
ASGI config for covid_crowd_management project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'covid_crowd_management.settings')
application = get_asgi_application()
|
[
"59662860+vanshamb2601@users.noreply.github.com"
] |
59662860+vanshamb2601@users.noreply.github.com
|
f32bb5d29a90efe2a350355d236f78799ead02cb
|
358c55a24b566a84f86e33dce614bff9634769ba
|
/blog/migrations/0002_auto_20190903_1022.py
|
ce434bf5ce3b36ab6cd0bc810e98e65c440ae451
|
[] |
no_license
|
farruhcool/blog
|
384e1731c068c5512271f3e7af62946fb9b16a11
|
d6ccd20f3f68c987c5c120475ebc51aabe1b0287
|
refs/heads/master
| 2023-05-04T22:55:38.924975
| 2019-09-03T05:30:08
| 2019-09-03T05:30:08
| 203,350,678
| 0
| 0
| null | 2023-04-21T20:36:30
| 2019-08-20T10:08:15
|
Python
|
UTF-8
|
Python
| false
| false
| 730
|
py
|
# Generated by Django 2.2.4 on 2019-09-03 05:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('slug', models.SlugField()),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, related_name='posts', to='blog.Tag'),
),
]
|
[
"farruhcool1993@gmail.com"
] |
farruhcool1993@gmail.com
|
b09867d33bc0a96dd11eeb77d0555f2d7ab5a8b4
|
29aa518b2c07a138ead231296dcd1b25a0b057fa
|
/venv/Scripts/pip3-script.py
|
20f2d462d04d99366d1f63828ebae57b62ebc21c
|
[] |
no_license
|
anabanslo/heroku-practise
|
d9f8094621f80581af61356afe124e7fc35f6d57
|
764b8c2df5ae22cad89c35728bc37221b5153bbc
|
refs/heads/master
| 2023-05-10T19:10:45.058420
| 2020-03-25T19:45:02
| 2020-03-25T19:45:02
| 250,073,747
| 0
| 0
| null | 2023-05-02T18:43:24
| 2020-03-25T19:35:54
|
Python
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!C:\Users\Anaba\PycharmProjects\heroku-practise\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"anabananaslo@hotmail.com"
] |
anabananaslo@hotmail.com
|
6465dc787a23d16a3e2c1cf22c9dc6de61ba0a90
|
ba9485e8ea33acee24dc7bd61049ecfe9f8b8930
|
/aoj/itp18b.py
|
d39fd25df763c61ab9e2475985a6549280301003
|
[] |
no_license
|
diohabara/competitive_programming
|
a0b90a74b0b923a636b9c82c75b690fef11fe8a4
|
1fb493eb44ce03e289d1245bf7d3dc450f513135
|
refs/heads/master
| 2021-12-11T22:43:40.925262
| 2021-11-06T12:56:49
| 2021-11-06T12:56:49
| 166,757,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
while (True):
x = input()
ans = 0
for i in range(len(x)):
ans += int(x[i])
if ans == 0:
break
print(ans)
|
[
"diohabara@gmail.com"
] |
diohabara@gmail.com
|
13e6b9a157b45b00f46e46154a3f1a5828b0fb3c
|
a1c34f7cbc3f522213e3b2180ca2ed10b698651e
|
/operators/api/urls.py
|
ecd5c3c290bd1a14eb5fdf475a7b5a0551a9b495
|
[] |
no_license
|
ulugbek1999/ncd
|
c9461c1dc8ee2f7ba084ba54f5320d65c44ac335
|
d976ee5a77f5ceeaa3fd5e2acf1805667141e0be
|
refs/heads/master
| 2022-08-26T12:06:30.024540
| 2020-03-07T10:33:47
| 2020-03-07T10:33:47
| 202,854,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
from django.urls import path, include
from operators.api.operator1 import views as op1
from operators.api.operator2 import views as op2
from operators.api.operator3 import views as op3
from operators.api.operator4 import views as op4
operator1_patterns = [
path('employee/create/', op1.EmployeeCreateAPIView.as_view(), name='api.operator.emplyee.1')
]
operator2_patterns = [
path('employee/update/<int:id>/', op2.EmployeeUpdateAPIView.as_view(), name='api.operator.emplyee.2')
]
operator3_patterns = [
path('employee/update/<int:id>/', op3.EmployeeUpdateAPIView.as_view(), name='api.operator.emplyee.3')
]
operator4_patterns = [
path('employee/update/<int:id>/', op4.EmployeeUpdateAPIView.as_view(), name='api.operator.emplyee.4')
]
urlpatterns = [
path('op1/', include(operator1_patterns)),
path('op2/', include(operator2_patterns)),
path('op3/', include(operator3_patterns)),
path('op4/', include(operator4_patterns)),
]
|
[
"kayrat.nazov@gmail.com"
] |
kayrat.nazov@gmail.com
|