hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5861890f40c195d9d9cd8464fdd3da892466f679 | 8,080 | py | Python | python/mxnet/context.py | feevos/incubator-mxnet | 275378a49a6035fd5bdead4a74ac36b6070295a7 | [
"Apache-2.0"
] | null | null | null | python/mxnet/context.py | feevos/incubator-mxnet | 275378a49a6035fd5bdead4a74ac36b6070295a7 | [
"Apache-2.0"
] | null | null | null | python/mxnet/context.py | feevos/incubator-mxnet | 275378a49a6035fd5bdead4a74ac36b6070295a7 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Context management API of mxnet."""
from __future__ import absolute_import
import threading
import warnings
from .base import classproperty, with_metaclass, _MXClassPropertyMetaClass
# initialize the default context in Context
Context._default_ctx.value = Context('cpu', 0)
def cpu(device_id=0):
    """Return a CPU context.

    Shorthand for ``Context('cpu', device_id)``.  For most operations the
    default context, when none is specified, is ``cpu()``.

    Examples
    ----------
    >>> with mx.cpu():
    ...     cpu_array = mx.nd.ones((2, 3))
    >>> cpu_array.context
    cpu(0)
    >>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu())
    >>> cpu_array.context
    cpu(0)

    Parameters
    ----------
    device_id : int, optional
        Device id.  Not needed for CPU; accepted only so the signature
        mirrors :func:`gpu`.

    Returns
    -------
    context : Context
        The corresponding CPU context.
    """
    # Device id is passed through unchanged; the Context class ignores it
    # for CPU devices.
    ctx = Context('cpu', device_id)
    return ctx
def cpu_pinned(device_id=0):
    """Return a CPU pinned-memory context.

    Shorthand for ``Context('cpu_pinned', device_id)``.  Copying from CPU
    pinned memory to GPU is faster than copying from pageable CPU memory.

    Examples
    ----------
    >>> with mx.cpu_pinned():
    ...     cpu_array = mx.nd.ones((2, 3))
    >>> cpu_array.context
    cpu_pinned(0)
    >>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu_pinned())
    >>> cpu_array.context
    cpu_pinned(0)

    Parameters
    ----------
    device_id : int, optional
        Device id.  Not needed for CPU; accepted only so the signature
        mirrors :func:`gpu`.

    Returns
    -------
    context : Context
        The corresponding CPU pinned-memory context.
    """
    # Same pass-through as cpu(), but with the pinned-memory device type.
    ctx = Context('cpu_pinned', device_id)
    return ctx
def gpu(device_id=0):
    """Return a GPU context.

    Shorthand for ``Context('gpu', device_id)``.  The K GPUs on a node are
    typically numbered 0, ..., K-1.

    Examples
    ----------
    >>> cpu_array = mx.nd.ones((2, 3))
    >>> cpu_array.context
    cpu(0)
    >>> with mx.gpu(1):
    ...     gpu_array = mx.nd.ones((2, 3))
    >>> gpu_array.context
    gpu(1)
    >>> gpu_array = mx.nd.ones((2, 3), ctx=mx.gpu(1))
    >>> gpu_array.context
    gpu(1)

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU to use.

    Returns
    -------
    context : Context
        The corresponding GPU context.
    """
    # Unlike cpu(), the device id is meaningful here: it selects which GPU.
    ctx = Context('gpu', device_id)
    return ctx
def current_context():
    """Return the context currently in effect.

    By default ``mx.cpu()`` is used for all computations; the default can be
    overridden with a ``with mx.Context(x)`` statement, where x may be
    ``cpu(device_id)`` or ``gpu(device_id)``.

    Examples
    -------
    >>> mx.current_context()
    cpu(0)
    >>> with mx.Context('gpu', 1):  # Context changed in `with` block.
    ...    mx.current_context()  # Computation done here will be on gpu(1).
    ...
    gpu(1)
    >>> mx.current_context()  # Back to default context.
    cpu(0)

    Returns
    -------
    default_ctx : Context
    """
    # The default context is stored thread-locally; a thread that has never
    # set one lazily falls back to cpu(0).
    try:
        return Context._default_ctx.value
    except AttributeError:
        Context._default_ctx.value = Context('cpu', 0)
        return Context._default_ctx.value
| 30.490566 | 91 | 0.627104 |
58618a4465a15f955aaa88fd0bac8fbce9ce5c48 | 3,422 | py | Python | theory/model/tconfig.py | ralfonso/theory | 41684969313cfc545d74b306e409fd5bf21387b3 | [
"MIT"
] | 4 | 2015-07-03T19:53:59.000Z | 2016-04-25T03:03:56.000Z | theory/model/tconfig.py | ralfonso/theory | 41684969313cfc545d74b306e409fd5bf21387b3 | [
"MIT"
] | null | null | null | theory/model/tconfig.py | ralfonso/theory | 41684969313cfc545d74b306e409fd5bf21387b3 | [
"MIT"
] | 2 | 2020-03-29T22:02:29.000Z | 2021-07-13T07:17:19.000Z | # theory MPD client
# Copyright (C) 2008 Ryan Roemmich <ralfonso@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import pickle
from pylons import config
from pylons import app_globals as g
| 34.918367 | 108 | 0.630333 |
5861aaa87e16980cf7f95fd4b748950ec3d44176 | 5,055 | py | Python | tests/test_error.py | iotanbo/iotanbo_py_utils | 96a2728e051b5e5ee601459b4c449b5495768ba8 | [
"MIT"
] | null | null | null | tests/test_error.py | iotanbo/iotanbo_py_utils | 96a2728e051b5e5ee601459b4c449b5495768ba8 | [
"MIT"
] | 14 | 2021-06-07T17:36:02.000Z | 2021-06-07T18:02:37.000Z | tests/test_error.py | iotanbo/iotanbo_py_utils | 96a2728e051b5e5ee601459b4c449b5495768ba8 | [
"MIT"
] | null | null | null | """Test `iotanbo_py_utils.error.py`."""
from iotanbo_py_utils.error import Error
from iotanbo_py_utils.error import ErrorKind
| 28.240223 | 76 | 0.669041 |
58641c0b89af2618e34db3686e77a3a4237fbad3 | 651 | py | Python | setup.py | wontonst/orvibo | 72722b16caa929ae3f07b0a6789a0f18cd3ebad3 | [
"MIT"
] | null | null | null | setup.py | wontonst/orvibo | 72722b16caa929ae3f07b0a6789a0f18cd3ebad3 | [
"MIT"
] | null | null | null | setup.py | wontonst/orvibo | 72722b16caa929ae3f07b0a6789a0f18cd3ebad3 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from os import path
from io import open
# Locate the directory containing this setup.py so README.md can be read
# regardless of the current working directory.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the orvibo distribution.
# NOTE(review): "controll" in the description string is a typo ("control");
# fixing it changes published package metadata, so it is only flagged here.
setup(
    name='orvibo',
    version='1.5.0',
    description='Python module to controll Orvibo devices, such as s20 wifi sockets and AllOne IR blasters',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/cherezov/orvibo',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
)
| 27.125 | 108 | 0.725038 |
58652e385a1acfda94718bdec15c5a91dde6b8c7 | 3,053 | py | Python | autograder-master/autograder/test_runner.py | Diana1320622/AILabs | 315a6f4b8f8dd60e4f53d348e06e23b4d827d179 | [
"MIT"
] | null | null | null | autograder-master/autograder/test_runner.py | Diana1320622/AILabs | 315a6f4b8f8dd60e4f53d348e06e23b4d827d179 | [
"MIT"
] | null | null | null | autograder-master/autograder/test_runner.py | Diana1320622/AILabs | 315a6f4b8f8dd60e4f53d348e06e23b4d827d179 | [
"MIT"
] | null | null | null | import glob, os
import subprocess
from difflib import context_diff
| 33.184783 | 83 | 0.556174 |
5865c20624359a297b3a450c2e37573f88fc2710 | 245 | py | Python | amazon/model/admin.py | Lakshmivijaykrishnan/mini-amazon | 89ce7c5e2af127a2e8e027c87cb245fa82d184d6 | [
"Unlicense"
] | null | null | null | amazon/model/admin.py | Lakshmivijaykrishnan/mini-amazon | 89ce7c5e2af127a2e8e027c87cb245fa82d184d6 | [
"Unlicense"
] | null | null | null | amazon/model/admin.py | Lakshmivijaykrishnan/mini-amazon | 89ce7c5e2af127a2e8e027c87cb245fa82d184d6 | [
"Unlicense"
] | null | null | null | from amazon.model import db
| 22.272727 | 43 | 0.669388 |
58672ac219aa158da24cd5ab42129bffccff6013 | 2,429 | py | Python | tests/fixtures/test_abstracts/content_03_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-04-16T08:13:31.000Z | 2020-05-18T14:03:06.000Z | tests/fixtures/test_abstracts/content_03_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 310 | 2015-02-11T00:30:09.000Z | 2021-07-14T23:58:50.000Z | tests/fixtures/test_abstracts/content_03_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-02-04T01:21:28.000Z | 2021-06-15T12:50:47.000Z | expected = [
{
"abstract_type": None,
"content": "RET can be activated in cis or trans by its co-receptors and ligands in vitro, but the physiological roles of trans signaling are unclear. Rapidly adapting (RA) mechanoreceptors in dorsal root ganglia (DRGs) express Ret and the co-receptor Gfr\u03b12 and depend on Ret for survival and central projection growth. Here, we show that Ret and Gfr\u03b12 null mice display comparable early central projection deficits, but Gfr\u03b12 null RA mechanoreceptors recover later. Loss of Gfr\u03b11, the co-receptor implicated in activating RET in trans, causes no significant central projection or cell survival deficit, but Gfr\u03b11;Gfr\u03b12 double nulls phenocopy Ret nulls. Finally, we demonstrate that GFR\u03b11 produced by neighboring DRG neurons activates RET in RA mechanoreceptors. Taken together, our results suggest that trans and cis RET signaling could function in the same developmental process and that the availability of both forms of activation likely enhances but not diversifies outcomes of RET signaling.",
"full_content": "<p>RET can be activated in <italic>cis</italic> or <italic>trans</italic> by its co-receptors and ligands <italic>in vitro</italic>, but the physiological roles of <italic>trans</italic> signaling are unclear. Rapidly adapting (RA) mechanoreceptors in dorsal root ganglia (DRGs) express <italic>Ret</italic> and the co-receptor <italic>Gfr\u03b12</italic> and depend on <italic>Ret</italic> for survival and central projection growth. Here, we show that <italic>Ret</italic> and <italic>Gfr\u03b12</italic> null mice display comparable early central projection deficits, but <italic>Gfr\u03b12</italic> null RA mechanoreceptors recover later. Loss of <italic>Gfr\u03b11</italic>, the co-receptor implicated in activating RET <italic>in trans</italic>, causes no significant central projection or cell survival deficit, but <italic>Gfr\u03b11;Gfr\u03b12</italic> double nulls phenocopy <italic>Ret</italic> nulls. Finally, we demonstrate that GFR\u03b11 produced by neighboring DRG neurons activates RET in RA mechanoreceptors. Taken together, our results suggest that <italic>trans</italic> and <italic>cis</italic> RET signaling could function in the same developmental process and that the availability of both forms of activation likely enhances but not diversifies outcomes of RET signaling.</p>",
},
]
| 303.625 | 1,326 | 0.792096 |
5868165c78c75470f0c193bcf63d0eb76394d605 | 5,059 | py | Python | NER-BiLSTM-CRF-PyTorch-main/src/eval.py | OscarChang46/de-identify-sensitive-data-for-cybersecurity-use-cases-e.g.-PII-and-PHI-data-in-unstructured-text-an | e77ca0d5c19206a349ecc94fc71febdc10824482 | [
"MIT"
] | 22 | 2021-04-28T04:19:04.000Z | 2022-03-20T17:17:57.000Z | NER-BiLSTM-CRF-PyTorch-main/src/eval.py | OscarChang46/de-identify-sensitive-data-for-cybersecurity-use-cases-e.g.-PII-and-PHI-data-in-unstructured-text-an | e77ca0d5c19206a349ecc94fc71febdc10824482 | [
"MIT"
] | 3 | 2021-09-03T12:14:36.000Z | 2022-03-07T10:43:51.000Z | NER-BiLSTM-CRF-PyTorch-main/src/eval.py | OscarChang46/de-identify-sensitive-data-for-cybersecurity-use-cases-e.g.-PII-and-PHI-data-in-unstructured-text-an | e77ca0d5c19206a349ecc94fc71febdc10824482 | [
"MIT"
] | 9 | 2021-03-25T13:44:51.000Z | 2022-02-19T03:56:38.000Z | # coding=utf-8
import optparse
import torch
import time
import pickle
from torch.autograd import Variable
from loader import *
from utils import *
# python -m visdom.server
# Command-line interface for the NER evaluation script.
optparser = optparse.OptionParser()
optparser.add_option(
    "-t", "--test", default="data/eng.testb",
    help="Test set location"
)
optparser.add_option(
    '--score', default='evaluation/temp/score.txt',
    help='score file location'
)
optparser.add_option(
    "-g", '--use_gpu', default='1',
    type='int', help='whether or not to ues gpu'
)
optparser.add_option(
    '--loss', default='loss.txt',
    help='loss file location'
)
optparser.add_option(
    '--model_path', default='models/test',
    help='model path'
)
optparser.add_option(
    '--map_path', default='models/mapping.pkl',
    help='model path'
)
optparser.add_option(
    '--char_mode', choices=['CNN', 'LSTM'], default='CNN',
    help='char_CNN or char_LSTM'
)
opts = optparser.parse_args()[0]
# Restore the vocabulary/tag mappings and training-time parameters pickled
# alongside the trained model.
mapping_file = opts.map_path
with open(mapping_file, 'rb') as f:
    mappings = pickle.load(f)
word_to_id = mappings['word_to_id']
tag_to_id = mappings['tag_to_id']
# Invert tag_to_id so predicted indices can be mapped back to tag strings.
id_to_tag = {k[1]: k[0] for k in tag_to_id.items()}
char_to_id = mappings['char_to_id']
parameters = mappings['parameters']
word_embeds = mappings['word_embeds']
use_gpu = opts.use_gpu == 1 and torch.cuda.is_available()
# NOTE(review): `assert` is stripped under `python -O`; real input validation
# should raise explicitly.  Also, `os`, `eval_script` and `eval_temp` are not
# imported/defined in this excerpt — presumably provided by the wildcard
# imports from loader/utils above; confirm against those modules.
assert os.path.isfile(opts.test)
assert parameters['tag_scheme'] in ['iob', 'iobes']
if not os.path.isfile(eval_script):
    raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
if not os.path.exists(eval_temp):
    os.makedirs(eval_temp)
lower = parameters['lower']
zeros = parameters['zeros']
tag_scheme = parameters['tag_scheme']
# Load and index the test corpus with the same preprocessing settings the
# model was trained with.
test_sentences = load_sentences(opts.test, lower, zeros)
update_tag_scheme(test_sentences, tag_scheme)
test_data = prepare_dataset(
    test_sentences, word_to_id, char_to_id, tag_to_id, lower
)
model = torch.load(opts.model_path)
model_name = opts.model_path.split('/')[-1].split('.')[0]
if use_gpu:
    model.cuda()
# Switch the network to inference mode (disables dropout etc.).
model.eval()
t = time.time()
# NOTE(review): `eval` here shadows the builtin; it is presumably the
# evaluation helper pulled in via `from utils import *` — confirm.
eval(model, test_data)
print(time.time() - t) | 32.22293 | 94 | 0.613758 |
586904d063488a1bde40ac6c380144e572f09389 | 789 | py | Python | src/openbiolink/graph_creation/metadata_db_file/edge/dbMetaEdgeSiderInd.py | cthoyt/OpenBioLink | c5f85b99f9104f70493136c343e4554261e990a5 | [
"MIT"
] | null | null | null | src/openbiolink/graph_creation/metadata_db_file/edge/dbMetaEdgeSiderInd.py | cthoyt/OpenBioLink | c5f85b99f9104f70493136c343e4554261e990a5 | [
"MIT"
] | null | null | null | src/openbiolink/graph_creation/metadata_db_file/edge/dbMetaEdgeSiderInd.py | cthoyt/OpenBioLink | c5f85b99f9104f70493136c343e4554261e990a5 | [
"MIT"
] | null | null | null | from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType
| 43.833333 | 90 | 0.69455 |
586a480b0504292a0e113f2a2851c35d28765f0b | 377 | py | Python | rename_files.py | ssinhaleite/util | b65a6e0d7ff270cc2bbdbc09b7894ffc77edaf8d | [
"MIT"
] | 1 | 2018-10-10T11:37:45.000Z | 2018-10-10T11:37:45.000Z | rename_files.py | ssinhaleite/util | b65a6e0d7ff270cc2bbdbc09b7894ffc77edaf8d | [
"MIT"
] | null | null | null | rename_files.py | ssinhaleite/util | b65a6e0d7ff270cc2bbdbc09b7894ffc77edaf8d | [
"MIT"
] | null | null | null | import mahotas as mh
import glob
import os
# Rename files from 0 to number of files: copy every image found under the
# source glob into the destination directory with a zero-padded sequential
# name (00000.tif, 00001.tif, ...).
# NOTE(review): source and destination paths are hard-coded; adjust
# "/path/*" and "/path/renamed/" before running.
files = sorted(glob.glob("/path/*"))
if not os.path.isdir("/path/renamed/"):
    os.mkdir("/path/renamed/")
for i, src in enumerate(files):
    print("Processing " + src)
    image = mh.imread(src)
    mh.imsave(os.path.join("/path/renamed/", str(i).zfill(5) + ".tif"), image)
| 20.944444 | 71 | 0.649867 |
586b210947d0b950e98da84e776f0d34d976b0d2 | 1,749 | py | Python | scripts/download_lookml.py | orf/lkml | 2175a22e0fe8a894ef9312c73c6a062df2447795 | [
"MIT"
] | 110 | 2019-06-25T14:26:41.000Z | 2022-02-01T13:27:19.000Z | scripts/download_lookml.py | orf/lkml | 2175a22e0fe8a894ef9312c73c6a062df2447795 | [
"MIT"
] | 56 | 2019-06-26T22:11:43.000Z | 2022-03-15T20:37:30.000Z | scripts/download_lookml.py | orf/lkml | 2175a22e0fe8a894ef9312c73c6a062df2447795 | [
"MIT"
] | 28 | 2019-07-08T17:34:49.000Z | 2022-03-25T14:36:00.000Z | import os
import re
from base64 import b64decode
from pathlib import Path
import requests
# GitHub credentials come from the environment; authentication is required
# because unauthenticated code-search requests are heavily rate-limited.
username = os.environ["GITHUB_USERNAME"]
password = os.environ["GITHUB_PERSONAL_ACCESS_TOKEN"]
auth = requests.auth.HTTPBasicAuth(username, password)
# Downloaded files land in <repo-root>/github.
directory = Path(__file__).resolve().parent.parent / "github"
directory.mkdir(exist_ok=True)
start_url = "https://api.github.com/search/code?q=view+language:lookml"
next_url = None
page = 1
with requests.Session() as session:
    session.auth = auth
    while True:
        response = session.get(next_url or start_url)
        response.raise_for_status()
        # Pagination: pull the rel="next" URL out of the Link header.
        links = response.headers["Link"]
        finds = re.findall(
            r"<(https://api.github.com/search/code\?"
            r'q=view\+language%3Alookml&page=\d+)>; rel="next"',
            links,
        )
        if finds:
            next_url = finds[0]
        else:
            next_url = None
        print(next_url)
        urls = [item["url"] for item in response.json()["items"]]
        print(f"Downloading all content from page {page}")
        for url in urls:
            response = session.get(url)
            response.raise_for_status()
            response_json = response.json()
            name = response_json["name"]
            encoded = response_json["content"]
            # The API returns file contents base64-encoded.
            content = b64decode(encoded).decode("utf-8")
            # Skip .lookml files and dash-prefixed content — presumably the
            # legacy YAML-syntax LookML dialect — TODO confirm intent.
            if (
                name.endswith(".lookml")
                or content.startswith("-")
                or "- view" in content
            ):
                continue
            file_path = directory / name
            with file_path.open("w+") as file:
                file.write(content)
        if next_url is None:
            break
        else:
            page += 1
| 26.5 | 71 | 0.573471 |
586cd0144d170eb0711f6999768b768351ab3215 | 579 | py | Python | docs/conf.py | RTBHOUSE/carreralib | 1daa959ef411b29601c92f86c0f6876fe8367837 | [
"MIT"
] | null | null | null | docs/conf.py | RTBHOUSE/carreralib | 1daa959ef411b29601c92f86c0f6876fe8367837 | [
"MIT"
] | null | null | null | docs/conf.py | RTBHOUSE/carreralib | 1daa959ef411b29601c92f86c0f6876fe8367837 | [
"MIT"
] | 1 | 2020-02-25T20:40:50.000Z | 2020-02-25T20:40:50.000Z | import os
import sys
# Make the package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath('..'))
project = 'carreralib'
copyright = '2015-2017 Thomas Kemmer'
# NOTE(review): `get_version` is not defined in this excerpt — presumably a
# helper defined earlier in conf.py that extracts __version__ from the
# package source; confirm against the full file.
version = get_version(b'../carreralib/__init__.py')
release = version
# Sphinx extensions used to build the docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.todo'
]
exclude_patterns = ['_build']
master_doc = 'index'
html_theme = 'default'
| 20.678571 | 71 | 0.658031 |
586d9bd962737e276a73a87798d6fdc63e31cd16 | 503 | py | Python | algos/lcs.py | asaini/algo-py | e9d18ef82d14e6304430bbd8b065430e76aa7eb8 | [
"MIT"
] | 1 | 2015-10-01T21:17:10.000Z | 2015-10-01T21:17:10.000Z | algos/lcs.py | asaini/algo-py | e9d18ef82d14e6304430bbd8b065430e76aa7eb8 | [
"MIT"
] | null | null | null | algos/lcs.py | asaini/algo-py | e9d18ef82d14e6304430bbd8b065430e76aa7eb8 | [
"MIT"
] | null | null | null | def lcs(x, y):
"""
Longest Common Subsequence
"""
n = len(x) + 1
m = len(y) + 1
table = [ [0]*m for i in range(n) ]
for i in range(n):
for j in range(m):
# If either string is empty, then lcs = 0
if i == 0 or j == 0:
table[i][j] = 0
elif x[i - 1] == y[j - 1]:
table[i][j] = 1 + table[i-1][j-1]
else:
table[i][j] = max(table[i-1][j], table[i][j-1])
return table[len(x)][len(y)]
if __name__ == '__main__':
    x = "AGGTAB"
    y = "GXTXAYB"
    # The original Python-2-only `print lcs(x, y)` statement is a syntax
    # error under Python 3; the function form works on both (expected: 4).
    print(lcs(x, y))
| 16.225806 | 51 | 0.499006 |
586f8df02b72779b1db755aeb20b5f85e4d788d2 | 350 | py | Python | app/models/methods/set_device_info_method.py | luisalvesmartins/TAPO-P100 | 02bc929a87bbe4681739b14a716f6cef2b159fd1 | [
"MIT"
] | null | null | null | app/models/methods/set_device_info_method.py | luisalvesmartins/TAPO-P100 | 02bc929a87bbe4681739b14a716f6cef2b159fd1 | [
"MIT"
] | 1 | 2021-06-23T09:21:40.000Z | 2021-07-02T17:21:12.000Z | app/models/methods/set_device_info_method.py | luisalvesmartins/TAPO-P100 | 02bc929a87bbe4681739b14a716f6cef2b159fd1 | [
"MIT"
] | null | null | null | from models.methods import method
from typing import Any
| 25 | 51 | 0.708571 |
586fa3eca62dcbd41023f2732e592c80e6a5d80c | 3,077 | py | Python | projects/InterpretationReID/train_net.py | SheldongChen/AMD.github.io | 5f3018f239127949b2d3995162ffe033dcf8051a | [
"Apache-2.0"
] | 17 | 2021-11-01T01:14:06.000Z | 2022-03-02T14:59:39.000Z | projects/InterpretationReID/train_net.py | SheldongChen/AMD.github.io | 5f3018f239127949b2d3995162ffe033dcf8051a | [
"Apache-2.0"
] | 2 | 2021-12-22T07:56:13.000Z | 2022-03-18T10:26:21.000Z | projects/InterpretationReID/train_net.py | SheldongChen/AMD.github.io | 5f3018f239127949b2d3995162ffe033dcf8051a | [
"Apache-2.0"
] | 2 | 2022-02-18T07:42:38.000Z | 2022-02-18T10:16:26.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import logging
import os
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from projects.InterpretationReID.interpretationreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from fastreid.utils.checkpoint import Checkpointer
from projects.InterpretationReID.interpretationreid.evaluation import ReidEvaluator
import projects.InterpretationReID.interpretationreid as PII
from fastreid.utils.logger import setup_logger
def setup(args):
    """
    Build the run configuration from defaults, the config file, and CLI
    overrides, then perform the project's basic setup.
    """
    # Start from the default fastreid config and extend it with the
    # interpretation-specific keys before applying file/CLI overrides.
    cfg = get_cfg()
    PII.add_interpretation_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()  # make the config immutable from here on
    default_setup(cfg, args)
    return cfg
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Launch the (possibly multi-machine, multi-GPU) training processes.
    # NOTE(review): `main` is not defined in this excerpt — it is expected
    # to be the per-process entry point defined elsewhere in this file;
    # confirm it exists in the full source.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 26.756522 | 128 | 0.677283 |
586fc6bf6803a3f84dbc2e58fa7867211fee503b | 826 | py | Python | asynciomeasures/collectors.py | Poogles/aiomeasures | d62482e8de56a00bce310e7c422d1a1e7a114ef7 | [
"MIT"
] | 2 | 2018-12-27T22:01:41.000Z | 2019-04-29T11:51:15.000Z | asynciomeasures/collectors.py | Poogles/aiomeasures | d62482e8de56a00bce310e7c422d1a1e7a114ef7 | [
"MIT"
] | null | null | null | asynciomeasures/collectors.py | Poogles/aiomeasures | d62482e8de56a00bce310e7c422d1a1e7a114ef7 | [
"MIT"
] | null | null | null | from collections import deque
from asynciomeasures.events import Event
| 28.482759 | 63 | 0.457627 |
58708500568a55067da2b5aa34b23852b3efa570 | 1,576 | py | Python | hardware/testbenches/common/drivers/state/driver.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | 6 | 2021-06-28T05:52:15.000Z | 2022-03-27T20:45:28.000Z | hardware/testbenches/common/drivers/state/driver.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | null | null | null | hardware/testbenches/common/drivers/state/driver.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cocotb_bus.drivers import Driver
from cocotb.triggers import RisingEdge
from ..driver_common import BaseDriver
| 37.52381 | 74 | 0.692259 |
5870b28965fe63f49e7e9e53bb51b1566ea6452e | 1,463 | py | Python | python/primsAlgo.py | Ayushd70/RetartedCodes | 301ced178a0ec352b2d127e19028845de950551d | [
"MIT"
] | null | null | null | python/primsAlgo.py | Ayushd70/RetartedCodes | 301ced178a0ec352b2d127e19028845de950551d | [
"MIT"
] | null | null | null | python/primsAlgo.py | Ayushd70/RetartedCodes | 301ced178a0ec352b2d127e19028845de950551d | [
"MIT"
] | null | null | null | # Prim's Algorithm in Python
INF = 9999999  # sentinel "infinity"; larger than any real edge weight below
# number of vertices in graph
V = 5
# create a 2d array of size 5x5
# for adjacency matrix to represent graph (0 means "no edge")
G = [
    [0, 9, 75, 0, 0],
    [9, 0, 95, 19, 42],
    [75, 95, 0, 51, 66],
    [0, 19, 51, 0, 31],
    [0, 42, 66, 31, 0],
]
# create an array to track selected vertices:
# selected[i] becomes True once vertex i is part of the spanning tree
selected = [0, 0, 0, 0, 0]
# set number of edges to 0
no_edge = 0
# the number of edges in a minimum spanning tree is
# always exactly (V - 1), where V is the number of vertices in the
# graph
# choose 0th vertex and make it true
selected[0] = True
# print for edge and weight
print("Edge : Weight\n")
while no_edge < V - 1:
    # Greedy step of Prim's algorithm: among all edges (i, j) with i already
    # selected and j not yet selected, find the one with minimum weight and
    # add it (and vertex j) to the tree.
    minimum = INF
    x = 0
    y = 0
    for i in range(V):
        if selected[i]:
            for j in range(V):
                if (not selected[j]) and G[i][j]:
                    # not in selected and there is an edge
                    if minimum > G[i][j]:
                        minimum = G[i][j]
                        x = i
                        y = j
    print(str(x) + "-" + str(y) + ":" + str(G[x][y]))
    selected[y] = True
    no_edge += 1
| 31.12766 | 68 | 0.546822 |
5870fd20f646ac17e765716a8a7674f3e6c452db | 13,449 | py | Python | tests/test_likelihood.py | sa2c/care-home-fit | 58a2639c74b53e24f062d0dfc3e21df6d53b3077 | [
"MIT"
] | null | null | null | tests/test_likelihood.py | sa2c/care-home-fit | 58a2639c74b53e24f062d0dfc3e21df6d53b3077 | [
"MIT"
] | null | null | null | tests/test_likelihood.py | sa2c/care-home-fit | 58a2639c74b53e24f062d0dfc3e21df6d53b3077 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''Tests for the likelihood.py module'''
from time import perf_counter_ns
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
from scipy.stats import gamma
import likelihood
# Fit parameters for the small fixtures: per-size-band baseline intensities
# (NaN marks bands unused by the fixture) plus excitation coefficients for
# hospital discharges (r_h) and in-home cases (r_c).
SMALL_FIT_PARAMS = {
    'baseline_intensities': np.asarray([1, 2, np.nan, np.nan]),
    'r_h': 1.5,
    'r_c': 0.5
}
# Shape/scale pairs for the excitation lag distributions; kept deliberately
# simple so expected values in the tests can be checked by hand.
SIMPLE_DIST_PARAMS = {
    'self_excitation_shape': 2,
    'self_excitation_scale': 1,
    'discharge_excitation_shape': 3,
    'discharge_excitation_scale': 2
}
# CSV fixtures: per-care-home case counts and covariates.
SMALL_CASES_FILE = 'tests/fixtures/small.csv'
SMALL_COVARIATES_FILE = 'tests/fixtures/small_covariates.csv'
# Fully-populated parameter sets used by the larger/performance tests.
LARGE_FIT_PARAMS = {
    'baseline_intensities': np.asarray([0.3, 0.4, 0.6, 0.9]),
    'r_h': 1.5,
    'r_c': 0.5
}
FULL_DIST_PARAMS = {
    'self_excitation_shape': 2.6,
    'self_excitation_scale': 2.5,
    'discharge_excitation_shape': 2.6,
    'discharge_excitation_scale': 2.5
}
def test_read_and_tidy_data():
    '''Test that a CSV file with care home IDs as a header row
    is read, sorted, and split correctly.'''
    # IDs come back in ascending order with the value columns permuted to
    # match the sorted ID order.
    ids, values = likelihood.read_and_tidy_data(SMALL_CASES_FILE)
    assert_array_equal(ids, [14, 16, 35])
    assert_array_equal(
        values,
        [[4, 1, 6], [4, 0, 3], [6, 66, 2]]
    )
def test_carehome_intensity_null(small_cases, small_covariates):
    '''Test that calculating the null-case intensity (based on mapping banded
    carehome size to a base intensity) gives the correct result'''
    # small_cases / small_covariates are (ids, values) pairs — presumably
    # pytest fixtures defined in conftest.py; only the values are used here.
    _, cases = small_cases
    _, covariates = small_covariates
    intensity = likelihood.carehome_intensity_null(
        covariates=covariates,
        cases=cases,
        fit_params=SMALL_FIT_PARAMS
    )
    # Null model: baseline intensity per home, constant across time steps.
    assert_array_equal(intensity, [[1, 2, 2], [1, 2, 2], [1, 2, 2]])
def test_single_excitation(small_cases):
    '''Test that excitation terms of the form
    e_i(t) = \\sum_{s<t} f(t - s) triggers_i(s)
    are correctly calculated'''
    _, cases = small_cases
    # Kernel parameters shape=2, scale=1; the first row is zero because no
    # earlier time steps exist to excite t=0.
    excitation = likelihood.single_excitation(cases, 2, 1)
    assert_almost_equal(
        excitation,
        [[0, 0, 0], [1.472, 0.368, 2.207], [2.554, 0.271, 2.728]],
        decimal=3
    )
def test_cached_single_excitation(small_cases):
    '''
    Test that the caching of the single_excitation function works correctly.
    '''
    _, cases = small_cases
    # Freeze the array — presumably so it can serve as a cache key without
    # being mutated underneath the cache; confirm against likelihood.py.
    cases.flags.writeable = False
    shape = SIMPLE_DIST_PARAMS['self_excitation_shape']
    scale = SIMPLE_DIST_PARAMS['self_excitation_scale']
    uncached_start = perf_counter_ns()
    uncached_excitation = likelihood.single_excitation(cases, shape, scale)
    uncached_end = perf_counter_ns()
    # First cached call populates the cache and must match the plain result.
    first_excitation = likelihood.cached_single_excitation(
        cases, shape, scale
    )
    assert_array_equal(uncached_excitation, first_excitation)
    cached_start = perf_counter_ns()
    cached_excitation = likelihood.cached_single_excitation(
        cases, shape, scale
    )
    cached_end = perf_counter_ns()
    assert_array_equal(uncached_excitation, cached_excitation)
    # Cached version should be quicker
    # NOTE(review): a single-sample wall-clock comparison is nondeterministic
    # and can fail under scheduler/timer noise — consider loosening or
    # asserting on cache statistics instead.
    assert (cached_end - cached_start) < (uncached_end - uncached_start)
def test_carehome_intensity_no_discharges(small_cases, small_covariates):
    '''Test that the behaviour of carehome_intensity in the case where
    discharges are not considered.'''
    _, cases = small_cases
    _, covariates = small_covariates
    # r_h=None switches off the hospital-discharge excitation term.
    fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
    intensity = likelihood.carehome_intensity(
        covariates=covariates,
        cases=cases,
        fit_params=fit_params_no_rh,
        dist_params=SIMPLE_DIST_PARAMS
    )
    # First row equals the baseline; later rows add self-excitation only.
    assert_almost_equal(
        intensity,
        [[1, 2, 2], [1.736, 2.184, 3.104], [2.277, 2.135, 3.364]],
        decimal=3
    )
def test_carehome_intensity_with_discharges(small_cases, small_covariates):
    '''Test that the behaviour of carehome_intensity is correct in the case
    where discharges are considered.'''
    _, cases = small_cases
    _, covariates = small_covariates
    # Use the case matrix reversed along its first axis as stand-in
    # discharge data, so the two inputs differ.
    discharges = cases[::-1]
    intensity = likelihood.carehome_intensity(
        covariates=covariates,
        cases=cases,
        fit_params=SMALL_FIT_PARAMS,
        dist_params=SIMPLE_DIST_PARAMS,
        discharges=discharges
    )
    assert_almost_equal(
        intensity,
        [[1, 2, 2], [2.077, 5.937, 3.217], [3.332, 11.240, 3.810]],
        decimal=3
    )
def test_likelihood():
    '''Test that the likelihood calculation is correct'''
    # Hand-written 3x4 cases/intensity pair with a precomputed
    # log-likelihood value.
    cases = np.asarray([[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]])
    intensity = np.asarray(
        [[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]]
    )
    result = likelihood.likelihood(intensity, cases)
    assert_almost_equal(result, -39.145, decimal=3)
def test_calculate_likelihood_from_files_no_discharges():
    '''Test that likelihood is correctly calculated from input files
    when discharges are not considered.'''
    # r_h=None disables the discharge term, so no discharges_file is needed.
    fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
    result = likelihood.calculate_likelihood_from_files(
        SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
        fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
    )
    assert_almost_equal(result, -187.443, decimal=3)
def test_calculate_likelihood_from_files_no_cases():
    '''Likelihood computed from the input files is correct when the
    within-home case contribution is switched off (r_c = 0).'''
    # Note: it is r_c that is zeroed here, not r_h.
    params_no_cases = {**SMALL_FIT_PARAMS, 'r_c': 0}
    computed = likelihood.calculate_likelihood_from_files(
        SMALL_CASES_FILE,
        SMALL_COVARIATES_FILE,
        discharges_file=SMALL_CASES_FILE,
        fit_params=params_no_cases,
        dist_params=SIMPLE_DIST_PARAMS
    )
    assert_almost_equal(computed, -189.046, decimal=3)
def test_calculate_likelihood_from_files_no_discharges_or_cases():
    '''Likelihood computed from the input files is correct when both the
    case and the discharge contributions are disabled.'''
    baseline_params = {**SMALL_FIT_PARAMS, 'r_h': None, 'r_c': 0}
    computed = likelihood.calculate_likelihood_from_files(
        SMALL_CASES_FILE,
        SMALL_COVARIATES_FILE,
        fit_params=baseline_params,
        dist_params=SIMPLE_DIST_PARAMS
    )
    assert_almost_equal(computed, -196.466, decimal=3)
def test_calculate_likelihood_from_files_with_discharges():
    '''Likelihood computed from the input files is correct when discharge
    data are included.'''
    computed = likelihood.calculate_likelihood_from_files(
        SMALL_CASES_FILE,
        SMALL_COVARIATES_FILE,
        discharges_file=SMALL_CASES_FILE,
        fit_params=SMALL_FIT_PARAMS,
        dist_params=SIMPLE_DIST_PARAMS
    )
    assert_almost_equal(computed, -182.761, decimal=3)
def test_calculate_likelihood_from_files_missing_discharges():
    '''An AssertionError is raised when r_h is supplied but no discharge
    data are given.'''
    with pytest.raises(AssertionError):
        likelihood.calculate_likelihood_from_files(
            SMALL_CASES_FILE,
            SMALL_COVARIATES_FILE,
            fit_params=SMALL_FIT_PARAMS,
            dist_params=SIMPLE_DIST_PARAMS
        )
def test_intensity_performance_base(large_test_data, benchmark):
    '''
    Benchmark carehome_intensity_null, the base case with both the case
    and the discharge contributions disabled.
    '''
    _, cases, covariates, _ = large_test_data
    call_kwargs = {
        'fit_params': {**LARGE_FIT_PARAMS, 'r_h': None, 'r_c': None},
        'covariates': covariates,
        'cases': cases
    }
    # Warm-up call so numba JIT compilation is excluded from the timing.
    likelihood.carehome_intensity_null(**call_kwargs)
    benchmark(likelihood.carehome_intensity_null, **call_kwargs)
def test_likelihood_performance(large_test_data, benchmark):
    '''
    Benchmark the likelihood evaluation given a precomputed intensity and
    the observed case distribution.'''
    _, cases, covariates, discharges = large_test_data
    precomputed_intensity = likelihood.carehome_intensity(
        covariates=covariates,
        cases=cases,
        discharges=discharges,
        fit_params=LARGE_FIT_PARAMS,
        dist_params=FULL_DIST_PARAMS
    )
    benchmark(likelihood.likelihood, precomputed_intensity, cases)
| 32.485507 | 79 | 0.693509 |
5871c92a8c31f58780367e72dd645d194490519d | 266 | py | Python | HalleyComet/bit/models.py | ryanduan/Halley_Comet | bd3263e4575c820dd14c265c2c0d4b6b44197682 | [
"Apache-2.0"
] | null | null | null | HalleyComet/bit/models.py | ryanduan/Halley_Comet | bd3263e4575c820dd14c265c2c0d4b6b44197682 | [
"Apache-2.0"
] | null | null | null | HalleyComet/bit/models.py | ryanduan/Halley_Comet | bd3263e4575c820dd14c265c2c0d4b6b44197682 | [
"Apache-2.0"
] | null | null | null | from django.db import models
| 26.6 | 56 | 0.740602 |
587220b8fa00eda9456728bff03e4461bf290254 | 2,411 | py | Python | data-structure/queue.py | 66chenbiao/sleepace_verification_tool | 6271312d9d78ee50703e27a75787510cab4c7f4d | [
"Apache-2.0"
] | null | null | null | data-structure/queue.py | 66chenbiao/sleepace_verification_tool | 6271312d9d78ee50703e27a75787510cab4c7f4d | [
"Apache-2.0"
] | null | null | null | data-structure/queue.py | 66chenbiao/sleepace_verification_tool | 6271312d9d78ee50703e27a75787510cab4c7f4d | [
"Apache-2.0"
] | null | null | null | import unittest
if __name__ == "__main__":
unittest.main()
| 27.397727 | 84 | 0.561178 |
58731f101956f7789a536c381812a3703830d466 | 262 | py | Python | m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py | CCI-MOC/ABMI | 955c12ae9d2dc7afe7323f6c25f2af120f5b281a | [
"Apache-2.0"
] | 108 | 2015-07-21T10:40:36.000Z | 2021-07-01T06:54:51.000Z | m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py | CCI-MOC/ABMI | 955c12ae9d2dc7afe7323f6c25f2af120f5b281a | [
"Apache-2.0"
] | 320 | 2015-07-21T01:33:20.000Z | 2020-07-21T15:57:02.000Z | m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py | CCI-MOC/ABMI | 955c12ae9d2dc7afe7323f6c25f2af120f5b281a | [
"Apache-2.0"
] | 61 | 2015-07-20T18:26:37.000Z | 2021-03-17T01:18:54.000Z | import logging
from iemit_plugin import IEmitter
from plugins.emitters.base_http_emitter import BaseHttpEmitter
logger = logging.getLogger('crawlutils')
| 20.153846 | 62 | 0.793893 |
58733d23c2ee586468a2e3bd18d3eae0569b7613 | 1,946 | py | Python | frequency_domain/dwt.py | StephenTaylor1998/Research | 193dc88d368caf5a458be24456c4f6d5045d341f | [
"Apache-2.0"
] | null | null | null | frequency_domain/dwt.py | StephenTaylor1998/Research | 193dc88d368caf5a458be24456c4f6d5045d341f | [
"Apache-2.0"
] | null | null | null | frequency_domain/dwt.py | StephenTaylor1998/Research | 193dc88d368caf5a458be24456c4f6d5045d341f | [
"Apache-2.0"
] | 1 | 2022-03-27T14:04:46.000Z | 2022-03-27T14:04:46.000Z | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pywt
| 38.156863 | 81 | 0.571429 |
58747a38ec4c868ae85caa7d4f7a021b2a655030 | 7,973 | py | Python | facebook_messenger_conversation.py | davidkrantz/FacebookChatStatistics | 01fc2a022d45ed695fa7f4ad53d6532a160379db | [
"MIT"
] | 35 | 2018-02-22T09:04:21.000Z | 2022-03-21T18:28:21.000Z | facebook_messenger_conversation.py | davidkrantz/FacebookChatStatistics | 01fc2a022d45ed695fa7f4ad53d6532a160379db | [
"MIT"
] | 5 | 2018-05-03T17:56:35.000Z | 2022-02-24T08:19:58.000Z | facebook_messenger_conversation.py | davidkrantz/FacebookChatStatistics | 01fc2a022d45ed695fa7f4ad53d6532a160379db | [
"MIT"
] | 12 | 2018-05-15T19:15:25.000Z | 2022-02-24T08:20:15.000Z | import sys
import numpy as np
import json
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import emoji
| 34.07265 | 82 | 0.56014 |
5876eec474b4c0410b6e104f6c352d08e47e01ce | 1,758 | py | Python | setup.py | stevearc/pyramid_duh | af14b185533d00b69dfdb8ab1cab6f1d1d8d4647 | [
"MIT"
] | 5 | 2015-12-15T09:27:16.000Z | 2017-12-12T12:56:04.000Z | setup.py | stevearc/pyramid_duh | af14b185533d00b69dfdb8ab1cab6f1d1d8d4647 | [
"MIT"
] | null | null | null | setup.py | stevearc/pyramid_duh | af14b185533d00b69dfdb8ab1cab6f1d1d8d4647 | [
"MIT"
] | null | null | null | """ Setup file """
import os
import sys
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py so builds work from any CWD.
HERE = os.path.abspath(os.path.dirname(__file__))
# The PyPI long description is the README followed by the changelog.
README = open(os.path.join(HERE, 'README.rst')).read()
CHANGES = open(os.path.join(HERE, 'CHANGES.rst')).read()

# Runtime dependencies.
REQUIREMENTS = [
    'pyramid',
    'six',
]

# Extra dependencies used only by the test suite.
TEST_REQUIREMENTS = [
    'mock',
]

# unittest2 backports the Python 2.7 unittest API to older interpreters.
if sys.version_info[:2] < (2, 7):
    TEST_REQUIREMENTS.extend(['unittest2'])

if __name__ == "__main__":
    setup(
        name='pyramid_duh',
        version='0.1.2',
        description='Useful utilities for every pyramid app',
        long_description=README + '\n\n' + CHANGES,
        classifiers=[
            'Development Status :: 4 - Beta',
            'Framework :: Pyramid',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.2',
            'Programming Language :: Python :: 3.3',
            'Topic :: Internet :: WWW/HTTP',
            'Topic :: Utilities',
        ],
        author='Steven Arcangeli',
        author_email='arcangeli07@gmail.com',
        url='http://pyramid-duh.readthedocs.org/',
        keywords='pyramid util utility',
        license='MIT',
        zip_safe=False,
        include_package_data=True,
        packages=find_packages(exclude=('tests',)),
        install_requires=REQUIREMENTS,
        tests_require=REQUIREMENTS + TEST_REQUIREMENTS,
        test_suite='tests',
    )
| 30.310345 | 61 | 0.580205 |
5877683380ccf730fab956e05f9d48490796920d | 555 | py | Python | telegramhelpers.py | olgamirete/log-public-ip | 0e53c27e62f0709a9d0adf52c860b407a841a252 | [
"MIT"
] | null | null | null | telegramhelpers.py | olgamirete/log-public-ip | 0e53c27e62f0709a9d0adf52c860b407a841a252 | [
"MIT"
] | null | null | null | telegramhelpers.py | olgamirete/log-public-ip | 0e53c27e62f0709a9d0adf52c860b407a841a252 | [
"MIT"
] | null | null | null | import requests, os
from dotenv import load_dotenv
load_dotenv()
API_TOKEN = os.getenv('API_TOKEN') | 24.130435 | 64 | 0.572973 |
58793e76d3fcb25dfcdd3339f4cd5621aa988f33 | 1,880 | py | Python | datawinners/questionnaire/tests/test_questionnaire_template.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/questionnaire/tests/test_questionnaire_template.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/questionnaire/tests/test_questionnaire_template.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from mock import patch, MagicMock
from mangrove.datastore.database import DatabaseManager
from datawinners.questionnaire.library import QuestionnaireLibrary
| 47 | 100 | 0.643617 |
58795121905bb3b21c3853525b46782e8fe333ee | 2,826 | py | Python | chainer_chemistry/links/update/ggnn_update.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | [
"MIT"
] | 184 | 2019-11-27T12:59:01.000Z | 2022-03-29T19:18:54.000Z | chainer_chemistry/links/update/ggnn_update.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | [
"MIT"
] | 21 | 2019-12-08T01:53:33.000Z | 2020-10-23T01:19:56.000Z | chainer_chemistry/links/update/ggnn_update.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | [
"MIT"
] | 45 | 2019-11-28T09:59:54.000Z | 2022-02-07T02:42:46.000Z | import chainer
from chainer import functions
from chainer import links
import chainer_chemistry
from chainer_chemistry.links.connection.graph_linear import GraphLinear
from chainer_chemistry.utils import is_sparse
| 36.230769 | 79 | 0.624204 |
587a404ecb9909eff07171e4499fcb5702d3abd5 | 78 | py | Python | samples/ast/test.py | Ryoich/python_zero | fe4a5fd8b11c8c059d82b797cd1668f96d54e541 | [
"CC-BY-4.0"
] | 203 | 2018-12-14T10:16:33.000Z | 2022-03-10T07:23:34.000Z | samples/ast/test.py | Ryoich/python_zero | fe4a5fd8b11c8c059d82b797cd1668f96d54e541 | [
"CC-BY-4.0"
] | 39 | 2019-06-21T12:28:03.000Z | 2022-01-17T10:41:53.000Z | samples/ast/test.py | Ryoich/python_zero | fe4a5fd8b11c8c059d82b797cd1668f96d54e541 | [
"CC-BY-4.0"
] | 29 | 2018-12-30T06:48:59.000Z | 2022-03-10T07:43:42.000Z |
print("Hello")
| 8.666667 | 16 | 0.538462 |
587a892ea698fcb43f251688aa0bd017aec53e6b | 1,621 | py | Python | badboids/test/test_simulation_parameters.py | RiannaK/Coursework2 | 471589593fd09c61fae39cb5975cc88fee36971c | [
"MIT"
] | null | null | null | badboids/test/test_simulation_parameters.py | RiannaK/Coursework2 | 471589593fd09c61fae39cb5975cc88fee36971c | [
"MIT"
] | 2 | 2017-01-02T11:11:31.000Z | 2017-01-02T22:09:15.000Z | badboids/test/test_simulation_parameters.py | RiannaK/Coursework2 | 471589593fd09c61fae39cb5975cc88fee36971c | [
"MIT"
] | null | null | null | from numpy.testing import assert_array_almost_equal as array_assert
from badboids.boids import SimulationParameters
def test_simulation_parameters_init():
    """Tests Simulation Parameters constructor"""
    # Arrange: one distinct value per constructor argument.
    expected_values = {
        'formation_flying_distance': 800,
        'formation_flying_strength': 0.10,
        'alert_distance': 8,
        'move_to_middle_strength': 0.2,
        'delta_t': 1.5,
    }

    # Act
    sut = SimulationParameters(
        expected_values['formation_flying_distance'],
        expected_values['formation_flying_strength'],
        expected_values['alert_distance'],
        expected_values['move_to_middle_strength'],
        expected_values['delta_t'],
    )

    # Assert: every attribute echoes its constructor argument.
    for attribute, value in expected_values.items():
        array_assert(getattr(sut, attribute), value)
def test_get_defaults():
    """Tests Simulation Parameters get defaults method"""
    # Act
    parameters = SimulationParameters.get_defaults()

    # Assert: the expected library default values are returned.
    assert parameters.formation_flying_distance == 10000
    assert parameters.formation_flying_strength == 0.125
    assert parameters.alert_distance == 100
    assert parameters.move_to_middle_strength == 0.01
    assert parameters.delta_t == 1.0
| 33.770833 | 100 | 0.779766 |
587e704ad57d09ab05a6f91557e90faddd8fb439 | 3,247 | py | Python | django_town/oauth2/models.py | uptown/django-town | 4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50 | [
"MIT"
] | null | null | null | django_town/oauth2/models.py | uptown/django-town | 4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50 | [
"MIT"
] | null | null | null | django_town/oauth2/models.py | uptown/django-town | 4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
from django_town.core.settings import OAUTH2_SETTINGS
try:
if not OAUTH2_SETTINGS.ACCESS_TOKEN_SECRET_KEY:
raise ImportError
except KeyError:
# import traceback
# traceback.print_exc()
raise ImportError
from django.db import models
from django.conf import settings
from django.contrib import admin
from django_town.cache.model import CachingModel
from django_town.core.fields import JSONField
from django_town.utils import generate_random_from_vschar_set
# class ServiceSecretKey(CachingModel):
# cache_key_format = "_ut_o2ss:%(service__pk)d"
#
# service = models.ForeignKey(Service, unique=True)
# secret_key = models.CharField(max_length=OAUTH2_SETTINGS.SERVICE_SECRET_KEY_LENGTH,
# default=lambda: generate_random_from_vschar_set(
# OAUTH2_SETTINGS.SERVICE_SECRET_KEY_LENGTH))
def _generate_random_from_vschar_set_for_client_id():
    # Default factory for OAuth2 client IDs, sized by the configured
    # CLIENT_ID_LENGTH.  Presumably a named module-level function (rather
    # than a lambda) so it can be referenced as a field default — confirm.
    return generate_random_from_vschar_set(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)
def _generate_random_from_vschar_set_for_client_secret():
    # Default factory for OAuth2 client secrets.
    # NOTE(review): this reads CLIENT_ID_LENGTH, not a dedicated
    # CLIENT_SECRET_LENGTH setting — confirm the secret is intentionally
    # the same length as the client id.
    return generate_random_from_vschar_set(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)
admin.site.register(Client, admin.ModelAdmin)
admin.site.register(Service, admin.ModelAdmin)
| 33.132653 | 92 | 0.73206 |
587e7271e86565dcf7c4f99ca8d0228de3d2839e | 265 | py | Python | util_list_files.py | jhu-alistair/image_utilities | 07fcf2fb78b57b3e8ac798daffa9f4d7b05d9063 | [
"Apache-2.0"
] | null | null | null | util_list_files.py | jhu-alistair/image_utilities | 07fcf2fb78b57b3e8ac798daffa9f4d7b05d9063 | [
"Apache-2.0"
] | null | null | null | util_list_files.py | jhu-alistair/image_utilities | 07fcf2fb78b57b3e8ac798daffa9f4d7b05d9063 | [
"Apache-2.0"
] | null | null | null | # List files in a directory. Useful for testing the path
from local_tools import *
from image_renamer import ImageRenamer
# Only proceed when a 'path' entry is present in the configuration.
if confirm_config('path'):
    img_path = get_config('path')
    fl = ImageRenamer(img_path)
    # Print each image file ImageRenamer finds under the configured path.
    for ff in fl.image_files():
        print(ff)
| 29.444444 | 56 | 0.720755 |
587f22d6d391706fced03d26fcfcf342a5722cf3 | 1,394 | py | Python | deepmedic_config.py | farrokhkarimi/deepmedic_project | b0c916171673ce3259d2458146f2db941f0bf270 | [
"MIT"
] | 2 | 2021-07-15T18:40:18.000Z | 2021-08-03T17:10:12.000Z | deepmedic_config.py | farrokhkarimi/deepmedic_project | b0c916171673ce3259d2458146f2db941f0bf270 | [
"MIT"
] | null | null | null | deepmedic_config.py | farrokhkarimi/deepmedic_project | b0c916171673ce3259d2458146f2db941f0bf270 | [
"MIT"
] | 1 | 2022-01-17T12:11:51.000Z | 2022-01-17T12:11:51.000Z | import os
| 48.068966 | 137 | 0.636298 |
587fce66d43c23ddc2eed105e1033650f3ef5080 | 174 | py | Python | configs/models/aott.py | yoxu515/aot-benchmark | 5a7665fc8e0f0e64bc8ba6028b15d9ab32f4c56a | [
"BSD-3-Clause"
] | 105 | 2021-11-16T12:43:59.000Z | 2022-03-31T08:05:11.000Z | configs/models/aott.py | lingyunwu14/aot-benchmark | 99f74f051c91ac221e44f3edab3534ae4dd233f7 | [
"BSD-3-Clause"
] | 14 | 2021-11-18T09:52:36.000Z | 2022-03-31T16:26:32.000Z | configs/models/aott.py | lingyunwu14/aot-benchmark | 99f74f051c91ac221e44f3edab3534ae4dd233f7 | [
"BSD-3-Clause"
] | 17 | 2021-11-16T13:28:29.000Z | 2022-03-29T02:14:48.000Z | import os
from .default import DefaultModelConfig
| 21.75 | 39 | 0.706897 |
58803dca4ce3bbdf62d3eb2aca41861e7c2239d8 | 1,125 | py | Python | main.py | philipperemy/tensorflow-fifo-queue-example | 1e38e6e907b7856e954caeeb01d52f7d66e54de0 | [
"MIT"
] | 42 | 2017-03-17T07:22:30.000Z | 2022-03-31T16:11:50.000Z | main.py | afcarl/tensorflow-fifo-queue-example | 885bcfe417f6d3a9beb4180922cd221d95abc1ef | [
"MIT"
] | 2 | 2017-08-15T19:23:26.000Z | 2017-08-18T02:49:11.000Z | main.py | afcarl/tensorflow-fifo-queue-example | 885bcfe417f6d3a9beb4180922cd221d95abc1ef | [
"MIT"
] | 15 | 2017-05-04T07:27:24.000Z | 2022-03-31T16:26:28.000Z | from __future__ import print_function
import time
import tensorflow as tf
from data import DataGenerator
if __name__ == '__main__':
main()
| 25 | 95 | 0.683556 |
58803eeb0ae16d220e9ed2f74395fa7c80ff5afa | 1,285 | py | Python | Chapter_3/OO_DimmerSwitch_Model1.py | zjwillie/Object-Oriented-Python-Code | 017b07084c7937c870926b96a856f60b9d7077aa | [
"BSD-2-Clause"
] | 38 | 2021-11-16T03:04:42.000Z | 2022-03-27T05:57:50.000Z | Chapter_3/OO_DimmerSwitch_Model1.py | zjwillie/Object-Oriented-Python-Code | 017b07084c7937c870926b96a856f60b9d7077aa | [
"BSD-2-Clause"
] | null | null | null | Chapter_3/OO_DimmerSwitch_Model1.py | zjwillie/Object-Oriented-Python-Code | 017b07084c7937c870926b96a856f60b9d7077aa | [
"BSD-2-Clause"
] | 22 | 2021-11-11T15:57:58.000Z | 2022-03-18T12:58:07.000Z | # Dimmer Switch class
# Main code
# NOTE(review): the DimmerSwitch class definition is not visible in this
# excerpt; the demo below relies on its constructor plus the turnOn(),
# raiseLevel() and show() methods.

# Create first DimmerSwitch, turn on and raise level twice
oDimmer1 = DimmerSwitch('Dimmer1')
oDimmer1.turnOn()
oDimmer1.raiseLevel()
oDimmer1.raiseLevel()

# Create second DimmerSwitch, turn on and raise level 3 times
oDimmer2 = DimmerSwitch('Dimmer2')
oDimmer2.turnOn()
oDimmer2.raiseLevel()
oDimmer2.raiseLevel()
oDimmer2.raiseLevel()

# Create third DimmerSwitch, using the default settings
# (never turned on or raised — demonstrates the initial state).
oDimmer3 = DimmerSwitch('Dimmer3')

# Ask each switch to show itself
oDimmer1.show()
oDimmer2.show()
oDimmer3.show()
| 23.363636 | 61 | 0.654475 |
5881480162c7fded411cf84766484caa36fe07ea | 2,101 | py | Python | python/vosk/transcriber/cli.py | madkote/vosk-api | 8cf64ee93e5cc21a6d53595c6a80fc638a45b1d7 | [
"Apache-2.0"
] | 33 | 2019-09-03T23:21:14.000Z | 2020-01-02T10:18:15.000Z | python/vosk/transcriber/cli.py | madkote/vosk-api | 8cf64ee93e5cc21a6d53595c6a80fc638a45b1d7 | [
"Apache-2.0"
] | 7 | 2019-09-11T09:40:03.000Z | 2019-12-31T10:04:21.000Z | python/vosk/transcriber/cli.py | madkote/vosk-api | 8cf64ee93e5cc21a6d53595c6a80fc638a45b1d7 | [
"Apache-2.0"
] | 10 | 2019-09-05T05:30:16.000Z | 2020-01-02T10:18:17.000Z | #!/usr/bin/env python3
import logging
import argparse
from pathlib import Path
from vosk import list_models, list_languages
from vosk.transcriber.transcriber import Transcriber
# Command-line interface for the transcriber: model selection (by path,
# name, or language), input/output paths, output format, and logging.
parser = argparse.ArgumentParser(
    description = 'Transcribe audio file and save result in selected format')
parser.add_argument(
    '--model', '-m', type=str,
    help='model path')
parser.add_argument(
    '--list-models', default=False, action='store_true',
    help='list available models')
parser.add_argument(
    '--list-languages', default=False, action='store_true',
    help='list available languages')
parser.add_argument(
    '--model-name', '-n', type=str,
    help='select model by name')
parser.add_argument(
    '--lang', '-l', default='en-us', type=str,
    help='select model by language')
parser.add_argument(
    '--input', '-i', type=str,
    help='audiofile')
parser.add_argument(
    '--output', '-o', default='', type=str,
    help='optional output filename path')
parser.add_argument(
    '--output-type', '-t', default='txt', type=str,
    help='optional arg output data type')
parser.add_argument(
    '--log-level', default='INFO',
    help='logging level')

# NOTE(review): main() is not defined in this excerpt; it is expected to
# be defined elsewhere in this module and to consume the parser above.
if __name__ == "__main__":
    main()
| 28.013333 | 113 | 0.634936 |
58816407aff8c18d528cadd21c391b1d398c40c2 | 2,923 | py | Python | OCR-Flask-app-master/tesseract.py | ChungNPH/OCR | 06a78fa5f2c8f5891db1969ac2076ef8d20b74a8 | [
"MIT"
] | null | null | null | OCR-Flask-app-master/tesseract.py | ChungNPH/OCR | 06a78fa5f2c8f5891db1969ac2076ef8d20b74a8 | [
"MIT"
] | null | null | null | OCR-Flask-app-master/tesseract.py | ChungNPH/OCR | 06a78fa5f2c8f5891db1969ac2076ef8d20b74a8 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from imutils.object_detection import non_max_suppression
import pytesseract
from matplotlib import pyplot as plt
print(ocr(["./images/car_wash.png"])) | 30.134021 | 109 | 0.485118 |
5882bcb6d8e741c3012ffb7ce72cd027f9aee6d9 | 727 | py | Python | Scripts/autotest/bug/migrations/0003_auto_20180128_2144.py | ludechu/DJevn | ee97447da3f6f55c92bfa1b6a20436a4f3098150 | [
"bzip2-1.0.6"
] | null | null | null | Scripts/autotest/bug/migrations/0003_auto_20180128_2144.py | ludechu/DJevn | ee97447da3f6f55c92bfa1b6a20436a4f3098150 | [
"bzip2-1.0.6"
] | null | null | null | Scripts/autotest/bug/migrations/0003_auto_20180128_2144.py | ludechu/DJevn | ee97447da3f6f55c92bfa1b6a20436a4f3098150 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.0 on 2018-01-28 21:44
from django.db import migrations, models
| 30.291667 | 153 | 0.558459 |
5882ce3c784a4517fd7838c8665ee9d1be914598 | 17,439 | py | Python | tests/test_validators.py | STYLER-Inc/styler-validation | 73e6e3ac241cf26ca3ccee070f2736778d3d4849 | [
"MIT"
] | null | null | null | tests/test_validators.py | STYLER-Inc/styler-validation | 73e6e3ac241cf26ca3ccee070f2736778d3d4849 | [
"MIT"
] | null | null | null | tests/test_validators.py | STYLER-Inc/styler-validation | 73e6e3ac241cf26ca3ccee070f2736778d3d4849 | [
"MIT"
] | null | null | null | """ Tests for validators
"""
from decimal import Decimal
from unittest.mock import Mock
import random
import string
from styler_validation import validators as va
from styler_validation import messages as msg
| 23.159363 | 68 | 0.565113 |
5883b253d513cb80bd2362de5b0a8311d18ca8c7 | 3,305 | py | Python | blog/models.py | Libor03/django-final | e29dc3237252c7b0fcfea13d948ed54ffe6d0339 | [
"CC0-1.0"
] | null | null | null | blog/models.py | Libor03/django-final | e29dc3237252c7b0fcfea13d948ed54ffe6d0339 | [
"CC0-1.0"
] | null | null | null | blog/models.py | Libor03/django-final | e29dc3237252c7b0fcfea13d948ed54ffe6d0339 | [
"CC0-1.0"
] | null | null | null | from django.core.files.storage import FileSystemStorage
from django.db import models
# Create your models here.
from datetime import date
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
from django.contrib.auth.models import User #Blog author or commenter
""" Metoda vrac cestu k uploadovanmu plaktu. """
| 36.722222 | 135 | 0.698033 |
5884a5b1746cc453a881292d3b4f5da9a92c1838 | 369 | py | Python | 1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E1.py | codeclubbentleigh/Python | 94d6a937aa3520b201ee1641c2009bd90566d52a | [
"MIT"
] | 12 | 2018-11-14T03:55:58.000Z | 2021-12-12T01:13:05.000Z | 1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E1.py | codeclubbentleigh/Python | 94d6a937aa3520b201ee1641c2009bd90566d52a | [
"MIT"
] | null | null | null | 1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E1.py | codeclubbentleigh/Python | 94d6a937aa3520b201ee1641c2009bd90566d52a | [
"MIT"
] | 7 | 2019-10-10T06:28:58.000Z | 2022-02-15T07:18:12.000Z | print(list(range(10, 0, -2)))
# range(start, end, step) semantics:
# if start > end and step > 0:
# an empty list generated
# if start > end and step < 0:
# a list generated from start down to no less than end with step as constant decrement
# if start < end and step > 0:
# a list generated from start up to no more than end with step as constant increment
# if start < end and step < 0:
# an empty list generated
| 36.9 | 81 | 0.704607 |
588580ba517b618b5770a81ab628f3858c9ed41b | 3,141 | py | Python | DeepModels/KerasConvNetMNIST.py | amingolnari/Deep-Learning-Course | 00d4fe10db8d1dde3d9b2a94fe93531e8f836cbc | [
"MIT"
] | 17 | 2018-12-05T06:50:34.000Z | 2021-05-26T04:03:18.000Z | DeepModels/KerasConvNetMNIST.py | amingolnari/Deep-Learning-Course | 00d4fe10db8d1dde3d9b2a94fe93531e8f836cbc | [
"MIT"
] | null | null | null | DeepModels/KerasConvNetMNIST.py | amingolnari/Deep-Learning-Course | 00d4fe10db8d1dde3d9b2a94fe93531e8f836cbc | [
"MIT"
] | 3 | 2018-12-08T14:59:47.000Z | 2019-12-26T17:52:09.000Z | """
github : https://github.com/amingolnari/Deep-Learning-Course
Author : Amin Golnari
Keras Version : 2.2.4
Date : 4/12/2018
Keras CNN Classification on MNIST Data
Code 301
"""
## If your GPU is AMD , you can use PlaidML Backend
# import os
# os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D
from keras.optimizers import SGD
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
# Load MNIST Data (Download for First)
if __name__ == "__main__":
main()
| 32.381443 | 97 | 0.61127 |
588a0eb119fc13cdd266d6e4d090c38c400126a9 | 526 | py | Python | armstrong.py | Sanchr-sys/Letsupgrade_python7 | f130cb1dff259878193fb65dd414fc936a7c760d | [
"Apache-2.0"
] | null | null | null | armstrong.py | Sanchr-sys/Letsupgrade_python7 | f130cb1dff259878193fb65dd414fc936a7c760d | [
"Apache-2.0"
] | null | null | null | armstrong.py | Sanchr-sys/Letsupgrade_python7 | f130cb1dff259878193fb65dd414fc936a7c760d | [
"Apache-2.0"
] | null | null | null | start = 104200
# Search bounds for the first Armstrong (narcissistic) number.
start = 104200
end = 702648265

# Cache of per-exponent digit-power tables, keyed by digit count, so the
# ten powers are computed once per number length rather than per digit.
_POWERS = {}


def is_armstrong(number):
    """Return True if `number` equals the sum of its digits, each raised
    to the power of the digit count (an Armstrong/narcissistic number)."""
    exponent = len(str(number))
    table = _POWERS.get(exponent)
    if table is None:
        table = _POWERS[exponent] = [digit ** exponent for digit in range(10)]
    total = 0
    remaining = number
    while remaining:
        total += table[remaining % 10]
        remaining //= 10
    return total == number


def first_armstrong(lower, upper):
    """Return the first Armstrong number in [lower, upper], or None if the
    range contains none."""
    for candidate in range(lower, upper + 1):
        if is_armstrong(candidate):
            return candidate
    return None


# The original loop used a redundant `if x != y: continue / else: if x == y`
# double test; the helper-based search above prints the same result.
found = first_armstrong(start, end)
if found is not None:
    print("The first Armstrong number encountered is:", found)

#####OUTPUT#####

## The first Armstrong number encountered is: 548834

## Process finished with exit code 0
| 17.533333 | 70 | 0.503802 |
588a5c56f6208237e504aa5b6fa4afab73e5300c | 787 | py | Python | neji.py | NejiViraj/Viraj | d5bfc60e29100e00a87596b5e16961ab97a3dc4c | [
"BSD-2-Clause"
] | null | null | null | neji.py | NejiViraj/Viraj | d5bfc60e29100e00a87596b5e16961ab97a3dc4c | [
"BSD-2-Clause"
] | null | null | null | neji.py | NejiViraj/Viraj | d5bfc60e29100e00a87596b5e16961ab97a3dc4c | [
"BSD-2-Clause"
] | null | null | null | import requests
import json
| 24.59375 | 125 | 0.645489 |
588aa8a3b88a98a9d49032a085a9b2d4f04e667f | 9,731 | py | Python | xmaintnote/ticketing.py | 0xmc/maint-notification | bdf27f7b863a45d2191068c46f729db3c94386d1 | [
"BSD-2-Clause"
] | null | null | null | xmaintnote/ticketing.py | 0xmc/maint-notification | bdf27f7b863a45d2191068c46f729db3c94386d1 | [
"BSD-2-Clause"
] | null | null | null | xmaintnote/ticketing.py | 0xmc/maint-notification | bdf27f7b863a45d2191068c46f729db3c94386d1 | [
"BSD-2-Clause"
] | null | null | null | #!/bin/env python3
"""Handling events as tickets
The goal here is, provided a maintenance event, create an event if not a
duplicate. To determine if not duplicate, use some combination of values to
form a key. Methods to delete, update, and otherwise transform the ticket
should be available
A base class, Ticket, is provided to do some boiler plate things and enforce a
consistent interface.
"""
from textwrap import dedent
from jira import JIRA
| 33.439863 | 79 | 0.582468 |
588c8d695d5770a68e19da667343fb316670eec3 | 195 | py | Python | tests/commands/types/test_flag_param.py | cicdenv/cicdenv | 5b72fd9ef000bf07c2052471b59edaa91af18778 | [
"MIT"
] | 8 | 2020-08-10T20:57:24.000Z | 2021-08-08T10:46:20.000Z | tests/commands/types/test_flag_param.py | cicdenv/cicdenv | 5b72fd9ef000bf07c2052471b59edaa91af18778 | [
"MIT"
] | null | null | null | tests/commands/types/test_flag_param.py | cicdenv/cicdenv | 5b72fd9ef000bf07c2052471b59edaa91af18778 | [
"MIT"
] | 1 | 2020-08-10T20:42:09.000Z | 2020-08-10T20:42:09.000Z | from cicdctl.commands.types.flag import FlagParamType
| 24.375 | 72 | 0.702564 |
588fc576880c0f000634f775d5b9b45b44869222 | 7,957 | py | Python | tools/merge_messages.py | cclauss/personfinder | 62417192e79c9711d0c6c7cfc042f6d6b0dc2dc2 | [
"Apache-2.0"
] | 1 | 2021-11-18T20:09:09.000Z | 2021-11-18T20:09:09.000Z | tools/merge_messages.py | ZhengC1/personfinder | 7e40f2783ac89b91efd1d8497f1acc5b006361fa | [
"Apache-2.0"
] | null | null | null | tools/merge_messages.py | ZhengC1/personfinder | 7e40f2783ac89b91efd1d8497f1acc5b006361fa | [
"Apache-2.0"
] | 1 | 2022-01-05T07:06:43.000Z | 2022-01-05T07:06:43.000Z | #!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Merge translations from a set of .po or XMB files into a set of .po files.
Usage:
../tools/merge_messages <source-dir> <template-file>
../tools/merge_messages <source-dir> <template-file> <target-dir>
../tools/merge_messages <source-po-file> <template-file> <target-po-file>
<source-dir> should be a directory containing a subdirectories named with
locale codes (e.g. pt_BR). For each locale, this script looks for the first
.po or .xml file it finds anywhere under <source-dir>/<locale-code>/ and
adds all its messages and translations to the corresponding django.po file
in the target directory, at <target-dir>/<locale-code>/LC_MESSAGES/django.po.
<template-file> is the output file from running:
'find_missing_translations --format=po'
With the name that corresponds to the --format=xmb output.
Make sure to run this in a tree that corresponds to the version used for
generating the xmb file or the resulting merge will be wrong. See
validate_merge for directions on verifying the merge was correct.
If <target-dir> is unspecified, it defaults to the app/locale directory of
the current app. Alternatively, you can specify a single source file and
a single target file to update.
When merging messages from a source file into a target file:
- Empty messages and messages marked "fuzzy" in the source file are ignored.
- Translations in the source file will replace any existing translations
for the same messages in the target file.
- Other translations in the source file will be added to the target file.
- If the target file doesn't exist, it will be created.
- To minimize unnecessary changes from version to version, the target file
has no "#: filename:line" comments and the messages are sorted by msgid.
"""
import babel.messages
from babel.messages import pofile
import codecs
import os
import sys
import xml.sax
def log(text):
    """Prints out Unicode text.

    Encodes to UTF-8 before printing so non-ASCII characters do not raise
    UnicodeEncodeError on a byte-oriented stdout (Python 2 print statement).
    """
    print text.encode('utf-8')
def log_change(old_message, new_message):
    """Describes an update to a message.

    Prints a diff-style summary: '+' lines for added values, '-' lines for
    replaced ones.  Nothing is printed when an existing message's string
    and flags are unchanged.
    """
    if not old_message:
        # New message: report id, translation, and flags as additions.
        if new_message.id:
            log('+ msgid "%s"' % str(new_message.id))
        else:
            print >>sys.stderr, 'no message id: %s' % new_message
        # NOTE(review): the 'ignore' encoding silently drops non-ASCII
        # characters from this log line only, not from the merge itself.
        log('+ msgstr "%s"' % str(new_message.string.encode('ascii', 'ignore')))
        if new_message.flags:
            log('+ #, %s' % ', '.join(sorted(new_message.flags)))
    else:
        # Existing message: only report a change in translation or flags.
        if (new_message.string != old_message.string or
            new_message.flags != old_message.flags):
            log(' msgid "%s"' % old_message.id)
            log('- msgstr "%s"' % old_message.string)
            if old_message.flags:
                log('- #, %s' % ', '.join(sorted(old_message.flags)))
            log('+ msgstr "%s"' % new_message.string)
            if new_message.flags:
                log('+ #, %s' % ', '.join(sorted(new_message.flags)))
def create_file(filename):
    """Opens a file for writing, creating any necessary parent directories.

    Args:
        filename: path of the file to open; any missing parent directories
            are created first.

    Returns:
        A writable file object opened in text mode ('w').
    """
    dirname = os.path.dirname(filename)
    # dirname is '' for a bare filename in the current directory;
    # os.makedirs('') would raise, so only create real parent paths.
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    return open(filename, 'w')
def merge(source, target_filename):
    """Merges the messages from the source Catalog into a .po file at
    target_filename. Creates the target file if it doesn't exist.

    Only messages with a non-empty id, a non-empty translation, and no
    "fuzzy" flag are taken from ``source``; their translations replace any
    existing ones in the target catalog.  The merged catalog is written
    out without location comments, sorted by msgid, and with obsolete
    messages dropped.
    """
    if os.path.exists(target_filename):
        # Merge into the existing catalog on disk.
        # NOTE(review): the file object passed to read_po is never closed
        # explicitly; cleanup is left to garbage collection.
        target = pofile.read_po(open(target_filename))
        for message in source:
            # Skip empty messages and fuzzy translations.
            if message.id and message.string and not message.fuzzy:
                # Pass the old message (or a falsy value if absent) so the
                # change is logged as either an update or an addition.
                log_change(message.id in target and target[message.id], message)
                # This doesn't actually replace the message! It just updates
                # the fields other than the string. See Catalog.__setitem__.
                target[message.id] = message
                # We have to mutate the message to update the string and flags.
                target[message.id].string = message.string
                target[message.id].flags = message.flags
    else:
        # No target yet: log every source message as an addition and use
        # the source catalog itself as the new target.
        for message in source:
            log_change(None, message)
        target = source
    target_file = create_file(target_filename)
    pofile.write_po(target_file, target,
                    no_location=True, sort_output=True, ignore_obsolete=True)
    target_file.close()
if __name__ == '__main__':
    # NOTE: this script uses Python 2 constructs (print statement,
    # os.path.walk) and will not run under Python 3 as-is.
    args = sys.argv[1:]
    if len(args) not in [1, 2, 3]:
        print __doc__
        sys.exit(1)
    # Pad the argument list so all three slots exist:
    # [source, template, target].
    args = (args + [None, None])[:3]
    source_path = args[0]
    template_path = args[1]
    # Default target: the app/locale directory of the current app.
    target_path = args[2] or os.path.join(os.environ['APP_DIR'], 'locale')
    # If a single file is specified, merge it.
    if ((source_path.endswith('.po') or source_path.endswith('.xml')) and
        target_path.endswith('.po')):
        print target_path
        # NOTE(review): merge_file is expected to be defined elsewhere in
        # this module (not shown in this excerpt).
        merge_file(source_path, target_path, template_path)
        sys.exit(0)
    # Otherwise, we expect two directories.
    if not os.path.isdir(source_path) or not os.path.isdir(target_path):
        print __doc__
        sys.exit(1)
    # Find all the source files.
    source_filenames = {}  # {locale: po_filename}
    def find_po_file(key, dir, filenames):
        """Looks for a .po file and records it in source_filenames."""
        for filename in filenames:
            if filename.endswith('.po') or filename.endswith('.xml'):
                source_filenames[key] = os.path.join(dir, filename)
    for locale in os.listdir(source_path):
        # Locale directory names use '-', gettext locale codes use '_'.
        os.path.walk(os.path.join(source_path, locale), find_po_file,
                     locale.replace('-', '_'))
    # Merge them into the target files.
    for locale in sorted(source_filenames.keys()):
        target = os.path.join(target_path, locale, 'LC_MESSAGES', 'django.po')
        print target
        merge_file(source_filenames[locale], target, template_path)
| 39.004902 | 80 | 0.662184 |
5890360ab5457f3e208d3176b19465a1fa0b29ad | 621 | py | Python | misc/derwin.py | ssebs/nccsv | f5e94dab833a5f0822308299e154f13fd68d88f6 | [
"MIT"
] | null | null | null | misc/derwin.py | ssebs/nccsv | f5e94dab833a5f0822308299e154f13fd68d88f6 | [
"MIT"
] | null | null | null | misc/derwin.py | ssebs/nccsv | f5e94dab833a5f0822308299e154f13fd68d88f6 | [
"MIT"
] | null | null | null | # derwin.py - testing a window within a window
import curses
# main
if __name__ == "__main__":
curses.wrapper(main)
| 20.7 | 52 | 0.653784 |
589099f22121deb7215ea93a44c6ab088a52a57b | 1,110 | py | Python | test/z_emulator_autoload.py | DXCyber409/AndroidNativeEmulator | 11a0360a947114375757724eecd9bd9dbca43a56 | [
"Apache-2.0"
] | 3 | 2020-05-21T09:15:11.000Z | 2022-01-12T13:52:20.000Z | test/z_emulator_autoload.py | DXCyber409/AndroidNativeEmulator | 11a0360a947114375757724eecd9bd9dbca43a56 | [
"Apache-2.0"
] | null | null | null | test/z_emulator_autoload.py | DXCyber409/AndroidNativeEmulator | 11a0360a947114375757724eecd9bd9dbca43a56 | [
"Apache-2.0"
] | null | null | null | import sys
import logging
from unicorn import *
from unicorn.arm_const import *
from androidemu.emulator import Emulator
from UnicornTraceDebugger import udbg

# Verbose logging to stdout for the emulator run.
logging.basicConfig(stream=sys.stdout,
                    level=logging.DEBUG,
                    format="%(asctime)s %(levelname)7s %(name)34s | %(message)s")
logger = logging.getLogger(__name__)

# Load libc and the target JNI library without running their init routines.
emulator = Emulator()
libc = emulator.load_library('jnilibs/libc.so', do_init=False)
libso = emulator.load_library('jnilibs/libnative-lib.so', do_init=False)

# data segment
# Map a scratch data region, place the input bytes there, and point R0 at
# it -- presumably the emulated function's first argument (TODO confirm).
data_base = 0xa00000
data_size = 0x10000 * 3
emulator.mu.mem_map(data_base, data_size)
emulator.mu.mem_write(data_base, b'123')
emulator.mu.reg_write(UC_ARM_REG_R0, data_base)

try:
    dbg = udbg.UnicornDebugger(emulator.mu)
    # 0xcbc66000 appears to be the load base of libnative-lib.so; the +1 on
    # the start address presumably selects Thumb mode -- TODO confirm.
    addr_start = 0xcbc66000 + 0x9B68 + 1
    addr_end = 0xcbc66000 + 0x9C2C
    emulator.mu.emu_start(addr_start, addr_end)
    # Read 16 bytes from the buffer whose address is left in R2.
    r2 = emulator.mu.reg_read(UC_ARM_REG_R2)
    result = emulator.mu.mem_read(r2, 16)
    print(result.hex())
except UcError as e:
    # On failure, dump the last ~100 executed addresses (module-relative)
    # recorded by the trace debugger.
    list_tracks = dbg.get_tracks()
    for addr in list_tracks[-100:-1]:
        print(hex(addr - 0xcbc66000))
    print (e)
58919030577b20ce04be8ee22121a25618dfdeb8 | 816 | py | Python | community_ext/__init__.py | altsoph/community_loglike | ea8800217097575558f8bfb97f7737d12cad2339 | [
"BSD-3-Clause"
] | 16 | 2018-02-14T23:14:32.000Z | 2021-09-15T09:38:47.000Z | community_ext/__init__.py | altsoph/community_loglike | ea8800217097575558f8bfb97f7737d12cad2339 | [
"BSD-3-Clause"
] | null | null | null | community_ext/__init__.py | altsoph/community_loglike | ea8800217097575558f8bfb97f7737d12cad2339 | [
"BSD-3-Clause"
] | 7 | 2019-05-09T10:25:24.000Z | 2020-06-06T09:37:18.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This package implements several community detection.
Originally based on community aka python-louvain library from Thomas Aynaud
(https://github.com/taynaud/python-louvain)
"""
from .community_ext import (
partition_at_level,
modularity,
best_partition,
generate_dendrogram,
induced_graph,
load_binary,
estimate_gamma,
estimate_mu,
ilfr_mu_loglikelihood,
compare_partitions,
model_log_likelihood
)
__author__ = """Aleksey Tikhonov (altsoph@gmail.com)"""
__author__ = """Liudmila Ostroumova Prokhorenkova (ostroumova-la@yandex-team.ru)"""
# Copyright (C) 2018 by
# Aleksey Tikhonov (altsoph@gmail.com>
# Liudmila Ostroumova Prokhorenkova (ostroumova-la@yandex-team.ru)
# All rights reserved.
# BSD license.
| 24.727273 | 83 | 0.72549 |
589531a8cfe2795a9b90146b7a85879eaadf036f | 895 | py | Python | youbot_gazebo_publisher/src/listener.py | ingjavierpinilla/youBot-Gazebo-Publisher | 9314f5c471cde91127d76ba205ce6259e595145a | [
"MIT"
] | null | null | null | youbot_gazebo_publisher/src/listener.py | ingjavierpinilla/youBot-Gazebo-Publisher | 9314f5c471cde91127d76ba205ce6259e595145a | [
"MIT"
] | null | null | null | youbot_gazebo_publisher/src/listener.py | ingjavierpinilla/youBot-Gazebo-Publisher | 9314f5c471cde91127d76ba205ce6259e595145a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from trajectory_msgs.msg import JointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
if __name__ == "__main__":
listener()
| 24.861111 | 102 | 0.746369 |
5895c3e0dafc21f11b778b930d6d27f00014cab8 | 75,699 | py | Python | main.py | hustleer/Discord-Encouragement-Bot | 4105d1e81fa0e76ade7cfd293dd82ea610064f58 | [
"Apache-2.0"
] | null | null | null | main.py | hustleer/Discord-Encouragement-Bot | 4105d1e81fa0e76ade7cfd293dd82ea610064f58 | [
"Apache-2.0"
] | null | null | null | main.py | hustleer/Discord-Encouragement-Bot | 4105d1e81fa0e76ade7cfd293dd82ea610064f58 | [
"Apache-2.0"
] | null | null | null | #Botpic:https://upload.wikimedia.org/wikipedia/commons/thumb/b/b8/Red_Rose_Photography.jpg/800px-Red_Rose_Photography.jpg
#Botpic:https://commons.wikimedia.org/wiki/File:Red_Rose_Photography.jpg
#reference:https://www.youtube.com/watch?v=SPTfmiYiuok
import discord
import os
import requests
import json
import math, random
from replit import db
from keep_alive import keep_alive
import asyncpraw, asyncprawcore
#import commands
import time, asyncio, datetime
from discord.ext import tasks
from discord import Member
from discord.ext.commands import has_permissions, MissingPermissions
from prawcore import NotFound
import ffmpeg
from discord import FFmpegPCMAudio
from dotenv import load_dotenv
from youtube_search import YoutubeSearch
load_dotenv()
client = discord.Client()
# To cache the every user For on_remove_reaction to be usable
# Also enable members intent from https://discord.com/developers/ in bot secition
intents = discord.Intents.default()
intents.members = True
global playing, stream
global currently_playing_message
#t1 = threading.Timer(10, say_hello)
#t1.start()
#---------- To keep the bot alive --------------------------
#1. keeping the bot alive
'''
#------------------- adding a background task -----------------
status = cycle(['with Python','JetHub'])
@bot.event
async def on_ready():
change_status.start()
print("Your bot is ready")
@tasks.loop(seconds=10)
async def change_status():
await bot.change_presence(activity=discord.Game(next(status)))
#--------------------------------------------------------------
3. Setup the Uptime Robot :
create an account on uptime robot.
After creating an account, go to the dashboard and click on Add new monitor (preview)
select monitor type Http(s) (preview)
then go to to ur project on repl.it and copy the url from the top of the console and paste it in url section of the monitor (preview)
now set the monitoring interval to every 5 mins (so that it will ping the bot every 5 mins) and click on create monitor twice (preview)
Thats itNow go to ur project on repl.it and hit the Run button
'''
reddit = asyncpraw.Reddit(
client_id="nnhGBCiBxSJysTobl6SLPQ",
client_secret=os.environ['rd_client_secret'],
password=os.environ['rd_pass'],
user_agent="praw_test",
username="Alternative-Ad-8849",
)
from discord.ext import commands
bot = commands.Bot(command_prefix='.', help_command=None, intents=intents)
'''
class MyHelpCommand(commands.MinimalHelpCommand):
async def send_pages(self):
destination = self.get_destination()
e = discord.Embed(colour=discord.Color.blurple(), description='')
for page in self.paginator.pages:
e.description += page
await destination.send(embed=e)
bot.help_command = MyHelpCommand()'''
# My sample help command:
# My sample help command:
'''@bot.command()
async def schedule(ctx, message='Hello There', seconds = 3):
#print(ctx.channel.id)
m=str(message)
id = ctx.message.id
print('\n\n\n{}\n\n'.format(m))
author = str(ctx.message.author).split('#')[0]
await ctx.message.delete()
#id=ctx.channel.id
channel = bot.get_channel(id=id)
print(id)
print(channel)
#await channel.send('hi')
#await schedule_message(author, m, id, seconds = seconds)
#print(ctx.message)
#await ctx.message.delete(ctx.message)
#await channel.send('hi')
#await ctx.send('pong')
#print('Im invoked')'''
#name='', brief='', help='e.g. `.`'
'''@bot.command(name='', brief='', help='e.g. `.`')
async def h(ctx, what='general'):
#await ctx.send('pong')
if str(what).lower()=='general':
for command in commands:
await ctx.send(command)
elif str(what).lower() == 'fuse':
for command in fuse_help_commands:
await ctx.send(command)'''
#------------------------------------------
# _______________________________________________________________________
# ---------------------------- For Music Bot : https://medium.com/pythonland/build-a-discord-bot-in-python-that-plays-music-and-send-gifs-856385e605a1
# _______________________________________________________________________
import os, youtube_dl
import ffmpeg
class YTDLSource(discord.PCMVolumeTransformer):
#Downloads videb name/url and returns full filename
#To make leave voice channel if bot is alone in voice channel
#_______________________________________________________________________
# ----------------------------- ---------------------------------------
# _______________________________________________________________________
# ----------------------------- FM Player -----------------------------
from discord import FFmpegPCMAudio
from discord.ext.commands import Bot
from dotenv import load_dotenv
load_dotenv()
#To be implemented
global streams
streams = None
#To get current, next, previous streams
# _____________________________________________________
# ///////////////////// FM Player /////////////////////
# _____________________________________________________
'''
async def my_background_task():
await client.wait_until_ready()
counter = 0
channel = client.get_channel(id=123456789) # replace with channel_id
while not client.is_closed():
counter += 1
await channel.send(counter)
print(counter)
await asyncio.sleep(60) # task runs every 60 seconds''' ''
sad_words = [
"sad", "depressed", "unhappy", "angry", "miserable", "depressing", "hurt",
"pain"
]
starter_encouragements = [
"Cheer up!",
"You are a great person / bot!",
]
commandss = [
'\".h fuse\" or \".help fuse\" -> for fuse_auto_attend help',
'fuse auto-attend registration at: https://ioee.herokuapp.com/',
'\".inspire\" or \".quote\" -> to display quote ',
'\".joke\" -> to display joke',
'\".meme\" -> displays best random meme',
'\".riddle\" -> displays best random riddle',
'\".puns\" -> displays best random puns',
'\".knock knock\" -> displays knock knock joke',
'\".deactivate\" -> deactivates the bot .activate -> activates the bot',
'\".new inspirational_message\" -> Adds new inspirationsl message to db',
'\".del inspirational_message\" -> deletes inspirational message from db',
'\".list\" -> lists the current inspirational messages',
]
fuse_help_commands = [
'\".h\" or \".help\" - for general help',
'----------- ------------------------- -----------',
'fuse auto-attend registration at: https://ioee.herokuapp.com/',
'---------------------------------',
'\".add_user user_token\" -> to add user for auto-fuse attandance',
'.remove_user user_token -> to remove user',
'\".list_user\" -> to list available users',
'\".check class\" or \".snoop class\" -> checks if live class started.',
'\".mute unsuccessful\" -> to mute unsuccessful attending_logs. ie. hide \"Live Class not started\" messages',
'\".mute successful\" -> to mute successful attending_logs ie. hide messages when attended successfully',
'\".unmute unsuccessful\" -> to unmute unsuccessful attending_logs ie. show \"Live Class not started\" messages',
'\".umute successful\" -> to unmute successful attending_logs ie. show messages when attended successfully',
]
#from discord.ext import commands
#bot = commands.Bot(command_prefix='.')
#@bot.command()
#async def test(ctx):
# await ctx.send('I heard you! {0}'.format(ctx.author))
'''print('--------------Test Mode--------------------------------')
print(client.servers)
print('-------------------------------------------------------')'''
if "responding" not in db.keys():
db["responding"] = True
if "unsuccessful_logs" not in db.keys():
db["unsuccessful_logs"] = False
if "successful_logs" not in db.keys():
db["successful_logs"] = True
#---------------Working------------------------
# For scrapping quotes every 1 min.
#----------- To list discord servers ---------
#----------- To list discord servers ---------
class OwnerCommands(commands.Cog):
#-------------------------
'''
async def unleash_reddit(subreddit, channel_id, no_of_posts=5):
channel = bot.get_channel(id=int(channel_id))
submissions_top = await reddit.subreddit(subreddit)
submissions_hot = await reddit.subreddit(subreddit)
submissions_new = await reddit.subreddit(subreddit)
#30% top, 40%hot, 30%new
for i in range(0, no_of_posts):
print('Unleash for loop:{}'.format(i))
if i < int(no_of_posts/3):
submission=random.choice([x async for x in submissions_top.top(limit=25)])
print(a)
''async for x in submissions_top.top(limit=15):
if not x.stickied:
submission = x
#submission = next(x async for x in submissions_top.top('all') if not x.stickied)''
elif i < int(no_of_posts/7):
#submission = next(x async for x in submissions_hot.hot('all') if not x.stickied)
submission=random.choice([x async for x in submissions_top.hot(limit=35)])
else:
#submission = next(x async for x in submissions_new.new('all') if not x.stickied)
submission=random.choice([x async for x in #submissions_top.new(limit=15)])
embed=discord.Embed(
title=submission.title,
description=submission.selftext,
#description=submission.title,
colour=discord.Color.green())
embed.set_image(url=submission.url)
await channel.send(embed=embed)'''
keep_alive()
bot.run(os.environ['TOKEN'])
#client.loop.create_task(my_background_task())
bot.run('token') #
| 34.03732 | 253 | 0.588462 |
589634be0915da002b383091ea3d6a080249430a | 9,862 | py | Python | mwp_solver/module/Layer/transformer_layer.py | max-stack/MWP-SS-Metrics | 01268f2d6da716596216b04de4197e345b96c219 | [
"MIT"
] | null | null | null | mwp_solver/module/Layer/transformer_layer.py | max-stack/MWP-SS-Metrics | 01268f2d6da716596216b04de4197e345b96c219 | [
"MIT"
] | null | null | null | mwp_solver/module/Layer/transformer_layer.py | max-stack/MWP-SS-Metrics | 01268f2d6da716596216b04de4197e345b96c219 | [
"MIT"
] | null | null | null | # Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: Yihuai Lan
# @Time: 2021/08/29 22:05:03
# @File: transformer_layer.py
import torch
import math
from torch import nn
from torch.nn import functional as F
from transformers.activations import gelu_new as gelu_bert
from module.Attention.multi_head_attention import MultiHeadAttention
from module.Attention.multi_head_attention import EPTMultiHeadAttention
from module.Attention.group_attention import GroupAttention
from utils.utils import clones
| 43.831111 | 205 | 0.688806 |
5897a699b6d877a1d06ab69aa68b4566e5a0268c | 6,564 | py | Python | tests/4_ckks_basics.py | TimTam725/SEAL-true | 87c3f3f345b7dc5f49380556c55a85a7efa45bb6 | [
"MIT"
] | null | null | null | tests/4_ckks_basics.py | TimTam725/SEAL-true | 87c3f3f345b7dc5f49380556c55a85a7efa45bb6 | [
"MIT"
] | null | null | null | tests/4_ckks_basics.py | TimTam725/SEAL-true | 87c3f3f345b7dc5f49380556c55a85a7efa45bb6 | [
"MIT"
] | null | null | null | import math
from seal import *
from seal_helper import *
if __name__ == '__main__':
example_ckks_basics()
| 35.673913 | 84 | 0.655088 |
5899ee9d789144345b8642bab6672fe498055f42 | 2,422 | py | Python | FigureTable/NeuroPathRegions/barplots.py | vkola-lab/multi-task | 6a61db4223e1812744f13028747b07e2f840cc0b | [
"MIT"
] | 1 | 2021-12-19T01:45:01.000Z | 2021-12-19T01:45:01.000Z | FigureTable/NeuroPathRegions/barplots.py | vkola-lab/multi-task | 6a61db4223e1812744f13028747b07e2f840cc0b | [
"MIT"
] | null | null | null | FigureTable/NeuroPathRegions/barplots.py | vkola-lab/multi-task | 6a61db4223e1812744f13028747b07e2f840cc0b | [
"MIT"
] | 1 | 2022-03-14T18:30:23.000Z | 2022-03-14T18:30:23.000Z | from correlate import *
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rc, rcParams
rc('axes', linewidth=1)
rc('font', weight='bold', size=10)
if __name__ == "__main__":
    # NOTE(review): os, collections, prefixes, regions, stains, prefix_idx,
    # get_correlation, barplots and file_interval_info all come from
    # `from correlate import *` above.
    years = 2
    layername = 'block2BN'
    # NOTE(review): `type` shadows the builtin of the same name.
    time_threshold, type = 365*years, 'COG'
    folder = type + '_correlation_{}_years/'.format(years)
    if not os.path.exists(folder):
        os.mkdir(folder)
    interval = file_interval_info(type)
    # y_lim tracks the [min, max] correlation seen, used as shared axis limits.
    y_lim = [0, 0]
    corre = collections.defaultdict(dict)
    error = collections.defaultdict(dict)
    # pool rows: [negated sum of correlations, prefix, region label]; sorting
    # ascending therefore orders regions by descending total correlation.
    pool = [[0, prefixes[i], regions[i]] for i in range(len(regions))]
    for i, region in enumerate(prefixes):
        for stain in stains:
            corr, std = get_correlation(region + '_' + stain, prefix_idx[region], time_threshold, interval, folder, type, layername, missing=0)
            corre[region][stain] = corr
            error[region][stain] = 0  # error bars suppressed (std is unused)
            y_lim[1] = max(y_lim[1], corr)
            y_lim[0] = min(y_lim[0], corr)
            pool[i][0] -= corr
    pool.sort()
    prefixes = [p[1] for p in pool]
    regions = [p[2] for p in pool]
    barplots(prefixes, regions, stains, corre, error, '{}days_{}shap_{}'.format(time_threshold, type, layername), folder, y_lim)
| 36.69697 | 143 | 0.627168 |
589a8463ce8f13fdbedded623d8ccbad3c17d953 | 4,549 | py | Python | examples/distributed_autofaiss_n_indices.py | Rexiome/autofaiss | 79d7c396819ffd6859edde17c6958c1c3338b29b | [
"Apache-2.0"
] | null | null | null | examples/distributed_autofaiss_n_indices.py | Rexiome/autofaiss | 79d7c396819ffd6859edde17c6958c1c3338b29b | [
"Apache-2.0"
] | null | null | null | examples/distributed_autofaiss_n_indices.py | Rexiome/autofaiss | 79d7c396819ffd6859edde17c6958c1c3338b29b | [
"Apache-2.0"
] | null | null | null | """
An example of running autofaiss by pyspark to produce N indices.
You need to install pyspark before using the following example.
"""
from typing import Dict
import faiss
import numpy as np
from autofaiss import build_index
# You'd better create a spark session before calling build_index,
# otherwise, a spark session would be created by autofaiss with the least configuration.
_, index_path2_metric_infos = build_index(
embeddings="hdfs://root/path/to/your/embeddings/folder",
distributed="pyspark",
file_format="parquet",
temporary_indices_folder="hdfs://root/tmp/distributed_autofaiss_indices",
current_memory_available="10G",
max_index_memory_usage="100G",
nb_indices_to_keep=10,
)
index_paths = sorted(index_path2_metric_infos.keys())
###########################################
# Use case 1: merging 10 indices into one #
###########################################
merged = faiss.read_index(index_paths[0])
for rest_index_file in index_paths[1:]:
index = faiss.read_index(rest_index_file)
faiss.merge_into(merged, index, shift_ids=False)
with open("merged-knn.index", "wb") as f:
faiss.write_index(merged, faiss.PyCallbackIOWriter(f.write))
########################################
# Use case 2: searching from N indices #
########################################
K, DIM, all_distances, all_ids, NB_QUERIES = 5, 512, [], [], 2
queries = faiss.rand((NB_QUERIES, DIM))
for rest_index_file in index_paths:
index = faiss.read_index(rest_index_file)
distances, ids = index.search(queries, k=K)
all_distances.append(distances)
all_ids.append(ids)
dists_arr = np.stack(all_distances, axis=1).reshape(NB_QUERIES, -1)
knn_ids_arr = np.stack(all_ids, axis=1).reshape(NB_QUERIES, -1)
sorted_k_indices = np.argsort(-dists_arr)[:, :K]
sorted_k_dists = np.take_along_axis(dists_arr, sorted_k_indices, axis=1)
sorted_k_ids = np.take_along_axis(knn_ids_arr, sorted_k_indices, axis=1)
print(f"{K} nearest distances: {sorted_k_dists}")
print(f"{K} nearest ids: {sorted_k_ids}")
############################################
# Use case 3: on disk merging of N indices #
############################################
# using faiss.merge_ondisk (https://github.com/facebookresearch/faiss/blob/30abcd6a865afef7cf86df7e8b839a41b5161505/contrib/ondisk.py )
# https://github.com/facebookresearch/faiss/blob/151e3d7be54aec844b6328dc3e7dd0b83fcfa5bc/demos/demo_ondisk_ivf.py
# to merge indices on disk without using memory
# this is useful in particular to use a very large index with almost no memory usage.
from faiss.contrib.ondisk import merge_ondisk
import faiss
block_fnames = index_paths
empty_index = faiss.read_index(block_fnames[0], faiss.IO_FLAG_MMAP)
empty_index.ntotal = 0
merge_ondisk(empty_index, block_fnames, "merged_index.ivfdata")
faiss.write_index(empty_index, "populated.index")
pop = faiss.read_index("populated.index", faiss.IO_FLAG_ONDISK_SAME_DIR)
########################################################
# Use case 4: use N indices using HStackInvertedLists #
########################################################
# This allows using N indices as a single combined index
# without changing anything on disk or loading anything to memory
# it works well but it's slower than first using merge_ondisk
# because it requires explore N pieces of inverted list for each
# list to explore
import os
index = CombinedIndex(index_paths)
index.search(queries, K)
| 34.992308 | 135 | 0.675533 |
589dcbc08792dc79d40776858af24dca67ad7bfe | 4,170 | py | Python | rbkcli/core/handlers/callback.py | rubrikinc/rbkcli | 62bbb20d15c78d2554d7258bdae655452ac826c7 | [
"MIT"
] | 10 | 2019-07-23T13:13:16.000Z | 2022-03-04T17:48:10.000Z | rbkcli/core/handlers/callback.py | rubrikinc/rbkcli | 62bbb20d15c78d2554d7258bdae655452ac826c7 | [
"MIT"
] | 19 | 2019-08-22T06:23:09.000Z | 2021-12-28T04:04:52.000Z | rbkcli/core/handlers/callback.py | rubrikinc/rbkcli | 62bbb20d15c78d2554d7258bdae655452ac826c7 | [
"MIT"
] | 5 | 2019-08-06T14:29:35.000Z | 2021-06-17T20:35:17.000Z | """Callback module for rbkcli."""
import json
from rbkcli.core.handlers.inputs import InputHandler
from rbkcli.base.essentials import DotDict, RbkcliException
from rbkcli.core.handlers import ApiTargetTools
from rbkcli.core.handlers.outputs import OutputHandler
| 37.232143 | 90 | 0.591367 |
589e413db07bdd7cf6dcd6e3ab66ffc0b716eb5c | 1,001 | py | Python | portrait/webapp/migrations/0001_initial.py | andela-sjames/Portrait | 83074e3d16d8009a71b674b6859f7c276b8d6537 | [
"MIT"
] | null | null | null | portrait/webapp/migrations/0001_initial.py | andela-sjames/Portrait | 83074e3d16d8009a71b674b6859f7c276b8d6537 | [
"MIT"
] | null | null | null | portrait/webapp/migrations/0001_initial.py | andela-sjames/Portrait | 83074e3d16d8009a71b674b6859f7c276b8d6537 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-16 23:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 34.517241 | 152 | 0.631369 |
589e51c361515efc2c983bdbd855621e6ab93aac | 9,950 | py | Python | src/greenbudget/app/group/serializers.py | nickmflorin/django-proper-architecture-testing | da7c4019697e85f921695144375d2f548f1e98ad | [
"MIT"
] | null | null | null | src/greenbudget/app/group/serializers.py | nickmflorin/django-proper-architecture-testing | da7c4019697e85f921695144375d2f548f1e98ad | [
"MIT"
] | null | null | null | src/greenbudget/app/group/serializers.py | nickmflorin/django-proper-architecture-testing | da7c4019697e85f921695144375d2f548f1e98ad | [
"MIT"
] | null | null | null | from rest_framework import serializers, exceptions
from greenbudget.lib.rest_framework_utils.serializers import (
EnhancedModelSerializer)
from greenbudget.app.account.models import BudgetAccount, TemplateAccount
from greenbudget.app.tagging.serializers import ColorField
from greenbudget.app.subaccount.models import (
BudgetSubAccount, TemplateSubAccount)
from .models import (
Group,
BudgetAccountGroup,
TemplateAccountGroup,
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
| 37.689394 | 80 | 0.63206 |
58a0bffac08dce61ed79b44c63defce1adefa9d1 | 12,103 | py | Python | objects/CSCG/_2d/mesh/domain/inputs/base.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_2d/mesh/domain/inputs/base.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_2d/mesh/domain/inputs/base.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
INTRO
@author: Yi Zhang. Created on Tue May 21 11:57:52 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft,
Delft, the Netherlands
"""
import inspect
from screws.freeze.main import FrozenOnly
from typing import Dict, Union
import numpy as np
from screws.decorators.classproperty.main import classproperty
def ___PRIVATE_boundary_name_requirement_checker___(self, boundaryRegionSidesDict):
"""
Requirements:
1). != domain name.
2). Length > 2
3). Can not start with 'R:' (So it must be different from regions names).
4). Only have letters
"""
for boundary_name in boundaryRegionSidesDict.keys():
assert boundary_name != self.domain_name
assert len(boundary_name) > 2, f"boundary_name = {boundary_name} is too short (>2 must)."
assert boundary_name[0:2] != 'R:', f"boundary_name = {boundary_name} wrong."
assert boundary_name.isalpha(), f"boundary_name = {boundary_name} wrong, boundary_name can only contain letters."
def ___PRIVATE_periodic_boundary_requirement_checker___(self, pBd):
"""
Here we only do a simple check. We make sure that the keys are in format of:
0). boundary_name_1=boundary_name_2.
1). A boundary name at most appear in one pair.
"""
assert isinstance(pBd, dict)
bnPOOL = set()
for pair in pBd:
assert '=' in pair
bn1, bn2 = pair.split('=')
lengthPOOL = len(bnPOOL)
assert bn1 in self._boundary_names_ and bn2 in self._boundary_names_
bnPOOL.add(bn1)
bnPOOL.add(bn2)
newLengthPOOL = len(bnPOOL)
assert newLengthPOOL == lengthPOOL + 2, "Boundary(s) used for multiple periodic pairs!"
self._periodic_boundaries_ = bnPOOL
# class properties -------------------------
| 36.345345 | 125 | 0.629513 |
58a1f761a8b86a42b461b76e20b0ebb5fa21fa7a | 4,098 | py | Python | src/sage/modules/vector_symbolic_dense.py | sloebrich/sage | b8f53f72e817e78722ad1b40d70aa9071426700b | [
"BSL-1.0"
] | 1 | 2020-05-19T22:34:03.000Z | 2020-05-19T22:34:03.000Z | src/sage/modules/vector_symbolic_dense.py | sbt4104/sage | 2cbd93e0b78dec701f4b7ad9271d3b1e967bcd6c | [
"BSL-1.0"
] | null | null | null | src/sage/modules/vector_symbolic_dense.py | sbt4104/sage | 2cbd93e0b78dec701f4b7ad9271d3b1e967bcd6c | [
"BSL-1.0"
] | 3 | 2020-03-29T17:13:36.000Z | 2021-05-03T18:11:28.000Z | """
Vectors over the symbolic ring.
Implements vectors over the symbolic ring.
AUTHORS:
- Robert Bradshaw (2011-05-25): Added more element-wise simplification methods
- Joris Vankerschaver (2011-05-15): Initial version
EXAMPLES::
sage: x, y = var('x, y')
sage: u = vector([sin(x)^2 + cos(x)^2, log(2*y) + log(3*y)]); u
(cos(x)^2 + sin(x)^2, log(3*y) + log(2*y))
sage: type(u)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: u.simplify_full()
(1, log(3*y) + log(2*y))
TESTS:
Check that the outcome of arithmetic with symbolic vectors is again
a symbolic vector (:trac:`11549`)::
sage: v = vector(SR, [1, 2])
sage: w = vector(SR, [sin(x), 0])
sage: type(v)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(w)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(v + w)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(-v)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(5*w)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
Test pickling/unpickling::
sage: u = vector(SR, [sin(x^2)])
sage: loads(dumps(u)) == u
True
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2011 Joris Vankerschaver (jv@caltech.edu)
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from . import free_module_element
from sage.symbolic.all import Expression
def apply_map(phi):
    """
    Returns a function that applies phi to its argument.

    EXAMPLES::

        sage: from sage.modules.vector_symbolic_dense import apply_map
        sage: v = vector([1,2,3])
        sage: f = apply_map(lambda x: x+1)
        sage: f(v)
        (2, 3, 4)
    """
    def apply(self, *args, **kwds):
        """
        Generic function used to implement common symbolic operations
        elementwise as methods of a vector.

        EXAMPLES::

            sage: var('x,y')
            (x, y)
            sage: v = vector([sin(x)^2 + cos(x)^2, log(x*y), sin(x/(x^2 + x)), factorial(x+1)/factorial(x)])
            sage: v.simplify_trig()
            (1, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))
            sage: v.canonicalize_radical()
            (cos(x)^2 + sin(x)^2, log(x) + log(y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))
            sage: v.simplify_rational()
            (cos(x)^2 + sin(x)^2, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))
            sage: v.simplify_factorial()
            (cos(x)^2 + sin(x)^2, log(x*y), sin(x/(x^2 + x)), x + 1)
            sage: v.simplify_full()
            (1, log(x*y), sin(1/(x + 1)), x + 1)

            sage: v = vector([sin(2*x), sin(3*x)])
            sage: v.simplify_trig()
            (2*cos(x)*sin(x), (4*cos(x)^2 - 1)*sin(x))
            sage: v.simplify_trig(False)
            (sin(2*x), sin(3*x))
            sage: v.simplify_trig(expand=False)
            (sin(2*x), sin(3*x))
        """
        # Forward any extra positional/keyword arguments to phi for each entry.
        def _entrywise(value):
            return phi(value, *args, **kwds)
        return self.apply_map(_entrywise)
    # Point readers at the underlying Expression method for its options.
    apply.__doc__ += "\nSee Expression." + phi.__name__ + "() for optional arguments."
    return apply
# Add elementwise methods.
# Each listed Expression method is wrapped by apply_map() so that calling it
# on a vector applies the corresponding simplification entrywise.
# NOTE(review): Vector_symbolic_dense is the vector class defined earlier in
# this module (its body is not shown in this excerpt).
for method in ['simplify', 'simplify_factorial',
               'simplify_log', 'simplify_rational',
               'simplify_trig', 'simplify_full', 'trig_expand',
               'canonicalize_radical', 'trig_reduce']:
    setattr(Vector_symbolic_dense, method, apply_map(getattr(Expression, method)))
| 34.728814 | 108 | 0.59346 |
58a347a92a051b6eeb3be14043e523039fd31e40 | 784 | py | Python | backend/app/models/weather.py | francoiscolombo/webnews | 2f4c3fa5343919e6c658d97aebec4997d4d7ea48 | [
"MIT"
] | null | null | null | backend/app/models/weather.py | francoiscolombo/webnews | 2f4c3fa5343919e6c658d97aebec4997d4d7ea48 | [
"MIT"
] | 4 | 2021-03-10T12:26:29.000Z | 2022-02-27T02:00:32.000Z | backend/app/models/weather.py | francoiscolombo/webnews | 2f4c3fa5343919e6c658d97aebec4997d4d7ea48 | [
"MIT"
] | null | null | null | from app import db
from app.models.serializer import Serializer
| 32.666667 | 70 | 0.672194 |
58a3ad8eacd907942afee36829131b2e139101c4 | 894 | py | Python | conu/backend/nspawn/constants.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | [
"MIT"
] | 95 | 2018-05-19T14:35:08.000Z | 2022-01-08T23:31:40.000Z | conu/backend/nspawn/constants.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | [
"MIT"
] | 179 | 2017-09-12T11:14:30.000Z | 2018-04-26T05:36:13.000Z | conu/backend/nspawn/constants.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | [
"MIT"
] | 16 | 2018-05-09T14:15:32.000Z | 2021-08-02T21:11:33.000Z | # -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
# TODO: move this line to some generic constants, instead of same in
# docker and nspawn
CONU_ARTIFACT_TAG = 'CONU.'
# Directory where bootstrapped nspawn base images are stored.
CONU_IMAGES_STORE = "/opt/conu-nspawn-images/"
# Minimal package set installed into a freshly bootstrapped nspawn image.
# (Fix: "sssd-client" was previously listed twice; the duplicate entry has
# been removed.)
CONU_NSPAWN_BASEPACKAGES = [
    "dnf",
    "iproute",
    "dhcp-client",
    "initscripts",
    "passwd",
    "systemd",
    "rpm",
    "bash",
    "shadow-utils",
    "sssd-client",
    "util-linux",
    "libcrypt",
    "coreutils",
    "glibc-all-langpacks",
    "vim-minimal"]
# Size of the file backing the bootstrap filesystem, in megabytes.
BOOTSTRAP_IMAGE_SIZE_IN_MB = 5000
# Utility used to create the filesystem inside that file.
BOOTSTRAP_FS_UTIL = "mkfs.ext4"
# Base dnf invocation used to install packages into the new image.
BOOTSTRAP_PACKAGER = [
    "dnf",
    "-y",
    "install",
    "--nogpgcheck",
    "--setopt=install_weak_deps=False",
    "--allowerasing"]
# Defaults for retry loops -- presumably total timeout and per-attempt
# sleep in seconds; confirm at the call sites.
DEFAULT_RETRYTIMEOUT = 30
DEFAULT_SLEEP = 1
58a653052f6df764ec062ee02680225f5a15d5ec | 805 | py | Python | onnxruntime/python/tools/quantization/operators/qdq_base_operator.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 669 | 2018-12-03T22:00:31.000Z | 2019-05-06T19:42:49.000Z | onnxruntime/python/tools/quantization/operators/qdq_base_operator.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 440 | 2018-12-03T21:09:56.000Z | 2019-05-06T20:47:23.000Z | onnxruntime/python/tools/quantization/operators/qdq_base_operator.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 140 | 2018-12-03T21:15:28.000Z | 2019-05-06T18:02:36.000Z | import itertools
from ..quant_utils import QuantizedValue, QuantizedValueType, attribute_to_kwarg, quantize_nparray
from .base_operator import QuantOperatorBase
| 32.2 | 106 | 0.719255 |
58a66b1c1e9cbe9103fe7260fec4a45f53280f13 | 495 | py | Python | baya/tests/models.py | hrichards/baya | f319cef5e95cd6a166265d51ae0ea236b6f65be3 | [
"MIT"
] | null | null | null | baya/tests/models.py | hrichards/baya | f319cef5e95cd6a166265d51ae0ea236b6f65be3 | [
"MIT"
] | 1 | 2018-12-28T16:53:42.000Z | 2018-12-28T16:53:42.000Z | baya/tests/models.py | hrichards/baya | f319cef5e95cd6a166265d51ae0ea236b6f65be3 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 21.521739 | 47 | 0.711111 |
58a67e64a1b78dbe199317a20cf65d4984a16e33 | 7,598 | py | Python | code/rec_eval.py | dawenl/content_wmf | b3e0a8eeb1b28836f280997c47444786afe91d3f | [
"MIT"
] | 24 | 2016-09-18T10:28:07.000Z | 2021-08-21T14:48:01.000Z | code/rec_eval.py | dawenl/content_wmf | b3e0a8eeb1b28836f280997c47444786afe91d3f | [
"MIT"
] | null | null | null | code/rec_eval.py | dawenl/content_wmf | b3e0a8eeb1b28836f280997c47444786afe91d3f | [
"MIT"
] | 15 | 2015-10-29T14:46:03.000Z | 2020-03-12T09:35:55.000Z | import bottleneck as bn
import numpy as np
from scipy import sparse
"""
All the data should be in the shape of (n_users, n_items)
All the latent factors should in the shape of (n_users/n_items, n_components)
1. train_data refers to the data that was used to train the model
2. heldout_data refers to the data that was used for evaluation (could be test
set or validation set)
3. vad_data refers to the data that should be excluded as validation set, which
should only be used when calculating test scores
"""
# helper functions #
def mean_rrank_at_k_batch(train_data, heldout_data, Et, Eb,
                          user_idx, k=5, vad_data=None):
    '''
    mean reciprocal rank@k: For each user, make predictions and rank for
    all the items. Then calculate the mean reciprocal rank for the top K that
    are in the held-out set.
    '''
    batch_users = user_idx.stop - user_idx.start
    # Predicted scores for this slice of users -- presumably with items
    # already seen in train_data (and vad_data, if given) masked out;
    # confirm in _make_prediction.
    X_pred = _make_prediction(train_data, Et, Eb, user_idx,
                              batch_users, vad_data=vad_data)
    # Double argsort converts scores into 0-based ranks (0 = best item),
    # so 1 / (rank + 1) is each item's reciprocal rank.
    all_rrank = 1. / (np.argsort(np.argsort(-X_pred, axis=1), axis=1) + 1)
    # Zero out reciprocal ranks everywhere except held-out positions.
    X_true_binary = (heldout_data[user_idx] > 0).toarray()
    heldout_rrank = X_true_binary * all_rrank
    # NOTE(review): bn.partsort is the pre-1.0 bottleneck API (renamed to
    # bn.partition in bottleneck >= 1.0) -- confirm the pinned version.
    top_k = bn.partsort(-heldout_rrank, k, axis=1)
    # Mean of the k largest held-out reciprocal ranks per user (negated
    # twice because partsort ran on -heldout_rrank).
    return -top_k[:, :k].mean(axis=1)
def NDCG_binary_batch(train_data, heldout_data, Et, Eb, user_idx,
                      vad_data=None):
    '''
    normalized discounted cumulative gain for binary relevance
    '''
    batch_users = user_idx.stop - user_idx.start
    n_items = train_data.shape[1]
    X_pred = _make_prediction(train_data, Et, Eb, user_idx,
                              batch_users, vad_data=vad_data)
    # Double argsort: 0-based rank of every item per user (0 = top-ranked).
    all_rank = np.argsort(np.argsort(-X_pred, axis=1), axis=1)
    # build the discount template
    # tp[r] is the DCG discount at rank r: 1 for the top item, then
    # 1 / log2(r + 1) for the rest.
    tp = np.hstack((1, 1. / np.log2(np.arange(2, n_items + 1))))
    all_disc = tp[all_rank]
    # Sparse matrix holding the discount at every held-out (user, item)
    # position, zero elsewhere.
    X_true_binary = (heldout_data[user_idx] > 0).tocoo()
    disc = sparse.csr_matrix((all_disc[X_true_binary.row, X_true_binary.col],
                              (X_true_binary.row, X_true_binary.col)),
                             shape=all_disc.shape)
    # DCG: sum of discounts over each user's held-out items.
    DCG = np.array(disc.sum(axis=1)).ravel()
    # IDCG: best achievable DCG given how many held-out items the user has
    # (all of them ranked at the very top).
    IDCG = np.array([tp[:n].sum()
                     for n in heldout_data[user_idx].getnnz(axis=1)])
    return DCG / IDCG
def mean_perc_rank_batch(train_data, heldout_data, Et, Eb, user_idx,
                         vad_data=None):
    '''
    Mean percentile rank (MPR) contribution for one batch of users.

    MPR of the full set is the sum of batch MPR's divided by the sum of all
    the feedbacks (Eq. 8 in Hu et al.).  This metric does not necessarily
    require the data to be binary.
    '''
    n_users_batch = user_idx.stop - user_idx.start
    scores = _make_prediction(train_data, Et, Eb, user_idx, n_users_batch,
                              vad_data=vad_data)
    # Double argsort yields the 0-based rank of every item per user
    # (0 = highest score).
    ranks = np.argsort(np.argsort(-scores, axis=1), axis=1)
    # Normalize by the number of finite (i.e. not masked-out) scores per
    # user to obtain percentile ranks in [0, 1).
    n_valid = np.isfinite(scores).sum(axis=1, keepdims=True).astype(np.float32)
    percentiles = ranks / n_valid
    # Weight each held-out entry's percentile by its feedback value and sum.
    held = heldout_data[user_idx]
    return (percentiles[held.nonzero()] * held.data).sum()
| 37.613861 | 79 | 0.633325 |
58a6b4335eac35be6ee8f5597bc84e5d66427621 | 1,295 | py | Python | pgdrive/tests/vis_block/vis_std_t_intersection.py | decisionforce/pgdrive | 19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee | [
"Apache-2.0"
] | 97 | 2020-12-25T06:02:17.000Z | 2022-01-16T06:58:39.000Z | pgdrive/tests/vis_block/vis_std_t_intersection.py | decisionforce/pgdrive | 19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee | [
"Apache-2.0"
] | 192 | 2020-12-25T07:58:17.000Z | 2021-08-28T10:13:59.000Z | pgdrive/tests/vis_block/vis_std_t_intersection.py | decisionforce/pgdrive | 19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee | [
"Apache-2.0"
] | 11 | 2020-12-29T11:23:44.000Z | 2021-12-06T23:25:49.000Z | from pgdrive.component.blocks.curve import Curve
from pgdrive.component.blocks.first_block import FirstPGBlock
from pgdrive.component.blocks.std_t_intersection import StdTInterSection
from pgdrive.component.blocks.straight import Straight
from pgdrive.component.road.road_network import RoadNetwork
from pgdrive.tests.vis_block.vis_block_base import TestBlock
if __name__ == "__main__":
    # Interactive visual test: build a small road network (straight road ->
    # curve -> straight -> T-intersection with a curve on each socket) and
    # render it with debug bounding boxes.
    test = TestBlock(True)
    from pgdrive.engine.asset_loader import initialize_asset_loader
    initialize_asset_loader(test)
    global_network = RoadNetwork()
    # Anchor block of the network; 3.0 and 2 are presumably lane width and
    # lane count -- confirm against the FirstPGBlock signature.
    first = FirstPGBlock(global_network, 3.0, 2, test.render, test.world, 1)
    curve = Curve(1, first.get_socket(0), global_network, 1)
    curve.construct_block(test.render, test.world)
    straight = Straight(2, curve.get_socket(0), global_network, 1)
    straight.construct_block(test.render, test.world)
    intersection = StdTInterSection(3, straight.get_socket(0), global_network, 1)
    # construct_block returns a success flag / result that is printed here.
    print(intersection.construct_block(test.render, test.world))
    id = 4
    # Attach one curve to every free socket of the T-intersection.
    for socket_idx in range(intersection.SOCKET_NUM):
        block = Curve(id, intersection.get_socket(socket_idx), global_network, id + 1)
        block.construct_block(test.render, test.world)
        id += 1
    test.show_bounding_box(global_network)
    # Enter the render loop (blocks until the window is closed).
    test.run()
| 40.46875 | 86 | 0.766023 |
58a742e0b8ad8aa4381262e4194d124ffb86733b | 6,241 | py | Python | version/v 4.0/spider/config.py | zhong-yan/neteasenews | 4dda8ef13d44f08e90e3869f4a7d972fb4b9feed | [
"Apache-2.0"
] | null | null | null | version/v 4.0/spider/config.py | zhong-yan/neteasenews | 4dda8ef13d44f08e90e3869f4a7d972fb4b9feed | [
"Apache-2.0"
] | null | null | null | version/v 4.0/spider/config.py | zhong-yan/neteasenews | 4dda8ef13d44f08e90e3869f4a7d972fb4b9feed | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
BASE_URL = 'http://news.163.com/'
# ajax,ajax,?callback=data_callback'
# json?..json
JSON_INDEX_URLS = [
'http://temp.163.com/special/00804KVA/cm_yaowen.js?callback=data_callback',
'http://house.163.com/special/00078GU7/guangzhou_xw_news_v1.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_shehui.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_guonei.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_guoji.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_dujia.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_war.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_money.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_tech.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_sports.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_ent.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_lady.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_auto.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_houseguangzhou.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_hangkong.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_jiankang.js?callback=data_callback',
#
'http://news.163.com/uav/special/000189N0/uav_index.js?callback=data_callback',
#
'http://sports.163.com/special/000587PR/newsdata_n_index.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_world.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_china.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_cba.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_allsports.js?callback=data_callback',
# NBA
'http://sports.163.com/special/000587PK/newsdata_nba_hj.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_qsh.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_ysh.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_ketr.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_okc.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_huren.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_mc.js?callback=data_callback',
#
'http://ent.163.com/special/000380VU/newsdata_index.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_music.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_star.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_movie.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_tv.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_show.js?callback=data_callback',
#
'http://money.163.com/special/002557S5/newsdata_idx_index.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_stock.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_finance.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_fund.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_licai.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_biz.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_bitcoin.js?callback=data_callback',
#
'http://money.163.com/special/002557S6/newsdata_gp_index.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_hkstock.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_usstock.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_ipo.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_bitcoin.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_dy.js?callback=data_callback',
#
'http://tech.163.com/special/00097UHL/tech_datalist.js?callback=data_callback',
# ,,copy
'http://bendi.news.163.com/beijing/special/04388GGG/bjxinxiliu.js',
'http://bendi.news.163.com/shanghai/special/04188GP4/shxinxiliu.js',
'http://tj.news.163.com/special/04208F5D/tjxxl.js',
'http://bendi.news.163.com/jiangsu/special/04248H8U/njxxl.js',
'http://bendi.news.163.com/zhejiang/special/04098FBT/xinxiliu.js',
'http://sc.news.163.com/special/04268EVT/xinxiliu.js',
'http://bendi.news.163.com/heilongjiang/special/04238DR5/haerbin.js',
'http://bendi.news.163.com/jilin/special/04118E6D/center_news_cc.js',
'http://bendi.news.163.com/liaoning/special/04228EED/xinxiliu.js',
'http://bendi.news.163.com/neimengu/special/04138EHT/nmgxxl.js'
]
URLs = ['http://news.163.com/',
'http://news.163.com/rank/',
'http://news.163.com/photo/#Current',
'http://news.163.com/domestic/',
'http://news.163.com/world/',
'http://news.163.com/shehui/',
'http://data.163.com/special/datablog/',
'http://war.163.com/',
'http://news.163.com/air/',
'http://news.163.com/uav/',
'http://news.163.com/college',
'http://gov.163.com/',
'http://gongyi.163.com/',
'http://media.163.com/']
# config mongoDB
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'neteasenews'
# ,
MONGODB_TABLE_1 = 'article'
# ,
MONGODB_TABLE_2 = 'newsrank'
# ,
MONGODB_TABLE_3 = 'coldpage'
#
MONGODB_TABLE_4 = 'picture'
# config chromedriver:
prefs = {
'profile.default_content_setting_values': {
'images': 2,
# 'javascript': 2
# 'User-Agent': ua
}
}
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', prefs)
#
options.add_argument('--headless') | 54.745614 | 92 | 0.737863 |
58a7437e24bf8faeb840154530f279b8c6eee778 | 2,044 | py | Python | assignments/10_conserved/conserved.py | brianUA/be434-fall-2021 | bf0bb3f1c8129599818b98b7ee25b39aa926fd1f | [
"MIT"
] | null | null | null | assignments/10_conserved/conserved.py | brianUA/be434-fall-2021 | bf0bb3f1c8129599818b98b7ee25b39aa926fd1f | [
"MIT"
] | null | null | null | assignments/10_conserved/conserved.py | brianUA/be434-fall-2021 | bf0bb3f1c8129599818b98b7ee25b39aa926fd1f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : Brian Scott <brianscott@email.arizona.edu>
Date : 2021-11-09
Purpose: FInd the similarities between sequences.
"""
import argparse
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments.

    The single positional argument is opened for reading by argparse, so
    ``args.file`` is already a readable file handle.
    """
    parser = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'file', metavar='FILE', type=argparse.FileType('rt'),
        help='Input file')
    return parser.parse_args()
# --------------------------------------------------
def main():
    """Print the aligned sequences followed by a conservation line.

    Each line of the input file is one sequence.  After echoing every
    sequence, a footer line marks each column with ``|`` when all
    sequences share the same character there and ``X`` otherwise.

    Improvements over the previous version: the pylint-workaround
    ``enumerate``/``all_same(value)`` dance is gone, column iteration uses
    ``zip(*sequences)``, and output strings are built with ``str.join``
    instead of quadratic ``+`` concatenation.  ``zip`` also makes the
    function tolerate an empty input file (the original indexed
    ``sequences[0]`` unconditionally).
    """
    args = get_args()
    # One list of characters per input line (surrounding whitespace stripped).
    sequences = [list(line.strip()) for line in args.file]
    # zip(*sequences) walks the alignment column by column; for properly
    # aligned input (equal-length lines) every column is visited once.
    conservation = ''.join(
        '|' if all_same(column) else 'X' for column in zip(*sequences))
    for seq in sequences:
        print(''.join(seq))
    print(conservation)
def all_same(list1):
    """Return True when every item in *list1* is equal (True for empty)."""
    # Compare each neighbouring pair; an empty or single-item sequence
    # yields no pairs, so ``all`` returns True -- matching the original
    # lazy behaviour where list1[0] was never evaluated for empty input.
    return all(a == b for a, b in zip(list1, list1[1:]))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 27.621622 | 73 | 0.54501 |
58a9853e032d70b843b4faffe8df15e8491bea40 | 13,999 | py | Python | ldss_spec/tools/spec_red.py | dsanmartim/ldss_specred | 8274ce0cf0eddfc7649106d7b9d0ce733e69c722 | [
"MIT"
] | null | null | null | ldss_spec/tools/spec_red.py | dsanmartim/ldss_specred | 8274ce0cf0eddfc7649106d7b9d0ce733e69c722 | [
"MIT"
] | null | null | null | ldss_spec/tools/spec_red.py | dsanmartim/ldss_specred | 8274ce0cf0eddfc7649106d7b9d0ce733e69c722 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Loading a few python packages
import os
import glob
import warnings
from astropy import log
from astropy.io import fits as pyfits
import json
# Loading iraf packages
from pyraf import iraf
from pyraf.iraf import onedspec
from pyraf.iraf import twodspec, apextract
def wcalibrate_spec(self, obs_dict, disp_sol_content_dict, prefix='e_', out_prefix='w'):
    """
    Run IRAF routines to obtain the dispersion function from each arc file
    and apply it to the corresponding spectrum, optionally fitting the
    continuum of the calibrated spectrum afterwards.

    Args:
        obs_dict (dict): maps a target name to a dict with (at least) the
            keys 'spec' and 'arc' holding the spectrum / arc file stems.
        disp_sol_content_dict (dict): dispersion-solution content keyed by
            '<grism>_<filter>'.
        prefix (str): prefix of the input (extracted) FITS files.
        out_prefix (str): prefix prepended to wavelength-calibrated output.
    Returns:
        None.  Works by side effect: writes files via IRAF and to the
        'database' directory.
    """
    # Reset the IRAF task parameters before use.
    onedspec.identify.unlearn()
    onedspec.refspec.unlearn()
    onedspec.dispcor.unlearn()
    # NOTE(review): dict.iteritems() is Python 2 only -- this module will
    # not run under Python 3 as written.
    for target, p in obs_dict.iteritems():
        spec_in = prefix + p['spec'] + '.0001.fits'
        arc_ref = prefix + p['arc'] + '.0001.fits'
        # Checking spectral setup (spectrum and arc must match).
        self.check_spec_setup(spec_in, arc_ref)
        ##### Copying disp solution to 'database' dir
        # 1. Getting content dictionary with disp solutions of the corresponding arc
        slit, grism, filter = self.get_spec_setup(arc_ref)
        # Output wavelength range for this grism.
        w1, w2 = self.wrange[grism][0], self.wrange[grism][1]
        # Getting specific disp sol content of the corresponding arc file
        key = '{}_{}'.format(grism,filter)
        content_dict = disp_sol_content_dict[key]
        # 2. Writing solution to database dir
        self.write_modified_disp_sol_to_database(arc_ref, content_dict, database_dir=self.database_dir)
        ##### Running iraf to get updated disp sol
        print('\n')
        log.info('Finding wavelength solution to reference arc ' + arc_ref + '...')
        onedspec.identify(arc_ref, **self.identify_flags)
        print('\n')
        log.info('Associating the obtained wavelength solution with the spectrum of the star:')
        log.info(spec_in + ' -----> REFSPEC = ' + arc_ref + '.')
        onedspec.refspec(spec_in, reference=arc_ref, sort='', group='')
        print('\n')
        log.info('Applying wavelength calibration to ' + spec_in + '.')
        onedspec.dispcor(spec_in, out=out_prefix + spec_in, w1=w1, w2=w2)
        if self.fit_continuum:
            onedspec.continuum.unlearn()
            print('\n')
            log.info('Fitting continuum to ' + out_prefix + spec_in + '.')
            # NOTE: 'input' and 'output' shadow builtins but are only used
            # as IRAF keyword values here.
            input = out_prefix + spec_in
            output = 'cont_' + out_prefix + spec_in
            onedspec.continuum(input=input, output=output, type='fit', function='legendre', order=15, niterate=10,
                               low_reject=2.0, high_reject=0.0)
| 37.733154 | 129 | 0.572541 |
58a9c2475f1d862dde62daacb24c84dc06c0e208 | 1,667 | py | Python | lookupService/helpers/job_scheduler.py | selfjell/MirMachine | b61b555e7d0942f6fdcc53634469fffea2b92f4c | [
"MIT"
] | 1 | 2021-11-11T12:47:20.000Z | 2021-11-11T12:47:20.000Z | lookupService/helpers/job_scheduler.py | selfjell/MirMachine | b61b555e7d0942f6fdcc53634469fffea2b92f4c | [
"MIT"
] | null | null | null | lookupService/helpers/job_scheduler.py | selfjell/MirMachine | b61b555e7d0942f6fdcc53634469fffea2b92f4c | [
"MIT"
] | null | null | null | from ..models import Job
from engine.scripts.mirmachine_args import run_mirmachine
from .socket_helper import announce_status_change, announce_queue_position, announce_initiation, announce_completed
from .maintainer import clean_up_temporary_files
from django.utils import timezone
from MirMachineWebapp import user_config as config
| 30.309091 | 115 | 0.718056 |
542d0bbb398d02e9717cfe574c3d52048a5a205b | 836 | py | Python | Exercicios Colecoes Python/exercicio 33 - secao 7 - p1.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | Exercicios Colecoes Python/exercicio 33 - secao 7 - p1.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | Exercicios Colecoes Python/exercicio 33 - secao 7 - p1.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | """
33 - Faca um programa que leia um vetor de 15 posicoes e o compacte, ou seja, elimine as posicoes com valor zero.
Para isso, todos os elementos a frente do valor zero, devem ser movidos uma posicao para tras no vetor.
"""
"""
vetor = []
count = 0
for x in range(1, 16):
vetor.append(int(input(f'Digite o {x}/15: ')))
n = len(vetor)
for i in range(n):
if vetor[i] != 0:
vetor[count] = vetor[i]
count += 1
while n > count:
vetor[count] = 0
count += 1
print(vetor) # [5, 6, 9, 8, 10, 15, 33, 22, 66, 99, 10, 100, 0, 0, 0]
Este os zeros vao para tras
"""
from itertools import compress, repeat, chain  # NOTE(review): repeat and chain appear unused
vetor = []
# Read 15 integers from the user.
for x in range(1, 16):
    vetor.append(int(input(f'Digite o {x}/15: ')))
# Using list.count and itertools.compress: put one 0 up front for every
# zero in the input, then append the truthy (non-zero) values in their
# original order.  NOTE(review): the exercise text asks for zeros to be
# pushed toward the end -- this variant puts them at the front; confirm
# which behaviour is intended.
y = [0] * vetor.count(0)
y.extend(compress(vetor, vetor))
print(y)
| 22 | 113 | 0.626794 |
542d7e740031b1e39b6ee826c5f6675358cb832c | 533 | py | Python | multimedia/Pygame/02-plot_pixels.py | vicente-gonzalez-ruiz/python-tutorial | e6a79510a0b3663786d6476a40e79fc8e8726f61 | [
"CC0-1.0"
] | 4 | 2017-03-06T09:49:11.000Z | 2019-10-16T00:09:38.000Z | multimedia/Pygame/02-plot_pixels.py | vicente-gonzalez-ruiz/python-tutorial | e6a79510a0b3663786d6476a40e79fc8e8726f61 | [
"CC0-1.0"
] | null | null | null | multimedia/Pygame/02-plot_pixels.py | vicente-gonzalez-ruiz/python-tutorial | e6a79510a0b3663786d6476a40e79fc8e8726f61 | [
"CC0-1.0"
] | 7 | 2017-11-02T11:00:30.000Z | 2020-01-31T22:41:27.000Z | import pygame
import my_colors as color
pygame.init()
screen_width = 800
screen_height = 600
screen_size = (screen_width, screen_height)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Search the green pixel at the coordinates (x=10, y=100)")
running = True
while running:
screen.set_at((1, 1), color.white)
screen.set_at((10, 100), color.green)
pygame.display.update()
event = pygame.event.wait()
if event.type == pygame.QUIT:
running = False
pygame.quit()
print("Goodbye!")
| 24.227273 | 85 | 0.718574 |
542ed8915929d9082655f10271231d6e8237f5b5 | 2,848 | py | Python | bin/models_vs_uniprot_check/ViPhOG_chunks_rank_summ.py | alexcorm/emg-viral-pipeline | f367002f0e1e375840e5696323bde65f7accb31f | [
"Apache-2.0"
] | 30 | 2020-05-18T14:02:34.000Z | 2022-03-16T20:04:25.000Z | bin/models_vs_uniprot_check/ViPhOG_chunks_rank_summ.py | lynceuslq/emg-viral-pipeline | 53a99b84ed93428ee88d61e529bcf6799f5eec94 | [
"Apache-2.0"
] | 45 | 2020-04-30T09:45:03.000Z | 2022-03-21T09:10:21.000Z | bin/models_vs_uniprot_check/ViPhOG_chunks_rank_summ.py | lynceuslq/emg-viral-pipeline | 53a99b84ed93428ee88d61e529bcf6799f5eec94 | [
"Apache-2.0"
] | 12 | 2020-06-02T12:43:49.000Z | 2022-02-22T13:09:13.000Z | #!/usr/bin/env python3
import os
import re
import glob
import sys
import operator
import ast
import argparse
###############################################################################################
# This script was written as part of the analysis conducted on the output generated by #
# hmmsearch, when the ViPhOG database was searched against UniProtKB. The ViPhOG profile HMM #
# files were stored in different directories, each containing maximum 2000 files and named #
# using a sequential number from 1 to 16 (hmm1...hmm16). For each one of these a corresponding#
# output directory was generated, each containing a domtbl output file for each of the files #
# stored in the hmm directories. The output directories were named using the same sequential #
# numbers as the directories storing the hmm files (hmm1domtbl...hmm16domtbl). #
###############################################################################################
# Command-line interface: one mandatory summary-chunk file.
parser = argparse.ArgumentParser(description = "Step 3: Generate summary tables for each taxonomic rank. Make sure to run the script from within the directory containing the domtbl output directories (check comment block for guidance) and following the scripts that execute Step 1 and Step 2")
parser.add_argument("-i", "--input", dest = "input_file", help = "Path to summary chunk file", required = True)
if len(sys.argv) == 1:
    # No arguments at all: print usage instead of letting argparse error out.
    parser.print_help()
else:
    args = parser.parse_args()
    summ_file = args.input_file
    with open(summ_file) as input_file:
        # Columns 0 and 1 of the header are non-rank columns; every column
        # from index 2 onward is a taxonomic rank (index kept with its name).
        header_line = input_file.readline().rstrip()
        taxa_ranks = []
        for x,y in enumerate(header_line.split("\t")):
            if x >= 2:
                taxa_ranks.append((x, y))
        # One output TSV per taxonomic rank.
        for x,y in taxa_ranks:
            # Rewind and skip the header before each pass over the rows.
            input_file.seek(0)
            next(input_file)
            with open(f"{os.path.splitext(summ_file)[0]}_{y}.tsv", "w") as output_file:
                output_file.write("ViPhOG\t#_taxons\tMost_significant\tMax_min_score\tOverlapping_taxons\tNext_max_score\n")
                for line in input_file:
                    line = line.rstrip()
                    viphog_id = line.split("\t")[0]
                    # Column x holds a Python-literal list of hits --
                    # presumably tuples whose indices 0, 2 and 3 are taxon
                    # name, max score and min score; confirm the format
                    # produced by Step 2.
                    rank_hits = ast.literal_eval(line.split("\t")[x])
                    total_hits = len(rank_hits)
                    # Empty strings are written when a field is not applicable.
                    most_significant = ""
                    score_range = ""
                    overlap = ""
                    next_max_score = ""
                    if total_hits > 0:
                        # Sort hits by the score at index 2, best first.
                        rank_hits_sorted = sorted(rank_hits, key = operator.itemgetter(2), reverse = True)
                        most_significant = rank_hits_sorted[0][0]
                        # (max score, min score) of the most significant hit.
                        score_range = (rank_hits_sorted[0][2], rank_hits_sorted[0][3])
                        if total_hits > 1:
                            # Other taxa whose top score reaches into the best
                            # hit's score range count as overlapping.
                            overlap = []
                            for elem in rank_hits_sorted[1:]:
                                if elem[2] >= score_range[1]:
                                    overlap.append((elem[0], elem[2]))
                            if len(overlap) < 1:
                                overlap = ""
                            next_max_score = rank_hits_sorted[1][2]
                    output_file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(viphog_id, total_hits, most_significant, score_range, overlap, next_max_score))
54318f46c52690013bfe7cc4791a2d7dcc84bf04 | 6,349 | py | Python | bika/lims/permissions.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/permissions.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/permissions.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | """ All permissions are defined here.
They are also defined in permissions.zcml.
The two files must be kept in sync.
bika.lims.__init__ imports * from this file, so
bika.lims.PermName or bika.lims.permissions.PermName are
both valid.
"""
from Products.CMFCore.permissions import AddPortalContent
# Add Permissions:
# ----------------
AddAnalysis = 'BIKA: Add Analysis'
AddAnalysisProfile = 'BIKA: Add AnalysisProfile'
AddAnalysisRequest = 'BIKA: Add Analysis Request'
AddAnalysisSpec = 'BIKA: Add AnalysisSpec'
AddAttachment = 'BIKA: Add Attachment'
AddARTemplate = 'BIKA: Add ARTemplate'
AddBatch = 'BIKA: Add Batch'
AddClient = 'BIKA: Add Client'
AddClientFolder = 'BIKA: Add ClientFolder'
AddInvoice = 'BIKA: Add Invoice'
AddMethod = 'BIKA: Add Method'
AddMultifile = 'BIKA: Add Multifile'
AddPricelist = 'BIKA: Add Pricelist'
AddProduct = 'BIKA: Add Product'
AddProductCategory = 'BIKA: Add ProductCategory'
AddStockItem = 'BIKA: Add StockItem'
AddSupplyOrder = 'BIKA: Add SupplyOrder'
AddInventoryOrder = 'BIKA: Add Inventory Order'
AddSample = 'BIKA: Add Sample'
AddSampleMatrix = 'BIKA: Add SampleMatrix'
AddSamplePartition = 'BIKA: Add SamplePartition'
AddSamplePoint = 'BIKA: Add SamplePoint'
AddStorageLocation = 'BIKA: Add StorageLocation'
AddSamplingDeviation = 'BIKA: Add SamplingDeviation'
AddSamplingRound = 'BIKA: Add SamplingRound'
AddSRTemplate = 'BIKA: Add SRTemplate'
AddStorageLevel = 'BIKA: Add StorageLevel'
AddStorageUnit = 'BIKA: Add StorageUnit'
AddSubGroup = 'BIKA: Add Sub-group'
# Default Archetypes Add Permission
ADD_CONTENT_PERMISSION = AddPortalContent
# Add Permissions for specific types, if required
ADD_CONTENT_PERMISSIONS = {
'ARAnalysisSpec': AddAnalysisSpec,
'AnalysisProfile': AddAnalysisProfile,
'Analysis': AddAnalysis,
'AnalysisRequest': AddAnalysisRequest,
'Attachment': AddAttachment,
'Batch': AddBatch,
'Client': AddClient,
'Invoice': AddInvoice,
'Method': AddMethod,
'Multifile': AddMultifile,
'SupplyOrder': AddSupplyOrder,
'Order': AddInventoryOrder,
'Sample': AddSample,
'SampleMatrix': AddSampleMatrix,
'SamplePartition': AddSamplePartition,
'SamplingDeviation': AddSamplingDeviation,
'SamplingRound': AddSamplingRound,
'SubGroup': AddSubGroup,
'StorageLevel': AddStorageLevel,
'StorageUnit': AddStorageUnit,
}
# Very Old permissions:
# ---------------------
ManageBika = 'BIKA: Manage Bika'
DispatchOrder = 'BIKA: Dispatch Order'
ManageAnalysisRequests = 'BIKA: Manage Analysis Requests'
ManageSamples = 'BIKA: Manage Samples'
ManageSuppliers = 'BIKA: Manage Reference Suppliers'
ManageReference = 'BIKA: Manage Reference'
PostInvoiceBatch = 'BIKA: Post Invoice batch'
ManagePricelists = 'BIKA: Manage Pricelists'
# This allows to edit all client fields, and perform admin tasks on Clients.
ManageClients = 'BIKA: Manage Clients'
# this is for creating and transitioning worksheets
ManageWorksheets = 'BIKA: Manage Worksheets'
# this is for adding/editing/exporting analyses on worksheets
EditWorksheet = 'BIKA: Edit Worksheet'
RejectWorksheet = 'BIKA: Reject Worksheet'
ImportInstrumentResults = "BIKA: Import Instrument Results"
AccessJSONAPI = 'BIKA: Access JSON API'
# New or changed permissions:
# ---------------------------
DispatchInventoryOrder = 'BIKA: Dispatch Inventory Order'
ReceiveInventoryOrder = 'BIKA: Receive Inventory Order'
StoreInventoryOrder = 'BIKA: Store Inventory Order'
# Sample lifecycle transitions.
SampleSample = 'BIKA: Sample Sample'
PreserveSample = 'BIKA: Preserve Sample'
ReceiveSample = 'BIKA: Receive Sample'
ExpireSample = 'BIKA: Expire Sample'
DisposeSample = 'BIKA: Dispose Sample'
ImportAnalysis = 'BIKA: Import Analysis'
Retract = "BIKA: Retract"
Verify = 'BIKA: Verify'
VerifyOwnResults = 'BIKA: Verify own results'
Publish = 'BIKA: Publish'
EditSample = 'BIKA: Edit Sample'
# NOTE: the redundant ``EditAR = 'BIKA: Edit AR'`` assignment that used to
# live here has been removed; EditAR is defined (with the same value) in
# the documented "Edit AR" section further down this module.
ResultsNotRequested = 'BIKA: Results not requested'
ManageInvoices = 'BIKA: Manage Invoices'
ViewResults = 'BIKA: View Results'
EditResults = 'BIKA: Edit Results'
EditFieldResults = 'BIKA: Edit Field Results'
ViewRetractedAnalyses = 'BIKA: View Retracted Analyses'
CancelAndReinstate = 'BIKA: Cancel and reinstate'
# For adding login credentials to Contacts.
ManageLoginDetails = 'BIKA: Manage Login Details'
Assign = 'BIKA: Assign analyses'
Unassign = 'BIKA: Unassign analyses'
# Field permissions
EditARContact = "BIKA: Edit AR Contact"
ViewLogTab = 'BIKA: View Log Tab'
# Edit AR
# -----------------------------------------------------------------------------
# Allows to set values for AR fields in AR view
#
# Only takes effect if:
# - The AR's 'cancellation_state' is 'active'
# - The AR's 'review_state' is in:
# 'sample_registered', 'to_be_sampled', 'sampled', 'to_be_preserved',
# 'sample_due', 'sample_received', 'to_be_verified', 'attachment_due'
EditAR = 'BIKA: Edit AR'
# Edit Sample Partition
# -----------------------------------------------------------------------------
# Allows to set a Container and/or Preserver for a Sample Partition.
# See AR view: Sample Partitions table and Sample Partitions tab
#
# Only takes effect if:
# - The Sample's 'cancellation_state' is 'active'
# - The Sample's 'review_state' is in:
# 'sample_registered', 'to_be_sampled', 'sampled', 'to_be_preserved',
# 'sample_due', 'sample_received', 'to_be_verified', 'attachment_due'
EditSamplePartition = 'BIKA: Edit Sample Partition'
# Edit Client
# ----------------------------------------------
# Allows access to 'Edit' and 'Contacts' tabs from Client View
EditClient = 'BIKA: Edit Client'
# Manage Supply Orders
# ----------------------------------------------
# Allows access to 'Supply Orders' tab in Client context
ManageSupplyOrders = 'BIKA: Manage Supply Orders'
# Batch-specific permissions
# ----------------------------------------------
EditBatch = 'BIKA: Edit Batch'
CloseBatch = 'BIKA: Close Batch'
ReopenBatch = 'BIKA: Reopen Batch'
# Sampling Round permissions
# --------------------------
CloseSamplingRound = 'BIKA: Close SamplingRound'
ReopenSamplingRound = 'BIKA: Reopen SamplingRound'
# Manage AR Imports
# ----------------------------------------------
ManageARImport = 'BIKA: Manage ARImport'
# Manage AR Priorities
# ----------------------------------------------
ManageARPriority = 'BIKA: Manage ARPriority'
| 34.505435 | 79 | 0.695228 |
5431f5aaf571f8d48be62c018da65e8a8b984c28 | 5,140 | py | Python | python/venv/lib/python2.7/site-packages/openstack/tests/unit/telemetry/v2/test_sample.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/openstack/tests/unit/telemetry/v2/test_sample.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/openstack/tests/unit/telemetry/v2/test_sample.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from openstack.telemetry.v2 import sample
SAMPLE = {
'id': None,
'metadata': {'1': 'one'},
'meter': '2',
'project_id': '3',
'recorded_at': '4',
'resource_id': '5',
'source': '6',
'timestamp': '7',
'type': '8',
'unit': '9',
'user_id': '10',
'volume': '11.1',
}
OLD_SAMPLE = {
'counter_name': '1',
'counter_type': '2',
'counter_unit': '3',
'counter_volume': '4',
'message_id': None,
'project_id': '5',
'recorded_at': '6',
'resource_id': '7',
'resource_metadata': '8',
'source': '9',
'timestamp': '10',
'user_id': '11',
}
| 36.978417 | 75 | 0.637743 |
54324dc90f9df188cfe21f89b7c0b9336f381fe0 | 7,645 | py | Python | data_convert/convert_text_to_tree.py | wlof-2/Text2Relation | a1321e3627fee4714d2c39c964d93d12d0802467 | [
"MIT"
] | null | null | null | data_convert/convert_text_to_tree.py | wlof-2/Text2Relation | a1321e3627fee4714d2c39c964d93d12d0802467 | [
"MIT"
] | null | null | null | data_convert/convert_text_to_tree.py | wlof-2/Text2Relation | a1321e3627fee4714d2c39c964d93d12d0802467 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import json
from collections import Counter, defaultdict
from data_convert.format.text2tree import Entity_Type, Text2Tree
from data_convert.task_format.event_extraction import Event, DyIEPP, Conll04
from data_convert.utils import read_file, check_output, data_counter_to_table, get_schema, output_schema
from nltk.corpus import stopwords
Ace_Entity_Type = {"ORG": "<ORG>", "VEH": "<VEH>", "WEA": "<WEA>",
"LOC": "<LOC>", "FAC": "<FAC>", "PER": "<PER>", "GPE": "<GPE>"}
Sci_Entity_Type = {'Metric': '<Metric>', 'Task': '<Task>',
'OtherScientificTerm': '<OtherScientificTerm>', 'Generic': '<Generic>', 'Material': '<Material>', 'Method': '<Method>'}
Conll04_Type = {'Org': '<Org>', 'Peop': '<Peop>',
'Other': '<Other>', 'Loc': '<Loc>'}
english_stopwords = set(stopwords.words('english') + ["'s", "'re", "%"])
if __name__ == "__main__":
type_format_name = 'subtype'
convert_ace2005_event("data/new_text2tree/one_ie_ace2005_%s" % type_format_name,
type_format=type_format_name,
ignore_nonevent=False,
mark_tree=False
)
# """
# convert_sci_event("data/new_text2tree/sci_relation_%s" % type_format_name,
# type_format=type_format_name,
# ignore_nonevent=False,
# mark_tree=False)
# """
# convert_conll04_relation("data/new_text2tree/conll04_relation_%s" % type_format_name,
# type_format=type_format_name,
# ignore_nonevent=False,
# mark_tree=False)
| 45.778443 | 138 | 0.597515 |
5432a871244c2f1064853af01dd1344e9304f2e3 | 1,246 | py | Python | arachnado/utils/spiders.py | wigginzz/arachnado | 8de92625262958e886263b4ccb189f4fc62d7400 | [
"MIT"
] | 2 | 2017-12-26T14:50:14.000Z | 2018-06-12T07:04:08.000Z | arachnado/utils/spiders.py | wigginzz/arachnado | 8de92625262958e886263b4ccb189f4fc62d7400 | [
"MIT"
] | null | null | null | arachnado/utils/spiders.py | wigginzz/arachnado | 8de92625262958e886263b4ccb189f4fc62d7400 | [
"MIT"
] | null | null | null | from scrapy.utils.misc import walk_modules
from scrapy.utils.spider import iter_spider_classes
def get_spider_cls(url, spider_packages, default):
"""
Return spider class based on provided url.
:param url: if it looks like `spider://spidername` it tries to load spider
named `spidername`, otherwise it returns default spider class
:param spider_packages: a list of package names that will be searched for
spider classes
:param default: the class that is returned when `url` doesn't start with
`spider://`
"""
if url.startswith('spider://'):
spider_name = url[len('spider://'):]
return find_spider_cls(spider_name, spider_packages)
return default
def find_spider_cls(spider_name, spider_packages):
"""
Find spider class which name is equal to `spider_name` argument
:param spider_name: spider name to look for
:param spider_packages: a list of package names that will be searched for
spider classes
"""
for package_name in spider_packages:
for module in walk_modules(package_name):
for spider_cls in iter_spider_classes(module):
if spider_cls.name == spider_name:
return spider_cls
| 35.6 | 78 | 0.690209 |
543307112090d54acedcff9238e2cea7185b6c19 | 1,165 | py | Python | Social_Encoders.py | Haroon96/GraphRec-WWW19 | fc28eee70fad927d761c15cab97de52f5955dcfd | [
"MIT"
] | null | null | null | Social_Encoders.py | Haroon96/GraphRec-WWW19 | fc28eee70fad927d761c15cab97de52f5955dcfd | [
"MIT"
] | null | null | null | Social_Encoders.py | Haroon96/GraphRec-WWW19 | fc28eee70fad927d761c15cab97de52f5955dcfd | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
| 32.361111 | 103 | 0.654936 |
5433996009680b5160e896f44a3bff1c9d65a2bb | 3,280 | py | Python | deephub/utils/__main__.py | deeplab-ai/deephub | b1d271436fab69cdfad14f19fa2e29c5338f18d6 | [
"Apache-2.0"
] | 8 | 2019-10-17T12:46:13.000Z | 2020-03-12T08:09:40.000Z | deephub/utils/__main__.py | deeplab-ai/deephub | b1d271436fab69cdfad14f19fa2e29c5338f18d6 | [
"Apache-2.0"
] | 12 | 2019-10-22T13:11:56.000Z | 2022-02-10T00:23:30.000Z | deephub/utils/__main__.py | deeplab-ai/deephub | b1d271436fab69cdfad14f19fa2e29c5338f18d6 | [
"Apache-2.0"
] | 1 | 2019-10-17T13:21:27.000Z | 2019-10-17T13:21:27.000Z | import click
import time
from deephub.common.io import resolve_glob_pattern
from deephub.models.feeders.tfrecords.meta import generate_fileinfo, get_fileinfo, TFRecordValidationError, \
TFRecordInfoMissingError
| 33.814433 | 115 | 0.633232 |
543471083e8ed6e6fd0d08082e7de83061292ab1 | 10,072 | py | Python | utils_mit_im.py | putama/visualcomposition | ada3d8e71b79a5f3e239718f3cdac58eca5e1327 | [
"MIT"
] | null | null | null | utils_mit_im.py | putama/visualcomposition | ada3d8e71b79a5f3e239718f3cdac58eca5e1327 | [
"MIT"
] | null | null | null | utils_mit_im.py | putama/visualcomposition | ada3d8e71b79a5f3e239718f3cdac58eca5e1327 | [
"MIT"
] | null | null | null | import numpy as np
import cPickle
import os
from scipy.io import loadmat
import time
import h5py
import json
import copy
import bz2
#extension with "." e.g. .jpg
def mkdir_if_missing(output_dir):
"""
def mkdir_if_missing(output_dir)
"""
if not os.path.exists(output_dir):
try:
os.makedirs(output_dir)
return True;
except: #generally happens when many processes try to make this dir
return False;
def save_variables(pickle_file_name, var, info, overwrite = False):
"""
def save_variables(pickle_file_name, var, info, overwrite = False)
"""
fext = os.path.splitext(pickle_file_name)[1]
if fext =='.h5':
return save_variables_h5(pickle_file_name, var, info, overwrite);
elif fext == '.pkl' or fext == '.pklz':
if os.path.exists(pickle_file_name) and overwrite == False:
raise Exception('{:s} exists and over write is false.'.format(pickle_file_name))
if info is not None:
# Construct the dictionary
assert(type(var) == list); assert(type(info) == list);
d = {}
for i in xrange(len(var)):
d[info[i]] = var[i]
else: #we have the dictionary in var
d = var;
if fext == '.pkl':
with open(pickle_file_name, 'wb') as f:
cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL)
else:
with bz2.BZ2File(pickle_file_name, 'w') as f:
cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL)
else:
raise Exception('{:s}: extension unknown'.format(fext))
def load_variables(pickle_file_name):
"""
d = load_variables(pickle_file_name)
Output:
d is a dictionary of variables stored in the pickle file.
"""
fext = os.path.splitext(pickle_file_name)[1]
if fext =='.h5':
return load_variablesh5(pickle_file_name);
elif fext == '.pkl' or fext == '.pklz':
if os.path.exists(pickle_file_name):
if fext == '.pkl':
with open(pickle_file_name, 'rb') as f:
d = cPickle.load(f)
else:
with bz2.BZ2File(pickle_file_name, 'r') as f:
d = cPickle.load(f)
return d
else:
raise Exception('{:s} does not exists.'.format(pickle_file_name))
elif fext == '.json':
with open(pickle_file_name, 'r') as fh:
data = json.load(fh)
return data
else:
raise Exception('{:s}: extension unknown'.format(fext))
#wrappers for load_variables and save_variables
def calc_pr_ovr_noref(counts, out):
"""
[P, R, score, ap] = calc_pr_ovr(counts, out, K)
Input :
counts : number of occurrences of this word in the ith image
out : score for this image
Output :
P, R : precision and recall
score : score which corresponds to the particular precision and recall
ap : average precision
"""
#binarize counts
out = out.astype(np.float64)
counts = np.array(counts > 0, dtype=np.float32);
tog = np.hstack((counts[:,np.newaxis].astype(np.float64), out[:, np.newaxis].astype(np.float64)))
ind = np.argsort(out)
ind = ind[::-1]
score = np.array([tog[i,1] for i in ind])
sortcounts = np.array([tog[i,0] for i in ind])
tp = sortcounts;
fp = sortcounts.copy();
for i in xrange(sortcounts.shape[0]):
if sortcounts[i] >= 1:
fp[i] = 0.;
elif sortcounts[i] < 1:
fp[i] = 1.;
tp = np.cumsum(tp)
fp = np.cumsum(fp)
# P = np.cumsum(tp)/(np.cumsum(tp) + np.cumsum(fp));
P = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
numinst = np.sum(counts);
R = tp/numinst
ap = voc_ap(R,P)
return P, R, score, ap | 32.807818 | 141 | 0.652502 |
5435607e763096b9e0e81fbf68d44b9c31b6852e | 1,085 | py | Python | python_teste/python_aulas/aula_94.py | BrunoDantasMoreira/projectsPython | bd73ab0b3c067456407f227ed2ece42e7f21ddfc | [
"MIT"
] | 1 | 2020-07-27T14:18:08.000Z | 2020-07-27T14:18:08.000Z | python_teste/python_aulas/aula_94.py | BrunoDantasMoreira/projectsPython | bd73ab0b3c067456407f227ed2ece42e7f21ddfc | [
"MIT"
] | null | null | null | python_teste/python_aulas/aula_94.py | BrunoDantasMoreira/projectsPython | bd73ab0b3c067456407f227ed2ece42e7f21ddfc | [
"MIT"
] | null | null | null | dict = {}
lista = []
soma = 0
while True:
dict['nome'] = str(input('Nome: ')).capitalize()
dict['sexo'] = str(input('Sexo: ')).strip().upper()[0]
while dict['sexo'] not in 'MF':
print('ERRO! Por favor, digite apenas M ou F')
dict['sexo'] = str(input('Sexo: ')).strip().upper()[0]
dict['idade'] = int(input('Idade: '))
soma += dict['idade']
lista.append(dict.copy())
opo = str(input('Quer continuar? ')).strip().upper()[0]
while opo not in 'SN':
print('ERRO! Responda apenas S ou N.')
opo = str(input('Quer continuar? ')).strip().upper()[0]
if opo == 'N':
break
print('-='*30)
print(f'A) Ao todo temos {len(lista)} pessoas cadastradas.')
media = soma / len(lista)
print(f'B) A media de idade de {media:5.2f} anos')
print('C) As mulheres cadastradas foram ', end='')
for p in lista:
if p['sexo'] == 'F':
print(f'{p["nome"]}', end=' ')
print()
print('D) As pessoas com idade maior que a mdia so ', end='')
for c in lista:
if c['idade'] > media:
print(f'{c["nome"]}', end=' ')
| 33.90625 | 65 | 0.562212 |
543805ee596eba6c41f93710a63dc5eaf28196da | 7,894 | py | Python | nlp/layers/linears.py | zhihao-chen/NLP-experiments | c7512276050f5b8489adb4c745fa970ea8119646 | [
"MIT"
] | 4 | 2021-11-10T03:49:28.000Z | 2022-03-24T02:18:44.000Z | nlp/layers/linears.py | zhihao-chen/NLP-experiments | c7512276050f5b8489adb4c745fa970ea8119646 | [
"MIT"
] | null | null | null | nlp/layers/linears.py | zhihao-chen/NLP-experiments | c7512276050f5b8489adb4c745fa970ea8119646 | [
"MIT"
] | 1 | 2021-11-14T18:01:18.000Z | 2021-11-14T18:01:18.000Z | # -*- coding: utf8 -*-
"""
======================================
Project Name: NLP
File Name: linears
Author: czh
Create Date: 2021/11/15
--------------------------------------
Change Activity:
======================================
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.nn.parameter import Parameter
| 38.8867 | 117 | 0.673676 |
5438824c4ced393aa643d5e74bfabb01555d5d5c | 2,037 | py | Python | components/siren.py | TalaoDAO/ecole42 | 2236f24527966195c953f222f9715ee967348b0f | [
"Apache-2.0"
] | 1 | 2021-09-22T16:30:57.000Z | 2021-09-22T16:30:57.000Z | components/siren.py | TalaoDAO/credential-repository | d36c694d9e90ead8a35bd8cc5be47c6d951474ba | [
"Apache-2.0"
] | null | null | null | components/siren.py | TalaoDAO/credential-repository | d36c694d9e90ead8a35bd8cc5be47c6d951474ba | [
"Apache-2.0"
] | null | null | null | import requests
| 55.054054 | 370 | 0.571919 |
5438db8d908a649df431fff16b0d49559bcdf6d6 | 2,036 | py | Python | Week 2/medt_opdracht_9.py | zowie93/ISCRIPT | fa3e5122be8ef47b23c23554ec9e1c04b37da562 | [
"MIT"
] | null | null | null | Week 2/medt_opdracht_9.py | zowie93/ISCRIPT | fa3e5122be8ef47b23c23554ec9e1c04b37da562 | [
"MIT"
] | null | null | null | Week 2/medt_opdracht_9.py | zowie93/ISCRIPT | fa3e5122be8ef47b23c23554ec9e1c04b37da562 | [
"MIT"
] | null | null | null | """
Opdracht 9 - Loonbrief
https://dodona.ugent.be/nl/exercises/990750894/
"""
# functie voor start amount
# functie voor salaris
# functie voor gemiddeld salaris
if __name__ == '__main__':
main()
| 24.238095 | 71 | 0.651768 |
543993f4662d66952cafe8284d07a22ac01ccee7 | 1,845 | py | Python | 4.1.1-simple-object-tracking-video.py | CleverYh/opencv_py | 20b28e8ef20fa3015f4f7c20ed69fed954c16805 | [
"MIT"
] | 2 | 2020-04-05T13:44:13.000Z | 2020-07-06T08:53:58.000Z | 4.1.1-simple-object-tracking-video.py | CleverYh/opencv_py | 20b28e8ef20fa3015f4f7c20ed69fed954c16805 | [
"MIT"
] | null | null | null | 4.1.1-simple-object-tracking-video.py | CleverYh/opencv_py | 20b28e8ef20fa3015f4f7c20ed69fed954c16805 | [
"MIT"
] | null | null | null | # coding: utf-8
from cv2 import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
# Take each frame
_, frame = cap.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
cv2.imshow('frame',frame)
cv2.imshow('mask',mask)
cv2.imshow('res',res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
# OBJECT TRACKING
# Take each frame of the video
# Convert from BGR to HSV color-space
# We threshold the HSV image for a range of blue color
# Now extract the blue object alone, we can do whatever on that image we want.
# HOW TO FINDHSV VALUES TO TRACK?
# It is very simple and you can use the same function, cv2.cvtColor(). Instead of passing an image, you just pass the BGR values you want. For example, to find the HSV value of Green, try following commands in Python terminal:
# >>> green = np.uint8([[[0,255,0 ]]])
# >>> hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
# >>> print hsv_green
# [[[ 60 255 255]]]
# Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively. Apart from this method, you can use any image editing tools like GIMP or any online converters to find these values, but dont forget to adjust the HSV ranges.
# Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively. Apart from this method, you can use any image editing tools like GIMP or any online converters to find these values, but dont forget to adjust the HSV ranges. | 37.653061 | 256 | 0.701897 |
5439f19ce894429f825edd092b433b960bae49d4 | 9,411 | py | Python | src/peering/azext_peering/custom.py | michimune/azure-cli-extensions | 697e2c674e5c0825d44c72d714542fe01331e107 | [
"MIT"
] | 1 | 2022-03-22T15:02:32.000Z | 2022-03-22T15:02:32.000Z | src/peering/azext_peering/custom.py | michimune/azure-cli-extensions | 697e2c674e5c0825d44c72d714542fe01331e107 | [
"MIT"
] | 1 | 2021-02-10T22:04:59.000Z | 2021-02-10T22:04:59.000Z | src/peering/azext_peering/custom.py | michimune/azure-cli-extensions | 697e2c674e5c0825d44c72d714542fe01331e107 | [
"MIT"
] | 1 | 2021-06-03T19:31:10.000Z | 2021-06-03T19:31:10.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
import json
| 42.013393 | 154 | 0.604824 |
543ac48e108696b4125575c0e8b5fa9098b4ddb3 | 830 | py | Python | votes/migrations/0004_team.py | aiventimptner/horizon | 6e2436bfa81cad55fefd4c0bb67df3c36a9b6deb | [
"MIT"
] | null | null | null | votes/migrations/0004_team.py | aiventimptner/horizon | 6e2436bfa81cad55fefd4c0bb67df3c36a9b6deb | [
"MIT"
] | 1 | 2021-06-10T19:59:07.000Z | 2021-06-10T19:59:07.000Z | votes/migrations/0004_team.py | aiventimptner/horizon | 6e2436bfa81cad55fefd4c0bb67df3c36a9b6deb | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-30 00:27
from django.conf import settings
from django.db import migrations, models
| 31.923077 | 114 | 0.609639 |
543ac83c6ae50796c548f885ed09b3775131b174 | 576 | py | Python | Python/Day 21/score.py | Aswinpkrishnan94/Fabulous-Python | bafba6d5b3889008299c012625b4a9e1b63b1d44 | [
"MIT"
] | null | null | null | Python/Day 21/score.py | Aswinpkrishnan94/Fabulous-Python | bafba6d5b3889008299c012625b4a9e1b63b1d44 | [
"MIT"
] | null | null | null | Python/Day 21/score.py | Aswinpkrishnan94/Fabulous-Python | bafba6d5b3889008299c012625b4a9e1b63b1d44 | [
"MIT"
] | null | null | null | from turtle import Turtle
FONT = ("Arial", 10, "normal")
ALIGN = "center" | 23.04 | 67 | 0.553819 |
543c4f51f177e890cbcf4f4101beb26f2ee15486 | 81 | py | Python | tests/integration/testdata/buildcmd/PyLayerMake/layer.py | renanmontebelo/aws-sam-cli | b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea | [
"BSD-2-Clause",
"Apache-2.0"
] | 2,959 | 2018-05-08T21:48:56.000Z | 2020-08-24T14:35:39.000Z | tests/integration/testdata/buildcmd/PyLayerMake/layer.py | renanmontebelo/aws-sam-cli | b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea | [
"BSD-2-Clause",
"Apache-2.0"
] | 1,469 | 2018-05-08T22:44:28.000Z | 2020-08-24T20:19:24.000Z | tests/integration/testdata/buildcmd/PyLayerMake/layer.py | renanmontebelo/aws-sam-cli | b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea | [
"BSD-2-Clause",
"Apache-2.0"
] | 642 | 2018-05-08T22:09:19.000Z | 2020-08-17T09:04:37.000Z | import numpy
| 13.5 | 45 | 0.617284 |
543cd354a10448d8c328281db21e317c63dd0072 | 5,520 | py | Python | bcbio/qc/coverage.py | markdunning/bcbio-nextgen | 37b69efcc5b2b3713b8d5cd207cece4cb343380d | [
"MIT"
] | null | null | null | bcbio/qc/coverage.py | markdunning/bcbio-nextgen | 37b69efcc5b2b3713b8d5cd207cece4cb343380d | [
"MIT"
] | null | null | null | bcbio/qc/coverage.py | markdunning/bcbio-nextgen | 37b69efcc5b2b3713b8d5cd207cece4cb343380d | [
"MIT"
] | null | null | null | """Coverage based QC calculations.
"""
import glob
import os
import subprocess
from bcbio.bam import ref, readstats, utils
from bcbio.distributed import transaction
from bcbio.heterogeneity import chromhacks
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
from bcbio.variation import coverage as cov
from bcbio.variation import bedutils
def run(bam_file, data, out_dir):
"""Run coverage QC analysis
"""
out = dict()
out_dir = utils.safe_makedir(out_dir)
if dd.get_coverage(data) and dd.get_coverage(data) not in ["None"]:
merged_bed_file = bedutils.clean_file(dd.get_coverage_merged(data), data, prefix="cov-", simple=True)
target_name = "coverage"
elif dd.get_coverage_interval(data) != "genome":
merged_bed_file = dd.get_variant_regions_merged(data)
target_name = "variant_regions"
else:
merged_bed_file = None
target_name = "genome"
avg_depth = cov.get_average_coverage(target_name, merged_bed_file, data)
if target_name == "coverage":
out_files = cov.coverage_region_detailed_stats(target_name, merged_bed_file, data, out_dir)
else:
out_files = []
out['Avg_coverage'] = avg_depth
samtools_stats_dir = os.path.join(out_dir, os.path.pardir, 'samtools')
from bcbio.qc import samtools
samtools_stats = samtools.run(bam_file, data, samtools_stats_dir)["metrics"]
out["Total_reads"] = total_reads = int(samtools_stats["Total_reads"])
out["Mapped_reads"] = mapped = int(samtools_stats["Mapped_reads"])
out["Mapped_paired_reads"] = int(samtools_stats["Mapped_paired_reads"])
out['Duplicates'] = dups = int(samtools_stats["Duplicates"])
if total_reads:
out["Mapped_reads_pct"] = 100.0 * mapped / total_reads
if mapped:
out['Duplicates_pct'] = 100.0 * dups / mapped
if dd.get_coverage_interval(data) == "genome":
mapped_unique = mapped - dups
else:
mapped_unique = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False)
out['Mapped_unique_reads'] = mapped_unique
if merged_bed_file:
ontarget = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=merged_bed_file, target_name=target_name)
out["Ontarget_unique_reads"] = ontarget
if mapped_unique:
out["Ontarget_pct"] = 100.0 * ontarget / mapped_unique
out['Offtarget_pct'] = 100.0 * (mapped_unique - ontarget) / mapped_unique
if dd.get_coverage_interval(data) != "genome":
# Skip padded calculation for WGS even if the "coverage" file is specified
# the padded statistic makes only sense for exomes and panels
padded_bed_file = bedutils.get_padded_bed_file(out_dir, merged_bed_file, 200, data)
ontarget_padded = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=padded_bed_file, target_name=target_name + "_padded")
out["Ontarget_padded_pct"] = 100.0 * ontarget_padded / mapped_unique
if total_reads:
out['Usable_pct'] = 100.0 * ontarget / total_reads
indexcov_files = _goleft_indexcov(bam_file, data, out_dir)
out_files += [x for x in indexcov_files if x and utils.file_exists(x)]
out = {"metrics": out}
if len(out_files) > 0:
out["base"] = out_files[0]
out["secondary"] = out_files[1:]
return out
def _goleft_indexcov(bam_file, data, out_dir):
"""Use goleft indexcov to estimate coverage distributions using BAM index.
Only used for whole genome runs as captures typically don't have enough data
to be useful for index-only summaries.
"""
if not dd.get_coverage_interval(data) == "genome":
return []
out_dir = utils.safe_makedir(os.path.join(out_dir, "indexcov"))
out_files = [os.path.join(out_dir, "%s-indexcov.%s" % (dd.get_sample_name(data), ext))
for ext in ["roc", "ped", "bed.gz"]]
if not utils.file_uptodate(out_files[-1], bam_file):
with transaction.tx_tmpdir(data) as tmp_dir:
tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
gender_args = "--sex %s" % (",".join(gender_chroms)) if gender_chroms else ""
cmd = "goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}"
try:
do.run(cmd.format(**locals()), "QC: goleft indexcov")
except subprocess.CalledProcessError as msg:
if not ("indexcov: no usable" in str(msg) or
("indexcov: expected" in str(msg) and "sex chromosomes, found:" in str(msg))):
raise
for out_file in out_files:
orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
if utils.file_exists(orig_file):
utils.copy_plus(orig_file, out_file)
# MultiQC needs non-gzipped/BED inputs so unpack the file
out_bed = out_files[-1].replace(".bed.gz", ".tsv")
if utils.file_exists(out_files[-1]) and not utils.file_exists(out_bed):
with transaction.file_transaction(data, out_bed) as tx_out_bed:
cmd = "gunzip -c %s > %s" % (out_files[-1], tx_out_bed)
do.run(cmd, "Unpack indexcov BED file")
out_files[-1] = out_bed
return [x for x in out_files if utils.file_exists(x)]
| 46.386555 | 115 | 0.664312 |
543cef330851534694d86f1be5bca5d7e8614e34 | 1,210 | py | Python | shrike-examples/contoso/utils/arg_utils.py | lynochka/azure-ml-problem-sets | e7e69de763444c5603e4455e35e69e917081a4cc | [
"MIT"
] | 3 | 2021-07-27T16:28:51.000Z | 2021-11-15T18:29:02.000Z | shrike-examples/contoso/utils/arg_utils.py | lynochka/azure-ml-problem-sets | e7e69de763444c5603e4455e35e69e917081a4cc | [
"MIT"
] | null | null | null | shrike-examples/contoso/utils/arg_utils.py | lynochka/azure-ml-problem-sets | e7e69de763444c5603e4455e35e69e917081a4cc | [
"MIT"
] | 7 | 2021-08-09T15:04:03.000Z | 2022-03-09T05:48:56.000Z | """
Utility functions for argument parsing
"""
import argparse
def str2bool(val):
"""
Resolving boolean arguments if they are not given in the standard format
Arguments:
val (bool or string): boolean argument type
Returns:
bool: the desired value {True, False}
"""
if isinstance(val, bool):
return val
if isinstance(val, str):
if val.lower() in ("yes", "true", "t", "y", "1"):
return True
elif val.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def str2intlist(val):
"""Converts comma separated string of integers into list of integers
Args:
val (str): comma separate string of integers
"""
return commastring2list(int)(val)
def commastring2list(output_type=str):
"""Returns a lambda function which converts a comma separated string into a list of a given type
Args:
output_type (function, optional): string type conversion function. Defaults to str.
Returns:
function: lambda function
"""
return lambda input_str: list(map(output_type, input_str.split(",")))
| 25.744681 | 100 | 0.638017 |
543dd03030508ee683df7a6d3985dc5051235db5 | 277 | py | Python | tests/test_learner.py | luksurious/faster-teaching | 1493311d5b723ca3f216f537bda8db5907196443 | [
"MIT"
] | 2 | 2020-08-06T13:21:51.000Z | 2021-04-15T04:29:03.000Z | tests/test_learner.py | luksurious/faster-teaching | 1493311d5b723ca3f216f537bda8db5907196443 | [
"MIT"
] | null | null | null | tests/test_learner.py | luksurious/faster-teaching | 1493311d5b723ca3f216f537bda8db5907196443 | [
"MIT"
] | null | null | null | from concepts.letter_addition import LetterAddition
from learners.sim_memoryless_learner import SimMemorylessLearner
| 27.7 | 64 | 0.776173 |
543dded51722ade60b4b464e9cde6ba374678fe4 | 2,536 | py | Python | piper/jde.py | miketarpey/piper | d1620727889228d61fbe448f4747cef9351ede59 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | piper/jde.py | miketarpey/piper | d1620727889228d61fbe448f4747cef9351ede59 | [
"BSD-2-Clause-FreeBSD"
] | 24 | 2021-02-03T17:06:13.000Z | 2021-04-02T13:09:13.000Z | piper/jde.py | miketarpey/piper | d1620727889228d61fbe448f4747cef9351ede59 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import logging
import pandas as pd
from datetime import datetime
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# add_jde_batch() {{{1
def add_jde_batch(df: pd.DataFrame,
col_prefix: str = 'ed',
userid: str = 'userid',
batch: str = 'ABC',
start: int = 100,
step: int = 100) -> pd.DataFrame:
''' Add 'standard' JDE timestamp/default columns.
For given dataframe, adds the following standard Z-file columns.
User ID (edus)
Batch Number (edbt)
Transaction Number (edtn)
Line Number (edln)
Examples
--------
from piper.defaults import *
from piper.jde import *
.. code-block:
%%piper
sample_sales() >>
select('-target_profit', '-location', '-month') >>
reset_index(drop=True) >>
add_jde_batch(start=3) >>
head(tablefmt='plain')
edus edbt edtn edln product target_sales actual_sales actual_profit
0 userid ABC_20210331 1 3 Beachwear 31749 29209 1753
1 userid ABC_20210331 1 103 Beachwear 37833 34050 5448
2 userid ABC_20210331 1 203 Jeans 29485 31549 4417
3 userid ABC_20210331 1 303 Jeans 37524 40901 4090
Parameters
----------
df : the pandas dataframe object
col_prefix : 2 character (e.g. 'ed') column name prefix to be
applied to the added columns
userid : default userid text value
batch : 2 character prefix to concatenated to current timestamp
trans_no : start number in xxln column
step : step increment in xxln column
Returns
-------
A pandas dataframe
'''
timestamp = datetime.now().strftime('_%Y%m%d')
start_position = 0
range_seq = range(start, (df.shape[0]+1)*step, step)
df.insert(start_position, f'{col_prefix}us', userid)
df.insert(start_position+1, f'{col_prefix}bt', batch + timestamp)
df.insert(start_position+2, f'{col_prefix}tn', 1)
df.insert(start_position+3, f'{col_prefix}ln', pd.Series(range_seq))
return df
| 27.868132 | 109 | 0.542587 |
543e07ad4f4ef4e280a96b2a4575d3e61db5448a | 2,159 | py | Python | codes/utils.py | epfml/byzantine-robust-noniid-optimizer | 0e27349ac99235251110d54dd102fda0091bf274 | [
"MIT"
] | 7 | 2021-06-22T03:12:15.000Z | 2022-01-06T16:11:14.000Z | codes/utils.py | epfml/byzantine-robust-noniid-optimizer | 0e27349ac99235251110d54dd102fda0091bf274 | [
"MIT"
] | null | null | null | codes/utils.py | epfml/byzantine-robust-noniid-optimizer | 0e27349ac99235251110d54dd102fda0091bf274 | [
"MIT"
] | 2 | 2021-12-12T13:28:02.000Z | 2022-02-18T13:22:20.000Z | import os
import shutil
import logging
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
| 25.104651 | 64 | 0.642427 |
543e913c7932efd8a58e4692b8be276e0e6a692e | 2,090 | py | Python | setup.py | robertjanes/drawbot | 5a0a2ce55cda3f87624ae8c028d9d59aceee3897 | [
"BSD-2-Clause"
] | null | null | null | setup.py | robertjanes/drawbot | 5a0a2ce55cda3f87624ae8c028d9d59aceee3897 | [
"BSD-2-Clause"
] | null | null | null | setup.py | robertjanes/drawbot | 5a0a2ce55cda3f87624ae8c028d9d59aceee3897 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from setuptools import setup
import os
import re
import shutil
_versionRE = re.compile(r'__version__\s*=\s*\"([^\"]+)\"')
# read the version number for the settings file
with open('drawBot/drawBotSettings.py', "r") as settings:
code = settings.read()
found = _versionRE.search(code)
assert found is not None, "drawBot __version__ not found"
__version__ = found.group(1)
externalTools = ("ffmpeg", "gifsicle", "mkbitmap", "potrace")
externalToolsSourceRoot = os.path.join(os.path.dirname(__file__), "Resources", "externalTools")
externalToolsDestRoot = os.path.join(os.path.dirname(__file__), "drawBot", "context", "tools")
# copy all external tools into drawBot.context.tools folder
for externalTool in externalTools:
source = os.path.join(externalToolsSourceRoot, externalTool)
dest = os.path.join(externalToolsDestRoot, externalTool)
shutil.copyfile(source, dest)
os.chmod(dest, 0o775)
setup(name="drawBot",
version=__version__,
description="DrawBot is a powerful tool that invites you to write simple Python scripts to generate two-dimensional graphics. The builtin graphics primitives support rectangles, ovals, (bezier) paths, polygons, text objects and transparency.",
author="Just van Rossum, Erik van Blokland, Frederik Berlaen",
author_email="frederik@typemytype.com",
url="http://drawbot.com",
license="BSD",
packages=[
"drawBot",
"drawBot.context",
"drawBot.context.tools",
"drawBot.ui"
],
package_data={
"drawBot": [
"context/tools/ffmpeg",
"context/tools/gifsicle",
"context/tools/mkbitmap",
"context/tools/potrace"
]
},
install_requires=[
"pyobjc",
"fontTools",
"booleanOperations",
"pillow"
],
include_package_data=True,
)
# Clean up: delete the tool binaries that were copied into the package
# tree before setup() ran, leaving the working tree pristine.
for toolName in externalTools:
    os.remove(os.path.join(externalToolsDestRoot, toolName))
| 32.65625 | 247 | 0.688517 |
543fd7e53080b049a8ec4e7ace7dac2f370068e8 | 38,634 | py | Python | pysnmp-with-texts/ZHONE-COM-IP-FILTER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/ZHONE-COM-IP-FILTER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/ZHONE-COM-IP-FILTER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ZHONE-COM-IP-FILTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZHONE-COM-IP-FILTER-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:47:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Counter64, iso, Integer32, ModuleIdentity, ObjectIdentity, IpAddress, Unsigned32, MibIdentifier, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, NotificationType, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter64", "iso", "Integer32", "ModuleIdentity", "ObjectIdentity", "IpAddress", "Unsigned32", "MibIdentifier", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "NotificationType", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
zhoneModules, zhoneIp = mibBuilder.importSymbols("Zhone", "zhoneModules", "zhoneIp")
ZhoneRowStatus, ZhoneAdminString = mibBuilder.importSymbols("Zhone-TC", "ZhoneRowStatus", "ZhoneAdminString")
comIpFilter = ModuleIdentity((1, 3, 6, 1, 4, 1, 5504, 6, 58))
comIpFilter.setRevisions(('2005-01-10 10:16', '2005-01-03 09:24', '2004-12-21 09:25', '2004-08-30 11:00', '2004-04-06 00:17', '2001-01-17 08:48', '2000-09-11 16:22',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: comIpFilter.setRevisionsDescriptions(('changed portAccessArg1, portAccessArg2 to more intuitive names.', 'changed portArg1, portArg2 to IP addresses', 'added Port_Access', 'V01.01.02 - Add type field to mcastControlList.', 'V01.01.01 - Implementation of multicast-control-list.', 'V01.01.00 - Added keyword markup, updated SMI, Added the filterStmtRenumTable and filterStatsTable', 'V01.00.00 - Initial Release',))
if mibBuilder.loadTexts: comIpFilter.setLastUpdated('200501100015Z')
if mibBuilder.loadTexts: comIpFilter.setOrganization('Zhone Technologies, Inc.')
if mibBuilder.loadTexts: comIpFilter.setContactInfo(' Postal: Zhone Technologies, Inc. @ Zhone Way 7001 Oakport Street Oakland, CA 94621 USA Toll-Free: +1 877-ZHONE20 (+1 877-946-6320) Tel: +1-510-777-7000 Fax: +1-510-777-7001 E-mail: support@zhone.com')
if mibBuilder.loadTexts: comIpFilter.setDescription('Zhone IP Filter MIB Module. IP Software Minneapolis, MN')
filter = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8))
if mibBuilder.loadTexts: filter.setStatus('current')
if mibBuilder.loadTexts: filter.setDescription('The MIB module representing IP filter specifications in Zhone Technologies products. IP filtering is typically performed to enhance network security by limiting what access is allowed between two networks. Filtering is also effective in eliminating certain denial-of-service attacks. Packet filtering also provides a framework for sanity checking packet headers, and rejecting packets that are unlikely (or that should be impossible). In this way, packet filtering can prevent certain unfortunate mistakes from shutting a network down.')
if mibBuilder.loadTexts: filter.setReference("RFC1812, 'Requirements for IP Version 4 Routers,' ftp://ftp.isi.edu/in-notes/rfc1812.txt. RFC2267, 'Network Ingress Filtering: Defeating Denial of Service Attacks which employ IP Source Address Spoofing,' ftp://ftp.isi.edu/in-notes/rfc2267.txt. RFC2474, 'Definition of the Differentiated Services Field (DS Field) in the IPv4 and IPv6 Headers', ftp://ftp.isi.edu/in-notes/rfc2474.txt. D. Brent Chapman, 'Network (In)Security Through IP Packet Filtering,' Proceedings of the 3rd USENIX Security Symposium, Sept. 1992. Andrew Molitor, 'An Architecture for Advanced Packet Filtering,' Proceedings of the 5th USENIX Security Symposium, June. 1995. Paul Russell, 'Linux IPCHAINS-HOWTO,' http://www.rustcorp.com/linux/ipchains/HOWTO.html, v1.0.7, Mar. 1999.")
filterGlobal = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 1))
if mibBuilder.loadTexts: filterGlobal.setStatus('current')
if mibBuilder.loadTexts: filterGlobal.setDescription('Global filter provisioning information.')
fltGlobalIndexNext = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fltGlobalIndexNext.setStatus('current')
if mibBuilder.loadTexts: fltGlobalIndexNext.setDescription('The next available filter spec table index (filterSpecIndex). A GET on this object increments the value by one. A GETNEXT on this object will always return zero.')
fltGlobalTimeout = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: fltGlobalTimeout.setStatus('current')
if mibBuilder.loadTexts: fltGlobalTimeout.setDescription('Filter inconsistency timeout in seconds. A filter spec is considered to be in an inconsistent state when the value of the objects fltSpecVersion1 and fltSpecVersion2 are not equal. This timeout indicates the minimum number of seconds a filter may be in an inconsistent state before the filter spec becomes invalid and the default action for a filter is used as the filter. Provided fltGlobalTimeout is long enough, it should ensure that both an old modification is permanently stalled (ensuring exclusive access) as well as enough time to repair a filter. Default is five seconds.')
filterSpecTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2), )
if mibBuilder.loadTexts: filterSpecTable.setStatus('current')
if mibBuilder.loadTexts: filterSpecTable.setDescription("The filter specification table contains specifications for the IP filtering module. Rows are indexed by a single integer index (filterSpecIndex). The fltGlobalIndexNext object is used to determine the next index value. Each row points to a sequence of rows (statements) in the filterStatementTable. When any row in that sequence is modified, created, or removed, the fltSpecVersion1 and fltSpecVersion2 objects must be incremented. Rows are created by assigning fltSpecIndex and setting fltSpecRowStatus to 'createAndGo'. All columnar objects in this table have default values, so no objects other than the index value need be set to create a row. Rows are removed by setting fltSpecRowStatus to 'destroy'. When a row is removed, each row in filterStatementTable with the same fltSpecIndex is automatically removed.")
filterSpecEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1), ).setIndexNames((0, "ZHONE-COM-IP-FILTER-MIB", "fltSpecIndex"))
if mibBuilder.loadTexts: filterSpecEntry.setStatus('current')
if mibBuilder.loadTexts: filterSpecEntry.setDescription('An entry in the filterSpecTable.')
fltSpecIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: fltSpecIndex.setStatus('current')
if mibBuilder.loadTexts: fltSpecIndex.setDescription('The index that identifies an entry in the filterSpecTable. The fltGlobalIndexNext object is used to determine the next value of this object.')
fltSpecName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 2), ZhoneAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltSpecName.setStatus('current')
if mibBuilder.loadTexts: fltSpecName.setDescription('The filter name associated with this filter specification. This name should indicate the nature of the filter. The default value is an empty string.')
fltSpecDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 3), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltSpecDesc.setStatus('current')
if mibBuilder.loadTexts: fltSpecDesc.setDescription('Textual description of the filter specification. This should briefly describe the nature of the filter defined by the associated filter statements. The default value is an empty string.')
fltSpecVersion1 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 4), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltSpecVersion1.setStatus('current')
if mibBuilder.loadTexts: fltSpecVersion1.setDescription('The version number of the filter specification. This is used to flag any changes in the statements that comprise a filter. Each time a modification occurs to an object in a filter spec (including the the list of filter statements of the same fltSpecIndex in filterStatementTable), the value of this object, and fltSpecVersion2 must be incremented. The manager adding, deleting, or modifying a filter statement or statements must increment this version number in the following manner. A read of fltSpecVersion1 returns its current value. A write to fltSpecVersion1 must be one greater than its current value. A successful write of this object transfers ownership to the manager, where the manager must subsequently perform any desired modifications to the filter spec and then write the new value of fltSpecVersion1 to the fltSpecVersion2 object to release ownership. When fltSpecVersion1 does not equal to fltSpecVersion2, the filter spec is in an inconsistent state. If the filter spec remains in an inconsistent state longer than the time specified in fltGlobalTimeout, the filter spec is declared invalid and the filter spec does not become active. The previously provisioned filter spec will remain active. If no previous filter spec was provisioned for this interface, a default action is used. It is up to the manager to fix the invalid filter spec and bring it into a consistent state.')
fltSpecVersion2 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 5), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltSpecVersion2.setStatus('current')
if mibBuilder.loadTexts: fltSpecVersion2.setDescription('The version number of the filter specification. The value of this object must be equal to fltSpecVersion1, otherwise the filter spec is inconsistent. See fltSpecVersion1 for details.')
fltSpecLanguageVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 6), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltSpecLanguageVersion.setStatus('current')
if mibBuilder.loadTexts: fltSpecLanguageVersion.setDescription('The language version of the filter. The language version further details the meaning and use of the objects in filterStatmentTable. The definitions of the filter languages is beyond the scope of this description.')
fltSpecRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 2, 1, 7), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltSpecRowStatus.setStatus('current')
if mibBuilder.loadTexts: fltSpecRowStatus.setDescription('Zhone convention to support row creation and deletion. This is the only object required to create or destroy a row in this table.')
filterStatementTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3), )
if mibBuilder.loadTexts: filterStatementTable.setStatus('current')
if mibBuilder.loadTexts: filterStatementTable.setDescription("This table contains the filter specification statements for the IP filtering module. A complete filter specification is comprised of all the linked statements (rows) that are pointed to by an entry in the filterSpecTable. Filter statements are linked together by fltSpecIndex, and are ordered within the comprised filter using fltStmtIndex. A statement can only be owned by one filter spec. Rows are created by assigning fltSpecIndex and fltStmtIndex, and setting fltStmtRowStatus to 'createAndGo'. All columnar objects in this table have default values, so no objects other than the index values need be set to create a row. Rows are destroyed by setting fltStmtRowStatus to 'delete'. When rows are created or destroyed, the version of the corresponding filter spec row is incremented.")
filterStatementEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1), ).setIndexNames((0, "ZHONE-COM-IP-FILTER-MIB", "fltSpecIndex"), (0, "ZHONE-COM-IP-FILTER-MIB", "fltStmtIndex"))
if mibBuilder.loadTexts: filterStatementEntry.setStatus('current')
if mibBuilder.loadTexts: filterStatementEntry.setDescription('An entry in the filterStatement table. Each entry represents one of a sequence of statements that comprise a filter. Each filter statement consists of an index, specific packet header fields, and arbitrary packet offsets and values. Some objects in this entry define ranges for specific packet header fields. These objects define comparison operations on the field they share in the following manner: Low High Compare Method for field f --- ---- ------------------------------------------- 0 0 no comparison on the field 0 H less than or equal to High (f <= H) L 0 exact match (L == f) L H inclusive between comparison (L <= f <= H) ')
fltStmtIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: fltStmtIndex.setStatus('current')
if mibBuilder.loadTexts: fltStmtIndex.setDescription('The table index that identifies a filter statement. These indicies should be sparse to allow for insertion into the list.')
fltStmtIpSrcAddrLow = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 2), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtIpSrcAddrLow.setStatus('current')
if mibBuilder.loadTexts: fltStmtIpSrcAddrLow.setDescription('The inclusive lower bound for the source IP address range. See the filterStatementEntry description for details.')
fltStmtIpSrcAddrHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 3), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtIpSrcAddrHigh.setStatus('current')
if mibBuilder.loadTexts: fltStmtIpSrcAddrHigh.setDescription('The inclusive upper bound for the source IP address range. See the filterStatementEntry description for details.')
fltStmtSrcPortLow = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtSrcPortLow.setStatus('current')
if mibBuilder.loadTexts: fltStmtSrcPortLow.setDescription('The inclusive lower bound for the transport layer source port range. See the filterStatementEntry description for details.')
fltStmtSrcPortHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtSrcPortHigh.setStatus('current')
if mibBuilder.loadTexts: fltStmtSrcPortHigh.setDescription('The inclusive upper bound for the transport layer source port range. See the filterStatementEntry description for details.')
fltStmtIpDstAddrLow = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 6), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtIpDstAddrLow.setStatus('current')
if mibBuilder.loadTexts: fltStmtIpDstAddrLow.setDescription('The inclusive lower bound for the destination IP address range. See the filterStatementEntry description for details.')
fltStmtIpDstAddrHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 7), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtIpDstAddrHigh.setStatus('current')
if mibBuilder.loadTexts: fltStmtIpDstAddrHigh.setDescription('The inclusive upper bound for the destination IP address range. See the filterStatementEntry description for details.')
fltStmtDstPortLow = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtDstPortLow.setStatus('current')
if mibBuilder.loadTexts: fltStmtDstPortLow.setDescription('The inclusive lower bound for the transport layer destination port range. See the filterStatementEntry description for details.')
fltStmtDstPortHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtDstPortHigh.setStatus('current')
if mibBuilder.loadTexts: fltStmtDstPortHigh.setDescription('The inclusive upper bound for the transport layer destination port range. See the filterStatementEntry description for details.')
fltStmtIpProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("any", 1), ("ip", 2), ("tcp", 3), ("udp", 4), ("icmp", 5))).clone('any')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtIpProtocol.setStatus('current')
if mibBuilder.loadTexts: fltStmtIpProtocol.setDescription('The IP protocol value that is to be matched. The enum values are as follows: any(1) : any protocol type is a match (wildcard) ip(2) : raw IP packet tcp(3) : TCP packet udp(4) : UDP packet icmp(5) : ICMP packet The default value is any(1).')
fltStmtArbValueBase = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("ip", 2), ("udp", 3), ("tcp", 4), ("icmp", 5), ("ipOptions", 6), ("tcpOptions", 7))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtArbValueBase.setStatus('current')
if mibBuilder.loadTexts: fltStmtArbValueBase.setDescription('This field identifies the protocol header to which the arbitrary value comparison applies. The enum values are as follows: none(1) : no arbitrary value comparison ip(2) : base is IP header udp(3) : base is UDP header tcp(4) : base is TCP header icmp(5) : base is ICMP header ipOptions(6) : base is IP options header tcpOptions(7) : base is TCP options header The default value is none(1).')
fltStmtArbOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtArbOffset.setStatus('current')
if mibBuilder.loadTexts: fltStmtArbOffset.setDescription('The offset, in octets, from the beginning of the header to the most significant octet for the arbitrary value comparison.')
fltStmtArbMask = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 13), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtArbMask.setStatus('current')
if mibBuilder.loadTexts: fltStmtArbMask.setDescription('This object is mask for for arbitrary value comparisons. The non-zero bits in this field determine the size of the arbitrary field.')
fltStmtArbValueLow = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 14), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtArbValueLow.setStatus('current')
if mibBuilder.loadTexts: fltStmtArbValueLow.setDescription('This object is the inclusive lower bound for arbitrary value comparison. See the filterStatementEntry description for details.')
fltStmtArbValueHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 15), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtArbValueHigh.setStatus('current')
if mibBuilder.loadTexts: fltStmtArbValueHigh.setDescription('This object is the inclusive upper bound for arbitrary value comparison. See the filterStatementEntry description for details.')
fltStmtModifier = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 16), Bits().clone(namedValues=NamedValues(("notIpSrc", 0), ("notSrcPort", 1), ("notDstIp", 2), ("notPortDst", 3), ("notProtocol", 4), ("notArbitrary", 5), ("notStatement", 6)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtModifier.setStatus('current')
if mibBuilder.loadTexts: fltStmtModifier.setDescription('Filter statement modifier. The bits set in this object logically negate the results of the comparisons made on their respecive fields as shown : notIpSrcAddr(1) : fltStmtIpSrcAddrLow, fltStmtIpSrcAddrHigh notSrcPort(2) : fltStmtSrcPortLow, fltStmtSrcPortHigh notIpDstAddr(3) : fltStmtIpDstAddrLow, fltStmtIpDstAddrHigh notDstPort(4) : fltStmtDstPortLow, fltStmtDstPortHigh notIpProtocol(5) : fltStmtIpProtocol notArbitrary(6) : fltStmtArbValueLow, fltStmtArbValueHigh notStatement(7) : negate outcome of the entire statement No bits set (the default) specifies to use all outcomes as is.')
fltStmtAction = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 17), Bits().clone(namedValues=NamedValues(("reset", 0), ("permit", 1), ("deny", 2), ("forward", 3), ("reject", 4), ("log", 5))).clone(namedValues=NamedValues(("deny", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtAction.setStatus('current')
if mibBuilder.loadTexts: fltStmtAction.setDescription('Filter statement action. The bits set in this object specify actions to take on packets matching this statement. Supported actions are: reset(0) : Return a TCP reset packet to the packet sender and drop the packet. This cannot be specified with permit. permit(1) : Stop filtering the packet and allow it to be sent on the associated interface. This cannot be specified with deny. deny(2) : Stop filtering the packet and discard it. This cannot be specified with permit. forward(3) : Forward the packet the IP address specified in fltStmtActionArg. reject(4) : Return an ICMP destination unreachable packet (type 3) to the packet sender with code 13 (communication administratively prohibited). This cannot be specified permit. log(5) : Write the packet to the log stream. There are some mutually exclusive bits: reset(0) and permit(1), permit(1) and deny(2), permit(1) and reject(4). No bits set implies to continue filtering on the packet.')
fltStmtActionArg = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 18), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtActionArg.setStatus('current')
if mibBuilder.loadTexts: fltStmtActionArg.setDescription('Filter statement action argument. The meaning of this object depends on the value of fltStmtAction: forward(3) : An IP address to forward the packet to. The value of this object must be non-zero. All other values of fltStmtAction have no relation to this object. The default is zero.')
fltStmtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 3, 1, 19), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fltStmtRowStatus.setStatus('current')
if mibBuilder.loadTexts: fltStmtRowStatus.setDescription('Zhone convention to support row creation and deletion. This is the only object required to create or destroy a row in this table.')
filterStmtRenumTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 4), )
if mibBuilder.loadTexts: filterStmtRenumTable.setStatus('current')
if mibBuilder.loadTexts: filterStmtRenumTable.setDescription('This table provides a mechanism for renumbering individual filter statments within their particular filter spec.')
filterStmtRenumEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 4, 1), )
filterStatementEntry.registerAugmentions(("ZHONE-COM-IP-FILTER-MIB", "filterStmtRenumEntry"))
filterStmtRenumEntry.setIndexNames(*filterStatementEntry.getIndexNames())
if mibBuilder.loadTexts: filterStmtRenumEntry.setStatus('current')
if mibBuilder.loadTexts: filterStmtRenumEntry.setDescription('An entry in the filterStmtRenumTable.')
fltStmtIndexNew = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: fltStmtIndexNew.setStatus('current')
if mibBuilder.loadTexts: fltStmtIndexNew.setDescription("The new statement index for the filter statement. Reading this object will return the same value as the 'fltStmtIndex' portion of its index. Writing to this object will cause the corresponding filter statement to be relocated to the position identified by the value written here. If no statement exists at the current index, 'no such instance' will be returned. If a statement already exists at the new index then 'inconsistent value' is returned. For example, to move the second statement of filter #4 to the third position (e.g. to make room for a new statement #2), the following SNMP set-request would be issued: fltStmtIndexNew.4.2 = 3 There is no default value for this object as it is derived from the fltStmtIndex.")
filterStatsTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5), )
if mibBuilder.loadTexts: filterStatsTable.setStatus('current')
if mibBuilder.loadTexts: filterStatsTable.setDescription('This table provides ingress and egress IP filter statistics for each interface. This table is indexed by the ifIndex of the interface and the direction (ingress or egress) of traffic being filtered. This is a read-only table.')
filterStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ZHONE-COM-IP-FILTER-MIB", "fltStatDirection"))
if mibBuilder.loadTexts: filterStatsEntry.setStatus('current')
if mibBuilder.loadTexts: filterStatsEntry.setDescription('An entry in the filterStatsTable. There will be an entry for each filter provisioned on an interface. There can be, at most, two filters provisioned per interface; one for ingress filtering and the other for egress filtering.')
fltStatDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ingress", 1), ("egress", 2))))
if mibBuilder.loadTexts: fltStatDirection.setStatus('current')
if mibBuilder.loadTexts: fltStatDirection.setDescription('The direction for which this set of statistics is kept: ingress or egress.')
fltStatResetPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 2), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatResetPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatResetPkts.setDescription('The number of discarded packets for which a TCP reset packet was sent.')
fltStatPermitPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 3), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatPermitPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatPermitPkts.setDescription('The number of permitted packets.')
fltStatDenyPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 4), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatDenyPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatDenyPkts.setDescription('The number of discarded packets.')
fltStatForwardPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 5), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatForwardPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatForwardPkts.setDescription('The number of packets forwarded to the IP address specified in the filter.')
fltStatRejectPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 6), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatRejectPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatRejectPkts.setDescription('The number of discarded packets for which an ICMP destination unreachable packet with code 13 was sent.')
fltStatLogPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 7), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatLogPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatLogPkts.setDescription('The number of logged packets.')
fltStatDefaultPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatDefaultPkts.setStatus('current')
if mibBuilder.loadTexts: fltStatDefaultPkts.setDescription('The number of packets that pass through the filter without matching upon which the default action is used.')
fltStatSpecVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatSpecVersion.setStatus('current')
if mibBuilder.loadTexts: fltStatSpecVersion.setDescription('The version of the filter being used on this interface.')
fltStatSpecIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 5, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fltStatSpecIndex.setStatus('current')
if mibBuilder.loadTexts: fltStatSpecIndex.setDescription('The index of the filter specification being used on this interface. If there is no filter configured for an interface, the entry will not exist in this table.')
mcastControl = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6))
if mibBuilder.loadTexts: mcastControl.setStatus('current')
if mibBuilder.loadTexts: mcastControl.setDescription('The MIB module representing Multicast control list specifications in Zhone Technologies products. The First application of multicast control list is to accept of deny a IGMP request to join or leave a IGMP group. Any IGMP request to join a group is accepted only if the group address is available in the Multicast Control list pointed by a field in the ip-interface-record.')
mcastControlListTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1), )
if mibBuilder.loadTexts: mcastControlListTable.setStatus('current')
if mibBuilder.loadTexts: mcastControlListTable.setDescription('Multicast control list table conatins the one of the IP Address that can be allowed to join to by a IGMP join request from IP interface that has the the multicast control list in its ip-interfce-profile. The address to the table is the multicast control list ID and the precedence. The Row status in the table contains indication of whether the row is being created or destroyed. ')
mcastControlListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1, 1), ).setIndexNames((0, "ZHONE-COM-IP-FILTER-MIB", "mcastControlListControlId"), (0, "ZHONE-COM-IP-FILTER-MIB", "mcastControlListControlPrecedence"))
if mibBuilder.loadTexts: mcastControlListEntry.setStatus('current')
if mibBuilder.loadTexts: mcastControlListEntry.setDescription('An entry in the Multicast Control List.')
mcastControlListControlId = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: mcastControlListControlId.setStatus('current')
if mibBuilder.loadTexts: mcastControlListControlId.setDescription('Description.')
mcastControlListControlPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: mcastControlListControlPrecedence.setStatus('current')
if mibBuilder.loadTexts: mcastControlListControlPrecedence.setDescription('Description.')
mcastControlListRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1, 1, 3), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mcastControlListRowStatus.setStatus('current')
if mibBuilder.loadTexts: mcastControlListRowStatus.setDescription('Description.')
mcastControlListIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mcastControlListIpAddress.setStatus('current')
if mibBuilder.loadTexts: mcastControlListIpAddress.setDescription('multicast ip address.')
mcastControlListType = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 6, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("always-on", 2), ("periodic", 3))).clone('normal')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mcastControlListType.setStatus('current')
if mibBuilder.loadTexts: mcastControlListType.setDescription('Defines the video stream type. normal - join and leave when desired. Used for video. always-on - always joined. Meant for EBS, not video. periodic - will join and leave after task complete. Not meant for video. Used to download the tv guide.')
portAccessControl = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7))
if mibBuilder.loadTexts: portAccessControl.setStatus('current')
if mibBuilder.loadTexts: portAccessControl.setDescription('This MIB represents the port access control list in Zhone products. It is used to control access to internal ports. Initially it is used just for TELNET (23) , but in theory could be used for other ports as well.')
portAccessNextIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portAccessNextIndex.setStatus('current')
if mibBuilder.loadTexts: portAccessNextIndex.setDescription('Description: A hint for the next free index should the manager want to create a new entry.')
portAccessTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2), )
if mibBuilder.loadTexts: portAccessTable.setStatus('current')
if mibBuilder.loadTexts: portAccessTable.setDescription('Contains the list of entries that control port access on this device.')
portAccessEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2, 1), ).setIndexNames((0, "ZHONE-COM-IP-FILTER-MIB", "portAccessIndex"))
if mibBuilder.loadTexts: portAccessEntry.setStatus('current')
if mibBuilder.loadTexts: portAccessEntry.setDescription('This contains the entry that is to be accepted. Currently only used to control access to port 23. arg1, arg2 provide IP Address/mask to allow in.')
portAccessIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)))
if mibBuilder.loadTexts: portAccessIndex.setStatus('current')
if mibBuilder.loadTexts: portAccessIndex.setDescription('The index of this entry in table. 100 entries should be more than enough.')
portAccessRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2, 1, 2), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: portAccessRowStatus.setStatus('current')
if mibBuilder.loadTexts: portAccessRowStatus.setDescription('Description.: used to create/delete entries in the table.')
portAccessNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1023))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: portAccessNumber.setStatus('current')
if mibBuilder.loadTexts: portAccessNumber.setDescription('PortNumber that this applies to, 1..1023 supported.')
portAccessSrcAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: portAccessSrcAddr.setStatus('current')
if mibBuilder.loadTexts: portAccessSrcAddr.setDescription('The IP address that we will accept packets from.')
portAccessNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 8, 7, 2, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: portAccessNetMask.setStatus('current')
if mibBuilder.loadTexts: portAccessNetMask.setDescription('portAccessNetMask - used to pass the range that we will accept with regards to portAccessSrcAddr.')
mibBuilder.exportSymbols("ZHONE-COM-IP-FILTER-MIB", fltStmtIpDstAddrLow=fltStmtIpDstAddrLow, fltStmtIpSrcAddrHigh=fltStmtIpSrcAddrHigh, mcastControlListIpAddress=mcastControlListIpAddress, fltSpecVersion1=fltSpecVersion1, fltStatSpecIndex=fltStatSpecIndex, portAccessSrcAddr=portAccessSrcAddr, fltStatSpecVersion=fltStatSpecVersion, portAccessNumber=portAccessNumber, fltStmtIpProtocol=fltStmtIpProtocol, fltStmtModifier=fltStmtModifier, fltSpecLanguageVersion=fltSpecLanguageVersion, fltStmtSrcPortLow=fltStmtSrcPortLow, mcastControlListControlPrecedence=mcastControlListControlPrecedence, fltStmtActionArg=fltStmtActionArg, fltSpecVersion2=fltSpecVersion2, filterStmtRenumEntry=filterStmtRenumEntry, filterStmtRenumTable=filterStmtRenumTable, portAccessTable=portAccessTable, mcastControlListControlId=mcastControlListControlId, fltStmtIpDstAddrHigh=fltStmtIpDstAddrHigh, fltStmtRowStatus=fltStmtRowStatus, comIpFilter=comIpFilter, portAccessControl=portAccessControl, fltStatDirection=fltStatDirection, mcastControl=mcastControl, fltStmtArbValueLow=fltStmtArbValueLow, mcastControlListTable=mcastControlListTable, filterGlobal=filterGlobal, fltSpecIndex=fltSpecIndex, PYSNMP_MODULE_ID=comIpFilter, fltStmtSrcPortHigh=fltStmtSrcPortHigh, filterStatsTable=filterStatsTable, fltStmtArbMask=fltStmtArbMask, fltGlobalIndexNext=fltGlobalIndexNext, fltStmtIndexNew=fltStmtIndexNew, mcastControlListRowStatus=mcastControlListRowStatus, filterStatsEntry=filterStatsEntry, fltStmtArbValueBase=fltStmtArbValueBase, fltStatLogPkts=fltStatLogPkts, fltStatResetPkts=fltStatResetPkts, fltStatPermitPkts=fltStatPermitPkts, mcastControlListType=mcastControlListType, portAccessIndex=portAccessIndex, fltStmtDstPortLow=fltStmtDstPortLow, fltGlobalTimeout=fltGlobalTimeout, filterStatementTable=filterStatementTable, fltStatDefaultPkts=fltStatDefaultPkts, filter=filter, fltStmtArbOffset=fltStmtArbOffset, portAccessEntry=portAccessEntry, portAccessNextIndex=portAccessNextIndex, 
fltStatRejectPkts=fltStatRejectPkts, mcastControlListEntry=mcastControlListEntry, filterStatementEntry=filterStatementEntry, fltStmtIndex=fltStmtIndex, filterSpecTable=filterSpecTable, fltSpecRowStatus=fltSpecRowStatus, fltStmtArbValueHigh=fltStmtArbValueHigh, portAccessNetMask=portAccessNetMask, portAccessRowStatus=portAccessRowStatus, fltStmtAction=fltStmtAction, fltStmtIpSrcAddrLow=fltStmtIpSrcAddrLow, filterSpecEntry=filterSpecEntry, fltStatDenyPkts=fltStatDenyPkts, fltSpecDesc=fltSpecDesc, fltSpecName=fltSpecName, fltStmtDstPortHigh=fltStmtDstPortHigh, fltStatForwardPkts=fltStatForwardPkts)
| 168.707424 | 2,566 | 0.786354 |
5442d7409922b392e57d7544f376052f8505514b | 11,160 | py | Python | watertap/examples/flowsheets/case_studies/municipal_treatment/municipal_treatment.py | avdudchenko/watertap | ac8d59e015688ff175a8087d2d52272e4f1fe84f | [
"BSD-3-Clause-LBNL"
] | 4 | 2021-11-06T01:13:22.000Z | 2022-02-08T21:16:38.000Z | watertap/examples/flowsheets/case_studies/municipal_treatment/municipal_treatment.py | avdudchenko/watertap | ac8d59e015688ff175a8087d2d52272e4f1fe84f | [
"BSD-3-Clause-LBNL"
] | 233 | 2021-10-13T12:53:44.000Z | 2022-03-31T21:59:50.000Z | watertap/examples/flowsheets/case_studies/municipal_treatment/municipal_treatment.py | avdudchenko/watertap | ac8d59e015688ff175a8087d2d52272e4f1fe84f | [
"BSD-3-Clause-LBNL"
] | 12 | 2021-11-01T19:11:03.000Z | 2022-03-08T22:20:58.000Z | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
from pyomo.environ import (
ConcreteModel,
value,
TransformationFactory,
units as pyunits,
assert_optimal_termination,
)
from pyomo.network import Arc, SequentialDecomposition
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.core.util import get_solver
from idaes.generic_models.unit_models import Product
import idaes.core.util.scaling as iscale
from idaes.generic_models.costing import UnitModelCostingBlock
from watertap.core.util.initialization import assert_degrees_of_freedom
from watertap.core.wt_database import Database
import watertap.core.zero_order_properties as prop_ZO
from watertap.unit_models.zero_order import (
FeedZO,
MunicipalDrinkingZO,
WaterPumpingStationZO,
PumpZO,
CoagulationFlocculationZO,
SedimentationZO,
OzoneZO,
FixedBedZO,
GACZO,
UVZO,
IonExchangeZO,
ChlorinationZO,
StorageTankZO,
BackwashSolidsHandlingZO,
)
from watertap.core.zero_order_costing import ZeroOrderCosting
if __name__ == "__main__":
m, results = main()
| 32.631579 | 88 | 0.66819 |
54439c9a0c52b928b7dce1ab1fcc8ffac580ad8b | 2,680 | py | Python | lib/googlecloudsdk/sql/tools/instances/delete.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/sql/tools/instances/delete.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/sql/tools/instances/delete.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Google Inc. All Rights Reserved.
"""Deletes a Cloud SQL instance."""
from googlecloudapis.apitools.base import py as apitools_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.util import console_io
from googlecloudsdk.sql import util
| 31.904762 | 79 | 0.701493 |