hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
659cc327a8d71d143b1d4f60325b26e2f3b52adc | 818 | py | Python | pem_recover.py | EggPool/gpg-experiments | 82f79fc05dbc745a84b9bb14c60161716cd08756 | [
"MIT"
] | null | null | null | pem_recover.py | EggPool/gpg-experiments | 82f79fc05dbc745a84b9bb14c60161716cd08756 | [
"MIT"
] | null | null | null | pem_recover.py | EggPool/gpg-experiments | 82f79fc05dbc745a84b9bb14c60161716cd08756 | [
"MIT"
] | null | null | null | from Cryptodome.PublicKey import RSA
import hashlib
import json
# Edit with your pem file
with open('privkey.pem', 'r') as f:
private_key_readable = f.read()
key = RSA.importKey(private_key_readable)
recover(key)
| 30.296296 | 77 | 0.720049 |
659cf1416e415156d8b4e266bad74755407e575d | 316 | py | Python | arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/043_IsPower.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
] | null | null | null | arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/043_IsPower.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
] | null | null | null | arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/043_IsPower.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
] | null | null | null | def isPower(n):
'''
Determine if the given number is a power of some non-negative integer.
'''
if n == 1:
return True
sqrt = math.sqrt(n)
for a in range(int(sqrt)+1):
for b in range(2, int(sqrt)+1):
if a ** b == n:
return True
return False | 24.307692 | 74 | 0.506329 |
659ffa3c1d30e46aa593ca5d32d54d54bd7d5e35 | 218 | py | Python | plugins/pick/choices.py | rbracken/internbot | 58b802e0dd7597ace12acd9342bb938e2f33c25d | [
"BSD-2-Clause"
] | 1 | 2016-09-24T16:00:06.000Z | 2016-09-24T16:00:06.000Z | plugins/pick/choices.py | rbracken/internbot | 58b802e0dd7597ace12acd9342bb938e2f33c25d | [
"BSD-2-Clause"
] | null | null | null | plugins/pick/choices.py | rbracken/internbot | 58b802e0dd7597ace12acd9342bb938e2f33c25d | [
"BSD-2-Clause"
] | null | null | null | # Add your own choices here!
fruit = ["apples", "oranges", "pears", "grapes", "blueberries"]
lunch = ["pho", "timmies", "thai", "burgers", "buffet!", "indian", "montanas"]
situations = {"fruit":fruit, "lunch":lunch}
| 36.333333 | 79 | 0.62844 |
65a027371c207094c43000aeb78dc0ce9124ddf6 | 1,806 | py | Python | testing.py | blairg23/rename-images-to-datetime | e4fc8e34be9d651c4442b023d851bd64fd613e7f | [
"MIT"
] | null | null | null | testing.py | blairg23/rename-images-to-datetime | e4fc8e34be9d651c4442b023d851bd64fd613e7f | [
"MIT"
] | null | null | null | testing.py | blairg23/rename-images-to-datetime | e4fc8e34be9d651c4442b023d851bd64fd613e7f | [
"MIT"
] | null | null | null | '''
Stolen straight from https://stackoverflow.com/a/51337247/1224827
'''
try:
import PIL
import PIL.Image as PILimage
from PIL import ImageDraw, ImageFont, ImageEnhance
from PIL.ExifTags import TAGS, GPSTAGS
import os
import glob
except ImportError as err:
exit(err)
if __name__ == '__main__':
input_directory = os.path.join(os.getcwd(), 'input')
glob_path = os.path.join(input_directory, '*.jpg')
filepaths = glob.glob(glob_path)
for filepath in filepaths:
filename, extension = os.path.splitext(filepath)
try:
# img = PILimage.open(path + filename)
img = PILimage.open(filepath)
image = Worker(img)
date = image.date
print(date)
except Exception as e:
print(e)
| 26.558824 | 65 | 0.55814 |
65a0da8d520c64ade98d09bb5d2663a8e3d3134d | 102 | py | Python | tftool/access/__init__.py | antsfamily/tftool | 0de72be13b3ca43e8a95c8be726c55841b389973 | [
"MIT"
] | null | null | null | tftool/access/__init__.py | antsfamily/tftool | 0de72be13b3ca43e8a95c8be726c55841b389973 | [
"MIT"
] | null | null | null | tftool/access/__init__.py | antsfamily/tftool | 0de72be13b3ca43e8a95c8be726c55841b389973 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from .load import load_ckpt
from .save import save_ckpt
| 17 | 39 | 0.794118 |
65a11747e48582b0ad97e6b0273c903fafd78306 | 1,730 | py | Python | scripts/box3d_trpo/sweep_ddpg_0.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | scripts/box3d_trpo/sweep_ddpg_0.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | scripts/box3d_trpo/sweep_ddpg_0.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | import os
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
from railrl.algos.ddpg import DDPG
from railrl.policies.nn_policy import FeedForwardPolicy
from railrl.qfunctions.nn_qfunction import FeedForwardCritic
from rllab.exploration_strategies.ou_strategy import OUStrategy
from railrl.launchers.launcher_util import get_env_settings
from railrl.core.tf_util import BatchNormConfig
import itertools
import tensorflow as tf
stub(globals())
# Param ranges
seed = 0
policy_lrs = [1e-5, 1e-4, 1e-3]
qf_lrs = [1e-5, 1e-4, 1e-3]
gammas = [0.9, 0.99, 0.995]
taus = [1e-3, 1e-2]
for policy_lr, qf_lr, gamma, tau in itertools.product(policy_lrs, qf_lrs, gammas, taus):
env = TfEnv(normalize(env=GymEnv('Box3dReach-v4',record_video=False, \
log_dir='/tmp/gym_test',record_log=False)))
es = OUStrategy(env_spec=env.spec)
qf = FeedForwardCritic(
name_or_scope="critic",
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
policy = FeedForwardPolicy(
name_or_scope="actor",
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
algo = DDPG(
env,
es,
policy,
qf,
"/data0/dianchen/box3d/ddpg_box3d_state_v4_tf_policy_{0}_qf_{1}_gamma_{2}_tau_{3}".format(
policy_lr,
qf_lr,
gamma,
tau,
),
qf_learning_rate=qf_lr,
policy_learning_rate=policy_lr,
discount=gamma,
soft_target_tau=tau,
gpu_ratio=0.25,
)
run_experiment_lite(
algo.train(),
exp_prefix="ddpg_box3d_state_v4_tf_policy_{0}_qf_{1}_gamma_{2}_tau_{3}".format(
policy_lr,
qf_lr,
gamma,
tau,
),
n_parallel=1,
snapshot_mode="last",
seed=seed,
mode="local"
)
| 23.378378 | 92 | 0.750289 |
65a1934b198c619626a687dd053ddc9910070a15 | 17,974 | py | Python | tests/test_engine.py | popravich/hiku | 4ce6b46302de61fc17016ddf3af3f378b3fce119 | [
"BSD-3-Clause"
] | null | null | null | tests/test_engine.py | popravich/hiku | 4ce6b46302de61fc17016ddf3af3f378b3fce119 | [
"BSD-3-Clause"
] | null | null | null | tests/test_engine.py | popravich/hiku | 4ce6b46302de61fc17016ddf3af3f378b3fce119 | [
"BSD-3-Clause"
] | 1 | 2022-01-20T17:03:23.000Z | 2022-01-20T17:03:23.000Z | import re
import pytest
from hiku import query as q
from hiku.graph import Graph, Node, Field, Link, Option, Root
from hiku.types import Record, Sequence, Integer, Optional, TypeRef
from hiku.utils import listify
from hiku.engine import Engine, pass_context, Context
from hiku.builder import build, Q
from hiku.executors.sync import SyncExecutor
from .base import check_result, ANY, Mock
OPTION_BEHAVIOUR = [
(Option('op', None), {'op': 1812}, {'op': 1812}),
(Option('op', None, default=None), {}, {'op': None}),
(Option('op', None, default=None), {'op': 2340}, {'op': 2340}),
(Option('op', None, default=3914), {}, {'op': 3914}),
(Option('op', None, default=4254), {'op': None}, {'op': None}),
(Option('op', None, default=1527), {'op': 8361}, {'op': 8361}),
]
def test_root_link_many_func_result_validation():
with pytest.raises(TypeError) as err:
execute(
Graph([
Node('a', [
Field('b', None, Mock(return_value=[[3], [4]])),
]),
Root([
Link('c', Sequence[TypeRef['a']], Mock(return_value=123),
requires=None),
]),
]),
build([Q.c[Q.b]]),
)
err.match(re.escape(
"Can't store link values, node: '__root__', link: 'c', "
"expected: list, returned: 123"
))
def test_root_field_alias():
data = {'a': 42}
graph = Graph([
Root([
Field('a', None, root_fields),
]),
])
result = execute(graph, q.Node([
q.Field('a', alias='a1'),
q.Field('a', alias='a2'),
]))
check_result(result, {'a1': 42, 'a2': 42})
def test_node_field_alias():
data = {'x1': {'a': 42}}
graph = Graph([
Node('X', [
Field('a', None, x_fields),
]),
Root([
Link('x', TypeRef['X'], lambda: 'x1', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x', q.Node([
q.Field('a', alias='a1'),
q.Field('a', alias='a2'),
])),
]))
check_result(result, {'x': {'a1': 42, 'a2': 42}})
def test_root_link_alias():
data = {
'xN': {'a': 1, 'b': 2},
}
graph = Graph([
Node('X', [
Field('a', None, x_fields),
Field('b', None, x_fields),
]),
Root([
Link('x', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x', q.Node([q.Field('a')]), alias='x1'),
q.Link('x', q.Node([q.Field('b')]), alias='x2'),
]))
check_result(result, {
'x1': {'a': 1},
'x2': {'b': 2},
})
def test_node_link_alias():
data = {
'yN': {'a': 1, 'b': 2},
}
x2y = {'xN': 'yN'}
graph = Graph([
Node('Y', [
Field('a', None, y_fields),
Field('b', None, y_fields),
]),
Node('X', [
Field('id', None, id_field),
Link('y', TypeRef['Y'],
lambda ids: [x2y[i] for i in ids],
requires='id'),
]),
Root([
Link('x', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x', q.Node([
q.Link('y', q.Node([q.Field('a')]), alias='y1'),
q.Link('y', q.Node([q.Field('b')]), alias='y2'),
])),
]))
check_result(result, {
'x': {
'y1': {'a': 1},
'y2': {'b': 2},
}
})
def test_conflicting_fields():
x_data = {'xN': {'a': 42}}
graph = Graph([
Node('X', [
Field('a', None, x_fields, options=[Option('k', Integer)]),
]),
Root([
Link('x1', TypeRef['X'], lambda: 'xN', requires=None),
Link('x2', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x1', q.Node([q.Field('a', options={'k': 1})])),
q.Link('x2', q.Node([q.Field('a', options={'k': 2})])),
]))
check_result(result, {
'x1': {'a': '42-1'},
'x2': {'a': '42-2'},
})
def test_conflicting_links():
data = {
'yA': {'a': 1, 'b': 2},
'yB': {'a': 3, 'b': 4},
'yC': {'a': 5, 'b': 6},
}
x2y = {'xN': ['yA', 'yB', 'yC']}
graph = Graph([
Node('Y', [
Field('a', None, y_fields),
Field('b', None, y_fields),
]),
Node('X', [
Field('id', None, id_field),
Link('y', Sequence[TypeRef['Y']], x_to_y_link, requires='id',
options=[Option('exclude', None)]),
]),
Root([
Link('x1', TypeRef['X'], lambda: 'xN', requires=None),
Link('x2', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x1', q.Node([
q.Link('y', q.Node([q.Field('a')]),
options={'exclude': ['yA']}),
])),
q.Link('x2', q.Node([
q.Link('y', q.Node([q.Field('b')]),
options={'exclude': ['yC']}),
])),
]))
check_result(result, {
'x1': {'y': [{'a': 3}, {'a': 5}]},
'x2': {'y': [{'b': 2}, {'b': 4}]},
})
def test_process_ordered_node():
ordering = []
graph = Graph([
Node('X', [
Field('e', None, f4),
]),
Root([
Field('a', None, f1),
Field('b', None, f1),
Field('c', None, f2),
Field('d', None, f2),
Link('x', TypeRef['X'], f3, requires=None),
]),
])
query = q.Node([
q.Field('d'),
q.Field('b'),
q.Field('a'),
q.Link('x', q.Node([
q.Field('e'),
])),
q.Field('c'),
], ordered=True)
engine = Engine(SyncExecutor())
result = engine.execute(graph, query)
check_result(result, {
'a': 'a',
'b': 'b',
'c': 'c',
'd': 'd',
'x': {
'e': 'x1-e',
},
})
assert ordering == [('d',), ('b', 'a'), 'x1', ('c',)]
| 27.780526 | 80 | 0.476355 |
65a1c52735b77b5b062b18c86f7b8f8507e5e9d2 | 90 | py | Python | helper.py | b-nguyen/cs3240-labdemo | ee8da87092bc46d6a774fa5030283224819a4b87 | [
"MIT"
] | null | null | null | helper.py | b-nguyen/cs3240-labdemo | ee8da87092bc46d6a774fa5030283224819a4b87 | [
"MIT"
] | null | null | null | helper.py | b-nguyen/cs3240-labdemo | ee8da87092bc46d6a774fa5030283224819a4b87 | [
"MIT"
] | null | null | null | __author__ = 'Brian Nguyen'
| 18 | 41 | 0.655556 |
65a24baaac6c0fcc20473db9883448f3352703ee | 6,251 | py | Python | twitter_verified_blocker.py | antoinemcgrath/twitter_blocker_tool | f4c0ed866830259a5ae6844dbb5fbdac8b3674b2 | [
"MIT"
] | null | null | null | twitter_verified_blocker.py | antoinemcgrath/twitter_blocker_tool | f4c0ed866830259a5ae6844dbb5fbdac8b3674b2 | [
"MIT"
] | null | null | null | twitter_verified_blocker.py | antoinemcgrath/twitter_blocker_tool | f4c0ed866830259a5ae6844dbb5fbdac8b3674b2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#### A tool for blocking all verified users on Twitter.
## You may want to create a (public or private) Twitter list named 'exceptions' and add verified users to it.
## This 'exceptions' list that you create on Twitter is for verified accounts that you like and do not want to block.
#### Import dependencies
import json
import tweepy
import re
import random
import sys
import timeit
#### Define variables
start = timeit.default_timer()
exception_title = 'exceptions'
mypath = "blocked.txt"
counter = 0
#### Get keys
Keys = get_api_keys()
#### Access Twitter API using Tweepy & key dictionary definitions
client = tweepy.Client( Keys['Bearer Token'] )
auth = tweepy.OAuth2AppHandler( Keys['Consumer Key (API Key)'], Keys['Consumer Secret (API Secret)'] )
api = tweepy.API(auth)
#### Fetch the user id's of those listed in the exceptions list
#### Checks id against exceptions list
#### Returns a human readable time difference
#### Check if user is already blocked, blocks & add to list if not
#### Increments counter by 1, if count is divisible by 100 print the count & time elapsed.
#### Process user id, check exceptions list, check & block & append to blocked list, trigger counter
#### Get an id from user & send to id processing
#### Work flow
#### Acquire 'exceptions' list for blocking protection/exclusion
protect_list = get_exceptions_list()
print("Protect list number of entries =", len(protect_list))
#### Block verified users that are on the twitter managed verified list
for a_user_id_2_block in tweepy.Cursor(api.friends_ids, id="verified", wait_on_rate_limit=True).items():
counter = process_a_user_id(a_user_id_2_block, counter)
#### Block verified users that are following you
for a_user in tweepy.Cursor(api.followers, screen_name=user, wait_on_rate_limit=True).items():
counter = process_a_user(a_user, counter)
#### Block verified users that are following the user handle "Twitter"
for a_user in tweepy.Cursor(api.followers, screen_name="Twitter", wait_on_rate_limit=True).items():
counter = process_a_user(a_user, counter)
###################################################################
# Do not use any of the code I have written with harmful intent. #
# #
# By using this code you accept that everyone has the #
# right to choose their own gender identity. #
###################################################################
| 38.58642 | 117 | 0.628379 |
65a29ad725144c4d2dc24167982660ac5a79324c | 586 | py | Python | src/pktmapper/common.py | Sapunov/pktmapper | 9d72a42c5b756c10c7fb0debcfc6c20031626aa1 | [
"MIT"
] | null | null | null | src/pktmapper/common.py | Sapunov/pktmapper | 9d72a42c5b756c10c7fb0debcfc6c20031626aa1 | [
"MIT"
] | null | null | null | src/pktmapper/common.py | Sapunov/pktmapper | 9d72a42c5b756c10c7fb0debcfc6c20031626aa1 | [
"MIT"
] | null | null | null | """
Common functions
---
Package: PACKET-MAPPER
Author: Sapunov Nikita <kiton1994@gmail.com>
"""
import netaddr
import socket
def ip2str(address):
"""
Print out an IP address given a string
Args:
address (inet struct): inet network address
Returns:
str: Printable/readable IP address
"""
return socket.inet_ntop(socket.AF_INET, address)
def ip2long(ip):
"""
Convert an IP string to long.
Args:
ip: readable IP address
Returns:
long: IP address in long format
"""
return long(netaddr.IPAddress(ip))
| 16.742857 | 52 | 0.641638 |
65a7dd3e05e8bc60ee17293d906552f32358fc04 | 1,236 | py | Python | custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_resnet50_unet.py | davidelomeo/mangroves_deep_learning | 27ce24fe183b65f054c1d6b41417a64355cd0c9c | [
"MIT"
] | null | null | null | custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_resnet50_unet.py | davidelomeo/mangroves_deep_learning | 27ce24fe183b65f054c1d6b41417a64355cd0c9c | [
"MIT"
] | null | null | null | custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_resnet50_unet.py | davidelomeo/mangroves_deep_learning | 27ce24fe183b65f054c1d6b41417a64355cd0c9c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script tests the function that builds the Un-Net model combined
# with the ResNet50 model as an encoder. The test does not look for
# numerical values but checks if the model returns am object or not.
# This is because there are several tests within the UNet class that
# checks if the input parameters are valid and returns None if they are not.
# The test simply checks if these preliminary tests work as intended.
#
# Author: Davide Lomeo
# Email: davide.lomeo20@imperial.ac.uk
# GitHub: https://github.com/acse-2020/acse2020-acse9-finalreport-acse-dl1420-3
# Date: 1 August 2021
# Version: 1.0
from CustomNeuralNetworks import resnet50_unet
def test_ResNet50Unet():
"Testing the ResNet50Unet class"
resnet50unet = resnet50_unet.ResNet50Unet(7)
function_output_1 = resnet50unet.build_model((256, 250, 3))
function_output_2 = resnet50unet.build_model((256, 256, -3))
function_output_3 = resnet50unet.build_model((300, 300, 3))
function_output_4 = resnet50unet.build_model((256, 256, 3))
assert function_output_1 is None
assert function_output_2 is None
assert function_output_3 is None
assert function_output_4 is not None
return
| 35.314286 | 79 | 0.755663 |
65a81a20a737d47906a247b2cf2e411a76cfdb20 | 1,988 | py | Python | htb/Knife/exploit/49933.py | oonray/Notes | 7e52bd058cce5ccf488977222fdb7d7e88aabbbf | [
"MIT"
] | null | null | null | htb/Knife/exploit/49933.py | oonray/Notes | 7e52bd058cce5ccf488977222fdb7d7e88aabbbf | [
"MIT"
] | null | null | null | htb/Knife/exploit/49933.py | oonray/Notes | 7e52bd058cce5ccf488977222fdb7d7e88aabbbf | [
"MIT"
] | null | null | null | # Exploit Title: PHP 8.1.0-dev - 'User-Agentt' Remote Code Execution
# Date: 23 may 2021
# Exploit Author: flast101
# Vendor Homepage: https://www.php.net/
# Software Link:
# - https://hub.docker.com/r/phpdaily/php
# - https://github.com/phpdaily/php
# Version: 8.1.0-dev
# Tested on: Ubuntu 20.04
# References:
# - https://github.com/php/php-src/commit/2b0f239b211c7544ebc7a4cd2c977a5b7a11ed8a
# - https://github.com/vulhub/vulhub/blob/master/php/8.1-backdoor/README.zh-cn.md
"""
Blog: https://flast101.github.io/php-8.1.0-dev-backdoor-rce/
Download: https://github.com/flast101/php-8.1.0-dev-backdoor-rce/blob/main/backdoor_php_8.1.0-dev.py
Contact: flast101.sec@gmail.com
An early release of PHP, the PHP 8.1.0-dev version was released with a backdoor on March 28th 2021, but the backdoor was quickly discovered and removed. If this version of PHP runs on a server, an attacker can execute arbitrary code by sending the User-Agentt header.
The following exploit uses the backdoor to provide a pseudo shell ont the host.
"""
#!/usr/bin/env python3
import os
import re
import requests
host = input("Enter the full host url:\n")
request = requests.Session()
response = request.get(host)
if str(response) == '<Response [200]>':
print("\nInteractive shell is opened on", host, "\nCan't acces tty; job crontol turned off.")
try:
while 1:
cmd = input("$ ")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
"User-Agentt": "zerodiumsystem('" + cmd + "');"
}
response = request.get(host, headers = headers, allow_redirects = False)
current_page = response.text
stdout = current_page.split('<!DOCTYPE html>',1)
text = print(stdout[0])
except KeyboardInterrupt:
print("Exiting...")
exit
else:
print("\r")
print(response)
print("Host is not available, aborting...")
exit | 37.509434 | 267 | 0.667505 |
65a8a8d322da8f141e973ee61e8ca8e2f7c15699 | 2,271 | py | Python | flashcards/cli.py | elliott-king/flashcards | 5dd6ae3d996797b11e28b2bd8a5b0d6e038e1a5d | [
"MIT"
] | null | null | null | flashcards/cli.py | elliott-king/flashcards | 5dd6ae3d996797b11e28b2bd8a5b0d6e038e1a5d | [
"MIT"
] | null | null | null | flashcards/cli.py | elliott-king/flashcards | 5dd6ae3d996797b11e28b2bd8a5b0d6e038e1a5d | [
"MIT"
] | null | null | null | """
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mflashcards` python will execute
``__main__.py`` as a script. That means there won't be any
``flashcards.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``flashcards.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import argparse
from .flashcards import start
parser = argparse.ArgumentParser(description='Command description.')
parser.add_argument('names', metavar='NAME', nargs=argparse.ZERO_OR_MORE,
help="A name of something.")
| 40.553571 | 80 | 0.65742 |
65a8c04b64b959ed6c434b2c56b2ea70ca122b10 | 744 | py | Python | C2C/simple_server.py | muhammedabdelkader/python_collection | 7084588ab983224ccc969f63688d62fcc988263a | [
"MIT"
] | null | null | null | C2C/simple_server.py | muhammedabdelkader/python_collection | 7084588ab983224ccc969f63688d62fcc988263a | [
"MIT"
] | null | null | null | C2C/simple_server.py | muhammedabdelkader/python_collection | 7084588ab983224ccc969f63688d62fcc988263a | [
"MIT"
] | null | null | null | import aiohttp
import asyncio
import time
start_time = time.time()
asyncio.run(main())
print(f"--{(time.time()-start_time)}--") | 27.555556 | 111 | 0.629032 |
65a9792b2934e3a0bc3ead9a9eef72f6382f49c5 | 3,454 | py | Python | Important_data/Thesis figure scripts/six_sigmoids.py | haakonvt/LearningTensorFlow | 6988a15af2ac916ae1a5e23b2c5bde9630cc0519 | [
"MIT"
] | 5 | 2018-09-06T12:52:12.000Z | 2020-05-09T01:40:12.000Z | Important_data/Thesis figure scripts/six_sigmoids.py | haakonvt/LearningTensorFlow | 6988a15af2ac916ae1a5e23b2c5bde9630cc0519 | [
"MIT"
] | null | null | null | Important_data/Thesis figure scripts/six_sigmoids.py | haakonvt/LearningTensorFlow | 6988a15af2ac916ae1a5e23b2c5bde9630cc0519 | [
"MIT"
] | 4 | 2018-02-06T08:42:06.000Z | 2019-04-16T11:23:06.000Z | from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
rc('legend',**{'fontsize':11}) # Font size for legend
from mpl_toolkits.axes_grid.axislines import SubplotZero
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 2.5
import matplotlib.pyplot as plt
from math import erf,sqrt
import numpy as np
xmin = -4; xmax = 4
x = np.linspace(xmin,xmax,1001)
y1 = lambda x: np.array([erf(0.5*i*sqrt(np.pi)) for i in x])
y2 = lambda x: np.tanh(x)
y3 = lambda x: 4./np.pi*np.arctan(np.tanh(np.pi*x/4.))
y4 = lambda x: x/np.sqrt(1.+x**2)
y5 = lambda x: 2.0/np.pi*np.arctan(np.pi/2.0 * x)
y6 = lambda x: x/(1+np.abs(x))
fig = plt.figure(1)
ax = SubplotZero(fig, 111)
fig.add_subplot(ax)
plt.subplots_adjust(left = 0.125, # the left side of the subplots of the figure
right = 0.9, # the right side of the subplots of the figure
bottom = 0.1, # the bottom of the subplots of the figure
top = 0.9, # the top of the subplots of the figure
wspace = 0., # the amount of width reserved for blank space between subplots
hspace = 0.) # the amount of height reserved for white space between subplots
plt.setp(ax, xticks=[-3,-2,-1,1,2,3], xticklabels=[" "," "," "," "," "," ",], yticks=[-1,1], yticklabels=[" "," ",])
# Make coordinate axes with "arrows"
for direction in ["xzero", "yzero"]:
ax.axis[direction].set_visible(True)
# Coordinate axes with arrow (guess what, these are the arrows)
plt.arrow(2.65, 0.0, 0.5, 0.0, color="k", clip_on=False, head_length=0.06, head_width=0.08)
plt.arrow(0.0, 1.03, 0.0, 0.1, color="k", clip_on=False, head_length=0.06, head_width=0.08)
# Remove edge around the entire plot
for direction in ["left", "right", "bottom", "top"]:
ax.axis[direction].set_visible(False)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
colormap = plt.cm.Spectral #nipy_spectral # Other possible colormaps: Set1, Accent, nipy_spectral, Paired
colors = [colormap(i) for i in np.linspace(0, 1, 6)]
plt.title("Six sigmoid functions", fontsize=18, y=1.08)
leg_list = [r"$\mathrm{erf}\left(\frac{\sqrt{\pi}}{2}x \right)$",
r"$\tanh(x)$",
r"$\frac{2}{\pi}\mathrm{gd}\left( \frac{\pi}{2}x \right)$",
r"$x\left(1+x^2\right)^{-\frac{1}{2}}$",
r"$\frac{2}{\pi}\mathrm{arctan}\left( \frac{\pi}{2}x \right)$",
r"$x\left(1+|x|\right)^{-1}$"]
for i in range(1,7):
s = "ax.plot(x,y%s(x),color=colors[i-1])" %(str(i))
eval(s)
ax.legend(leg_list,loc="best", ncol=2, fancybox=True) # title="Legend", fontsize=12
# ax.grid(True, which='both')
ax.set_aspect('equal')
ax.set_xlim([-3.1,3.1])
ax.set_ylim([-1.1,1.1])
ax.annotate('1', xy=(0.08, 1-0.02))
ax.annotate('0', xy=(0.08, -0.2))
ax.annotate('-1', xy=(0.08, -1-0.03))
for i in [-3,-2,-1,1,2,3]:
ax.annotate('%s' %str(i), xy=(i-0.03, -0.2))
maybe = raw_input("\nUpdate figure directly in master thesis?\nEnter 'YES' (anything else = ONLY show to screen) ")
if maybe == "YES": # Only save to disc if need to be updated
filenameWithPath = "/Users/haakonvt/Dropbox/uio/master/latex-master/Illustrations/six_sigmoids.pdf"
plt.savefig(filenameWithPath, bbox_inches='tight') #, pad_inches=0.2)
print 'Saved over previous file in location:\n "%s"' %filenameWithPath
else:
print 'Figure was only shown on screen.'
plt.show()
| 40.635294 | 116 | 0.630573 |
65aa73e15457005cd520549df842b9dc33211c7c | 3,820 | py | Python | src/web/modules/search/controllers/search/control.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | 2 | 2017-04-30T07:29:23.000Z | 2017-04-30T07:36:27.000Z | src/web/modules/search/controllers/search/control.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | null | null | null | src/web/modules/search/controllers/search/control.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | null | null | null | import json
import urllib2
import traceback
import cgi
from flask import render_template, request
import web.util.tools as tools
import lib.http as http
import lib.es as es
from web import app
from lib.read import readfile
| 33.217391 | 85 | 0.546073 |
65aa8588c528dddf9da0b75de2f8177f0b66e0ef | 1,043 | py | Python | Go6/policy_probabilistic_player.py | skyu0221/cmput496 | ad1e59805ab49324ec1e387ddeaf3dd3202518bc | [
"MIT"
] | null | null | null | Go6/policy_probabilistic_player.py | skyu0221/cmput496 | ad1e59805ab49324ec1e387ddeaf3dd3202518bc | [
"MIT"
] | null | null | null | Go6/policy_probabilistic_player.py | skyu0221/cmput496 | ad1e59805ab49324ec1e387ddeaf3dd3202518bc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from board_util import GoBoardUtil
from gtp_connection import GtpConnection
if __name__=='__main__':
createPolicyPlayer()
| 22.673913 | 80 | 0.633749 |
65ac271abc5546a6ef5541faf5bc32786bb4d4dc | 1,531 | py | Python | test_models.py | ChirilaLaura/covid-z | f1cc0818831519404486cd2fd2e78c36b789de24 | [
"MIT"
] | 2 | 2020-05-14T03:02:22.000Z | 2020-06-16T10:05:44.000Z | test_models.py | ChirilaLaura/covid-z | f1cc0818831519404486cd2fd2e78c36b789de24 | [
"MIT"
] | null | null | null | test_models.py | ChirilaLaura/covid-z | f1cc0818831519404486cd2fd2e78c36b789de24 | [
"MIT"
] | null | null | null | from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-m1", "--model1", required=True, help="path to model1")
ap.add_argument("-m2", "--model2", required=True, help="path to model2")
ap.add_argument("-i", "--image", required=True, help="path to image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
orig = image.copy()
image = cv2.resize(image, (64, 64))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
model1 = load_model(args["model1"])
model2 = load_model(args["model2"])
print("models loaded")
(other, xray) = model1.predict(image)[0]
label2 = "Xray" if xray > other else "Other"
proba = "Xray" if xray > other else other
label = "{}: {:.2f}%".format(label2, proba * 100)
if label2 == "Xray":
(infected, healthy) = model2.predict(image)[0]
label2 = "Healthy" if healthy > infected else "Infected"
proba = "Healthy" if healthy > infected else "Infected"
label = "{}: {:.2f}%".format(label2, proba * 100)
output = imutils.resize(orig, width=400)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imshow("Output", output)
cv2.waitKey(0)
else:
output = imutils.resize(orig, width=400)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imshow("Output", output)
cv2.waitKey(0)
| 31.895833 | 88 | 0.674722 |
65ac8cde7af97a0e6637820254f0d7a893315eae | 143 | py | Python | src/settings.py | MichaelJWelsh/bot-evolution | 6d8e3449fc5350f47e91a6aa7a3e8b719c0c2f16 | [
"MIT"
] | 151 | 2017-05-01T02:47:34.000Z | 2022-01-21T17:08:11.000Z | src/settings.py | MichaelJWelsh/bot-evolution | 6d8e3449fc5350f47e91a6aa7a3e8b719c0c2f16 | [
"MIT"
] | null | null | null | src/settings.py | MichaelJWelsh/bot-evolution | 6d8e3449fc5350f47e91a6aa7a3e8b719c0c2f16 | [
"MIT"
] | 26 | 2017-05-01T21:41:02.000Z | 2021-12-21T11:40:20.000Z | """
This module contains the general settings used across modules.
"""
FPS = 60
WINDOW_WIDTH = 1100
WINDOW_HEIGHT = 600
TIME_MULTIPLIER = 1.0
| 15.888889 | 62 | 0.748252 |
65ad5e7a545499575a16b2d06ffd961696d9832d | 7,974 | py | Python | katana-nbi/katana/api/nfvo.py | afoteas/katana-slice_manager | f03a8520fc06f7bed18ff5c2a01a9b8ea7da84c8 | [
"Apache-2.0"
] | null | null | null | katana-nbi/katana/api/nfvo.py | afoteas/katana-slice_manager | f03a8520fc06f7bed18ff5c2a01a9b8ea7da84c8 | [
"Apache-2.0"
] | null | null | null | katana-nbi/katana/api/nfvo.py | afoteas/katana-slice_manager | f03a8520fc06f7bed18ff5c2a01a9b8ea7da84c8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from logging import handlers
import pickle
import time
import uuid
from bson.binary import Binary
from bson.json_util import dumps
from flask import request
from flask_classful import FlaskView
import pymongo
from requests import ConnectTimeout, ConnectionError
from katana.shared_utils.mongoUtils import mongoUtils
from katana.shared_utils.nfvoUtils import osmUtils
# Logging Parameters
logger = logging.getLogger(__name__)
# Rotate the shared "katana.log" file at ~10 kB, keeping 5 backups.
file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
# NOTE(review): the stream format is currently identical to the file format.
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
# Emit everything from DEBUG upwards to both handlers.
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
| 39.088235 | 97 | 0.538375 |
65ad681676318e198f9ba24f925ddf67a7312897 | 7,400 | py | Python | helpers.py | mochja/ISA-DNS | 463713b97329b000721be2512c9581c4881d664c | [
"MIT"
] | null | null | null | helpers.py | mochja/ISA-DNS | 463713b97329b000721be2512c9581c4881d664c | [
"MIT"
] | null | null | null | helpers.py | mochja/ISA-DNS | 463713b97329b000721be2512c9581c4881d664c | [
"MIT"
] | null | null | null | import threading
import traceback
import socketserver
import struct
import time
import sys
import http.client
import json
import uuid
import config
import dns.rdatatype
import dns.rdataclass
# Parsed command-line arguments shared module-wide.
args = config.args
# DNS query-type codes handled by this resolver, mapped to their mnemonics.
QTYPES = {1:'A', 15: 'MX', 6: 'SOA'}
# Random hex token; presumably used as a synthetic MX hostname elsewhere -- TODO confirm
custom_mx = uuid.uuid4().hex
# https://github.com/shuque/pydig GNUv2 (edited)
# https://github.com/shuque/pydig GNUv2 (edited)
def txt2domainname(input, canonical_form=False):
    """Turn the textual representation of a domain name into its wire format.

    Each dot-separated label is emitted as a one-byte length prefix followed
    by its ASCII bytes (RFC 1035 sec 3.1); the root name "." encodes as a
    single zero byte.  With ``canonical_form`` labels are lower-cased.

    NOTE: the parameter shadows the ``input`` builtin; kept for API
    compatibility with existing callers.
    """
    if input == ".":
        return b'\x00'
    parts = []
    for label in input.split('.'):
        raw = label.encode('ascii')
        if canonical_form:
            raw = raw.lower()
        parts.append(struct.pack('B', len(raw)) + raw)
    return b"".join(parts)
# https://github.com/shuque/pydig GNUv2 (edited)
# https://github.com/shuque/pydig GNUv2 (edited)
def get_domainname(pkt, offset):
    """Decode a domain name at the given packet offset; see RFC 1035.

    Returns a tuple ``(labels, offset)`` where ``labels`` is the list of raw
    label bytestrings (ending with ``b''`` for an uncompressed name) and
    ``offset`` is the position just past the name in ``pkt``.
    """
    global count_compression  # module-level counter of compression pointers seen
    labels = []
    while True:
        llen, = struct.unpack('B', pkt[offset:offset+1])
        if (llen >> 6) == 0x3:
            # Compression pointer, RFC 1035 sec 4.1.4: the top two bits are
            # set and the remaining 14 bits point at the rest of the name.
            count_compression += 1
            pointer, = struct.unpack('!H', pkt[offset:offset+2])
            offset += 2
            suffix, _ = get_domainname(pkt, pointer & 0x3fff)
            labels.extend(suffix)
            break
        offset += 1
        labels.append(pkt[offset:offset+llen])
        offset += llen
        if llen == 0:
            # Zero-length label terminates the name.
            break
    return (labels, offset)
# https://github.com/shuque/pydig GNUv2 (edited)
# https://github.com/shuque/pydig GNUv2 (edited)
def pdomainname(labels):
    """Given a sequence of domain-name labels, return a quoted printable text
    representation of the domain name.

    Printable characters are emitted as-is; anything else becomes a
    backslash-escaped decimal (``\\NNN``).  Handles both int elements
    (Python 3 bytes iteration) and 1-byte elements (Python 2 style).
    """
    printables = b'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-*+'
    rendered = []
    for label in labels:
        chars = []
        for c in label:
            if isinstance(c, int):
                c_int, c_chr = c, chr(c)
            else:
                c_int, c_chr = ord(c), c.decode()
            chars.append(c_chr if c in printables else "\\%03d" % c_int)
        rendered.append(''.join(chars))
    # A lone empty label is the root name.
    if rendered == ['']:
        return "."
    return ".".join(rendered)
| 30.578512 | 148 | 0.553514 |
65ad8049b22c02c19b00ee9ceab0dd889c8339c3 | 3,278 | py | Python | convert/templatetags/convert_tags.py | aino/aino-convert | f3bd773f02a9645c75bfbd773e747dd8dc6e08f4 | [
"BSD-3-Clause"
] | 1 | 2015-07-15T07:40:19.000Z | 2015-07-15T07:40:19.000Z | convert/templatetags/convert_tags.py | aino/aino-convert | f3bd773f02a9645c75bfbd773e747dd8dc6e08f4 | [
"BSD-3-Clause"
] | null | null | null | convert/templatetags/convert_tags.py | aino/aino-convert | f3bd773f02a9645c75bfbd773e747dd8dc6e08f4 | [
"BSD-3-Clause"
] | null | null | null | from django.template import Library, Node, TemplateSyntaxError
from django.utils.encoding import force_unicode
from convert.base import MediaFile, EmptyMediaFile, convert_solo
from convert.conf import settings
# Django template library instance; template tags/filters in this module are
# registered on it.
register = Library()
| 31.519231 | 73 | 0.589079 |
65ad9a16451cd40a1e7a1f6a7b00166acc44cfb1 | 7,826 | py | Python | tests/utils_test.py | lovetrading10/tda-api | 0e38c85739248fbf3b0e3386eb2fb9bf9298f93d | [
"MIT"
] | 7 | 2020-05-03T16:25:08.000Z | 2021-11-03T22:08:27.000Z | tests/utils_test.py | lovetrading10/tda-api | 0e38c85739248fbf3b0e3386eb2fb9bf9298f93d | [
"MIT"
] | null | null | null | tests/utils_test.py | lovetrading10/tda-api | 0e38c85739248fbf3b0e3386eb2fb9bf9298f93d | [
"MIT"
] | 11 | 2020-06-26T22:09:05.000Z | 2022-02-13T13:30:52.000Z | from unittest.mock import MagicMock
import datetime
import json
import unittest
from tda.orders import EquityOrderBuilder
from tda.utils import Utils
from . import test_utils
| 37.806763 | 80 | 0.625607 |
65ae5ae925181ff1d726f472dfbdd87ce820d687 | 9,535 | py | Python | aiida/orm/entities.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T04:08:13.000Z | 2019-07-31T04:08:13.000Z | aiida/orm/entities.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | aiida/orm/entities.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for all common top level AiiDA entity classes and methods"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing
from plumpy.base.utils import super_check, call_with_super_check
from aiida.common import exceptions
from aiida.common import datastructures
from aiida.common.lang import classproperty, type_check
from aiida.manage.manager import get_manager
__all__ = ('Entity', 'Collection')
EntityType = typing.TypeVar('EntityType') # pylint: disable=invalid-name
def get(self, **filters):
    """
    Get a single collection entry that matches the filter criteria

    :param filters: the filters identifying the object to get
    :type filters: dict

    :return: the entry

    :raises aiida.common.NotExistent: if no entry matches the filters
    :raises aiida.common.MultipleObjectsError: if more than one entry matches
    """
    res = self.find(filters=filters)
    if not res:
        raise exceptions.NotExistent("No {} with filter '{}' found".format(self.entity_type.__name__, filters))

    if len(res) > 1:
        # Bug fix: this previously interpolated the `id` *builtin* (rendering
        # "<built-in function id>"); report the actual filters instead.
        raise exceptions.MultipleObjectsError("Multiple {}s found with the same filter '{}'".format(
            self.entity_type.__name__, filters))

    return res[0]
def find(self, filters=None, order_by=None, limit=None):
    """
    Find collection entries matching the filter criteria

    :param filters: the keyword value pair filters to match
    :type filters: dict

    :param order_by: a list of (key, direction) pairs specifying the sort order
    :type order_by: list

    :param limit: the maximum number of results to return
    :type limit: int

    :return: a list of resulting matches
    :rtype: list
    """
    query = self.query()
    # An absent/empty filter dict means "match everything".
    query.add_filter(self.entity_type, filters or {})

    if order_by:
        query.order_by({self.entity_type: order_by})
    if limit:
        query.limit(limit)

    # Each query row is a one-element tuple wrapping the entity.
    return [match[0] for match in query.all()]
def all(self):
    """
    Return every entity in this collection.

    :return: a list with all entities
    :rtype: list
    """
    # Each query row is a one-element tuple wrapping the entity.
    entries = self.query().all()
    return [entry[0] for entry in entries]
class Entity(object): # pylint: disable=useless-object-inheritance
"""An AiiDA entity"""
_objects = None
# Define our collection type
Collection = Collection
def __init__(self, backend_entity):
    """
    Construct the entity around an existing backend instance.

    :param backend_entity: the backend model supporting this entity
    :type backend_entity: :class:`aiida.orm.implementation.BackendEntity`
    """
    self._backend_entity = backend_entity
    # Run `initialize` through plumpy's super-check so every class in the
    # MRO is required to call its parent's `initialize`.
    call_with_super_check(self.initialize)
def init_from_backend(self, backend_entity):
    """
    Rebind this entity to a (possibly new) backend instance.

    Unlike ``__init__`` this does not re-run ``initialize``.

    :param backend_entity: the backend model supporting this entity
    :type backend_entity: :class:`aiida.orm.implementation.BackendEntity`
    """
    self._backend_entity = backend_entity
def store(self):
    """Persist the entity through its backend and return ``self`` for chaining."""
    backend = self._backend_entity
    backend.store()
    return self
| 32.431973 | 116 | 0.627687 |
65ae685c4283988c38775f88a233b7c8ac475f6e | 2,088 | py | Python | src/fullyautomatednutcracker/cogs/antiselfdeprecation.py | dovedevic/fullyautomatednutcracker | c746601f93097b88febea64adb09be5ef569adaa | [
"MIT"
] | 5 | 2020-08-12T00:30:03.000Z | 2020-08-24T08:24:34.000Z | src/fullyautomatednutcracker/cogs/antiselfdeprecation.py | dovedevic/fullyautomatednutcracker | c746601f93097b88febea64adb09be5ef569adaa | [
"MIT"
] | 3 | 2020-08-12T19:25:00.000Z | 2020-08-28T00:23:18.000Z | src/fullyautomatednutcracker/cogs/antiselfdeprecation.py | dovedevic/fullyautomatednutcracker | c746601f93097b88febea64adb09be5ef569adaa | [
"MIT"
] | 8 | 2020-08-12T00:37:03.000Z | 2020-08-20T19:49:32.000Z | from discord.ext import commands
import asyncio
import time
| 43.5 | 198 | 0.632184 |
65aee5c9340fded7e6ab5b1f35346dad94ab5fed | 10,809 | py | Python | pyaff4/lexicon.py | timbolle-unil/pyaff4 | 845bec2dc7a274766e3c9a96adf10a812a925cd7 | [
"Apache-2.0"
] | null | null | null | pyaff4/lexicon.py | timbolle-unil/pyaff4 | 845bec2dc7a274766e3c9a96adf10a812a925cd7 | [
"Apache-2.0"
] | null | null | null | pyaff4/lexicon.py | timbolle-unil/pyaff4 | 845bec2dc7a274766e3c9a96adf10a812a925cd7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""The AFF4 lexicon."""
from __future__ import unicode_literals
# This is the version of the AFF4 specification we support - not the library
# version itself.
from builtins import object
import rdflib
from pyaff4 import rdfvalue
AFF4_VERSION = "0.2"
AFF4_MAX_READ_LEN = 1024*1024*100
AFF4_NAMESPACE = "http://aff4.org/Schema#"
AFF4_LEGACY_NAMESPACE = "http://afflib.org/2009/aff4#"
XSD_NAMESPACE = "http://www.w3.org/2001/XMLSchema#"
RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
AFF4_MEMORY_NAMESPACE = "http://aff4.org/Schema#memory/"
AFF4_DISK_NAMESPACE = "http://aff4.org/Schema#disk/"
AFF4_MACOS_NAMESPACE = "http://aff4.org/Schema#macos/"
# Attributes in this namespace will never be written to persistant
# storage. They are simply used as a way for storing metadata about an AFF4
# object internally.
AFF4_VOLATILE_NAMESPACE = "http://aff4.org/VolatileSchema#"
# The configuration space of the library itself. All these should be volatile
# and therefore not persistant or interoperable with other AFF4 implementations.
AFF4_CONFIG_NAMESPACE = AFF4_NAMESPACE + "config"
# Location of the cache (contains AFF4_FILE_NAME)
AFF4_CONFIG_CACHE_DIR = AFF4_CONFIG_NAMESPACE + "/cache"
# Commonly used RDF types.
URNType = "URN"
XSDStringType = (XSD_NAMESPACE + "string")
RDFBytesType = (XSD_NAMESPACE + "hexBinary")
XSDIntegerType = (XSD_NAMESPACE + "integer")
XSDIntegerTypeInt = (XSD_NAMESPACE + "int")
XSDIntegerTypeLong = (XSD_NAMESPACE + "long")
XSDBooleanType = (XSD_NAMESPACE + "boolean")
# Attribute names for different AFF4 objects.
# Base AFF4Object
AFF4_TYPE = (RDF_NAMESPACE + "type")
AFF4_STORED = (AFF4_NAMESPACE + "stored")
AFF4_CONTAINS = (AFF4_NAMESPACE + "contains")
# Each container should have this file which contains the URN of the container.
AFF4_CONTAINER_DESCRIPTION = "container.description"
AFF4_CONTAINER_INFO_TURTLE = "information.turtle"
AFF4_CONTAINER_INFO_YAML = "information.yaml"
# AFF4 ZipFile containers.
AFF4_ZIP_TYPE = (AFF4_NAMESPACE + "zip_volume")
# AFF4Stream
AFF4_STREAM_SIZE = (AFF4_NAMESPACE + "size")
AFF4_LEGACY_STREAM_SIZE = (AFF4_LEGACY_NAMESPACE + "size")
# The original filename the stream had.
AFF4_STREAM_ORIGINAL_FILENAME = (AFF4_NAMESPACE + "original_filename")
# Can be "read", "truncate", "append"
AFF4_STREAM_WRITE_MODE = (AFF4_VOLATILE_NAMESPACE + "writable")
# FileBackedObjects are either marked explicitly or using the file:// scheme.
AFF4_FILE_TYPE = (AFF4_NAMESPACE + "file")
# file:// based URNs do not always have a direct mapping to filesystem
# paths. This volatile attribute is used to control the filename mapping.
AFF4_FILE_NAME = (AFF4_VOLATILE_NAMESPACE + "filename")
# The original filename the stream had.
AFF4_STREAM_ORIGINAL_FILENAME = (AFF4_NAMESPACE + "original_filename")
# ZipFileSegment
AFF4_ZIP_SEGMENT_TYPE = (AFF4_NAMESPACE + "zip_segment")
# ZipStoredLogicalStream
AFF4_ZIP_SEGMENT_IMAGE_TYPE = (AFF4_NAMESPACE + "ZipSegment")
AFF4_FILEIMAGE = (AFF4_NAMESPACE + "FileImage")
# AFF4 Image Stream - stores a stream using Bevies.
AFF4_IMAGE_TYPE = (AFF4_NAMESPACE + "ImageStream")
AFF4_LEGACY_IMAGE_TYPE = (AFF4_LEGACY_NAMESPACE + "stream")
AFF4_SCUDETTE_IMAGE_TYPE = (AFF4_NAMESPACE + "image")
AFF4_IMAGE_CHUNK_SIZE = (AFF4_NAMESPACE + "chunkSize")
AFF4_LEGACY_IMAGE_CHUNK_SIZE = (AFF4_LEGACY_NAMESPACE + "chunkSize")
AFF4_IMAGE_CHUNKS_PER_SEGMENT = (AFF4_NAMESPACE + "chunksInSegment")
AFF4_LEGACY_IMAGE_CHUNKS_PER_SEGMENT = (AFF4_LEGACY_NAMESPACE + "chunksInSegment")
AFF4_IMAGE_COMPRESSION = (AFF4_NAMESPACE + "compressionMethod")
AFF4_LEGACY_IMAGE_COMPRESSION = (AFF4_LEGACY_NAMESPACE + "CompressionMethod")
AFF4_IMAGE_COMPRESSION_ZLIB = "https://www.ietf.org/rfc/rfc1950.txt"
AFF4_IMAGE_COMPRESSION_SNAPPY = "http://code.google.com/p/snappy/"
AFF4_IMAGE_COMPRESSION_SNAPPY_SCUDETTE = "https://github.com/google/snappy"
AFF4_IMAGE_COMPRESSION_STORED = (AFF4_NAMESPACE + "compression/stored")
AFF4_IMAGE_AES_XTS = "https://doi.org/10.1109/IEEESTD.2008.4493450"
# AFF4Map - stores a mapping from one stream to another.
AFF4_MAP_TYPE = (AFF4_NAMESPACE + "Map")
AFF4_LEGACY_MAP_TYPE = (AFF4_LEGACY_NAMESPACE + "map")
AFF4_SCUDETTE_MAP_TYPE = (AFF4_NAMESPACE + "map")
# Encrypted Streams
AFF4_ENCRYPTEDSTREAM_TYPE = (AFF4_NAMESPACE + "EncryptedStream")
AFF4_RANDOMSTREAM_TYPE = (AFF4_NAMESPACE + "RandomAccessImageStream")
AFF4_KEYBAG = (AFF4_NAMESPACE + "keyBag")
AFF4_WRAPPEDKEY = (AFF4_NAMESPACE + "wrappedKey")
AFF4_SALT = (AFF4_NAMESPACE + "salt")
AFF4_ITERATIONS = (AFF4_NAMESPACE + "iterations")
AFF4_KEYSIZEBYTES = (AFF4_NAMESPACE + "keySizeInBytes")
AFF4_CERT_ENCRYPTED_KEYBAG = (AFF4_NAMESPACE + "PublicKeyEncryptedKeyBag")
AFF4_PASSWORD_WRAPPED_KEYBAG = (AFF4_NAMESPACE + "PasswordWrappedKeyBag")
AFF4_SERIALNUMBER = (AFF4_NAMESPACE + "serialNumber")
AFF4_SUBJECTNAME = (AFF4_NAMESPACE + "x509SubjectName")
# Categories describe the general type of an image.
AFF4_CATEGORY = (AFF4_NAMESPACE + "category")
# These represent standard attributes to describe memory forensics images.
AFF4_MEMORY_PHYSICAL = (AFF4_MEMORY_NAMESPACE + "physical")
AFF4_MEMORY_VIRTUAL = (AFF4_MEMORY_NAMESPACE + "virtual")
AFF4_MEMORY_PAGEFILE = (AFF4_MEMORY_NAMESPACE + "pagefile")
AFF4_MEMORY_PAGEFILE_NUM = (AFF4_MEMORY_NAMESPACE + "pagefile_number")
AFF4_DISK_RAW = (AFF4_DISK_NAMESPACE + "raw")
AFF4_DISK_PARTITION = (AFF4_DISK_NAMESPACE + "partition")
AFF4_DIRECTORY_TYPE = (AFF4_NAMESPACE + "directory")
#The constant stream is a psuedo stream which just returns a constant.
AFF4_CONSTANT_TYPE = (AFF4_NAMESPACE + "constant")
# The constant to repeat (default 0).
AFF4_CONSTANT_CHAR = (AFF4_NAMESPACE + "constant_char")
# An AFF4 Directory stores all members as files on the filesystem. Some
# filesystems can not represent the URNs properly, hence we need a mapping
# between the URN and the filename. This attribute stores the _relative_ path
# of the filename for the member URN relative to the container's path.
AFF4_DIRECTORY_CHILD_FILENAME = (AFF4_NAMESPACE + "directory/filename")
HASH_SHA512 = rdflib.URIRef("http://aff4.org/Schema#SHA512")
HASH_SHA256 = rdflib.URIRef("http://aff4.org/Schema#SHA256")
HASH_SHA1 = rdflib.URIRef("http://aff4.org/Schema#SHA1")
HASH_MD5 = rdflib.URIRef("http://aff4.org/Schema#MD5")
HASH_BLAKE2B = rdflib.URIRef("http://aff4.org/Schema#Blake2b")
HASH_BLOCKMAPHASH_SHA512 = rdflib.URIRef("http://aff4.org/Schema#blockMapHashSHA512")
# early logical imaging support for pmem
# Module-level singleton lexicon instances, created once at import time and
# shared by everything that imports this module.
legacy = LegacyLexicon()
standard = StdLexicon()
scudette = ScudetteLexicon()
standard11 = Std11Lexicon()
pmemlogical = PmemLogicalPreStd()
def AutoResolveAttribute(resolver, urn, attribute):
    """Probe the standard, scudette and legacy lexicons for `attribute`.

    Returns the first non-None value produced by ``resolver.Get``, or None
    when no lexicon yields a hit.  Lexicons are queried lazily, in the same
    order as before: standard, scudette, legacy.
    """
    lookups = (
        resolver.Get(urn, getattr(lexicon, attribute))
        for lexicon in (standard, scudette, legacy)
    )
    return next((value for value in lookups if value is not None), None)
# URN of the "transient" graph (presumably marks statements that should not
# be persisted -- confirm against the resolver implementation).
transient_graph = rdfvalue.URN("http://aff4.org/Schema#transient")
any = rdfvalue.URN("http://aff4.org/Schema#any") | 38.603571 | 85 | 0.753261 |
65af59058300b104393557367f8057f6940196d0 | 431 | py | Python | dusted/dustforce/linux.py | AlexMorson/dustforce-tas-editor | 80546ca525ba215252c23a74807857e9c7c2566c | [
"MIT"
] | 1 | 2021-03-20T07:43:33.000Z | 2021-03-20T07:43:33.000Z | dusted/dustforce/linux.py | AlexMorson/dustforce-tas-editor | 80546ca525ba215252c23a74807857e9c7c2566c | [
"MIT"
] | null | null | null | dusted/dustforce/linux.py | AlexMorson/dustforce-tas-editor | 80546ca525ba215252c23a74807857e9c7c2566c | [
"MIT"
] | null | null | null | import queue
import threading
from subprocess import PIPE, Popen
procs = []
stdout = queue.Queue()
| 22.684211 | 73 | 0.679814 |
65afb03352fe6b2c1a60ffb0e33ef381c9954df6 | 1,834 | py | Python | joplin/pages/official_documents_page/factories.py | cityofaustin/joplin | 01424e46993e9b1c8e57391d6b7d9448f31d596b | [
"MIT"
] | 15 | 2018-09-27T07:36:30.000Z | 2021-08-03T16:01:21.000Z | joplin/pages/official_documents_page/factories.py | cityofaustin/joplin | 01424e46993e9b1c8e57391d6b7d9448f31d596b | [
"MIT"
] | 183 | 2017-11-16T23:30:47.000Z | 2020-12-18T21:43:36.000Z | joplin/pages/official_documents_page/factories.py | cityofaustin/joplin | 01424e46993e9b1c8e57391d6b7d9448f31d596b | [
"MIT"
] | 12 | 2017-12-12T22:48:05.000Z | 2021-03-01T18:01:24.000Z | import factory
from pages.official_documents_page.models import OfficialDocumentPage, OfficialDocumentCollectionOfficialDocumentPage
from pages.base_page.factories import JanisBasePageFactory
from pages.official_documents_collection.factories import OfficialDocumentCollectionFactory
from wagtail.documents.models import Document
| 35.960784 | 117 | 0.745911 |
65b0c43d10ec56796ba655b95a3c9d479381e676 | 6,927 | py | Python | flask_qa/routes/main.py | gouravdhar/youtube_video_code | ade7b8dded7992149d34137f801ebe9c26e9bcf0 | [
"Unlicense"
] | null | null | null | flask_qa/routes/main.py | gouravdhar/youtube_video_code | ade7b8dded7992149d34137f801ebe9c26e9bcf0 | [
"Unlicense"
] | null | null | null | flask_qa/routes/main.py | gouravdhar/youtube_video_code | ade7b8dded7992149d34137f801ebe9c26e9bcf0 | [
"Unlicense"
] | null | null | null | from flask import Blueprint, render_template, request, redirect, url_for
from flask_login import current_user, login_required
from flask_cors import CORS
from flask_qa.extensions import db
from flask_qa.models import Question, User, Stats, Notes
import json
# Flask blueprint under which this module's routes are registered.
main = Blueprint('main', __name__)
| 27.379447 | 147 | 0.549877 |
65b1a21d6fc172f7d80c2944e861d993aee45a5a | 7,453 | py | Python | src/compas_rhino/utilities/misc.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | 235 | 2017-11-07T07:33:22.000Z | 2022-03-25T16:20:00.000Z | src/compas_rhino/utilities/misc.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | 770 | 2017-09-22T13:42:06.000Z | 2022-03-31T21:26:45.000Z | src/compas_rhino/utilities/misc.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | 99 | 2017-11-06T23:15:28.000Z | 2022-03-25T16:05:36.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
basestring
except NameError:
basestring = str
import os
import sys
import ast
from compas_rhino.forms import TextForm
from compas_rhino.forms import ImageForm
import System
import rhinoscriptsyntax as rs
import Rhino
import clr
clr.AddReference('Rhino.UI')
import Rhino.UI # noqa: E402
from Rhino.UI.Dialogs import ShowMessageBox # noqa: E402
try:
from compas_rhino.forms import PropertyListForm
except ImportError:
from Rhino.UI.Dialogs import ShowPropertyListBox
# Names exported by `from ... import *`.
__all__ = [
    'wait',
    'get_tolerance',
    'toggle_toolbargroup',
    'pick_point',
    'browse_for_folder',
    'browse_for_file',
    'print_display_on',
    'display_message',
    'display_text',
    'display_image',
    'display_html',
    'update_settings',
    'update_named_values',
    'screenshot_current_view',
    'select_folder',
    'select_file',
    'unload_modules',
]
# ==============================================================================
# Truly miscellaneous :)
# ==============================================================================
def screenshot_current_view(path,
                            width=1920,
                            height=1080,
                            scale=1,
                            draw_grid=False,
                            draw_world_axes=False,
                            draw_cplane_axes=False,
                            background=False):
    """Take a screenshot of the current view.

    Parameters
    ----------
    path : str
        The filepath for saving the screenshot.

    Other Parameters
    ----------------
    width : int, optional
    height : int, optional
    scale : float, optional
    draw_grid : bool, optional
    draw_world_axes : bool, optional
    draw_cplane_axes : bool, optional
    background : bool, optional

    Returns
    -------
    bool
        True if the command was successful.
        False otherwise.

    """
    grid = "Yes" if draw_grid else "No"
    world_axes = "Yes" if draw_world_axes else "No"
    cplane_axes = "Yes" if draw_cplane_axes else "No"
    transparent = "Yes" if background else "No"
    capture_scale = max(1, scale)  # the rhino command requires a scale >= 1
    rs.EnableRedraw(True)
    rs.Sleep(0)
    command = ("-_ViewCaptureToFile \"" + os.path.abspath(path) + "\""
               " Width=" + str(width) +
               " Height=" + str(height) +
               " Scale=" + str(capture_scale) +
               " DrawGrid=" + grid +
               " DrawWorldAxes=" + world_axes +
               " DrawCPlaneAxes=" + cplane_axes +
               " TransparentBackground=" + transparent +
               " _enter")
    result = rs.Command(command, False)
    rs.EnableRedraw(False)
    return result
def get_tolerance():
    """Return the absolute tolerance of the active Rhino document.

    Returns
    -------
    float
        The absolute unit tolerance.
    """
    tolerance = rs.UnitAbsoluteTolerance()
    return tolerance
# ==============================================================================
# File system
# ==============================================================================
# Convenience aliases; both spellings are exported via __all__.
select_folder = browse_for_folder
select_file = browse_for_file
# ==============================================================================
# Display
# ==============================================================================
# ==============================================================================
# Settings and attributes
# ==============================================================================
def unload_modules(top_level_module_name):
    """Unloads all modules named starting with the specified string.

    This function eases the development workflow when editing a library that is
    used from Rhino/Grasshopper.

    Parameters
    ----------
    top_level_module_name : :obj:`str`
        Name of the top-level module to unload.

    Returns
    -------
    list
        List of unloaded module names.
    """
    # Materialize the matches first: on Python 3, `filter` is a lazy iterator,
    # so popping from sys.modules while iterating it raises RuntimeError
    # ("dictionary changed size during iteration") and the returned iterator
    # would already be exhausted, contradicting the documented list return.
    modules = [name for name in sys.modules if name.startswith(top_level_module_name)]

    for module in modules:
        sys.modules.pop(module)

    return modules
| 27.603704 | 96 | 0.56058 |
65b235fdfa7ea03f6e55907463fc98d053669de0 | 3,539 | py | Python | lib/utils/visualization/fixup_resnet.py | yandex-research/learnable-init | 480627217763912e83251833df2d678c8b6ea6fd | [
"Apache-2.0"
] | 4 | 2021-07-14T19:18:47.000Z | 2022-03-21T17:50:46.000Z | lib/utils/visualization/fixup_resnet.py | yandex-research/learnable-init | 480627217763912e83251833df2d678c8b6ea6fd | [
"Apache-2.0"
] | null | null | null | lib/utils/visualization/fixup_resnet.py | yandex-research/learnable-init | 480627217763912e83251833df2d678c8b6ea6fd | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import matplotlib.pyplot as plt
from lib.utils import moving_average, check_numpy
def draw_plots(base_train_loss, base_test_loss, base_test_error,
               maml_train_loss, maml_test_loss, maml_test_error):
    """Render a 1x3 comparison figure: train loss, test loss, test error.

    Baseline curves use the default color, DIMAML curves are green; only the
    train-loss curves are smoothed with a span-10 moving average.
    """
    panels = [
        ("Train loss",
         moving_average(base_train_loss, span=10),
         moving_average(maml_train_loss, span=10)),
        ("Test loss", base_test_loss, maml_test_loss),
        ("Test classification error", base_test_error, maml_test_error),
    ]
    plt.figure(figsize=(20, 6))
    for position, (title, base_curve, maml_curve) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.plot(base_curve, label='Baseline')
        plt.plot(maml_curve, c='g', label='DIMAML')
        plt.legend(fontsize=14)
        plt.title(title, fontsize=14)
65b25da916e80ac5c60ab157203cd5360dfed5f5 | 3,170 | py | Python | DataPreprocessing/load_diabetes.py | iosifidisvasileios/CumulativeCostBoosting | 05a51390c7cadb23eb47b94406b2aa509d25716d | [
"MIT"
] | null | null | null | DataPreprocessing/load_diabetes.py | iosifidisvasileios/CumulativeCostBoosting | 05a51390c7cadb23eb47b94406b2aa509d25716d | [
"MIT"
] | null | null | null | DataPreprocessing/load_diabetes.py | iosifidisvasileios/CumulativeCostBoosting | 05a51390c7cadb23eb47b94406b2aa509d25716d | [
"MIT"
] | null | null | null | from __future__ import division
# import urllib2
import os, sys
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn import feature_extraction
from sklearn import preprocessing
from random import seed, shuffle
# sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
# import utils as ut
SEED = 1234  # fixed seed so shuffles/splits are reproducible across runs
seed(SEED)  # seeds Python's `random` module (imported above)
np.random.seed(SEED)  # seeds NumPy's global RNG
| 44.647887 | 121 | 0.613565 |
65b262473f8b6de6d59edf029ac0e4e27f71979d | 2,300 | py | Python | python/scripts/copy_pin.py | ehabnaduvi/api-quickstart | 956409098cbce1bf3674d739fe64ebafaaf63ca3 | [
"Apache-2.0"
] | null | null | null | python/scripts/copy_pin.py | ehabnaduvi/api-quickstart | 956409098cbce1bf3674d739fe64ebafaaf63ca3 | [
"Apache-2.0"
] | null | null | null | python/scripts/copy_pin.py | ehabnaduvi/api-quickstart | 956409098cbce1bf3674d739fe64ebafaaf63ca3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copying a pin is not representative of typical user behavior on Pinterest.
#
# This script is intended to demonstrate how to use the API to developers,
# and to provide functionality that might be convenient for developers.
# For example, it might be used as part of a program to generate an
# account to be used to test an API-based application.
#
import argparse
import sys
from os.path import abspath, dirname, join
sys.path.append(abspath(join(dirname(__file__), "..", "src")))
from api_config import ApiConfig
from arguments import common_arguments
def main(argv=None):
    """
    This script copies a pin to a board, both of which are specified by identifiers
    that can be found using the get_user_pins.py and get_user_boards.py script.

    If a section identifier is specified in addition to a board identifier,
    this script will copy the pin to the board section. Section identifiers can be
    found using the get_board.py script. A section identifier may not be specified
    without a board identifier.
    """
    # `None` sentinel instead of a mutable `[]` default avoids the shared
    # mutable-default-argument pitfall; behavior is unchanged (calling with
    # no arguments still parses an empty argument list).
    if argv is None:
        argv = []

    parser = argparse.ArgumentParser(description="Copy a Pin")
    parser.add_argument("-p", "--pin-id", required=True, help="source pin identifier")
    parser.add_argument("-m", "--media", help="media path or id")
    parser.add_argument(
        "-b", "--board-id", required=True, help="destination board identifier"
    )
    parser.add_argument("-s", "--section", help="destination board section")
    common_arguments(parser)
    args = parser.parse_args(argv)

    # get configuration from defaults and/or the environment
    api_config = ApiConfig(verbosity=args.log_level, version=args.api_version)

    # imports that depend on the version of the API
    from access_token import AccessToken
    from oauth_scope import Scope
    from pin import Pin

    access_token = AccessToken(api_config, name=args.access_token)
    access_token.fetch(scopes=[Scope.READ_PINS, Scope.WRITE_BOARDS, Scope.WRITE_PINS])

    # Fetch and display the source pin before copying.
    pin = Pin(args.pin_id, api_config, access_token)
    pin_data = pin.get()
    print("source pin:")
    Pin.print_summary(pin_data)

    new_pin_data = pin.create(pin_data, args.board_id, args.section, args.media)
    print("new pin:")
    Pin.print_summary(new_pin_data)
main(sys.argv[1:])
| 37.096774 | 86 | 0.729565 |
65b30f63399f7d1910889d551fa68b83b2e4d6e6 | 10,308 | py | Python | BL_ColorRamp4_MF.py | SpectralVectors/TransMat | 590b04b273005d95f02b567562c08042c2937af4 | [
"MIT"
] | 31 | 2020-10-16T03:15:06.000Z | 2022-01-31T03:06:44.000Z | BL_ColorRamp4_MF.py | SpectralVectors/TransMat | 590b04b273005d95f02b567562c08042c2937af4 | [
"MIT"
] | 1 | 2020-10-16T07:02:25.000Z | 2020-10-16T13:05:39.000Z | BL_ColorRamp4_MF.py | SpectralVectors/TransMat | 590b04b273005d95f02b567562c08042c2937af4 | [
"MIT"
] | null | null | null | import unreal
BL_ColorRamp4 = unreal.AssetToolsHelpers.get_asset_tools().create_asset('BL_ColorRamp4', '/Engine/Functions/BLUI/', unreal.MaterialFunction, unreal.MaterialFunctionFactoryNew())
BL_ColorRamp4.set_editor_property("expose_to_library", True)
BL_ColorRamp4.set_editor_property("library_categories_text", ("BLUI", "Custom", "Utility"))
create_expression = unreal.MaterialEditingLibrary.create_material_expression_in_function
create_connection = unreal.MaterialEditingLibrary.connect_material_expressions
connect_property = unreal.MaterialEditingLibrary.connect_material_property
update_function = unreal.MaterialEditingLibrary.update_material_function
mat_func_separate = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/BreakOutFloat3Components')
mat_func_combine = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/MakeFloat3')
### Creating Nodes
Mix = create_expression(BL_ColorRamp4,unreal.MaterialExpressionLinearInterpolate,-340.0, 3620.0)
Reroute01 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1840.0, 3360.0)
Math20 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-640.0, 4415.648193359375)
Math19 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-800.0, 4415.648193359375)
Math18 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-800.0, 4235.648193359375)
Math21 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-640.0, 4235.648193359375)
Mix01 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionLinearInterpolate,-20.0, 4480.0)
Math22 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionMultiply,-480.0, 4260.0)
Reroute10 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute09 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute08 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Math23 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionAdd,-320.0, 4320.0)
Reroute06 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Reroute07 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Reroute05 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1849.2108154296875, 5160.0)
Reroute02 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-960.0, 5080.0)
Reroute03 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-960.0, 5080.0)
Reroute04 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-960.0, 5080.0)
Math24 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionAdd,-120.0, 5080.0)
Math25 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionMultiply,-280.0, 5040.0)
Math27 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-600.0, 5195.648193359375)
Math28 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-600.0, 5015.648193359375)
Math29 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-440.0, 5015.648193359375)
Math26 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-440.0, 5195.648193359375)
Mix02 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionLinearInterpolate,100.0, 5180.0)
Math12 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-1080.0, 3460.0)
Math15 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-920.0, 3460.0)
Math16 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionMultiply,-760.0, 3480.0)
Math17 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionAdd,-600.0, 3540.0)
Math14 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-900.0, 3640.0)
Math13 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionSubtract, -1080.0, 3640.0)
Position0 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3540.0)
Color0 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3620.0)
Position1 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3800.0)
Color1 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3880.0)
Position2 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1560.0, 4540.0)
Color2 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1560.0, 4620.0)
Position3 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1360.0, 5320.0)
Color3 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput,-1360.0, 5400.0)
Factor = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -2200.0, 3320.0)
OutputResult = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionOutput,400, 5280)
### Loading Material Functions and Textures
### Setting Values
Color0.input_name = 'Color0'
Color0.sort_priority = 0
Color0.preview_value = (0.0, 0.0, 0.0, 1.0)
Color0.use_preview_value_as_default = True
Position0.input_name = 'Position0'
Position0.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position0.sort_priority = 1
Position0.preview_value = (0.0, 0.0, 0.0, 1.0)
Position0.use_preview_value_as_default = True
Color1.input_name = 'Color1'
Color1.sort_priority = 2
Color1.preview_value = (1.0, 0.0, 0.0, 1.0)
Color1.use_preview_value_as_default = True
Position1.input_name = "Position1"
Position1.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position1.sort_priority = 3
Position1.preview_value = (0.125, 0, 0, 1)
Position1.use_preview_value_as_default = True
Color2.input_name = 'Color2'
Color2.sort_priority = 4
Color2.preview_value = (1.0, 0.5, 0.0, 1)
Color2.use_preview_value_as_default = True
Position2.input_name = "Position2"
Position2.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position2.sort_priority = 5
Position2.preview_value = (0.250, 0, 0, 1)
Position2.use_preview_value_as_default = True
Color3.input_name = 'Color3'
Color3.sort_priority = 6
Color3.preview_value = (1.0, 1, 0.0, 1)
Color3.use_preview_value_as_default = True
Position3.input_name = "Position3"
Position3.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position3.sort_priority = 7
Position3.preview_value = (1, 0, 0, 1)
Position3.use_preview_value_as_default = True
Factor.input_name = 'Factor'
Factor.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Factor.sort_priority = 8
Factor.preview_value = (0.0, 0.0, 0.0, 1.0)
Factor.use_preview_value_as_default = True
### Creating Connections
Color1_connection = create_connection(Color1, '', Mix, 'B')
Position1_connection = create_connection(Position1, '', Math12, 'A')
Position1_connection = create_connection(Position1, '', Math13, 'B')
Position1_connection = create_connection(Position1, '', Reroute09, '')
Position1_connection = create_connection(Position1, '', Reroute10, '')
Position1_connection = create_connection(Position1, '', Reroute08, '')
Mix_connection = create_connection(Mix, '', Mix01, 'A')
Position0_connection = create_connection(Position0, '', Math12, 'B')
Position0_connection = create_connection(Position0, '', Math14, 'A')
Position0_connection = create_connection(Position0, '', Math13, 'A')
Color0_connection = create_connection(Color0, '', Mix, 'A')
Reroute01_connection = create_connection(Reroute01, '', Reroute06, '')
Reroute01_connection = create_connection(Reroute01, '', Math16, 'B')
Reroute01_connection = create_connection(Reroute01, '', Reroute07, '')
Math20_connection = create_connection(Math20, '', Math23, 'B')
Math19_connection = create_connection(Math19, '', Math20, 'B')
Math18_connection = create_connection(Math18, '', Math21, 'B')
Math21_connection = create_connection(Math21, '', Math22, 'A')
Color2_connection = create_connection(Color2, '', Mix01, 'B')
Mix01_connection = create_connection(Mix01, '', Mix02, 'A')
Position2_connection = create_connection(Position2, '', Math18, 'A')
Position2_connection = create_connection(Position2, '', Math19, 'B')
Position2_connection = create_connection(Position2, '', Reroute03, '')
Position2_connection = create_connection(Position2, '', Reroute04, '')
Position2_connection = create_connection(Position2, '', Reroute02, '')
Math22_connection = create_connection(Math22, '', Math23, 'A')
Reroute10_connection = create_connection(Reroute10, '', Math20, 'A')
Reroute09_connection = create_connection(Reroute09, '', Math18, 'B')
Reroute08_connection = create_connection(Reroute08, '', Math19, 'A')
Math23_connection = create_connection(Math23, '', Mix01, 'Alpha')
Reroute06_connection = create_connection(Reroute06, '', Math22, 'B')
Reroute07_connection = create_connection(Reroute07, '', Reroute05, '')
Reroute05_connection = create_connection(Reroute05, '', Math25, 'B')
Reroute02_connection = create_connection(Reroute02, '', Math26, 'A')
Reroute03_connection = create_connection(Reroute03, '', Math28, 'B')
Reroute04_connection = create_connection(Reroute04, '', Math27, 'A')
Math24_connection = create_connection(Math24, '', Mix02, 'Alpha')
Math25_connection = create_connection(Math25, '', Math24, 'A')
Math27_connection = create_connection(Math27, '', Math26, 'B')
Math28_connection = create_connection(Math28, '', Math29, 'B')
Math29_connection = create_connection(Math29, '', Math25, 'A')
Color3_connection = create_connection(Color3, '', Mix02, 'B')
Math26_connection = create_connection(Math26, '', Math24, 'B')
Position3_connection = create_connection(Position3, '', Math28, 'A')
Position3_connection = create_connection(Position3, '', Math27, 'B')
Factor_connection = create_connection(Factor, '', Reroute01, '')
Math12_connection = create_connection(Math12, '', Math15, 'B')
Math15_connection = create_connection(Math15, '', Math16, 'A')
Math16_connection = create_connection(Math16, '', Math17, 'A')
Math17_connection = create_connection(Math17, '', Mix, 'Alpha')
Math14_connection = create_connection(Math14, '', Math17, 'B')
Math13_connection = create_connection(Math13, '', Math14, 'B')
Mix02_connection = create_connection(Mix02, '', OutputResult, '')
update_function() | 60.994083 | 178 | 0.796954 |
65b36386e6a8fce39db4d492a4e6ead8f8c27f5c | 6,731 | py | Python | tools/log2csv.py | Haixing-Hu/lambda-tensorflow-benchmark | 080a6b7fee1c651228f227f52a2bed6ff90579cf | [
"BSD-3-Clause"
] | null | null | null | tools/log2csv.py | Haixing-Hu/lambda-tensorflow-benchmark | 080a6b7fee1c651228f227f52a2bed6ff90579cf | [
"BSD-3-Clause"
] | null | null | null | tools/log2csv.py | Haixing-Hu/lambda-tensorflow-benchmark | 080a6b7fee1c651228f227f52a2bed6ff90579cf | [
"BSD-3-Clause"
] | null | null | null | import os
import re
import glob
import argparse
import pandas as pd
list_test = ['alexnet',
'inception3',
'inception4',
'resnet152',
'resnet50',
'vgg16']
# Naming convention
# Key: log name
# Value: ([num_gpus], [names])
# num_gpus: Since each log folder has all the record for different numbers of GPUs, it is convenient to specify the benchmarks you want to pull by listing the num_gpus
# names: rename the experiments so they are easier to undertand
list_system = {
"i7-6850K-GeForce_GTX_1080_Ti": ([1], ['GTX 1080Ti']),
"i7-9750H-GeForce_RTX_2070_with_Max-Q_Design_XLA_TF1_15": ([1], ['RTX 2070 MAX-Q']),
"i7-9750H-GeForce_RTX_2080_with_Max-Q_Design_XLA_TF1_15": ([1], ['RTX 2080 MAX-Q']),
"i7-10875H-GeForce_RTX_2080_Super_with_Max-Q_Design_XLA_TF2_2": ([1], ['RTX 2080 SUPER MAX-Q']),
"Gold_6230-GeForce_RTX_2080_Ti_NVLink_XLA_trt_TF1_15": ([2, 4, 8], ['2x RTX 2080Ti NVLink', '4x RTX 2080Ti NVLink', '8x RTX 2080Ti NVLink']),
"Gold_6230-GeForce_RTX_2080_Ti_XLA_trt_TF1_15": ([1, 2, 4, 8], ['RTX 2080Ti', '2x RTX 2080Ti', '4x RTX 2080Ti', '8x RTX 2080Ti']),
"Platinum-Tesla_V100-SXM3-32GB_HP16_TF2_2": ([1, 8], ['V100 32GB', '8x V100 32GB']),
"Gold_6230-Quadro_RTX_8000_XLA_trt_TF1_15": ([1, 2, 4, 8], ['RTX 8000', '2x RTX 8000', '4x RTX 8000', '8x RTX 8000']),
"Gold_6230-Quadro_RTX_8000_NVLink_XLA_trt_TF1_15": ([2, 4, 8], ['2x RTX 8000 NVLink', '4x RTX 8000 NVLink', '8x RTX 8000 NVLink']),
"7502-A100-PCIE-40GB": ([1, 2, 4, 8], ['A100 40GB PCIe', '2x A100 40GB PCIe', '4x A100 40GB PCIe', '8x A100 40GB PCIe']),
"3960X-GeForce_RTX_3080_XLA": ([1, 2], ['RTX 3080', '2x RTX 3080']),
"3970X-GeForce_RTX_3090_XLA": ([1, 2, 3], ['RTX 3090', '2x RTX 3090', '3x RTX 3090']),
"7502-RTX_A6000_XLA_TF1_15": ([1, 2, 4, 8], ['RTX A6000', '2x RTX A6000', '4x RTX A6000', '8x RTX A6000'])
}
if __name__ == "__main__":
main()
| 38.028249 | 167 | 0.606002 |
65b8b4c75d35105b5ff106a11aa54530eaf30029 | 2,687 | py | Python | stellar_sdk/xdr/survey_response_body.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | null | null | null | stellar_sdk/xdr/survey_response_body.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | 27 | 2022-01-12T10:55:38.000Z | 2022-03-28T01:38:24.000Z | stellar_sdk/xdr/survey_response_body.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | 2 | 2021-12-02T12:42:03.000Z | 2021-12-07T20:53:10.000Z | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .survey_message_command_type import SurveyMessageCommandType
from .topology_response_body import TopologyResponseBody
__all__ = ["SurveyResponseBody"]
| 32.373494 | 80 | 0.668031 |
65b9bd2ad1163a0006a5a233a9d9d9cd5e6a3646 | 763 | py | Python | poll/migrations/0002_auto_20210114_2215.py | slk007/SahiGalat.com | 786688e07237f3554187b90e01149225efaa1713 | [
"MIT"
] | null | null | null | poll/migrations/0002_auto_20210114_2215.py | slk007/SahiGalat.com | 786688e07237f3554187b90e01149225efaa1713 | [
"MIT"
] | null | null | null | poll/migrations/0002_auto_20210114_2215.py | slk007/SahiGalat.com | 786688e07237f3554187b90e01149225efaa1713 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-14 22:15
from django.db import migrations, models
| 28.259259 | 114 | 0.571429 |
65bb496751451a7bd133a1ac2b24c5b70ac17431 | 5,375 | py | Python | setup.py | JakaKokosar/pysqlite3-binary | 808e9689c69b1ada784eda3d5a8ea7865c8318ad | [
"Zlib"
] | null | null | null | setup.py | JakaKokosar/pysqlite3-binary | 808e9689c69b1ada784eda3d5a8ea7865c8318ad | [
"Zlib"
] | null | null | null | setup.py | JakaKokosar/pysqlite3-binary | 808e9689c69b1ada784eda3d5a8ea7865c8318ad | [
"Zlib"
] | null | null | null | # -*- coding: ISO-8859-1 -*-
# setup.py: the distutils script
#
import os
import setuptools
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from setuptools import Extension
# If you need to change anything, it should be enough to change setup.cfg.
PACKAGE_NAME = 'pysqlite3'
VERSION = '0.4.1'
# define sqlite sources
sources = [os.path.join('src', source)
for source in ["module.c", "connection.c", "cursor.c", "cache.c",
"microprotocols.c", "prepare_protocol.c",
"statement.c", "util.c", "row.c", "blob.c"]]
# define packages
packages = [PACKAGE_NAME]
EXTENSION_MODULE_NAME = "._sqlite3"
# Work around clang raising hard error for unused arguments
if sys.platform == "darwin":
os.environ['CFLAGS'] = "-Qunused-arguments"
log.info("CFLAGS: " + os.environ['CFLAGS'])
define_macros = [('MODULE_NAME', quote_argument(PACKAGE_NAME + '.dbapi2'))]
def get_setup_args():
return dict(
name=PACKAGE_NAME,
version=VERSION,
description="DB-API 2.0 interface for Sqlite 3.x",
long_description='',
author="Charles Leifer",
author_email="coleifer@gmail.com",
license="zlib/libpng",
platforms="ALL",
url="https://github.com/coleifer/pysqlite3",
package_dir={PACKAGE_NAME: "pysqlite3"},
packages=packages,
ext_modules=[Extension(
name=PACKAGE_NAME + EXTENSION_MODULE_NAME,
sources=sources,
define_macros=define_macros)
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: zlib/libpng License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Database :: Database Engines/Servers",
"Topic :: Software Development :: Libraries :: Python Modules"],
cmdclass={
"build_static": AmalgationLibSqliteBuilder,
"build_ext": SystemLibSqliteBuilder
}
)
def main():
try:
setuptools.setup(**get_setup_args())
except BaseException as ex:
log.info(str(ex))
if __name__ == "__main__":
main()
| 32.97546 | 77 | 0.618233 |
65bc3f6e1793bcf43d99a8c4a348a352385aa4a0 | 5,267 | py | Python | gridder/gridder.py | PDFGridder/PDFGridder | 94bc6e76eadc3799905c905a70228fcd6b30c4fb | [
"MIT"
] | 2 | 2016-09-07T18:32:44.000Z | 2016-11-24T19:45:06.000Z | gridder/gridder.py | PDFGridder/PDFGridder | 94bc6e76eadc3799905c905a70228fcd6b30c4fb | [
"MIT"
] | null | null | null | gridder/gridder.py | PDFGridder/PDFGridder | 94bc6e76eadc3799905c905a70228fcd6b30c4fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cairo
from .utils import hex_to_rgba, parse_unit
def parse_size(size):
"""take a size as str (es: 14px), return its value in px/pt as int
"""
if hasattr(size, 'isdigit'):
if size.isdigit():
return int(size)
return parse_unit(size[:-2], size[-2:])
return size
| 36.324138 | 180 | 0.58838 |
65bc5bc0726d3703c47b9225540efbf4baf75f28 | 462 | py | Python | wanikani_api/constants.py | peraperacafe/wanikani_api | 7340fde25ef4b102545e4fa2c485339d79136e17 | [
"BSD-3-Clause"
] | 12 | 2019-04-30T13:11:52.000Z | 2021-05-14T02:52:05.000Z | wanikani_api/constants.py | peraperacafe/wanikani_api | 7340fde25ef4b102545e4fa2c485339d79136e17 | [
"BSD-3-Clause"
] | 323 | 2018-07-13T00:39:22.000Z | 2022-03-31T19:29:08.000Z | wanikani_api/constants.py | peraperacafe/wanikani_api | 7340fde25ef4b102545e4fa2c485339d79136e17 | [
"BSD-3-Clause"
] | 9 | 2020-02-14T14:56:00.000Z | 2022-01-09T19:14:07.000Z |
ROOT_WK_API_URL = "https://api.wanikani.com/v2/"
RESOURCES_WITHOUT_IDS = ["user", "collection", "report"]
SUBJECT_ENDPOINT = "subjects"
SINGLE_SUBJECT_ENPOINT = r"subjects/\d+"
ASSIGNMENT_ENDPOINT = "assignments"
REVIEW_STATS_ENDPOINT = "review_statistics"
STUDY_MATERIALS_ENDPOINT = "study_materials"
REVIEWS_ENDPOINT = "reviews"
LEVEL_PROGRESSIONS_ENDPOINT = "level_progressions"
RESETS_ENDPOINT = "resets"
SUMMARY_ENDPOINT = "summary"
USER_ENDPOINT = "user"
| 33 | 56 | 0.798701 |
65bd680ebf2391800df849001a9518d85eba50ba | 1,943 | py | Python | utils/dataloader.py | Jiaqi0602/adversarial-attack-from-leakage | 90db721bed10094ac7d458b232ad5b1573884338 | [
"BSD-3-Clause"
] | 9 | 2021-06-17T00:46:19.000Z | 2022-03-05T13:57:38.000Z | utils/dataloader.py | Jiaqi0602/adversarial-attack-from-leakage | 90db721bed10094ac7d458b232ad5b1573884338 | [
"BSD-3-Clause"
] | null | null | null | utils/dataloader.py | Jiaqi0602/adversarial-attack-from-leakage | 90db721bed10094ac7d458b232ad5b1573884338 | [
"BSD-3-Clause"
] | null | null | null | from inversefed import consts
import torch
from torchvision import datasets, transforms
| 39.653061 | 104 | 0.609882 |
65be1830984a29d7acd4c26b6de2aa0995caf8fb | 10,651 | py | Python | hintedhandoff_test.py | Ankou76ers/cassandra-dtest | 54f5a983738a1580fbbe43bdb7201ff9b2664401 | [
"Apache-2.0"
] | 44 | 2017-07-13T14:20:42.000Z | 2022-03-27T23:55:27.000Z | hintedhandoff_test.py | Ankou76ers/cassandra-dtest | 54f5a983738a1580fbbe43bdb7201ff9b2664401 | [
"Apache-2.0"
] | 64 | 2017-07-26T16:06:01.000Z | 2022-03-17T22:57:03.000Z | hintedhandoff_test.py | Ankou76ers/cassandra-dtest | 54f5a983738a1580fbbe43bdb7201ff9b2664401 | [
"Apache-2.0"
] | 105 | 2017-07-13T14:28:14.000Z | 2022-03-23T04:22:46.000Z | import os
import time
import pytest
import logging
from cassandra import ConsistencyLevel
from dtest import Tester, create_ks
from tools.data import create_c1c2_table, insert_c1c2, query_c1c2
from tools.assertions import assert_stderr_clean
since = pytest.mark.since
ported_to_in_jvm = pytest.mark.ported_to_in_jvm
logger = logging.getLogger(__name__)
| 40.192453 | 118 | 0.648202 |
65be1ffede01306450a5f34b42845bf53968f1d8 | 248 | py | Python | pre_definition/solve_caller.py | sr9000/stepik_code_task_baking | 60a5197f659db1734132eeb9d82624f1b7aaeb3f | [
"MIT"
] | null | null | null | pre_definition/solve_caller.py | sr9000/stepik_code_task_baking | 60a5197f659db1734132eeb9d82624f1b7aaeb3f | [
"MIT"
] | null | null | null | pre_definition/solve_caller.py | sr9000/stepik_code_task_baking | 60a5197f659db1734132eeb9d82624f1b7aaeb3f | [
"MIT"
] | null | null | null | from collections.abc import Iterable as ABCIterable
| 22.545455 | 51 | 0.665323 |
65bea9d189e5ba73f3c48d6d3eae40bf9da3717b | 817 | py | Python | wikipedia_test.py | pedrogengo/TopicBlob | e6a7736d39c7a174d0289b21c152cd8bb02f2669 | [
"Apache-2.0"
] | null | null | null | wikipedia_test.py | pedrogengo/TopicBlob | e6a7736d39c7a174d0289b21c152cd8bb02f2669 | [
"Apache-2.0"
] | null | null | null | wikipedia_test.py | pedrogengo/TopicBlob | e6a7736d39c7a174d0289b21c152cd8bb02f2669 | [
"Apache-2.0"
] | null | null | null | import wikipedia
from topicblob import TopicBlob
#get random wikipeida summaries
wiki_pages = ["Facebook","New York City","Barack Obama","Wikipedia","Topic Modeling","Python (programming language)","Snapchat"]
wiki_pages = ["Facebook","New York City","Barack Obama"]
texts = []
for page in wiki_pages:
text = wikipedia.summary(page)
#print(text)
texts.append(text)
tb = TopicBlob(texts, 20, 50)
#Do topic search for social
topic_search = tb.search_docs_by_topics("social")
print(topic_search)
print("\n")
#Do a ranked search for president
search = tb.ranked_search_docs_by_words("president")
print(search)
print("\n")
#Find similar text for
print("Finding similar document for\n" + tb.blobs[0]["doc"])
print("\n")
sims = tb.get_sim(0)
for sim in sims.keys():
print(tb.get_doc(sim)) | 18.568182 | 128 | 0.71481 |
65c19e6d0f4a645a3e85871f601e50a70618990c | 215 | py | Python | component/model/dmp_model.py | 12rambau/damage_proxy_maps | 98a004bf4420c6ce1b7ecd77e426e8fe7d512f52 | [
"MIT"
] | 1 | 2021-09-01T18:27:19.000Z | 2021-09-01T18:27:19.000Z | component/model/dmp_model.py | 12rambau/damage_proxy_maps | 98a004bf4420c6ce1b7ecd77e426e8fe7d512f52 | [
"MIT"
] | 3 | 2021-06-01T10:15:36.000Z | 2021-10-07T10:00:16.000Z | component/model/dmp_model.py | 12rambau/damage_proxy_maps | 98a004bf4420c6ce1b7ecd77e426e8fe7d512f52 | [
"MIT"
] | 2 | 2021-06-01T10:16:03.000Z | 2021-06-10T12:43:47.000Z | from sepal_ui import model
from traitlets import Any
| 19.545455 | 39 | 0.693023 |
65c1e68e0dc7466b357152cbb876f5ad24ac99ef | 9,154 | py | Python | SaIL/envs/state_lattice_planner_env.py | yonetaniryo/SaIL | c7404024c7787184c3638e9730bd185373ed0bf6 | [
"BSD-3-Clause"
] | 12 | 2018-05-18T19:29:09.000Z | 2020-05-15T13:47:12.000Z | SaIL/envs/state_lattice_planner_env.py | yonetaniryo/SaIL | c7404024c7787184c3638e9730bd185373ed0bf6 | [
"BSD-3-Clause"
] | 1 | 2018-05-18T19:36:42.000Z | 2018-07-20T03:03:13.000Z | SaIL/envs/state_lattice_planner_env.py | yonetaniryo/SaIL | c7404024c7787184c3638e9730bd185373ed0bf6 | [
"BSD-3-Clause"
] | 10 | 2018-01-11T21:23:40.000Z | 2021-11-10T04:38:07.000Z | #!/usr/bin/env python
"""An environment that takes as input databases of environments and runs episodes,
where each episode is a search based planner. It then returns the average number of expansions,
and features (if training)
Author: Mohak Bhardwaj
"""
from collections import defaultdict
import numpy as np
import os
from SaIL.learners.supervised_regression_network import SupervisedRegressionNetwork
from planning_python.data_structures.priority_queue import PriorityQueue
from planning_python.planners.search_based_planner import SearchBasedPlanner
from planning_python.environment_interface.env_2d import Env2D
from planning_python.state_lattices.common_lattice.xy_analytic_lattice import XYAnalyticLattice
from planning_python.state_lattices.common_lattice.xyh_analytic_lattice import XYHAnalyticLattice
from planning_python.cost_functions.cost_function import PathLengthNoAng, DubinsPathLength
from planning_python.heuristic_functions.heuristic_function import EuclideanHeuristicNoAng, ManhattanHeuristicNoAng, DubinsHeuristic
from planning_python.data_structures.planning_problem import PlanningProblem
| 45.093596 | 192 | 0.693358 |
65c266ffeb9dad82408ef950252b4d7368839fc3 | 966 | py | Python | opi_dragon_api/auth/__init__.py | CEAC33/opi-dragon-api | 8f050a0466dab4aaeec13151b9f49990bbd73640 | [
"MIT"
] | null | null | null | opi_dragon_api/auth/__init__.py | CEAC33/opi-dragon-api | 8f050a0466dab4aaeec13151b9f49990bbd73640 | [
"MIT"
] | null | null | null | opi_dragon_api/auth/__init__.py | CEAC33/opi-dragon-api | 8f050a0466dab4aaeec13151b9f49990bbd73640 | [
"MIT"
] | null | null | null | from sanic_jwt import exceptions
users = [User(1, "opi-user", "~Zujh*B2D`9T!<j")]
username_table = {u.username: u for u in users}
userid_table = {u.user_id: u for u in users} | 29.272727 | 79 | 0.677019 |
65c2afb8b2d130681f854965474e19205bdcd378 | 5,087 | py | Python | tests/test_observable/test_dowhile.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | tests/test_observable/test_dowhile.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/test_observable/test_dowhile.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-08T08:23:08.000Z | 2020-05-08T08:23:08.000Z | import unittest
from rx.testing import TestScheduler, ReactiveTest
| 31.79375 | 68 | 0.525654 |
65c311baef365241a86f5ea9eee583a30d354076 | 1,459 | py | Python | ZAP_Scripts/passive/14-4-2-api-content-disposition-header.py | YaleUniversity/ZAP_ASVS_Checks | f69b57f5fe0bc196ffc57fb1bb0762ffb367c1cb | [
"MIT"
] | 3 | 2022-01-22T11:21:23.000Z | 2022-03-09T06:45:55.000Z | ZAP_Scripts/passive/14-4-2-api-content-disposition-header.py | YaleUniversity/ZAP_ASVS_Checks | f69b57f5fe0bc196ffc57fb1bb0762ffb367c1cb | [
"MIT"
] | null | null | null | ZAP_Scripts/passive/14-4-2-api-content-disposition-header.py | YaleUniversity/ZAP_ASVS_Checks | f69b57f5fe0bc196ffc57fb1bb0762ffb367c1cb | [
"MIT"
] | null | null | null | """
Script testing 14.4.2 control from OWASP ASVS 4.0:
'Verify that all API responses contain a Content-Disposition: attachment;
filename="api.json" header (or other appropriate filename for the content
type).'
The script will raise an alert if 'Content-Disposition' header is present but not follow the format - Content-Disposition: attachment; filename=
"""
| 41.685714 | 175 | 0.749143 |
65c34c95b750096053aaef54a2b648be5c44772c | 230 | py | Python | server.py | Peopple-Shopping-App/mockserver | c38c3f325e44f4eaba39cdbe24544e3181307218 | [
"MIT"
] | 1 | 2021-07-23T03:43:19.000Z | 2021-07-23T03:43:19.000Z | server.py | Peopple-Shopping-App/mockserver | c38c3f325e44f4eaba39cdbe24544e3181307218 | [
"MIT"
] | null | null | null | server.py | Peopple-Shopping-App/mockserver | c38c3f325e44f4eaba39cdbe24544e3181307218 | [
"MIT"
] | null | null | null | import uvicorn
if __name__ == '__main__':
<<<<<<< HEAD
uvicorn.run('app.main:app', host='0.0.0.0', port=80)
=======
uvicorn.run('app.main:app', host='0.0.0.0', port=2323)
>>>>>>> c583e3d93c9b7f8e76ce1d676a24740b62ef3552
| 23 | 58 | 0.630435 |
65c37b82e34797425fdb4ac383cf6c771dd605d3 | 399 | py | Python | 9020/main.py | yeonghoey/baekjoon | a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e | [
"MIT"
] | 1 | 2018-09-20T05:15:30.000Z | 2018-09-20T05:15:30.000Z | 9020/main.py | yeonghoey/baekjoon | a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e | [
"MIT"
] | null | null | null | 9020/main.py | yeonghoey/baekjoon | a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e | [
"MIT"
] | null | null | null | MAX_N = 10000 + 1
isprime = [True] * (MAX_N)
isprime[0] = False
isprime[1] = False
for i in range(2, MAX_N):
if not isprime[i]:
continue
for j in range(i+i, MAX_N, i):
isprime[j] = False
T = int(input())
for _ in range(T):
n = int(input())
for i in range(n//2, 1, -1):
if isprime[i] and isprime[n-i]:
print('%d %d' % (i, n-i))
break
| 21 | 39 | 0.513784 |
65c451b4c4af62ac430c54bacf4793ebfef0c2ef | 48,201 | py | Python | pysnmp-with-texts/DOCS-LOADBALANCING-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/DOCS-LOADBALANCING-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/DOCS-LOADBALANCING-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module DOCS-LOADBALANCING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DOCS-LOADBALANCING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:53:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
clabProjDocsis, = mibBuilder.importSymbols("CLAB-DEF-MIB", "clabProjDocsis")
docsIfCmtsCmStatusIndex, docsIfCmtsCmStatusEntry = mibBuilder.importSymbols("DOCS-IF-MIB", "docsIfCmtsCmStatusIndex", "docsIfCmtsCmStatusEntry")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Gauge32, Counter32, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Bits, TimeTicks, Counter64, Unsigned32, zeroDotZero, Integer32, iso, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "Counter32", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Bits", "TimeTicks", "Counter64", "Unsigned32", "zeroDotZero", "Integer32", "iso", "ObjectIdentity")
TimeStamp, TruthValue, TextualConvention, DisplayString, RowStatus, RowPointer, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TruthValue", "TextualConvention", "DisplayString", "RowStatus", "RowPointer", "MacAddress")
docsLoadBalanceMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2))
docsLoadBalanceMib.setRevisions(('2004-03-10 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: docsLoadBalanceMib.setRevisionsDescriptions(('Initial version of this mib module.',))
if mibBuilder.loadTexts: docsLoadBalanceMib.setLastUpdated('200403101700Z')
if mibBuilder.loadTexts: docsLoadBalanceMib.setOrganization('Cable Television Laboratories, Inc')
if mibBuilder.loadTexts: docsLoadBalanceMib.setContactInfo(' Postal: Cable Television Laboratories, Inc. 400 Centennial Parkway Louisville, Colorado 80027-1266 U.S.A. Phone: +1 303-661-9100 Fax: +1 303-661-9199 E-mail: mibs@cablelabs.com')
if mibBuilder.loadTexts: docsLoadBalanceMib.setDescription('This is the MIB Module for the load balancing. Load balancing is manageable on a per-CM basis. Each CM is assigned: a) to a set of channels (a Load Balancing Group) among which it can be moved by the CMTS b) a policy which governs if and when the CM can be moved c) a priority value which can be used by the CMTS in order to select CMs to move.')
docsLoadBalNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 0))
docsLoadBalMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1))
docsLoadBalSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1))
docsLoadBalChgOverObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2))
docsLoadBalGrpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3))
docsLoadBalPolicyObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4))
docsLoadBalChgOverGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1))
docsLoadBalEnable = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalEnable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalEnable.setDescription('Setting this object to true(1) enables internal autonomous load balancing operation on this CMTS. Setting it to false(2) disables the autonomous load balancing operations. However moving a cable modem via docsLoadBalChgOverTable is allowed even when this object is set to false(2).')
docsLoadBalChgOverMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 1), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverMacAddress.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverMacAddress.setDescription('The mac address of the cable modem that the CMTS instructs to move to a new downstream frequency and/or upstream channel.')
docsLoadBalChgOverDownFrequency = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000000))).setUnits('hertz').setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverDownFrequency.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverDownFrequency.setDescription('The new downstream frequency to which the cable modem is instructed to move. The value 0 indicates that the CMTS does not create a TLV for the downstream frequency in the DCC-REQ message. This object has no meaning when executing UCC operations.')
docsLoadBalChgOverUpChannelId = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverUpChannelId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverUpChannelId.setDescription('The new upstream channel ID to which the cable modem is instructed to move. The value -1 indicates that the CMTS does not create a TLV for the upstream channel ID in the channel change request.')
docsLoadBalChgOverInitTech = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 4), ChannelChgInitTechMap()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverInitTech.setDescription("The initialization technique that the cable modem is instructed to use when performing change over operation. By default this object is initialized with all the defined bits having a value of '1'.")
docsLoadBalChgOverCmd = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("dcc", 2), ("ucc", 3))).clone('any')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverCmd.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverCmd.setDescription('The change over command that the CMTS is instructed use when performing change over operation. The any(1) value indicates that the CMTS is to use its own algorithm to determine the appropriate command.')
docsLoadBalChgOverCommit = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 6), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverCommit.setReference('Data-Over-Cable Service Interface Specifications: Radio Frequency Interface Specification SP-RFIv2.0-I04-030730, Sections C.4.1, 11.4.5.1.')
if mibBuilder.loadTexts: docsLoadBalChgOverCommit.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverCommit.setDescription("The command to execute the DCC/UCC operation when set to true(1). The following are reasons for rejecting an SNMP SET to this object: - The MAC address in docsLoadBalChgOverMacAddr is not an existing MAC address in docsIfCmtsMacToCmEntry. - docsLoadBalChgOverCmd is ucc(3) and docsLoadBalChgOverUpChannelId is '-1', - docsLoadBalChgOverUpChannelId is '-1' and docsLoadBalChgOverDownFrequency is '0'. - DCC/UCC operation is currently being executed for the cable modem, on which the new command is committed, specifically if the value of docsLoadBalChgOverStatusValue is one of: messageSent(1), modemDeparting(4), waitToSendMessage(6). - An UCC operation is committed for a non-existing upstream channel ID or the corresponding ifOperStatus is down(2). - A DCC operation is committed for an invalid or non-existing downstream frequency, or the corresponding ifOperStatus is down(2). In those cases, the SET is rejected with an error code 'commitFailed'. After processing the SNMP SET the information in docsLoadBalChgOverGroup is updated in a corresponding entry in docsLoadBalChgOverStatusEntry. Reading this object always returns false(2).")
docsLoadBalChgOverLastCommit = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverLastCommit.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverLastCommit.setDescription('The value of sysUpTime when docsLoadBalChgOverCommit was last set to true. Zero if never set.')
docsLoadBalChgOverStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2), )
if mibBuilder.loadTexts: docsLoadBalChgOverStatusTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusTable.setDescription('A table of CMTS operation entries to reports the status of cable modems instructed to move to a new downstream and/or upstream channel. Using the docsLoadBalChgOverGroup objects. An entry in this table is created or updated for the entry with docsIfCmtsCmStatusIndex that correspond to the cable modem MAC address of the Load Balancing operation. docsLoadBalChgOverCommit to true(1).')
docsLoadBalChgOverStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1), ).setIndexNames((0, "DOCS-IF-MIB", "docsIfCmtsCmStatusIndex"))
if mibBuilder.loadTexts: docsLoadBalChgOverStatusEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusEntry.setDescription('A CMTS operation entry to instruct a cable modem to move to a new downstream frequency and/or upstream channel. An operator can use this to initiate an operation in CMTS to instruct the selected cable modem to move to a new downstream frequency and/or upstream channel.')
docsLoadBalChgOverStatusMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusMacAddr.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusMacAddr.setDescription('The mac address set in docsLoadBalChgOverMacAddress.')
docsLoadBalChgOverStatusDownFreq = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000000))).setUnits('hertz').setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusDownFreq.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusDownFreq.setDescription('The Downstream frequency set in docsLoadBalChgOverDownFrequency.')
docsLoadBalChgOverStatusUpChnId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpChnId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpChnId.setDescription('The upstream channel ID set in docsLoadBalChgOverUpChannelId.')
docsLoadBalChgOverStatusInitTech = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 4), ChannelChgInitTechMap()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusInitTech.setDescription('The initialization technique set in docsLoadBalChgOverInitTech.')
docsLoadBalChgOverStatusCmd = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("dcc", 2), ("ucc", 3))).clone('any')).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusCmd.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusCmd.setDescription('The load balancing command set in docsLoadBalChgOverCmd.')
docsLoadBalChgOverStatusValue = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("messageSent", 1), ("noOpNeeded", 2), ("modemDeparting", 3), ("waitToSendMessage", 4), ("cmOperationRejected", 5), ("cmtsOperationRejected", 6), ("timeOutT13", 7), ("timeOutT15", 8), ("rejectinit", 9), ("success", 10))).clone('waitToSendMessage')).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusValue.setReference('Data-Over-Cable Service Interface Specifications: Radio Frequency Interface Specification SP-RFIv2.0-I04-030730, Sections C.4.1, 11.4.5.1.')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusValue.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusValue.setDescription("The status of the specified DCC/UCC operation. The enumerations are: messageSent(1): The CMTS has sent change over request message to the cable modem. noOpNeed(2): A operation was requested in which neither the DS Frequency nor the Upstream Channel ID was changed. An active value in this entry's row status indicates that no CMTS operation is required. modemDeparting(3): The cable modem has responded with a change over response of either a DCC-RSP with a confirmation code of depart(180) or a UCC-RSP. waitToSendMessage(4): The specified operation is active and CMTS is waiting to send the channel change message with channel info to the cable modem. cmOperationRejected(5): Channel Change (such as DCC or UCC) operation was rejected by the cable modem. cmtsOperationRejected(6) Channel Change (such as DCC or UCC) operation was rejected by the Cable modem Termination System. timeOutT13(7): Failure due to no DCC-RSP with confirmation code depart(180) received prior to expiration of the T13 timer. timeOutT15(8): T15 timer timed out prior to the arrival of a bandwidth request, RNG-REQ message, or DCC-RSP message with confirmation code of arrive(181) from the cable modem. rejectInit(9): DCC operation rejected due to unsupported initialization tech requested. success(10): CMTS received an indication that the CM successfully completed the change over operation. e.g., If an initialization technique of re-initialize the MAC is used, success in indicated by the receipt of a DCC-RSP message with a confirmation code of depart(180). In all other cases, success is indicated by: (1) the CMTS received a DCC-RSP message with confirmation code of arrive(181) or (2) the CMTS internally confirms the presence of the CM on the new channel.")
docsLoadBalChgOverStatusUpdate = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpdate.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpdate.setDescription('The value of sysUpTime when docsLoadBalChgOverStatusValue was last updated.')
docsLoadBalGrpTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1), )
if mibBuilder.loadTexts: docsLoadBalGrpTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpTable.setDescription('This table contains the attributes of the load balancing groups present in this CMTS.')
docsLoadBalGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"))
if mibBuilder.loadTexts: docsLoadBalGrpEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpEntry.setDescription('A set of attributes of load balancing group in the CMTS. It is index by a docsLoadBalGrpId which is unique within a CMTS. Entries in this table persist after CMTS initialization.')
docsLoadBalGrpId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalGrpId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpId.setDescription('A unique index assigned to the load balancing group by the CMTS.')
docsLoadBalGrpIsRestricted = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpIsRestricted.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpIsRestricted.setDescription('A value true(1)Indicates type of load balancing group. A Restricted Load Balancing Group is associated to a specific provisioned set of cable modems. Restricted Load Balancing Group is used to accommodate a topology specific or provisioning specific restriction. Example such as a group that are reserved for business customers). Setting this object to true(1) means it is a Restricted Load Balancing type and setting it to false(2) means it is a General Load Balancing group type. This object should not be changed while its group ID is referenced by an active entry in docsLoadBalRestrictCmEntry.')
docsLoadBalGrpInitTech = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 3), ChannelChgInitTechMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpInitTech.setDescription("The initialization techniques that the CMTS can use when load balancing cable modems in the load balancing group. By default this object is initialized with all the defined bits having a value of '1'.")
docsLoadBalGrpDefaultPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpDefaultPolicy.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpDefaultPolicy.setDescription('Each Load Balancing Group has a default Load Balancing Policy. A policy is described by a set of conditions (rules) that govern the load balancing process for a cable modem. The CMTS assigns this Policy ID value to a cable modem associated with the group ID when the cable modem does not signal a Policy ID during registration. The Policy ID value is intended to be a numeric reference to a row entry in docsLoadBalPolicyEntry. However, It is not required to have an existing or active entry in docsLoadBalPolicyEntry when setting the value of docsLoadBalGrpDefaultPolicy, in which case it indicates no policy is associated with the load Balancing Group. The Policy ID of value 0 is reserved to indicate no policy is associated with the load balancing group.')
docsLoadBalGrpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 5), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpEnable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpEnable.setDescription('Setting this object to true(1) enables internal autonomous load balancing on this group. Setting it to false(2) disables the load balancing operation on this group.')
docsLoadBalGrpChgOverSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalGrpChgOverSuccess.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpChgOverSuccess.setDescription('The number of successful load balancing change over operations initiated within this load balancing group.')
docsLoadBalGrpChgOverFails = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalGrpChgOverFails.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpChgOverFails.setDescription('The number of failed load balancing change over operations initiated within this load balancing group.')
docsLoadBalGrpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpStatus.setDescription("Indicates the status of the row in this table. Setting this object to 'destroy' or 'notInService' for a group ID entry already referenced by docsLoadBalChannelEntry, docsLoadBalChnPairsEntry or docsLoadBalRestrictCmEntry returns an error code inconsistentValue.")
docsLoadBalChannelTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2), )
if mibBuilder.loadTexts: docsLoadBalChannelTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelTable.setDescription('Lists all upstream and downstream channels associated with load balancing groups.')
docsLoadBalChannelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalChannelIfIndex"))
if mibBuilder.loadTexts: docsLoadBalChannelEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelEntry.setDescription('Lists a specific upstream or downstream, within a load Balancing group. An entry in this table exists for each ifEntry with an ifType of docsCableDownstream(128) and docsCableUpstream(129) associated with the Load Balancing Group. Entries in this table persist after CMTS initialization.')
docsLoadBalChannelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: docsLoadBalChannelIfIndex.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelIfIndex.setDescription('The ifIndex of either the downstream or upstream.')
docsLoadBalChannelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalChannelStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelStatus.setDescription("Indicates the status of the rows in this table. Creating entries in this table requires an existing value for docsLoadBalGrpId in docsLoadBalGrpEntry and an existing value of docsLoadBalChannelIfIndex in ifEntry, otherwise is rejected with error 'noCreation'. Setting this object to 'destroy' or 'notInService for a a row entry that is being referenced by docsLoadBalChnPairsEntry is rejected with error code inconsistentValue.")
docsLoadBalChnPairsTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3), )
if mibBuilder.loadTexts: docsLoadBalChnPairsTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsTable.setDescription('This table contains pairs of upstream channels within a Load Balancing Group. Entries in this table are used to override the initialization techniques defined for the associated Load Balancing Group.')
docsLoadBalChnPairsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsIfIndexDepart"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsIfIndexArrive"))
if mibBuilder.loadTexts: docsLoadBalChnPairsEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsEntry.setDescription('An entry in this table describes a channel pair for which an initialization technique override is needed. On a CMTS which supports logical upstream channels (ifType is equal to docsCableUpstreamChannel(205)), the entries in this table correspond to pairs of ifType 205. On a CMTS which only supports physical upstream channels (ifType is equal to docsCableUpstream(129)), the entries in this table correspond to pairs of ifType 129. Entries in this table persist after CMTS initialization.')
docsLoadBalChnPairsIfIndexDepart = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexDepart.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexDepart.setDescription('This index indicates the ifIndex of the upstream channel from which a cable modem would depart in a load balancing channel change operation.')
docsLoadBalChnPairsIfIndexArrive = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexArrive.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexArrive.setDescription('This index indicates the ifIndex of the upstream channel on which a cable modem would arrive in a load balancing channel change operation.')
docsLoadBalChnPairsOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("operational", 1), ("notOperational", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChnPairsOperStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsOperStatus.setDescription('Operational status of the channel pair. The value operational(1) indicates that ifOperStatus of both channels is up(1). The value notOperational(2) means that ifOperStatus of one or both is not up(1).')
docsLoadBalChnPairsInitTech = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 4), ChannelChgInitTechMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalChnPairsInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsInitTech.setDescription("Specifies initialization technique for load balancing for the Depart/Arrive pair. By default this object's value is the initialization technique configured for the Load Balancing Group indicated by docsLoadBalGrpId.")
docsLoadBalChnPairsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalChnPairsRowStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsRowStatus.setDescription("The object for conceptual rows creation. An attempt to create a row with values for docsLoadBalChnPairsIfIndexDepart or docsLoadBalChnPairsIfIndexArrive which are not a part of the Load Balancing Group (or for a 2.0 CMTS are not logical channels (ifType 205)) are rejected with a 'noCreation' error status reported. There is no restriction on settings columns in this table when the value of docsLoadBalChnPairsRowStatus is active(1).")
docsLoadBalRestrictCmTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4), )
if mibBuilder.loadTexts: docsLoadBalRestrictCmTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmTable.setDescription('Lists all cable modems in each Restricted Load Balancing Groups.')
docsLoadBalRestrictCmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmIndex"))
if mibBuilder.loadTexts: docsLoadBalRestrictCmEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmEntry.setDescription('An entry of modem within a restricted load balancing group type. An entry represents a cable modem that is associated with the Restricted Load Balancing Group ID of a Restricted Load Balancing Group. Entries in this table persist after CMTS initialization.')
docsLoadBalRestrictCmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalRestrictCmIndex.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmIndex.setDescription('The index that uniquely identifies an entry which represents restricted cable modem(s) within each Restricted Load Balancing Group.')
docsLoadBalRestrictCmMACAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalRestrictCmMACAddr.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmMACAddr.setDescription('Mac Address of the cable modem within the restricted load balancing group.')
docsLoadBalRestrictCmMacAddrMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(6, 6), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalRestrictCmMacAddrMask.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmMacAddrMask.setDescription('A bit mask acting as a wild card to associate a set of modem MAC addresses to the same Group ID. Cable modem look up is performed first with entries containing this value not null, if several entries match, the largest consecutive bit match from MSB to LSB is used. Empty value is equivalent to the bit mask all in ones.')
docsLoadBalRestrictCmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalRestrictCmStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmStatus.setDescription("Indicates the status of the rows in this table. The attempt to create an entry associated to a group ID with docsLoadBalGrpIsRestricted equal to false(2) returns an error 'noCreation'. There is no restriction on settings columns in this table any time.")
docsLoadBalPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1), )
if mibBuilder.loadTexts: docsLoadBalPolicyTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyTable.setDescription('This table describes the set of Load Balancing policies. Rows in this table might be referenced by rows in docsLoadBalGrpEntry.')
docsLoadBalPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyRuleId"))
if mibBuilder.loadTexts: docsLoadBalPolicyEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyEntry.setDescription('Entries containing rules for policies. When a load balancing policy is defined by multiple rules, all the rules apply. Load balancing rules can be created to allow for specific vendor-defined load balancing actions. However there is a basic rule that the CMTS is required to support by configuring a pointer in docsLoadBalPolicyRulePtr to the table docsLoadBalBasicRuleTable. Vendor specific rules may be added by pointing the object docsLoadBalPolicyRulePtr to proprietary mib structures. Entries in this table persist after CMTS initialization.')
docsLoadBalPolicyId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalPolicyId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyId.setDescription('An index identifying the Load Balancing Policy.')
docsLoadBalPolicyRuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalPolicyRuleId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyRuleId.setDescription('An index for the rules entries associated within a policy.')
docsLoadBalPolicyRulePtr = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 3), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalPolicyRulePtr.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyRulePtr.setDescription('A pointer to an entry in a rule table. E.g., docsLoadBalBasicRuleEnable in docsLoadBalBasicRuleEntry. A value pointing to zeroDotZero, an inactive Row or a non-existing entry is treated as no rule defined for this policy entry.')
docsLoadBalPolicyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalPolicyRowStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyRowStatus.setDescription("The status of this conceptual row. There is no restriction on settings columns in this table when the value of docsLoadBalPolicyRowStatus is active(1). Setting this object to 'destroy' or 'notInService' for a row entry that is being referenced by docsLoadBalGrpDefaultPolicy in docsLoadBalGrpEntry returns an error code inconsistentValue.")
docsLoadBalBasicRuleTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2), )
if mibBuilder.loadTexts: docsLoadBalBasicRuleTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleTable.setDescription('DOCSIS defined basic ruleset for load Balancing Policy. This table enables of disable load balancing for the groups pointing to this ruleset in the policy group.')
docsLoadBalBasicRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleId"))
if mibBuilder.loadTexts: docsLoadBalBasicRuleEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleEntry.setDescription('An entry of DOCSIS defined basic ruleset. The object docsLoadBalBasicRuleEnable is used for instantiating an entry in this table via a RowPointer. Entries in this table persist after CMTS initialization.')
docsLoadBalBasicRuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalBasicRuleId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleId.setDescription('The unique index for this row.')
docsLoadBalBasicRuleEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("disabledPeriod", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleEnable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleEnable.setDescription('When using this ruleset, load balancing is enabled or disabled by the values enabled(1) and disabled(2) respectively. Additionally, a Load Balancing disabling period is defined in docsLoadBalBasicRuleDisStart and docsLoadBalBasicRuleDisPeriod if this object value is set to disabledPeriod(3).')
docsLoadBalBasicRuleDisStart = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisStart.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisStart.setDescription('if object docsLoadBalBasicRuleEnable is disablePeriod(3) Load Balancing is disabled starting at this object value time (seconds from 12 AM). Otherwise, this object has no meaning.')
docsLoadBalBasicRuleDisPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisPeriod.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisPeriod.setDescription('If object docsLoadBalBasicRuleEnable is disablePeriod(3) Load Balancing is disabled for the period of time defined between docsLoadBalBasicRuleDisStart and docsLoadBalBasicRuleDisStart plus the period of time of docsLoadBalBasicRuleDisPeriod. Otherwise, this object value has no meaning.')
docsLoadBalBasicRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleRowStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleRowStatus.setDescription("This object is to create or delete rows in this table. There is no restriction for changing this row status or object's values in this table at any time.")
docsLoadBalCmtsCmStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4), )
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusTable.setDescription('The list contains the load balancing attributes associated with the cable modem. ')
docsLoadBalCmtsCmStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1), )
docsIfCmtsCmStatusEntry.registerAugmentions(("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusEntry"))
docsLoadBalCmtsCmStatusEntry.setIndexNames(*docsIfCmtsCmStatusEntry.getIndexNames())
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusEntry.setDescription('Additional objects for docsIfCmtsCmStatusTable entry that relate to load balancing ')
docsLoadBalCmtsCmStatusGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1, 1), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusGroupId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusGroupId.setDescription('The Group ID associated with this cable modem.')
docsLoadBalCmtsCmStatusPolicyId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1, 2), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPolicyId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPolicyId.setDescription('The Policy ID associated with this cable modem.')
docsLoadBalCmtsCmStatusPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPriority.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPriority.setDescription('The Priority associated with this cable modem.')
docsLoadBalConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2))
docsLoadBalCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 1))
docsLoadBalGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2))
docsLoadBalBasicCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 1, 1)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalSystemGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalParametersGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalPoliciesGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalBasicCompliance = docsLoadBalBasicCompliance.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicCompliance.setDescription('The compliance statement for DOCSIS load balancing systems.')
docsLoadBalSystemGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 1)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalEnable"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverMacAddress"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverDownFrequency"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverUpChannelId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverCmd"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverCommit"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverLastCommit"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusMacAddr"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusDownFreq"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusUpChnId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusCmd"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusValue"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusUpdate"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalSystemGroup = docsLoadBalSystemGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalSystemGroup.setDescription('A collection of objects providing system-wide parameters for load balancing.')
docsLoadBalParametersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 2)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpIsRestricted"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpDefaultPolicy"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpEnable"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpChgOverSuccess"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpChgOverFails"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChannelStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsOperStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsRowStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmMACAddr"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmMacAddrMask"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalParametersGroup = docsLoadBalParametersGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalParametersGroup.setDescription('A collection of objects containing the load balancing parameters.')
docsLoadBalPoliciesGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 3)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyRulePtr"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalPoliciesGroup = docsLoadBalPoliciesGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPoliciesGroup.setDescription('A collection of objects providing policies.')
docsLoadBalBasicRuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 4)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleEnable"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleDisStart"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleDisPeriod"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalBasicRuleGroup = docsLoadBalBasicRuleGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleGroup.setDescription('DOCSIS defined basic Ruleset for load balancing policies.')
docsLoadBalCmtsCmStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 5)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusGroupId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusPolicyId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusPriority"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalCmtsCmStatusGroup = docsLoadBalCmtsCmStatusGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusGroup.setDescription('Cable mode status extension objects.')
mibBuilder.exportSymbols("DOCS-LOADBALANCING-MIB", docsLoadBalChgOverStatusEntry=docsLoadBalChgOverStatusEntry, docsLoadBalCmtsCmStatusTable=docsLoadBalCmtsCmStatusTable, docsLoadBalCmtsCmStatusEntry=docsLoadBalCmtsCmStatusEntry, docsLoadBalBasicRuleDisStart=docsLoadBalBasicRuleDisStart, docsLoadBalBasicCompliance=docsLoadBalBasicCompliance, docsLoadBalChnPairsIfIndexDepart=docsLoadBalChnPairsIfIndexDepart, docsLoadBalChgOverStatusValue=docsLoadBalChgOverStatusValue, docsLoadBalMibObjects=docsLoadBalMibObjects, docsLoadBalEnable=docsLoadBalEnable, docsLoadBalGrpChgOverFails=docsLoadBalGrpChgOverFails, docsLoadBalCmtsCmStatusPriority=docsLoadBalCmtsCmStatusPriority, docsLoadBalBasicRuleDisPeriod=docsLoadBalBasicRuleDisPeriod, docsLoadBalChgOverStatusMacAddr=docsLoadBalChgOverStatusMacAddr, docsLoadBalGrpDefaultPolicy=docsLoadBalGrpDefaultPolicy, docsLoadBalGrpInitTech=docsLoadBalGrpInitTech, docsLoadBalRestrictCmStatus=docsLoadBalRestrictCmStatus, docsLoadBalChgOverGroup=docsLoadBalChgOverGroup, docsLoadBalChnPairsIfIndexArrive=docsLoadBalChnPairsIfIndexArrive, docsLoadBalChgOverLastCommit=docsLoadBalChgOverLastCommit, docsLoadBalPolicyEntry=docsLoadBalPolicyEntry, docsLoadBalChgOverStatusUpdate=docsLoadBalChgOverStatusUpdate, docsLoadBalChannelEntry=docsLoadBalChannelEntry, docsLoadBalChnPairsEntry=docsLoadBalChnPairsEntry, docsLoadBalGrpIsRestricted=docsLoadBalGrpIsRestricted, docsLoadBalSystem=docsLoadBalSystem, docsLoadBalChnPairsInitTech=docsLoadBalChnPairsInitTech, docsLoadBalBasicRuleGroup=docsLoadBalBasicRuleGroup, docsLoadBalChgOverStatusUpChnId=docsLoadBalChgOverStatusUpChnId, docsLoadBalParametersGroup=docsLoadBalParametersGroup, docsLoadBalBasicRuleEntry=docsLoadBalBasicRuleEntry, docsLoadBalRestrictCmMacAddrMask=docsLoadBalRestrictCmMacAddrMask, docsLoadBalPolicyRulePtr=docsLoadBalPolicyRulePtr, docsLoadBalGrpStatus=docsLoadBalGrpStatus, docsLoadBalSystemGroup=docsLoadBalSystemGroup, docsLoadBalGrpChgOverSuccess=docsLoadBalGrpChgOverSuccess, 
docsLoadBalPolicyObjects=docsLoadBalPolicyObjects, docsLoadBalGroups=docsLoadBalGroups, docsLoadBalanceMib=docsLoadBalanceMib, docsLoadBalChgOverInitTech=docsLoadBalChgOverInitTech, docsLoadBalChgOverStatusDownFreq=docsLoadBalChgOverStatusDownFreq, docsLoadBalGrpObjects=docsLoadBalGrpObjects, docsLoadBalChnPairsTable=docsLoadBalChnPairsTable, docsLoadBalCompliances=docsLoadBalCompliances, docsLoadBalCmtsCmStatusPolicyId=docsLoadBalCmtsCmStatusPolicyId, docsLoadBalGrpEnable=docsLoadBalGrpEnable, docsLoadBalBasicRuleRowStatus=docsLoadBalBasicRuleRowStatus, docsLoadBalChgOverStatusInitTech=docsLoadBalChgOverStatusInitTech, docsLoadBalGrpTable=docsLoadBalGrpTable, docsLoadBalChgOverCmd=docsLoadBalChgOverCmd, docsLoadBalGrpEntry=docsLoadBalGrpEntry, docsLoadBalRestrictCmIndex=docsLoadBalRestrictCmIndex, docsLoadBalChannelTable=docsLoadBalChannelTable, docsLoadBalChgOverObjects=docsLoadBalChgOverObjects, docsLoadBalPolicyTable=docsLoadBalPolicyTable, docsLoadBalBasicRuleTable=docsLoadBalBasicRuleTable, docsLoadBalGrpId=docsLoadBalGrpId, docsLoadBalChgOverDownFrequency=docsLoadBalChgOverDownFrequency, docsLoadBalChgOverUpChannelId=docsLoadBalChgOverUpChannelId, docsLoadBalChgOverCommit=docsLoadBalChgOverCommit, docsLoadBalPolicyRowStatus=docsLoadBalPolicyRowStatus, docsLoadBalRestrictCmMACAddr=docsLoadBalRestrictCmMACAddr, docsLoadBalPolicyId=docsLoadBalPolicyId, docsLoadBalRestrictCmTable=docsLoadBalRestrictCmTable, PYSNMP_MODULE_ID=docsLoadBalanceMib, docsLoadBalNotifications=docsLoadBalNotifications, docsLoadBalBasicRuleEnable=docsLoadBalBasicRuleEnable, docsLoadBalPolicyRuleId=docsLoadBalPolicyRuleId, docsLoadBalChnPairsOperStatus=docsLoadBalChnPairsOperStatus, docsLoadBalChgOverMacAddress=docsLoadBalChgOverMacAddress, docsLoadBalRestrictCmEntry=docsLoadBalRestrictCmEntry, docsLoadBalBasicRuleId=docsLoadBalBasicRuleId, docsLoadBalChannelIfIndex=docsLoadBalChannelIfIndex, docsLoadBalCmtsCmStatusGroup=docsLoadBalCmtsCmStatusGroup, 
docsLoadBalConformance=docsLoadBalConformance, docsLoadBalCmtsCmStatusGroupId=docsLoadBalCmtsCmStatusGroupId, docsLoadBalChannelStatus=docsLoadBalChannelStatus, docsLoadBalChnPairsRowStatus=docsLoadBalChnPairsRowStatus, docsLoadBalChgOverStatusTable=docsLoadBalChgOverStatusTable, ChannelChgInitTechMap=ChannelChgInitTechMap, docsLoadBalChgOverStatusCmd=docsLoadBalChgOverStatusCmd, docsLoadBalPoliciesGroup=docsLoadBalPoliciesGroup)
| 187.552529 | 4,381 | 0.801954 |
65c5048befc6241d54580f74f3551d1f18adabab | 671 | py | Python | src/interview-cake/permutation-palindrome/test_permutation_palindrome.py | nwthomas/code-challenges | 49c2532ff597495474e67b13f2ed9b9ad93d40b5 | [
"MIT"
] | 1 | 2020-12-11T05:54:59.000Z | 2020-12-11T05:54:59.000Z | src/interview-cake/permutation-palindrome/test_permutation_palindrome.py | nwthomas/code-challenges | 49c2532ff597495474e67b13f2ed9b9ad93d40b5 | [
"MIT"
] | 1 | 2021-04-10T06:53:30.000Z | 2021-04-10T06:53:30.000Z | src/interview-cake/permutation-palindrome/test_permutation_palindrome.py | nwthomas/code-challenges | 49c2532ff597495474e67b13f2ed9b9ad93d40b5 | [
"MIT"
] | 7 | 2019-11-24T12:10:35.000Z | 2020-12-14T22:36:31.000Z | from permutation_palindrome import is_permutation_palindrome
import unittest
if __name__ == "__main__":
unittest.main()
| 35.315789 | 96 | 0.754098 |
65c5b96f2aa86a20d59448029f070a81f3667eea | 3,199 | py | Python | sagemaker_tidymodels/tidymodels.py | tmastny/sagemaker-tidymodels | fdb6a71d2ca54b7ffce7c5bab12067413ebb4026 | [
"MIT"
] | 3 | 2020-11-23T18:16:05.000Z | 2021-03-23T16:48:24.000Z | sagemaker_tidymodels/tidymodels.py | tmastny/sagemaker-tidymodels | fdb6a71d2ca54b7ffce7c5bab12067413ebb4026 | [
"MIT"
] | 4 | 2020-07-25T21:49:55.000Z | 2020-08-03T15:37:49.000Z | sagemaker_tidymodels/tidymodels.py | tmastny/sagemaker-tidymodels | fdb6a71d2ca54b7ffce7c5bab12067413ebb4026 | [
"MIT"
] | null | null | null | from sagemaker.estimator import Framework
from sagemaker.model import FrameworkModel
from sagemaker.predictor import RealTimePredictor
import subprocess
| 30.759615 | 147 | 0.660832 |
65c60fb41ff8d93478e349410ca2f8f7c41a7cea | 835 | py | Python | articles/pdf2bib.py | kenbeese/articles | 389ed551fb5ed0c6a5c64726e527acd6154e83f5 | [
"BSD-3-Clause"
] | 4 | 2015-02-07T10:04:50.000Z | 2022-01-17T18:33:26.000Z | articles/pdf2bib.py | termoshtt/articles | 389ed551fb5ed0c6a5c64726e527acd6154e83f5 | [
"BSD-3-Clause"
] | null | null | null | articles/pdf2bib.py | termoshtt/articles | 389ed551fb5ed0c6a5c64726e527acd6154e83f5 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
| 27.833333 | 86 | 0.589222 |
65c64d0d6e346b2c86db0238e477f1aee46d6160 | 2,313 | py | Python | tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py | DanMitroshin/tensorflow | 74aa353842f1788bdb7506ecceaf6ba99140e165 | [
"Apache-2.0"
] | 4 | 2021-06-02T03:21:44.000Z | 2021-11-08T09:47:24.000Z | tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py | DanMitroshin/tensorflow | 74aa353842f1788bdb7506ecceaf6ba99140e165 | [
"Apache-2.0"
] | 7 | 2021-11-10T20:21:23.000Z | 2022-03-22T19:18:39.000Z | tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py | DanMitroshin/tensorflow | 74aa353842f1788bdb7506ecceaf6ba99140e165 | [
"Apache-2.0"
] | 3 | 2021-05-09T13:41:29.000Z | 2021-06-24T06:12:05.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing the TextLineDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
if __name__ == "__main__":
test.main()
| 39.20339 | 89 | 0.750108 |
65c8b9280ebaf25f0fb4b1658671be5a8f2ed641 | 228 | py | Python | apps/news/forms.py | LishenZz/my_project | c2ac8199efb467e303d343ea34ed1969b64280d7 | [
"Apache-2.0"
] | null | null | null | apps/news/forms.py | LishenZz/my_project | c2ac8199efb467e303d343ea34ed1969b64280d7 | [
"Apache-2.0"
] | null | null | null | apps/news/forms.py | LishenZz/my_project | c2ac8199efb467e303d343ea34ed1969b64280d7 | [
"Apache-2.0"
] | null | null | null | #Author:Li Shen
from django import forms
from apps.forms import FormMixin
| 22.8 | 46 | 0.79386 |
65c9968621cc82c96799c6059ed2551c70dfc1c5 | 6,446 | py | Python | data_preprocessing.py | hwRG/FastSpeech2-Pytorch-old-man_city | c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573 | [
"MIT"
] | null | null | null | data_preprocessing.py | hwRG/FastSpeech2-Pytorch-old-man_city | c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573 | [
"MIT"
] | null | null | null | data_preprocessing.py | hwRG/FastSpeech2-Pytorch-old-man_city | c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573 | [
"MIT"
] | null | null | null | ### Data Preprocessing
## 1. Json to Transcript
## 2. Aligner
## 3. Text Replace
from jamo import h2j
import json
import os, re, tqdm
import unicodedata
from tqdm import tqdm
import hparams as hp
name = hp.dataset
first_dir = os.getcwd()
transcript = name + '_transcript.txt'
dict_name = name + '_korean_dict.txt'
data_dir = 'wavs'
json_label_dir = 'label'
if __name__ == '__main__':
os.chdir('dataset/' + hp.dataset)
change_name('wavs', 'wav')
#change_name('label', 'json')
#json_to_transcripts()
aligner()
mfa_train()
lab_separate() | 31.910891 | 135 | 0.51691 |
65cb00b6e400d3acf13ccac0a2014cd803772f2a | 1,435 | py | Python | cachet-tools/purge-cachet.py | thearifismail/black-box-tester | 23114fa73394a141bc091d6903e3ef4202f80bbf | [
"MIT"
] | null | null | null | cachet-tools/purge-cachet.py | thearifismail/black-box-tester | 23114fa73394a141bc091d6903e3ef4202f80bbf | [
"MIT"
] | 3 | 2020-01-02T13:04:07.000Z | 2020-02-05T14:18:50.000Z | cachet-tools/purge-cachet.py | thearifismail/black-box-tester | 23114fa73394a141bc091d6903e3ef4202f80bbf | [
"MIT"
] | 5 | 2019-11-07T20:55:05.000Z | 2020-07-15T13:59:07.000Z | #!/usr/bin/env python3
import requests
import os
import json
CACHET_HOSTNAME = os.environ.get("CACHET_HOSTNAME")
URL = f"https://{CACHET_HOSTNAME}/api/v1/components"
HEADERS = {
'X-Cachet-Token': os.environ.get("CACHET_TOKEN")
}
with requests.Session() as session:
session.headers.update(HEADERS)
response = session.get(URL + "/groups", verify=False)
groups = response.json()['data']
print("Number of groups found: " + str(len(groups)))
for group in groups:
components = group['enabled_components']
print(group['name'] + " contains " + str(len(components)) + " components")
for component in components:
print("Deleting component: " + component['name'])
cdr = session.delete(URL + "/" + str(component['id']), verify=False, )
print (cdr)
# delete the group
print("Deleting group " + group['name'])
gdr = session.delete(URL + "/groups/" + str(group['id']), verify=False, )
print(gdr)
# check and delete components not in any groups
response = session.get(URL, verify=False)
components = response.json()['data']
print("Number of components not in any group: " + str(len(components)))
for component in components:
print("Deleting component: " + component['name'])
cdr = session.delete(URL + "/" + str(component['id']), verify=False, )
print (cdr)
print("Done!!!")
| 33.372093 | 82 | 0.622997 |
65cb24c821d26b2c77253d1d9836328c541460bf | 489 | py | Python | astrophysics_toolset/utilities/tests/test_funcs.py | cphyc/astrophysics_toolset | 36be3f459a1bbca73af6f39f0957bfac0cb122eb | [
"MIT"
] | 3 | 2020-07-19T15:46:48.000Z | 2021-08-02T21:58:49.000Z | astrophysics_toolset/utilities/tests/test_funcs.py | cphyc/astrophysics_toolset | 36be3f459a1bbca73af6f39f0957bfac0cb122eb | [
"MIT"
] | 30 | 2020-05-12T11:07:47.000Z | 2022-02-27T12:54:08.000Z | astrophysics_toolset/utilities/tests/test_funcs.py | cphyc/astrophysics_toolset | 36be3f459a1bbca73af6f39f0957bfac0cb122eb | [
"MIT"
] | null | null | null | import numpy as np
from mpmath import besselj, mpf, pi, sqrt
from ..funcs import j1_over_x
| 21.26087 | 79 | 0.615542 |
65cb50fe55b88d486a160d6a37760bb1772d7906 | 2,176 | py | Python | tools/isolate-run.py | France-ioi/taskgrader | 72b043195af752d68cfee1d28fb52ae6012bc9a2 | [
"MIT"
] | 12 | 2015-02-19T20:09:04.000Z | 2021-12-25T13:52:17.000Z | tools/isolate-run.py | France-ioi/taskgrader | 72b043195af752d68cfee1d28fb52ae6012bc9a2 | [
"MIT"
] | 102 | 2015-08-03T14:07:46.000Z | 2022-02-18T19:56:55.000Z | tools/isolate-run.py | France-ioi/taskgrader | 72b043195af752d68cfee1d28fb52ae6012bc9a2 | [
"MIT"
] | 3 | 2016-05-12T15:03:16.000Z | 2019-07-31T14:38:24.000Z | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2016 France-IOI, MIT license
#
# http://opensource.org/licenses/MIT
# This tool launches an isolated execution. It is intended as a wrapper around
# the execution of any command.
import argparse, os, sys
DEFAULT_EXECPARAMS = {
'timeLimitMs': 60000,
'memoryLimitKb': 128*1024,
'useCache': False,
'stdoutTruncateKb': -1,
'stderrTruncateKb': -1,
'getFiles': []
}
# Add taskgrader folder to PATH
SELFDIR = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(SELFDIR, '../'))
from taskgrader import IsolatedExecution
if __name__ == '__main__':
argParser = argparse.ArgumentParser(description="Makes a 'standalone' JSON file, bundling files referenced by path into the JSON to remove any reference to paths.")
argParser.add_argument('-i', '--stdin', help='Set file to pass on stdin.')
argParser.add_argument('-m', '--memory-limit', help='Set memory limit for execution, in kilobytes.', type=int)
argParser.add_argument('-t', '--time-limit', help='Set time limit for execution, in milliseconds.', type=int)
argParser.add_argument('-p', '--path', help='Set the working directory for the execution.', default='.')
argParser.add_argument('args', nargs=argparse.REMAINDER)
args = argParser.parse_args()
# Check cmd line
if not args.args:
argParser.error("No command specified.")
if '--' in args.args: args.args.remove('--')
# Set up execution parameters
execParams = {}
execParams.update(DEFAULT_EXECPARAMS)
if args.memory_limit: execParams['memoryLimitKb'] = args.memory_limit
if args.time_limit: execParams['timeLimitMs'] = args.time_limit
# Prepare files
cmdLine = ' '.join(args.args)
stdoutPath = os.path.join(args.path, 'isolate-run.stdout')
# Launch the isolated execution
execution = IsolatedExecution(None, execParams, cmdLine)
report = execution.execute(args.path, stdinFile=args.stdin, stdoutFile=stdoutPath)
sys.stdout.write(open(stdoutPath, 'r').read())
sys.stderr.write(report['stderr']['data'])
sys.exit(report['exitCode'])
| 35.672131 | 168 | 0.696691 |
65cc242de89c19efa4090dc93f9caa33777e25e0 | 837 | py | Python | monitor/monitorlibs/sendemail.py | vaedit/- | 4e68910737ac794390df05ac34a6cf46339b0002 | [
"Apache-2.0"
] | 1 | 2021-04-09T05:47:42.000Z | 2021-04-09T05:47:42.000Z | monitor/monitorlibs/sendemail.py | vaedit/python-monitor-script | 4e68910737ac794390df05ac34a6cf46339b0002 | [
"Apache-2.0"
] | null | null | null | monitor/monitorlibs/sendemail.py | vaedit/python-monitor-script | 4e68910737ac794390df05ac34a6cf46339b0002 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.header import Header
#
| 26.15625 | 54 | 0.577061 |
65ccdd74df24a36712a75efa27299093b23c6844 | 583 | py | Python | submissions/abc146/f.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/abc146/f.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/abc146/f.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from collections import deque
n, m = map(int, readline().split())
s = readline().rstrip().decode()[::-1]
index = 0
ans = deque([])
for i in range(n):
for j in range(m, 0, -1):
if index + j >= n:
ans.appendleft(n - index)
print(*ans)
exit()
if s[index + j] == '0':
ans.appendleft(j)
index += j
break
else:
print(-1)
exit()
| 22.423077 | 38 | 0.538593 |
65cd5a403032e83361e632b7cbcf870eef107bce | 1,790 | py | Python | src/tests/__init__.py | laichimirum/docker-appium-emulator | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | [
"Apache-2.0"
] | 8 | 2019-04-26T04:09:40.000Z | 2022-01-04T05:24:12.000Z | src/tests/__init__.py | laichimirum/docker-appium-emulator | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | [
"Apache-2.0"
] | null | null | null | src/tests/__init__.py | laichimirum/docker-appium-emulator | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | [
"Apache-2.0"
] | 2 | 2019-12-16T15:34:57.000Z | 2020-10-22T07:03:15.000Z | """Unit test to test app."""
import os
from unittest import TestCase
import mock
from src import app
| 32.545455 | 70 | 0.653073 |
65cdd034fed36877b4031f60332f1c40cdb5f6a5 | 2,224 | py | Python | tools/python-mock-server/python-mock-server.py | msmagnanijr/jboss-kie-modules | 1ab85aa12e70db810a4d607fb6aaa85a19bb8607 | [
"Apache-2.0"
] | 8 | 2018-07-20T02:32:39.000Z | 2022-03-27T10:52:55.000Z | tools/python-mock-server/python-mock-server.py | msmagnanijr/jboss-kie-modules | 1ab85aa12e70db810a4d607fb6aaa85a19bb8607 | [
"Apache-2.0"
] | 167 | 2017-12-19T14:33:35.000Z | 2022-03-22T11:47:20.000Z | tools/python-mock-server/python-mock-server.py | msmagnanijr/jboss-kie-modules | 1ab85aa12e70db810a4d607fb6aaa85a19bb8607 | [
"Apache-2.0"
] | 52 | 2017-12-18T13:55:24.000Z | 2022-02-09T14:07:14.000Z | #!/usr/bin/python3
import os
import sys
from http.server import HTTPServer, BaseHTTPRequestHandler
httpd = HTTPServer(("localhost", 8080), MyHandler)
httpd.serve_forever()
| 37.694915 | 179 | 0.642536 |
65cff554030214e04d5a8a2df9a42dced600b89e | 11,487 | py | Python | test/nn/test_nonlinearities_fliprotations.py | steven-lang/e2cnn | 48f49760766ec958b52d0dd7b02483886dfa2096 | [
"BSD-3-Clause"
] | 356 | 2019-11-22T10:37:22.000Z | 2022-03-25T14:42:45.000Z | test/nn/test_nonlinearities_fliprotations.py | steven-lang/e2cnn | 48f49760766ec958b52d0dd7b02483886dfa2096 | [
"BSD-3-Clause"
] | 52 | 2020-01-20T16:51:36.000Z | 2022-03-31T21:40:19.000Z | test/nn/test_nonlinearities_fliprotations.py | steven-lang/e2cnn | 48f49760766ec958b52d0dd7b02483886dfa2096 | [
"BSD-3-Clause"
] | 48 | 2019-12-11T09:29:30.000Z | 2022-03-18T17:51:55.000Z | import unittest
from unittest import TestCase
from e2cnn.nn import *
from e2cnn.gspaces import *
import random
if __name__ == '__main__':
unittest.main()
| 30.149606 | 107 | 0.572038 |
65d01a4d1ad87624330d3bcc5a359ecdd7b3f0fa | 5,880 | py | Python | TestModule/AnonymousPlayerTest.py | INYEONGKIM/Quattro | 0fd70b08716f71404f520941791cd314d90de83a | [
"MIT"
] | null | null | null | TestModule/AnonymousPlayerTest.py | INYEONGKIM/Quattro | 0fd70b08716f71404f520941791cd314d90de83a | [
"MIT"
] | null | null | null | TestModule/AnonymousPlayerTest.py | INYEONGKIM/Quattro | 0fd70b08716f71404f520941791cd314d90de83a | [
"MIT"
] | null | null | null | import unittest
from QuattroComponents.Player import Anonymous_player
from QuattroComponents.Card import Card
from TestModule.GetMethodName import get_method_name_decorator
from collections import deque
| 43.880597 | 126 | 0.633844 |
65d0a80d19258c77b9d91fc06cfaa6455396ecc8 | 10,012 | py | Python | octopus_deploy_swagger_client/models/phase_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/phase_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/phase_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from octopus_deploy_swagger_client.models.retention_period import RetentionPeriod # noqa: F401,E501
    def to_str(self):
        """Returns the string representation of the model"""
        # Delegates to to_dict() and pretty-prints the resulting nested dict.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # repr intentionally matches the pretty-printed dict form of the model.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PhaseResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.373333 | 257 | 0.663504 |
65d15f45a4747bc3b8090b4f6795c908d2c9cd6a | 660 | py | Python | src/cryptoadvance/specter/util/common.py | jonathancross/specter-desktop | 0178aa3879134415d63d62098b7f4f1b17db1d13 | [
"MIT"
] | null | null | null | src/cryptoadvance/specter/util/common.py | jonathancross/specter-desktop | 0178aa3879134415d63d62098b7f4f1b17db1d13 | [
"MIT"
] | null | null | null | src/cryptoadvance/specter/util/common.py | jonathancross/specter-desktop | 0178aa3879134415d63d62098b7f4f1b17db1d13 | [
"MIT"
] | null | null | null | import re
def str2bool(my_str):
    """returns a reasonable boolean from a string so that "False" will result in False"""
    # None is always falsy here, regardless of bool(None) already being False.
    if my_str is None:
        return False
    # Case-insensitive textual falsy values; everything else falls through to
    # Python's own truthiness rules (so "" -> False, "no" -> True, 1 -> True).
    if isinstance(my_str, str) and my_str.lower() in ("false", "off"):
        return False
    return bool(my_str)
def camelcase2snake_case(name):
    """If you pass DeviceManager it returns device_manager"""
    # Insert an underscore before every capital that is not at the start of
    # the string, then lowercase the whole result.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
| 27.5 | 89 | 0.645455 |
65d1935d60c24b3adbd4cfdad6ba81b04f4d1399 | 9,501 | py | Python | pyspi/SPILike.py | grburgess/pyspi | 084884c3fd06a09ef3a850cd19e7b751d7929e59 | [
"BSD-3-Clause"
] | null | null | null | pyspi/SPILike.py | grburgess/pyspi | 084884c3fd06a09ef3a850cd19e7b751d7929e59 | [
"BSD-3-Clause"
] | null | null | null | pyspi/SPILike.py | grburgess/pyspi | 084884c3fd06a09ef3a850cd19e7b751d7929e59 | [
"BSD-3-Clause"
] | 3 | 2018-06-05T12:57:29.000Z | 2019-01-09T17:12:31.000Z | import collections
from typing import Optional
import numpy as np
from astromodels import Parameter, Model
from astromodels.functions.priors import Cosine_Prior, Uniform_prior
from threeML import PluginPrototype
from threeML.io.file_utils import sanitize_filename
from threeML.plugins.DispersionSpectrumLike import DispersionSpectrumLike
from threeML.plugins.SpectrumLike import SpectrumLike
from threeML.io.logging import setup_logger
from pyspi.utils.response.spi_drm import SPIDRM
log = setup_logger(__name__)
| 30.549839 | 83 | 0.592674 |
65d2ce3a02d1d452763725cfe14c593fd3bb7cbb | 1,881 | py | Python | dianna/visualization/image.py | cffbots/dianna | 21e272dce2862747a5109341b622798f667d9248 | [
"Apache-2.0"
] | null | null | null | dianna/visualization/image.py | cffbots/dianna | 21e272dce2862747a5109341b622798f667d9248 | [
"Apache-2.0"
] | null | null | null | dianna/visualization/image.py | cffbots/dianna | 21e272dce2862747a5109341b622798f667d9248 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
def plot_image(heatmap, original_data=None, heatmap_cmap=None, data_cmap=None, show_plot=True, output_filename=None):  # pylint: disable=too-many-arguments
    """
    Plots a heatmap image.

    Optionally, the heatmap (typically a saliency map of an explainer) can be
    plotted on top of the original data. In that case both images are plotted
    transparently with alpha = 0.5.

    Args:
        heatmap: the saliency map or other heatmap to be plotted.
        original_data: the data to plot together with the heatmap, both with
            alpha = 0.5 (optional).
        heatmap_cmap: color map for the heatmap plot (see mpl.Axes.imshow
            documentation for options).
        data_cmap: color map for the (optional) data image (see mpl.Axes.imshow
            documentation for options). By default, if the image is two
            dimensional, the color map is set to 'gray'.
        show_plot: Shows plot if true (for testing or writing plots to disk
            instead).
        output_filename: Name of the file to save the plot to (optional).

    Returns:
        None
    """
    _, ax = plt.subplots()
    alpha = 1
    if original_data is not None:
        # default cmap depends on shape: grayscale or colour
        if len(original_data.shape) == 2 and data_cmap is None:
            # 2D array, grayscale
            data_cmap = 'gray'
        ax.imshow(original_data, cmap=data_cmap, vmin=0, vmax=_determine_vmax(original_data.max()))
        # overlay the heatmap semi-transparently on top of the data image
        alpha = .5
    ax.imshow(heatmap, cmap=heatmap_cmap, alpha=alpha)
    # Bug fix: save BEFORE showing. With non-interactive backends plt.show()
    # closes the current figure, so calling savefig() afterwards wrote out a
    # blank image whenever both show_plot and output_filename were given.
    if output_filename:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()
| 34.833333 | 155 | 0.642212 |
65d4480d133aa88d0ea4335e0f5e5f6c3cb17894 | 4,680 | py | Python | models/lenet.py | davidstutz/random-bit-error-robustness | 59d8533c8db87ba1b220a64032cf929e5d67fbfa | [
"Unlicense"
] | null | null | null | models/lenet.py | davidstutz/random-bit-error-robustness | 59d8533c8db87ba1b220a64032cf929e5d67fbfa | [
"Unlicense"
] | null | null | null | models/lenet.py | davidstutz/random-bit-error-robustness | 59d8533c8db87ba1b220a64032cf929e5d67fbfa | [
"Unlicense"
] | null | null | null | import torch
import common.torch
from .classifier import Classifier
from .utils import get_normalization2d, get_activation
| 37.142857 | 146 | 0.59594 |
65d4761a181f8a12d33c2a0e4fbbb20be034782f | 309 | py | Python | project/server/main/modules/__init__.py | ardikabs/dnsmanager | 4d2f302ea9f54fd4d5416328dc46a1c47b573e5b | [
"MIT"
] | 1 | 2019-01-15T10:33:04.000Z | 2019-01-15T10:33:04.000Z | project/server/main/modules/__init__.py | ardikabs/dnsmanager | 4d2f302ea9f54fd4d5416328dc46a1c47b573e5b | [
"MIT"
] | null | null | null | project/server/main/modules/__init__.py | ardikabs/dnsmanager | 4d2f302ea9f54fd4d5416328dc46a1c47b573e5b | [
"MIT"
] | null | null | null | """ All Available Module on Server Belong to Here """
AVAILABLE_MODULES = (
"api",
) | 23.769231 | 53 | 0.614887 |
65d585b3c927a0a65da4783e776cd19589017f27 | 2,154 | py | Python | slixmpp/plugins/xep_0380/eme.py | cnngimenez/slixmpp | bb61f0f39dfba205282dab50c0f3a47b26145c74 | [
"BSD-3-Clause"
] | null | null | null | slixmpp/plugins/xep_0380/eme.py | cnngimenez/slixmpp | bb61f0f39dfba205282dab50c0f3a47b26145c74 | [
"BSD-3-Clause"
] | null | null | null | slixmpp/plugins/xep_0380/eme.py | cnngimenez/slixmpp | bb61f0f39dfba205282dab50c0f3a47b26145c74 | [
"BSD-3-Clause"
] | null | null | null | """
Slixmpp: The Slick XMPP Library
Copyright (C) 2016 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
import slixmpp
from slixmpp.stanza import Message
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.xmlstream import register_stanza_plugin, ElementBase, ET
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0380 import stanza, Encryption
log = logging.getLogger(__name__)
| 29.916667 | 90 | 0.659703 |
65d5f60d4b7acc40612bcf45d7c9efe894269057 | 1,050 | py | Python | JSS Users Cleanup/setup.py | killahquam/JAMF | 77b003a72375b9b01bdb961cb466b7519c859116 | [
"MIT"
] | 34 | 2015-06-11T16:37:54.000Z | 2021-06-02T20:42:55.000Z | JSS Users Cleanup/setup.py | killahquam/JAMF | 77b003a72375b9b01bdb961cb466b7519c859116 | [
"MIT"
] | 1 | 2016-01-03T04:05:30.000Z | 2016-09-26T20:25:51.000Z | JSS Users Cleanup/setup.py | killahquam/JAMF | 77b003a72375b9b01bdb961cb466b7519c859116 | [
"MIT"
] | 6 | 2015-12-29T20:39:56.000Z | 2020-06-30T19:33:23.000Z | #!/usr/bin/python
#Quam Sodji 2015
#Setup script to install the needed python modules
#Installs kn/Slack and python-jss modules
#We assume you have Git installed.......
# NOTE: this is a Python 2 script (print statements without parentheses).
import subprocess
import os
import sys
import shutil
# Clone both dependencies into the current working directory; check_output
# raises CalledProcessError (aborting the script) if a clone fails.
clone_jss = subprocess.check_output(['git','clone','git://github.com/sheagcraig/python-jss.git'])
clone_slack = subprocess.check_output(['git','clone','git://github.com/kn/slack.git'])
# Directory containing this script; the clones are expected alongside it.
path = os.path.dirname(os.path.realpath(__file__))
#Installing Slack
print "Installing Slack"
# NOTE(review): os.chdir() and shutil.rmtree() return None, so the
# slack_folder/jss_folder/change_location/remove_* variables below are always
# None; only the side effects matter.
slack_folder = os.chdir(path + '/slack')
install_slack = subprocess.check_output(['python','setup.py','install'])
print "slack module installed"
#Installing Python JSS
print "Installing Python JSS"
jss_folder = os.chdir(path + '/python-jss')
install_jss = subprocess.check_output(['python','setup.py','install'])
print "python-jss module installed"
#Cleaning up
print "Cleaning up"
change_location = os.chdir(path)
# Remove the cloned working copies now that both packages are installed.
remove_slack_clone = shutil.rmtree(path + '/slack')
remove_jss_clone = shutil.rmtree(path + '/python-jss')
print "Done."
sys.exit(0) | 33.870968 | 97 | 0.75619 |
65da9cfd0758b74606005cccaa574f86bf734619 | 969 | py | Python | sharpy/linear/utils/sselements.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 80 | 2018-08-30T13:01:52.000Z | 2022-03-24T15:02:48.000Z | sharpy/linear/utils/sselements.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 88 | 2018-05-17T16:18:58.000Z | 2022-03-11T21:05:48.000Z | sharpy/linear/utils/sselements.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 44 | 2018-01-02T14:27:28.000Z | 2022-03-12T13:49:36.000Z | """
Linear State Space Element Class
""" | 28.5 | 119 | 0.643963 |
65db9046fcabc0fdacbff5217e489cc008a5a30b | 4,819 | py | Python | pt_mesh_renderer/RasterizeTriangles.py | FuxiCV/pt_mesh_renderer | 15153fbbe73d7c4c59d8f0b2bce7320173b3d396 | [
"Apache-2.0"
] | 61 | 2020-08-06T06:39:15.000Z | 2022-03-25T03:48:02.000Z | pt_mesh_renderer/RasterizeTriangles.py | shty32/pt_mesh_renderer | 15153fbbe73d7c4c59d8f0b2bce7320173b3d396 | [
"Apache-2.0"
] | 6 | 2020-09-08T10:30:56.000Z | 2021-07-10T14:24:23.000Z | pt_mesh_renderer/RasterizeTriangles.py | shty32/pt_mesh_renderer | 15153fbbe73d7c4c59d8f0b2bce7320173b3d396 | [
"Apache-2.0"
] | 9 | 2020-08-25T08:46:08.000Z | 2021-07-25T04:43:41.000Z | # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications: this file implements a pytorch interface to c++ codes
# Copyright 2020 Netease Fuxi AI LAB
# SPDX-License-Identifier: Apache-2.0
from torch.autograd import Function
import pt_mesh_renderer.kernels.rasterize_triangles as rasterize_triangles_kernels
try:
import pt_mesh_renderer.kernels.rasterize_triangles_cuda as rasterize_triangles_kernels_cuda
except Exception:
print("Cannot import cuda rasterizer, renderer is running in CPU mode.")
rasterize_triangles = RasterizeTriangles.apply
| 50.197917 | 110 | 0.708861 |
65db99db18c44b4e940ff60964e5dae8b718ca83 | 3,988 | py | Python | datamining_assignments/datamining_assiment_3/nmf.py | xuerenlv/PaperWork | f096b57a80e8d771f080a02b925a22edbbee722a | [
"Apache-2.0"
] | 1 | 2015-10-15T12:26:07.000Z | 2015-10-15T12:26:07.000Z | datamining_assignments/datamining_assiment_3/nmf.py | xuerenlv/PaperWork | f096b57a80e8d771f080a02b925a22edbbee722a | [
"Apache-2.0"
] | null | null | null | datamining_assignments/datamining_assiment_3/nmf.py | xuerenlv/PaperWork | f096b57a80e8d771f080a02b925a22edbbee722a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on Oct 27, 2015
@author: nlp
'''
import numpy as np
import math
# nmf
# listlistlist; list[] i
#***************** **********************************
# purity
# Gini
#****************************************************************************
if __name__ == '__main__':
nmf_main("german.txt", 2)
nmf_main("mnist.txt", 10)
pass
| 31.15625 | 89 | 0.570963 |
65dca40840f63106b93a09800755e3aaddd6d379 | 3,834 | py | Python | heat2d.py | atk91/heat-batman | ce76fa25ba56e65b842575a99a029379be54e687 | [
"BSD-2-Clause"
] | null | null | null | heat2d.py | atk91/heat-batman | ce76fa25ba56e65b842575a99a029379be54e687 | [
"BSD-2-Clause"
] | null | null | null | heat2d.py | atk91/heat-batman | ce76fa25ba56e65b842575a99a029379be54e687 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
from scipy.sparse.linalg import cgs
from scipy.sparse import csr_matrix
fig = plt.figure()
t_min = 0.0
t_max = 10.0
x_min = -10.0
x_max = 10.0
y_min = -10.0
y_max = 10.0
a = 1.0
c = 5.0
m = 400
n = 100
x = np.linspace(x_min, x_max, m)
y = np.linspace(y_min, y_max, m)
t = np.linspace(t_min, t_max, n)
dx = (x_max - x_min)/(m - 1)
dy = (y_max - y_min)/(m - 1)
dt = (t_max - t_min)/(n - 1)
matr_size = m**2
L = csr_matrix((matr_size, matr_size))
right = np.zeros(matr_size)
u_prev = np.zeros(m * m)
u = np.zeros(m * m)
for i in range(m):
for j in range(m):
u_prev[(m - 1 - j) * m + i] = u_init(x_min + i * dx, y_min + j * dy)
u[(m - 1 - j) * m + i] = u_init(x_min + i * dx, y_min + j * dy)
for k in range(n):
data = []
row = []
col = []
L = csr_matrix((matr_size, matr_size))
to_plot = np.zeros((m, m))
for i in range(m):
for j in range(m):
to_plot[i][j] = u_prev[i * m + j]
ax = fig.add_subplot(111)
ax.set_title("Heat equation solution, t = %.2f" % (k * dt))
plt.imshow(to_plot, vmax=1.0)
cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.get_yaxis().set_ticklabels([])
cax.get_xaxis().set_ticklabels([])
cax.patch.set_alpha(0)
cax.set_frame_on(False)
plt.colorbar(orientation='vertical')
plt.savefig("images/%d.png" % k)
plt.clf()
for i in range(m):
for j in range(m):
str_num = i * m + j
if i == 0 or i == m - 1:
data.append(1.0)
row.append(str_num)
col.append(ind(i, j))
right[str_num] = x_0(j * dx)
elif j == 0 or j == m - 1:
data.append(1.0)
row.append(str_num)
col.append(ind(i, j))
right[str_num] = y_0(i * dy)
else:
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i - 1, j))
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i, j - 1))
data.append(- 4.0*c/(dx**2) - 1.0/dt)
row.append(str_num)
col.append(ind(i, j))
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i + 1, j))
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i, j + 1))
right[str_num] = - u_prev[ind(i, j)] / dt
L = csr_matrix((np.array(data), (np.array(row), np.array(col))), shape=(matr_size, matr_size))
u, info = cgs(L, right, x0 = u_prev, tol=1e-10)
# print "residual: %le" % la.norm(np.dot(L, u) - right)
# print "norm u + u_prev = %le" % la.norm(u - u_prev)
u_prev = u
| 28.61194 | 114 | 0.490871 |
65dceda09e2e4a4ab6cb1d2b5780ccbfb1f4f6c7 | 2,667 | py | Python | DQN DDQN Dueling/network.py | eayvali/DeepRL | 4722af0f75487dd3167faafd4eabe8f01aea4305 | [
"MIT"
] | 2 | 2020-01-29T20:49:29.000Z | 2020-03-27T21:45:12.000Z | DQN DDQN Dueling/network.py | eayvali/DeepRL | 4722af0f75487dd3167faafd4eabe8f01aea4305 | [
"MIT"
] | null | null | null | DQN DDQN Dueling/network.py | eayvali/DeepRL | 4722af0f75487dd3167faafd4eabe8f01aea4305 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 23:19:43 2020
@author: elif.ayvali
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
| 31.011628 | 81 | 0.544432 |
65dd2821be3445190af71c053c0a5fe0757716d8 | 3,352 | py | Python | bin/meyer.py | leipzig/meripseqpipe | b16139dfa0805827fec54a33c2a3583d99780591 | [
"MIT"
] | 13 | 2020-06-09T05:45:11.000Z | 2022-02-17T09:44:34.000Z | bin/meyer.py | leipzig/meripseqpipe | b16139dfa0805827fec54a33c2a3583d99780591 | [
"MIT"
] | 2 | 2021-04-02T21:22:19.000Z | 2021-09-28T15:48:50.000Z | bin/meyer.py | leipzig/meripseqpipe | b16139dfa0805827fec54a33c2a3583d99780591 | [
"MIT"
] | 12 | 2020-06-09T05:55:51.000Z | 2022-02-09T03:07:20.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 18:53:30 2019
@author: zky
"""
from sys import argv
from math import log
from scipy import stats
input_bin25_file = argv[1]
ip_bin25_file = argv[2]
input_total_reads_count = int(argv[3])
ip_total_reads_count = int(argv[4])
peak_windows_number = int(argv[5])
output_ip_file = argv[6]
def windows_fisher_test(input_count, ip_count, input_total_reads_count, ip_total_reads_count):
    """Fisher exact tests for one genomic window (meyer peak calling).

    Two one-sided 2x2 Fisher exact tests are run:
      * ip_pvalue    -- window counts vs. the whole-library totals,
                        alternative 'less'
      * input_pvalue -- window counts vs. the reads OUTSIDE this window,
                        alternative 'greater'

    Returns the tuple (input_pvalue, ip_pvalue).
    """
    # Bug fix: the caller passes the window counts as strings (fields split
    # from a text line); previously only the "rest" counts were int()-converted
    # while the raw values went straight into the contingency tables. Convert
    # once here and use the numeric values everywhere.
    window_input_count = int(input_count)
    window_ip_count = int(ip_count)
    # Reads falling outside this window in each library.
    rest_input_count = input_total_reads_count - window_input_count
    rest_ip_count = ip_total_reads_count - window_ip_count
    # The odds ratios are never used downstream; keep only the p-values.
    _, ip_pvalue = stats.fisher_exact(
        [[window_input_count, window_ip_count],
         [input_total_reads_count, ip_total_reads_count]], 'less')
    _, input_pvalue = stats.fisher_exact(
        [[window_input_count, window_ip_count],
         [rest_input_count, rest_ip_count]], 'greater')
    return input_pvalue, ip_pvalue
with open (input_bin25_file) as input_bin25,open (ip_bin25_file) as ip_bin25:
"""Generate the list of bonferroni_filter_windows"""
ip_bonferroni_filter_list = []
ip_index = 0
print ("Generate the list of bonferroni_filter_windows")
while True:
input_line = input_bin25.readline().rstrip("\n")
ip_line = ip_bin25.readline().rstrip("\n")
if input_line == '':
break
input_line_list = input_line.split("\t")
ip_line_list = ip_line.split("\t")
input_pvalue,ip_pvalue = windows_fisher_test(input_line_list[-1],ip_line_list[-1],input_total_reads_count,ip_total_reads_count)
if (ip_pvalue < 0.05/peak_windows_number ):
del ip_line_list[-1]
ip_line_list.append(ip_pvalue)
ip_line_list[1] = int(ip_line_list[1])
ip_line_list[2] = int(ip_line_list[2])
ip_bonferroni_filter_list.append([])
ip_bonferroni_filter_list[ip_index] = ip_line_list
ip_index += 1
"""Generate the list of bonferroni_filter_peaks"""
print ("Generate the list of bonferroni_filter_peaks")
ip_bonferroni_peak = cluster_bin(ip_bonferroni_filter_list[:])
"""Write the list of bonferroni_filter_peaks"""
print ("Write the list of bonferroni_filter_peaks")
with open(output_ip_file,'w') as output_file:
for data in ip_bonferroni_peak:
output_file.write('\t'.join(str(i) for i in data))
output_file.write('\n') | 41.9 | 149 | 0.672136 |
65ddc57bb1b73bd27f58c41a027c88ec873b6740 | 2,541 | py | Python | setup.py | jimbydamonk/jenkins-job-builder-addons | 172672e25089992ed94dc223c7e30f29c46719b0 | [
"Apache-2.0"
] | 8 | 2015-08-21T15:53:22.000Z | 2019-04-09T20:42:58.000Z | setup.py | jimbydamonk/jenkins-job-builder-addons | 172672e25089992ed94dc223c7e30f29c46719b0 | [
"Apache-2.0"
] | 5 | 2016-03-23T17:46:16.000Z | 2018-03-05T13:56:17.000Z | setup.py | jimbydamonk/jenkins-job-builder-addons | 172672e25089992ed94dc223c7e30f29c46719b0 | [
"Apache-2.0"
] | 11 | 2015-10-05T21:58:33.000Z | 2019-04-14T04:50:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools.command.test import test as TestCommand
# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Long description is README + changelog with the sphinx marker stripped.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
requirements = [
    # TODO: put package requirements here
]
test_requirements = [
    # TODO: put package test requirements here
]
setup(
    name='jenkins-job-builder-addons',
    version='1.0.5',
    description="A suite of jenkins job builder addons",
    long_description=readme + '\n\n' + history,
    author="Mike Buzzetti",
    author_email='mike.buzzetti@gmail.com',
    url='https://github.com/jimbydamonk/jenkins-job-builder-addons',
    packages=['jenkins_jobs_addons'],
    include_package_data=True,
    install_requires=requirements,
    license="Apache",
    zip_safe=False,
    keywords='jenkins ',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    test_suite='tests',
    tests_require=['tox'] + test_requirements,
    # Tox is a TestCommand subclass defined elsewhere in this file (not
    # visible here); `python setup.py test` runs the tox suite through it.
    cmdclass={'test': Tox},
    # jenkins-job-builder discovers these addons through its entry-point
    # groups (projects / views / modules).
    entry_points={
        'jenkins_jobs.projects': [
            'folder=jenkins_jobs_addons.folders:Folder',
        ],
        'jenkins_jobs.views': [
            'all=jenkins_jobs_addons.views:all_view',
            'build_pipeline=jenkins_jobs_addons.views:build_pipeline_view',
            'delivery_pipeline=jenkins_jobs_addons.'
            'views:delivery_pipeline_view'
        ],
        'jenkins_jobs.modules': [
            'views=jenkins_jobs_addons.views:Views'
        ]
    },
)
| 28.550562 | 75 | 0.637151 |
65de85c428d2e16780398c226cf7243329f834fa | 1,895 | py | Python | arrp/utils/sanitize.py | LucaCappelletti94/arrp_dataset | bcea455a504e8ff718458ce12623c63e0314badb | [
"MIT"
] | null | null | null | arrp/utils/sanitize.py | LucaCappelletti94/arrp_dataset | bcea455a504e8ff718458ce12623c63e0314badb | [
"MIT"
] | null | null | null | arrp/utils/sanitize.py | LucaCappelletti94/arrp_dataset | bcea455a504e8ff718458ce12623c63e0314badb | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from typing import Tuple, Dict
from .load_csv import load_raw_classes, load_raw_epigenomic_data, load_raw_nucleotides_sequences
from .store_csv import store_raw_classes, store_raw_epigenomic_data, store_raw_nucleotides_sequences
from auto_tqdm import tqdm
def drop_unknown_datapoints(epigenomic_data:pd.DataFrame, nucleotides_sequences:np.ndarray, nucleotides_sequences_index:np.ndarray, classes:pd.DataFrame)->Tuple[pd.DataFrame, np.ndarray, np.ndarray, pd.DataFrame]:
    """Remove datapoints labeled as unknown (UK).

    Rows whose ``UK`` class flag equals 1 are dropped from all four inputs,
    and the ``UK`` column itself is removed from the returned classes frame.

    Returns the filtered (epigenomic_data, nucleotides_sequences,
    nucleotides_sequences_index, classes).
    """
    unknown_mask = classes["UK"] == 1
    known_mask = ~unknown_mask
    # DataFrames are filtered by dropping the index labels of unknown rows;
    # the numpy arrays are filtered positionally with the boolean mask.
    filtered_epigenomic = epigenomic_data.drop(index=epigenomic_data.index[unknown_mask])
    filtered_sequences = nucleotides_sequences[known_mask]
    filtered_sequence_index = nucleotides_sequences_index[known_mask]
    filtered_classes = classes.drop(index=classes.index[unknown_mask]).drop(columns=["UK"])
    return filtered_epigenomic, filtered_sequences, filtered_sequence_index, filtered_classes
65ded556650f5e35ee3489143d332a0dbd1e324c | 7,857 | py | Python | plugin.video.plexodus/resources/lib/indexers/fanarttv.py | MR-Unknown-Cm/addons | 8df1ebe58c95620bb02a05dbae7bf37954915cbd | [
"Apache-2.0"
] | 1 | 2020-03-03T10:01:21.000Z | 2020-03-03T10:01:21.000Z | plugin.video.plexodus/resources/lib/indexers/fanarttv.py | MR-Unknown-Cm/addons | 8df1ebe58c95620bb02a05dbae7bf37954915cbd | [
"Apache-2.0"
] | null | null | null | plugin.video.plexodus/resources/lib/indexers/fanarttv.py | MR-Unknown-Cm/addons | 8df1ebe58c95620bb02a05dbae7bf37954915cbd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
plexOdus Add-on
'''
import json
from resources.lib.modules import client
from resources.lib.modules import control
user = control.setting('fanart.tv.user')
# Fall back to the add-on's bundled fanart.tv client key when the user has
# not configured a personal one.
if user == '' or user is None:
    user = 'cf0ebcc2f7b824bd04cf3a318f15c17d'
headers = {'api-key': '3eb5ed2c401a206391ea8d1a0312c347'}
# NOTE(review): after the fallback above, user can never be '' here, so this
# condition is always true and client-key is always sent — confirm whether
# the bundled fallback key was meant to be excluded from this header.
if not user == '':
    headers.update({'client-key': user})
base_url = "http://webservice.fanart.tv/v3/%s/%s"
# Preferred artwork language, taken from the add-on's trakt API language.
lang = control.apiLanguage()['trakt']
| 38.704433 | 187 | 0.54881 |
65df761ba100f14026cfc9d900fd8f340f52bd34 | 3,599 | py | Python | nuqql/conversation/helper.py | hwipl/nuqql | 410ea5bd42e455d656b1b34612c3b0d5a0b433ef | [
"MIT"
] | 3 | 2019-04-15T18:33:36.000Z | 2019-04-21T19:18:10.000Z | nuqql/conversation/helper.py | hwipl/nuqql | 410ea5bd42e455d656b1b34612c3b0d5a0b433ef | [
"MIT"
] | 15 | 2019-04-15T18:35:56.000Z | 2019-09-14T08:24:32.000Z | nuqql/conversation/helper.py | hwipl/nuqql | 410ea5bd42e455d656b1b34612c3b0d5a0b433ef | [
"MIT"
] | 1 | 2019-06-16T12:00:30.000Z | 2019-06-16T12:00:30.000Z | """
nuqql conversation helpers
"""
import datetime
import logging
from typing import TYPE_CHECKING
import nuqql.win
from .conversation import CONVERSATIONS
from .logmessage import LogMessage
if TYPE_CHECKING: # imports for typing
# pylint: disable=cyclic-import
from nuqql.backend import Backend # noqa
logger = logging.getLogger(__name__)
def remove_backend_conversations(backend: "Backend") -> None:
    """
    Remove all conversations belonging to the backend.

    Each matching conversation is removed from its list window and the list
    window is redrawn immediately so the UI reflects the change.

    :param backend: the backend whose conversations are removed
    """
    logger.debug("removing all conversations of backend %s", backend.name)
    # iterate over a shallow copy so removal side effects cannot disturb
    # the iteration
    for conv in CONVERSATIONS[:]:
        if conv.backend == backend:
            # NOTE(review): this removes the conversation from the list
            # window only, not from CONVERSATIONS itself — confirm whether
            # list_win.remove() takes care of that or entries linger here.
            conv.wins.list_win.remove(conv)
            conv.wins.list_win.redraw()
            logger.debug("removed conversation %s of backend %s",
                         conv.name, backend.name)
def log_main_window(msg: str) -> None:
    """
    Log message to main windows
    """
    logger.debug("logging message to main window: %s", msg)
    # wrap the text in a LogMessage stamped "now" and hand it to the main
    # log window
    timestamp = datetime.datetime.now()
    nuqql.win.MAIN_WINS["log"].add(LogMessage(timestamp, "nuqql", msg))
def log_nuqql_conv(msg: str) -> None:
    """
    Log message to the nuqql conversation
    """
    logger.debug("logging message to nuqql conversation: %s", msg)
    # only the first conversation named "nuqql" receives the message
    target = next((conv for conv in CONVERSATIONS if conv.name == "nuqql"),
                  None)
    if target is not None:
        target.log("nuqql", msg)
def resize_main_window() -> None:
    """
    Resize main window.

    Re-reads the screen dimensions, resizes/moves every conversation's
    list/log/input windows (honouring a zoomed log window), redraws the
    active conversation, and falls back to redrawing nuqql's own list and
    log windows when no conversation is active.
    """
    logger.debug("resizing main window")
    # get main win
    screen = nuqql.win.MAIN_WINS["screen"]
    # get new maxima
    max_y, max_x = screen.getmaxyx()
    # redraw main windows
    screen.clear()
    screen.refresh()
    # redraw conversation windows
    found_active = False
    for conv in CONVERSATIONS:
        # resize and move conversation windows
        if conv.wins.list_win:
            size_y, size_x = conv.wins.list_win.config.get_size()
            conv.wins.list_win.resize_win(size_y, size_x)
        if conv.wins.log_win:
            # TODO: move zoom/resizing to win.py?
            if conv.wins.log_win.state.zoomed:
                # a zoomed log window covers the whole screen
                size_y, size_x = max_y, max_x
                pos_y, pos_x = 0, 0
                conv.wins.log_win.state.pad_y = 0  # reset pad position
            else:
                size_y, size_x = conv.wins.log_win.config.get_size()
                pos_y, pos_x = conv.wins.log_win.config.get_pos()
            conv.wins.log_win.resize_win(size_y, size_x)
            conv.wins.log_win.move_win(pos_y, pos_x)
        if conv.wins.input_win:
            size_y, size_x = conv.wins.input_win.config.get_size()
            conv.wins.input_win.resize_win(size_y, size_x)
            pos_y, pos_x = conv.wins.input_win.config.get_pos()
            conv.wins.input_win.move_win(pos_y, pos_x)
        # redraw active conversation windows
        if conv.is_active():
            found_active = True
            conv.wins.list_win.redraw()
            conv.wins.input_win.redraw()
            conv.wins.log_win.redraw()
    # if there are no active conversations, redraw nuqql main windows
    if not found_active:
        # list win
        list_win = nuqql.win.MAIN_WINS["list"]
        size_y, size_x = list_win.config.get_size()
        list_win.resize_win(size_y, size_x)
        list_win.redraw()
        # log main win
        log_win = nuqql.win.MAIN_WINS["log"]
        size_y, size_x = log_win.config.get_size()
        pos_y, pos_x = log_win.config.get_pos()
        log_win.resize_win(size_y, size_x)
        log_win.move_win(pos_y, pos_x)
        log_win.redraw()
| 29.260163 | 74 | 0.624618 |
65df788e5e4510c44fcdee2981d1538a1d6e2abd | 801 | py | Python | tests/gcs_test.py | rishi1111/vaex | b3516201d04e9277b8918dadab9df33a7c83c01a | [
"MIT"
] | 1 | 2020-08-31T17:53:01.000Z | 2020-08-31T17:53:01.000Z | tests/gcs_test.py | rishi1111/vaex | b3516201d04e9277b8918dadab9df33a7c83c01a | [
"MIT"
] | null | null | null | tests/gcs_test.py | rishi1111/vaex | b3516201d04e9277b8918dadab9df33a7c83c01a | [
"MIT"
] | null | null | null | import vaex
import pytest
| 33.375 | 83 | 0.627965 |
65dfc680b069d19bcf150f9f7a0bdfd6384fb313 | 388 | py | Python | arcutils/const.py | zhuitrec/django-arcutils | 4079ef641f43baab4cda4681b1f76e320f12eb38 | [
"MIT"
] | null | null | null | arcutils/const.py | zhuitrec/django-arcutils | 4079ef641f43baab4cda4681b1f76e320f12eb38 | [
"MIT"
] | null | null | null | arcutils/const.py | zhuitrec/django-arcutils | 4079ef641f43baab4cda4681b1f76e320f12eb38 | [
"MIT"
] | null | null | null | """Constants."""
# A ``None``-ish constant for use where ``None`` may be a valid value.
# The sentinel is falsy, renders as NOT_SET, and copying it yields the
# same singleton instance.
_sentinel_behaviour = {
    '__bool__': lambda self: False,
    '__str__': lambda self: 'NOT_SET',
    '__repr__': lambda self: 'NOT_SET',
    '__copy__': lambda self: self,
}
NOT_SET = type('NOT_SET', (), _sentinel_behaviour)()

# An alias for NOT_SET that may be more semantically-correct in some
# contexts.
NO_DEFAULT = NOT_SET
| 25.866667 | 70 | 0.639175 |
65e0bbe0d695a274843f413dfb11aa1bde11659d | 340 | py | Python | students/K33422/Elizaveta_Makhotina/labs/lab1/3/server_html.py | agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | 7d5eab0d68af378083f21473cbbd5e5def6aa60a | [
"MIT"
] | 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33422/Elizaveta_Makhotina/labs/lab1/3/server_html.py | agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | 7d5eab0d68af378083f21473cbbd5e5def6aa60a | [
"MIT"
] | 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33422/Elizaveta_Makhotina/labs/lab1/3/server_html.py | agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | 7d5eab0d68af378083f21473cbbd5e5def6aa60a | [
"MIT"
] | 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | import socket
print("Waiting for connections...")
# Minimal TCP listener that serves index.html to the first client over HTTP.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 9090))
sock.listen()
n_sock, addr = sock.accept()
print("Client connected! Sending HTTP-message")
# Bug fix: the accepted connection is bound to n_sock, but the original code
# called conn.send(...), raising NameError before anything was sent.
# NOTE: HTTP formally requires CRLF ("\r\n") line endings in the header;
# bare "\n" is kept here unchanged and is tolerated by common browsers.
with open("index.html", "rb") as page:
    n_sock.send(
        b"HTTP/1.0 200 OK\nContent-Type: text/html\n\n" + page.read()
    )
n_sock.close() | 18.888889 | 85 | 0.694118 |
65e1870d1694528b9c38bfaea11b991273afa141 | 2,998 | py | Python | tests/test_private_storage.py | glasslion/django-qiniu-storage | b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad | [
"MIT"
] | 209 | 2015-01-04T09:24:42.000Z | 2022-03-20T12:29:05.000Z | tests/test_private_storage.py | manlan2/django-qiniu-storage | b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad | [
"MIT"
] | 39 | 2015-04-10T05:38:07.000Z | 2021-09-09T02:26:54.000Z | tests/test_private_storage.py | manlan2/django-qiniu-storage | b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad | [
"MIT"
] | 69 | 2015-03-03T14:31:20.000Z | 2021-10-11T08:31:25.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
from datetime import datetime
import os
from os.path import dirname, join
import sys
import time
import unittest
import uuid
import logging
# Console logging for the test run; the leading "\n" in the format string
# visually separates log records from the test runner's own output.
LOGGING_FORMAT = '\n%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
logger = logging.getLogger(__name__)
import six
import django
from requests.exceptions import ConnectionError
from qiniu import BucketManager
from .utils import retry
# Add repo/demo_site to sys.path so the demo project's settings module
# ("demo_site.settings") is importable below.
DEMO_SITE_DIR = join(dirname(dirname(__file__)), 'demo_site')
sys.path.append(DEMO_SITE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo_site.settings")
try:
    django.setup()
except AttributeError:
    # Setup isn't necessary in Django < 1.7 (django.setup was added in 1.7,
    # so older versions raise AttributeError here).
    pass
from django.conf import settings
from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config
from qiniustorage.utils import QiniuError
# NOTE(review): despite the name, this is True when the USING_TRAVIS env var
# is *absent* -- the logic looks inverted; confirm the intended semantics.
USING_TRAVIS = os.environ.get('USING_TRAVIS', None) is None
# Fresh UUID per test run -- presumably used to namespace uploaded test
# objects so concurrent runs don't collide; verify against usages below.
UNIQUE_PATH = str(uuid.uuid4())
65e1926c2e3ccf0ad609aeee24520c09fc7d1f0b | 1,227 | py | Python | inference_sagemaker_simple.py | benayas1/MNIST-deployment | 36eab6589816ca6598a42d637755ad1432cb8b1c | [
"MIT"
] | null | null | null | inference_sagemaker_simple.py | benayas1/MNIST-deployment | 36eab6589816ca6598a42d637755ad1432cb8b1c | [
"MIT"
] | null | null | null | inference_sagemaker_simple.py | benayas1/MNIST-deployment | 36eab6589816ca6598a42d637755ad1432cb8b1c | [
"MIT"
] | null | null | null | # This file implements functions model_fn, input_fn, predict_fn and output_fn.
# Function model_fn is mandatory. The other functions can be omitted so the standard sagemaker function will be used.
# An alternative to the last 3 functions is to use function transform_fn(model, data, input_content_type, output_content_type)
#
# More info on https://github.com/aws/sagemaker-inference-toolkit/tree/master/src/sagemaker_inference
#
import torch
from mnist_demo.models.model import Net
import os
from torchvision import transforms
from sagemaker_inference import (
content_types,
decoder,
encoder,
errors,
utils,
)
def model_fn(model_dir):
    """
    Load the trained model for SageMaker inference.

    SageMaker looks for a function with exactly this name and signature when
    Elastic Inference is not used.

    Args:
        model_dir: Directory containing the serialized weights as
            'model.pth' (TODO confirm the training script saves under this
            exact name).

    Returns:
        The `Net` model with weights loaded, moved to GPU when available,
        and switched to evaluation mode.
    """
    print('Loading model')
    model = Net()
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f))
    # Use the GPU when one is present, otherwise fall back to CPU.  (The
    # previous inline comment claiming CPU-only inference was stale.)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Fix: switch to inference mode so dropout is disabled and batch-norm
    # uses its running statistics.
    model.eval()
    print('Model loaded')
    return model
65e1ff2eb00e84049f3aabe94179a02fc82570ba | 802 | py | Python | hw/scripts/__main__.py | jonasblixt/mongoose | 4f392353f42d9c9245cdb5d9511348ec40bd936f | [
"BSD-3-Clause"
] | 4 | 2019-07-31T17:59:14.000Z | 2019-10-06T11:46:28.000Z | hw/scripts/__main__.py | jonasblixt/mongoose | 4f392353f42d9c9245cdb5d9511348ec40bd936f | [
"BSD-3-Clause"
] | null | null | null | hw/scripts/__main__.py | jonasblixt/mongoose | 4f392353f42d9c9245cdb5d9511348ec40bd936f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import kicad
import model
from stackups import JLCPCB6Layers
#from dram import lp4
# IMX8MM
# Diff pairs should be matched within 1ps
# CK_t/CK_c max 200 ps
# CA[5:0]
# CS[1:0] min: CK_t - 25ps, max: CK_t + 25ps
# CKE[1:0]
# DQS0_t/DQS0_c min: CK_t - 85ps, max CK_t + 85ps
# DQ[7:0] min: DQS0_t - 10ps, max DQS0_t + 10ps
# DM0
# DQS1_t/DQS1_c min: CK_t - 85ps, max CK_t + 85ps
# DQ[15:8] min: DQS1_t - 10ps, max DQS1_t + 10ps
# DM1
if __name__ == "__main__":
pcb = kicad.KicadPCB("../mongoose.kicad_pcb", JLCPCB6Layers())
# DiffPair(pcb, "_n","_p", max_delay_ps=200.0, max_skew_ps=1.0)
for net_index in pcb.get_nets().keys():
net = pcb.get_nets()[net_index]
print(net.get_name() + " dly: %.2f ps"%(net.get_delay_ps()))
| 21.675676 | 68 | 0.627182 |
65e2a0c64857964a543fdd7ce72cd8eee8d2cbac | 165 | py | Python | farms2face/subscriptions/views.py | dev1farms2face/f2f | 54e58187a68574bf2bd0dfb7e58a2b416336106a | [
"MIT"
] | null | null | null | farms2face/subscriptions/views.py | dev1farms2face/f2f | 54e58187a68574bf2bd0dfb7e58a2b416336106a | [
"MIT"
] | null | null | null | farms2face/subscriptions/views.py | dev1farms2face/f2f | 54e58187a68574bf2bd0dfb7e58a2b416336106a | [
"MIT"
] | 2 | 2018-06-19T12:12:08.000Z | 2018-06-25T18:45:36.000Z | from django.shortcuts import render
# Create your views here.
| 20.625 | 44 | 0.636364 |
65e2db02f151a8da25b3c6a7203333c4f0b917f2 | 4,795 | py | Python | scripts/runOptimizer.py | sschulz365/PhC_Optimization | 9a4add4eb638d797647cabbdf0f96b29b78114f2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2017-05-13T05:33:06.000Z | 2021-02-26T14:39:44.000Z | scripts/runOptimizer.py | sschulz365/PhC_Optimization | 9a4add4eb638d797647cabbdf0f96b29b78114f2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | scripts/runOptimizer.py | sschulz365/PhC_Optimization | 9a4add4eb638d797647cabbdf0f96b29b78114f2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | #Sean Billings, 2015
import random
import numpy
import subprocess
import constraints
from experiment import Experiment
from objectiveFunctions import WeightedSumObjectiveFunction, IdealDifferentialObjectiveFunction
from waveGuideMPBOptimizer import differentialEvolution, createPopulation, gradientDescentAlgorithm
import utilities
import math
# --- Waveguide design parameters handed to MPB ---------------------------
# Row shifts (s1-s3 vertical, p1-p3 horizontal) and air-hole radii (r0-r3);
# these are the variables the optimizer tunes.
paramMap = {}
paramMap["s1"] = 0 # First row vertical shift
paramMap["s2"] = 0 # Second row vertical shift
paramMap["s3"] = 0 # Third row vertical shift
paramMap["p1"] = 0 # First row horizontal shift
paramMap["p2"] = 0 # Second row horizontal shift
paramMap["p3"] = 0 # Third row horizontal shift
paramMap["r0"] = 0.3 # Default air-hole radius
paramMap["r1"] = 0.3 # Default first row radius
paramMap["r2"] = 0.3 # Default second row radius
paramMap["r3"] = 0.3 # Default third row radius
# absolute path to the mpb executable
mpb = "/Users/sean/documents/mpb-1.5/mpb/mpb"
# absolute path to the input ctl
inputFile = "/Users/sean/documents/W1_2D_v03.ctl.txt"
# absolute path to the output ctl
outputFile = "/Users/sean/documents/optimizerTestFile.txt"
# we define a general experiment object
# that we reuse whenever we need to make a command-line mpb call
# see experiment.py for functionality
experiment = Experiment(mpb, inputFile, outputFile)
# ex.setParams(paramVector)
experiment.setCalculationType('4') # accepts an int from 0 to 5
experiment.setBand(23)
# see constraints.py
constraintFunctions = [constraints.latticeConstraintsLD]
# --- Optimizer hyper-parameters ------------------------------------------
max_generation = 15 # number of iterations of the DE alg
population_size = 20 # number of solutions to consider in DE
random_update = 0.2 # chance of updating vector fields in DE alg
elite_size = 10 # number of solutions to store in DE, and use for GD
# NOTE(review): `band` is assigned but never read in this script; the band
# MPB actually computes is set via experiment.setBand(23) above.
band = 23 # band of interest for MPB computations
# specify the weights for the IdealDifferentialObjectiveFunction
w1 = 0 #0.01 # bandwidth weight
w2 = 30 #100 # group index weight
w3 = 0 # average loss weight
w4 = 0 # BGP weight
w5 = 30 #0.002 # loss at ngo (group index) weight
w6 = 0
# these weights are used in the Objective Function to score mpb results
weights = [ w1, w2, w3, w4, w5, w6]
# Target values; only consumed by the (commented-out) ideal-difference
# objective variant below.
ideal_group_index = 30 #self.ideal_solution[0]
ideal_bandwidth = 0.007 #self.ideal_solution[1]
ideal_loss_at_group_index = 30 #self.ideal_solution[2]
ideal_bgp = 0.3 #self.ideal_solution[3]
ideal_delay = 300 #self.ideal_solution[4]
ideal = [ideal_group_index, ideal_bandwidth, ideal_loss_at_group_index, ideal_bgp, ideal_delay]
#Initialize objective function
#objFunc = IdealDifferentialObjectiveFunction(weights, experiment, ideal)
objFunc = WeightedSumObjectiveFunction(weights, experiment)
# Differential Evolution section
print "Starting Differential Evolution Optimizer"
# DEsolutions is an array of solutions generated by the DE alg
DEsolutions = differentialEvolution(constraintFunctions, objFunc,
max_generation, population_size, random_update,
paramMap, elite_size, experiment)
print "\nDifferential Evolution solutions generated"
population = DEsolutions
# test line
#population = createPopulation(constraintFunctions, population_size, paramMap)
descent_scaler = 0.2
completion_scaler = 0.1
alpha_scaler = 0.9
# Gradient Descent Section
print "\nStarting Gradient Descent Optimizer"
# GDsolutions is an array of solutions generated by the GD algorihtms
GDsolutions = gradientDescentAlgorithm(objFunc,
constraintFunctions,
population, descent_scaler,
completion_scaler, alpha_scaler)
population = GDsolutions
print "\nResults"
for solution in population:
print "\nSolution: " + str(solution)
results = objFunc.evaluate(solution)
solution_score = results[0]
bandwidth = results[1]
group_index = results[2]
avgLoss = results[3] # average loss
bandwidth_group_index_product = results[4] #BGP
loss_at_ng0 = results[5] # loss at group index
print "\nScore: " + str(solution_score)
print "\nNormalized Bandwidth: " + str(bandwidth)
print "\nGroup Index: " + str(group_index)
print "\nAverage Loss: " + str(avgLoss)
print "\nLoss at Group Index: " + str(loss_at_ng0)
print "\nBGP: " + str(bandwidth_group_index_product)
#print "\nComputing Fabrication Stability..."
#laplacian = utilities.computeLaplacian(weights, weightedSumObjectiveFunction, solution, experiment)
#fabrication_stability = 0
#for key in laplacian.keys():
# fabrication_stability = fabrication_stability + laplacian[key]**2
#fabrication_stability = math.sqrt(fabrication_stability)
#print "\nFabrication Stability " + str(fabrication_stability)
print "\nOptimization Complete"
| 33.531469 | 104 | 0.737018 |
65e30ee0c4097461b20374e7d55ddbfdf3a3908e | 2,781 | py | Python | packages/w3af/w3af/core/data/url/handlers/cookie_handler.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | tools/w3af/w3af/core/data/url/handlers/cookie_handler.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | tools/w3af/w3af/core/data/url/handlers/cookie_handler.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | """
cookie_handler.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from cookielib import MozillaCookieJar
from urllib2 import HTTPCookieProcessor
| 30.56044 | 78 | 0.678533 |
65e31c331679c439236e3ccff96fa39b9166d6f4 | 435 | py | Python | setup.py | jigyasudhingra/music-recommendation-system | 09c66c4f207002b200d6394cf72e853741e44b6e | [
"MIT"
] | 2 | 2021-12-04T08:47:41.000Z | 2021-12-06T16:54:36.000Z | setup.py | jigyasudhingra/music-recommendation-system | 09c66c4f207002b200d6394cf72e853741e44b6e | [
"MIT"
] | null | null | null | setup.py | jigyasudhingra/music-recommendation-system | 09c66c4f207002b200d6394cf72e853741e44b6e | [
"MIT"
] | 1 | 2020-12-12T15:55:20.000Z | 2020-12-12T15:55:20.000Z | import os
import urllib.request
from zipfile import ZipFile
# Destination for the raw FMA metadata archive.
HOME_DIRECTORY = os.path.join('datasets', 'raw')
ROOT_URL = 'https://os.unil.cloud.switch.ch/fma/fma_metadata.zip'
# exist_ok avoids the check-then-create race of the old isdir()+makedirs().
os.makedirs(HOME_DIRECTORY, exist_ok=True)
# Download the archive, then unpack it into the same directory.
zip_path = os.path.join(HOME_DIRECTORY, 'data.zip')
urllib.request.urlretrieve(ROOT_URL, zip_path)
# Named `archive` so the built-in zip() is not shadowed.
with ZipFile(zip_path, 'r') as archive:
    archive.extractall(HOME_DIRECTORY)
print("Done!")
65e488ce45eec9f35c2f059eb9cb3c3452d611ab | 2,601 | py | Python | src/thesis/parsers/utils.py | emanuelevivoli/2021-Master-Thesis-UNIFI | e702795f71ecf0e09fca64e72780f4f6367558c9 | [
"MIT"
] | 1 | 2022-02-14T00:06:21.000Z | 2022-02-14T00:06:21.000Z | src/thesis/parsers/utils.py | emanuelevivoli/2021-Master-Thesis-UNIFI | e702795f71ecf0e09fca64e72780f4f6367558c9 | [
"MIT"
] | null | null | null | src/thesis/parsers/utils.py | emanuelevivoli/2021-Master-Thesis-UNIFI | e702795f71ecf0e09fca64e72780f4f6367558c9 | [
"MIT"
] | 1 | 2022-02-14T00:06:14.000Z | 2022-02-14T00:06:14.000Z | from thesis.parsers.classes import Args
def tag_generation(args_: "Args"):
    """
    Generate the list of run tags (strings) from the parsed arguments.

    The tags describe this run's configuration: the paper fields used, the
    embedding model, and the choice + hyper-parameters of the pre-reduction,
    clustering, and post-reduction stages.

    Args:
        args_: Parsed arguments object exposing `.model` (with
            `model_name_or_path`) and `.visual` (with `fields`, `pre`,
            `clust`, `post` sub-configurations).

    Returns:
        list of str: one human-readable tag per configuration item.
    """
    # empty tags' list
    tags = []
    # Fix: the original also unpacked `split_args(args_)` here, but
    # `split_args` is never imported in this module (NameError at call time)
    # and every unpacked value was unused or immediately overwritten below,
    # so the dead call was removed.
    model_args = args_.model
    visual_args = args_.visual
    # PAPER FIELDs
    tags += visual_args.fields
    # EMBEDDING network
    tags += [model_args.model_name_or_path]
    # PRE: dimensionality reduction applied before clustering
    tags += [f'pre.choice: {visual_args.pre.choice}']
    if visual_args.pre.choice == 'UMAP':
        tags += [f'UMAP.pre.n_neighbors: {visual_args.pre.umap.n_neighbors}',
                 f'UMAP.pre.n_components: {visual_args.pre.umap.n_components}',
                 f'UMAP.pre.metric: {visual_args.pre.umap.metric}']
    elif visual_args.pre.choice == 'PCA':
        tags += [f'PCA.pre.n_components: {visual_args.pre.pca.n_components}']
    elif visual_args.pre.choice == 'TSNE':
        tags += [f'TSNE.pre.n_components: {visual_args.pre.tsne.n_components}']
    # CLUSTER algorithm and its parameters
    tags += [f'clust.choice: {visual_args.clust.choice}']
    if visual_args.clust.choice == 'KMEANS':
        tags += [f'KMEANS.n_clusters: {visual_args.clust.kmeans.n_clusters}']
    elif visual_args.clust.choice == 'HDBSCAN':
        tags += [f'HDBSCAN.min_cluster_size: {visual_args.clust.hdbscan.min_cluster_size}',
                 f'HDBSCAN.metric: {visual_args.clust.hdbscan.metric}',
                 f'HDBSCAN.cluster_selection_method: {visual_args.clust.hdbscan.cluster_selection_method}']
    # POST: projection applied after clustering (e.g. for visualization)
    tags += [f'post.choice: {visual_args.post.choice}']
    if visual_args.post.choice == 'UMAP':
        tags += [f'UMAP.post.n_neighbors: {visual_args.post.umap.n_neighbors}',
                 f'UMAP.post.n_components: {visual_args.post.umap.n_components}',
                 f'UMAP.post.min_dist: {visual_args.post.umap.min_dist}',
                 f'UMAP.post.metric: {visual_args.post.umap.metric}']
    elif visual_args.post.choice == 'PCA':
        tags += [f'PCA.post.n_components: {visual_args.post.pca.n_components}']
    elif visual_args.post.choice == 'TSNE':
        tags += [f'TSNE.post.n_components: {visual_args.post.tsne.n_components}']
    return tags
| 35.148649 | 107 | 0.662438 |
65e524699bbb4c1ad2b8a3703eba4df80c4f8ec4 | 6,625 | py | Python | tensorflow_examples/lite/model_maker/core/task/custom_model.py | Abhi1code/FaceMaskDetection | 689abda8243665c218193384aa655c11d555c4e9 | [
"Apache-2.0"
] | 1 | 2022-01-08T16:02:18.000Z | 2022-01-08T16:02:18.000Z | tensorflow_examples/lite/model_maker/core/task/custom_model.py | Abhi1code/MaskDetection | 689abda8243665c218193384aa655c11d555c4e9 | [
"Apache-2.0"
] | 4 | 2021-06-08T21:30:20.000Z | 2022-03-12T00:28:38.000Z | tensorflow_examples/lite/model_maker/core/task/custom_model.py | Abhi1code/MaskDetection | 689abda8243665c218193384aa655c11d555c4e9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base custom model that is already retained by data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import tempfile
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core import compat
DEFAULT_QUANTIZATION_STEPS = 2000
def _gen_dataset(self,
                 data,
                 batch_size=32,
                 is_training=True,
                 input_pipeline_context=None):
  """Builds the tf.data pipeline for training or evaluation.

  Args:
    data: Wrapper exposing `.dataset` (a tf.data.Dataset) and `.size`.
    batch_size: Number of examples per batch.
    is_training: When True, shuffles (if `self.shuffle`) and repeats.
    input_pipeline_context: Optional distribution context used for sharding.

  Returns:
    A batched, prefetched tf.data.Dataset.
  """
  dataset = data.dataset
  # Shard by host: num_input_pipelines counts hosts, not cores.
  if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
    dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
                            input_pipeline_context.input_pipeline_id)
  autotune = tf.data.experimental.AUTOTUNE
  dataset = dataset.map(self.preprocess, num_parallel_calls=autotune)
  if is_training:
    if self.shuffle:
      dataset = dataset.shuffle(buffer_size=min(data.size, 100))
    dataset = dataset.repeat()
  return dataset.batch(batch_size).prefetch(autotune)
def _export_saved_model(self,
                        filepath,
                        overwrite=True,
                        include_optimizer=True,
                        save_format=None,
                        signatures=None,
                        options=None):
  """Saves the model as a TensorFlow SavedModel or a single HDF5 file.

  Thin wrapper around `self.model.save`; see `tf.keras.Model.save` for the
  full meaning of each argument.

  Args:
    filepath: Destination path; must not be None.
    overwrite: Whether to silently overwrite an existing file.
    include_optimizer: Whether to serialize the optimizer state as well.
    save_format: 'tf' or 'h5'; default depends on the TF version.
    signatures: Signatures for the SavedModel ('tf' format only).
    options: Optional `tf.saved_model.SaveOptions`.

  Raises:
    ValueError: If `filepath` is None.
  """
  if filepath is None:
    raise ValueError(
        "SavedModel filepath couldn't be None when exporting to SavedModel.")
  self.model.save(
      filepath,
      overwrite=overwrite,
      include_optimizer=include_optimizer,
      save_format=save_format,
      signatures=signatures,
      options=options)
def _export_tflite(self,
                   tflite_filepath,
                   quantized=False,
                   quantization_steps=None,
                   representative_data=None):
  """Converts the retrained model to tflite format and saves it.

  Args:
    tflite_filepath: File path to save tflite model.
    quantized: boolean, if True, save quantized model.
    quantization_steps: Number of post-training quantization calibration
      steps to run. Used only if `quantized` is True.
    representative_data: Representative data used for post-training
      quantization. Used only if `quantized` is True.

  Raises:
    ValueError: If `tflite_filepath` is None, or if `quantized` is True but
      `representative_data` is None.
  """
  if tflite_filepath is None:
    raise ValueError(
        "TFLite filepath couldn't be None when exporting to tflite.")
  tf.compat.v1.logging.info('Exporting to tflite model in %s.',
                            tflite_filepath)
  temp_dir = None
  # Fix: run conversion under try/finally so the temporary SavedModel
  # directory (TF 1.x path) is cleaned up even when convert() raises.
  try:
    if compat.get_tf_behavior() == 1:
      # TF 1.x converter can only load from a SavedModel on disk.
      temp_dir = tempfile.TemporaryDirectory()
      save_path = os.path.join(temp_dir.name, 'saved_model')
      self.model.save(save_path, include_optimizer=False, save_format='tf')
      converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(save_path)
    else:
      converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
    if quantized:
      if quantization_steps is None:
        quantization_steps = DEFAULT_QUANTIZATION_STEPS
      if representative_data is None:
        raise ValueError(
            'representative_data couldn\'t be None if model is quantized.')
      # Full-integer post-training quantization calibrated on
      # `quantization_steps` batches of representative data.
      ds = self._gen_dataset(
          representative_data, batch_size=1, is_training=False)
      converter.representative_dataset = tf.lite.RepresentativeDataset(
          get_representative_dataset_gen(ds, quantization_steps))
      converter.optimizations = [tf.lite.Optimize.DEFAULT]
      converter.inference_input_type = tf.uint8
      converter.inference_output_type = tf.uint8
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS_INT8
      ]
    tflite_model = converter.convert()
  finally:
    if temp_dir:
      temp_dir.cleanup()
  with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
    f.write(tflite_model)
| 36.401099 | 83 | 0.683321 |
65e60e80d09e1199bb195a86c8a1614239235c24 | 3,479 | py | Python | src/architectures/nmp/stacked_nmp/stacked_fixed_nmp.py | isaachenrion/jets | 59aeba81788d0741af448192d9dfb764fb97cf8d | [
"BSD-3-Clause"
] | 9 | 2017-10-09T17:01:52.000Z | 2018-06-12T18:06:05.000Z | src/architectures/nmp/stacked_nmp/stacked_fixed_nmp.py | isaachenrion/jets | 59aeba81788d0741af448192d9dfb764fb97cf8d | [
"BSD-3-Clause"
] | 31 | 2017-11-01T14:39:02.000Z | 2018-04-18T15:34:24.000Z | src/architectures/nmp/stacked_nmp/stacked_fixed_nmp.py | isaachenrion/jets | 59aeba81788d0741af448192d9dfb764fb97cf8d | [
"BSD-3-Clause"
] | 10 | 2017-10-17T19:23:14.000Z | 2020-07-05T04:44:45.000Z | import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from src.architectures.readout import READOUTS
from src.architectures.embedding import EMBEDDINGS
from .attention_pooling import POOLING_LAYERS
from ..message_passing import MP_LAYERS
from ..adjacency import construct_adjacency
from src.monitors import BatchMatrixMonitor
from src.monitors import Histogram
| 31.627273 | 111 | 0.540673 |
65e7b3e3e21dcd54310f063dfef34f4349a1bdff | 533 | py | Python | slixmpp/plugins/xep_0122/data_validation.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 86 | 2016-07-04T13:26:02.000Z | 2022-02-19T10:26:21.000Z | slixmpp/plugins/xep_0122/data_validation.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 10 | 2016-09-30T18:55:41.000Z | 2020-05-01T14:22:47.000Z | slixmpp/plugins/xep_0122/data_validation.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 45 | 2016-09-30T18:48:41.000Z | 2022-03-18T21:39:33.000Z | from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0004 import stanza
from slixmpp.plugins.xep_0004.stanza import FormField
from slixmpp.plugins.xep_0122.stanza import FormValidation
| 26.65 | 58 | 0.754221 |
65e86aaff0a0cc5f5a6394f9f3fd03cd47bf0ab3 | 497 | py | Python | q037.py | sjf/project_euler | 8514710e2018136ba8a087ae58cba35370700f6f | [
"MIT"
] | null | null | null | q037.py | sjf/project_euler | 8514710e2018136ba8a087ae58cba35370700f6f | [
"MIT"
] | null | null | null | q037.py | sjf/project_euler | 8514710e2018136ba8a087ae58cba35370700f6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import lib
# Project Euler 37: sum the eleven truncatable primes.
N = 1000000
sieve = lib.get_prime_sieve(N)
primes = lib.primes(N, sieve)
# Drop the first four primes (2, 3, 5, 7): the puzzle excludes the
# single-digit primes from being considered truncatable.
primes = primes[4:]
# Exactly eleven such primes exist, so stop as soon as they are found.
result = []
for candidate in primes:
    if not is_truncatable(candidate):
        continue
    result.append(candidate)
    if len(result) == 11:
        break
print(sum(result))