hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe9d9591df2f2c4858eb64ae4def8e712c9e40a0 | 1,183 | py | Python | migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py | MTES-MCT/mobilic-api | b3754de2282262fd60a27dc90e40777df9c1e230 | [
"MIT"
] | null | null | null | migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py | MTES-MCT/mobilic-api | b3754de2282262fd60a27dc90e40777df9c1e230 | [
"MIT"
] | 8 | 2021-04-19T17:47:55.000Z | 2022-02-16T17:40:18.000Z | migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py | MTES-MCT/mobilic-api | b3754de2282262fd60a27dc90e40777df9c1e230 | [
"MIT"
] | null | null | null | """Only one validation per mission, user and actor
Revision ID: 1a89721126f7
Revises: fa96dfc8237d
Create Date: 2021-10-14 11:22:01.124488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "1a89721126f7"
down_revision = "fa96dfc8237d"
branch_labels = None
depends_on = None
| 23.66 | 117 | 0.633136 |
fe9dfa2f69a678e6192380ed28bf692cc55ff822 | 1,979 | py | Python | packages/facilities/rtdb/python/rtdb2_get.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 2 | 2021-01-15T13:27:19.000Z | 2021-08-04T08:40:52.000Z | packages/facilities/rtdb/python/rtdb2_get.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | null | null | null | packages/facilities/rtdb/python/rtdb2_get.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 5 | 2018-05-01T10:39:31.000Z | 2022-03-25T03:02:35.000Z | # Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
age: 2h
shared: True
list: False
value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']
Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
[[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Create instance of RtDB2Store and read databases from disk
rtdb2Store = RtDB2Store(args.path)
item = rtdb2Store.get(args.agent, args.key, timeout=None)
if args.expression:
print(eval("item.value" + args.expression))
else:
print(str(item))
if args.serialized:
hexdump(item.value_serialized)
rtdb2Store.closeAll()
| 42.106383 | 186 | 0.723598 |
fe9ed7b6294e532592cc4dcafea632566b56df4d | 2,219 | py | Python | algorithms/A3C/atari/atari_env_deprecated.py | what3versin/reinforce_py | 46769da50aea65346cd3a300b55306d25f1f2683 | [
"MIT"
] | 1 | 2018-11-09T02:56:27.000Z | 2018-11-09T02:56:27.000Z | algorithms/A3C/atari/atari_env_deprecated.py | syd951186545/reinforce_py | 46769da50aea65346cd3a300b55306d25f1f2683 | [
"MIT"
] | null | null | null | algorithms/A3C/atari/atari_env_deprecated.py | syd951186545/reinforce_py | 46769da50aea65346cd3a300b55306d25f1f2683 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import division
import os
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
| 32.632353 | 80 | 0.581794 |
fe9f7091809e30b40cd88cb5967081a6b1484eed | 5,935 | py | Python | content/_build/jupyter_execute/macm.py | NBCLab/nimare-paper | 2b9e70febcfde4ca12420adc3c2910ff622252f2 | [
"MIT"
] | 3 | 2020-10-20T10:24:04.000Z | 2021-12-20T13:31:01.000Z | content/_build/jupyter_execute/macm.py | NBCLab/nimare-paper | 2b9e70febcfde4ca12420adc3c2910ff622252f2 | [
"MIT"
] | 20 | 2021-03-07T17:18:48.000Z | 2022-03-09T15:13:02.000Z | content/_build/jupyter_execute/macm.py | NBCLab/nimare-paper | 2b9e70febcfde4ca12420adc3c2910ff622252f2 | [
"MIT"
] | 3 | 2020-05-05T14:42:18.000Z | 2021-11-30T19:52:27.000Z | #!/usr/bin/env python
# coding: utf-8
# # Meta-Analytic Coactivation Modeling
# In[1]:
# First, import the necessary modules and functions
import os
from datetime import datetime
import matplotlib.pyplot as plt
from myst_nb import glue
from repo2data.repo2data import Repo2Data
import nimare
start = datetime.now()
# Install the data if running locally, or points to cached data if running on neurolibre
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
FIG_DIR = os.path.abspath("../images")
# Download data
repo2data = Repo2Data(DATA_REQ_FILE)
data_path = repo2data.install()
data_path = os.path.join(data_path[0], "data")
# Now, load the Datasets we will use in this chapter
neurosynth_dset = nimare.dataset.Dataset.load(os.path.join(data_path, "neurosynth_dataset.pkl.gz"))
# Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks.
# In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis.
# These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database.
# In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not.
#
# <!-- TODO: Determine appropriate citation style here. -->
#
# MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data).
# Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`.
# MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions.
#
# Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere.
#
# In this section, we will perform two MACMs- one with a target mask and one with a coordinate-centered sphere.
# For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`.
# For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`.
# In[2]:
# Create Dataset only containing studies with peaks within the amygdala mask
amygdala_mask = os.path.join(data_path, "amygdala_roi.nii.gz")
amygdala_ids = neurosynth_dset.get_studies_by_mask(amygdala_mask)
dset_amygdala = neurosynth_dset.slice(amygdala_ids)
# Create Dataset only containing studies with peaks within the sphere ROI
sphere_ids = neurosynth_dset.get_studies_by_coordinate([[24, -2, -20]], r=6)
dset_sphere = neurosynth_dset.slice(sphere_ids)
# In[3]:
import numpy as np
from nilearn import input_data, plotting
# In order to plot a sphere with a precise radius around a coordinate with
# nilearn, we need to use a NiftiSpheresMasker
mask_img = neurosynth_dset.masker.mask_img
sphere_masker = input_data.NiftiSpheresMasker([[24, -2, -20]], radius=6, mask_img=mask_img)
sphere_masker.fit(mask_img)
sphere_img = sphere_masker.inverse_transform(np.array([[1]]))
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
display = plotting.plot_roi(
amygdala_mask,
annotate=False,
draw_cross=False,
axes=axes[0],
figure=fig,
)
axes[0].set_title("Amygdala ROI")
display = plotting.plot_roi(
sphere_img,
annotate=False,
draw_cross=False,
axes=axes[1],
figure=fig,
)
axes[1].set_title("Spherical ROI")
glue("figure_macm_rois", fig, display=False)
# ```{glue:figure} figure_macm_rois
# :name: figure_macm_rois
# :align: center
#
# Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM.
# ```
# Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run.
# In[4]:
from nimare import meta
meta_amyg = meta.cbma.ale.ALE(kernel__sample_size=20)
results_amyg = meta_amyg.fit(dset_amygdala)
meta_sphere = meta.cbma.ale.ALE(kernel__sample_size=20)
results_sphere = meta_sphere.fit(dset_sphere)
# In[5]:
meta_results = {
"Amygdala ALE MACM": results_amyg.get_map("z", return_type="image"),
"Sphere ALE MACM": results_sphere.get_map("z", return_type="image"),
}
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
for i_meta, (name, file_) in enumerate(meta_results.items()):
display = plotting.plot_stat_map(
file_,
annotate=False,
axes=axes[i_meta],
cmap="Reds",
cut_coords=[24, -2, -20],
draw_cross=False,
figure=fig,
)
axes[i_meta].set_title(name)
colorbar = display._cbar
colorbar_ticks = colorbar.get_ticks()
if colorbar_ticks[0] < 0:
new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
else:
new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
colorbar.set_ticks(new_ticks, update_ticks=True)
glue("figure_macm", fig, display=False)
# ```{glue:figure} figure_macm
# :name: figure_macm
# :align: center
#
# Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM.
# ```
# In[6]:
end = datetime.now()
print(f"macm.md took {end - start} to build.")
| 36.411043 | 392 | 0.752148 |
fe9f96734192b94aa40844f25ed620f799a5da53 | 50,863 | py | Python | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """ CISCO_IPSLA_ECHO_MIB
This MIB module defines the templates for IP SLA operations of
ICMP echo, UDP echo and TCP connect.
The ICMP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an ICMP echo request message to the
destination and receiving an ICMP echo reply.
The UDP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an UDP echo request message to the
destination and receiving an UDP echo reply.
The TCP connect operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken to perform a TCP connect operation.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
| 55.527293 | 720 | 0.624855 |
fea2c153f85345b8df258b2faf5084ce932ff128 | 4,057 | py | Python | example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | [
"Apache-2.0"
] | 1 | 2022-01-22T02:29:24.000Z | 2022-01-22T02:29:24.000Z | example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | [
"Apache-2.0"
] | null | null | null | example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
help="number of gpus to use")
MOVIELENS = {
'dataset': 'ml-10m',
'train': './ml-10M100K/r1.train',
'val': './ml-10M100K/r1.test',
'max_user': 71569,
'max_movie': 65135,
}
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
batch_size = args.batch_size
optimizer = 'sgd'
factor_size = args.factor_size
print_every = args.print_every
num_gpus = args.num_gpus
momentum = 0.9
learning_rate = 0.1
# prepare dataset and iterators
max_user = MOVIELENS['max_user']
max_movies = MOVIELENS['max_movie']
get_movielens_data(MOVIELENS['dataset'])
train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)
# construct the model
net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)
# construct the module
# map the ctx_group attribute to the context assignment
group2ctxs={'dev1':[mx.cpu()]*num_gpus, 'dev2':[mx.gpu(i) for i in range(num_gpus)]}
# Creating a module by passing group2ctxs attribute which maps
# the ctx_group attribute to the context assignment
mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus, data_names=['user', 'item'],
label_names=['score'], group2ctxs=group2ctxs)
# the initializer used to initialize the parameters
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# the parameters for the optimizer constructor
optimizer_params = {
'learning_rate': learning_rate,
'wd': 1e-4,
'momentum': momentum,
'rescale_grad': 1.0/batch_size}
# use MSE as the metric
metric = mx.gluon.metric.create(['MSE'])
speedometer = mx.callback.Speedometer(batch_size, print_every)
# start training
mod.fit(train_iter,
val_iter,
eval_metric = metric,
num_epoch = num_epoch,
optimizer = optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
batch_end_callback = speedometer)
| 36.881818 | 98 | 0.682031 |
fea4ed769af71f922b55fc3fe0ad5f2f54ffbfef | 762 | py | Python | scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | [
"MIT"
] | 6 | 2021-12-08T09:32:57.000Z | 2022-03-20T09:22:29.000Z | scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | [
"MIT"
] | null | null | null | scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | [
"MIT"
] | 3 | 2022-02-01T12:30:47.000Z | 2022-03-24T10:31:04.000Z | #!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen
top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")
B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)
B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)
top.mainloop()
| 25.4 | 99 | 0.745407 |
fea585d93413c287bd31eaa0525d97e26cbdcd0b | 742 | py | Python | codeforces.com/1669F/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | codeforces.com/1669F/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | codeforces.com/1669F/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | for i in range(int(input())):
number_of_candies = int(input())
candies_weights = list(map(int, input().split()))
bob_pos = number_of_candies - 1
alice_pos = 0
bob_current_weight = 0
alice_current_weight = 0
last_equal_candies_total_number = 0
while alice_pos <= bob_pos:
if alice_current_weight <= bob_current_weight:
alice_current_weight += candies_weights[alice_pos]
alice_pos += 1
else:
bob_current_weight += candies_weights[bob_pos]
bob_pos -= 1
if alice_current_weight == bob_current_weight:
last_equal_candies_total_number = alice_pos + (number_of_candies - bob_pos - 1)
print(last_equal_candies_total_number)
| 29.68 | 91 | 0.665768 |
fea64ce26f29e53484b8013f735f948fef203460 | 12,293 | py | Python | client/client_build.py | patriotemeritus/grr | bf2b9268c8b9033ab091e27584986690438bd7c3 | [
"Apache-2.0"
] | 1 | 2015-06-24T09:07:20.000Z | 2015-06-24T09:07:20.000Z | client/client_build.py | patriotemeritus/grr | bf2b9268c8b9033ab091e27584986690438bd7c3 | [
"Apache-2.0"
] | 3 | 2020-02-11T22:29:15.000Z | 2021-06-10T17:44:31.000Z | client/client_build.py | wandec/grr | 7fb7e6d492d1325a5fe1559d3aeae03a301c4baa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--template", default=None,
help="The template zip file to repack, if "
"none is specified we will build it.")
args = parser.parse_args()
def GetBuilder(context):
"""Get the appropriate builder based on the selected flags."""
try:
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
builder_obj = builders.DarwinClientBuilder
elif args.platform == "windows":
context = ["Platform:Windows"] + context
builder_obj = builders.WindowsClientBuilder
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
builder_obj = builders.LinuxClientBuilder
elif args.package_format == "rpm":
context = ["Platform:Linux", "Target:LinuxRpm"] + context
builder_obj = builders.CentosClientBuilder
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_obj(context=context)
def GetDeployer(context):
"""Get the appropriate client deployer based on the selected flags."""
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
deployer_obj = build.DarwinClientDeployer
elif args.platform == "windows":
context = ["Platform:Windows"] + context
deployer_obj = build.WindowsClientDeployer
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
deployer_obj = build.LinuxClientDeployer
else:
context = ["Platform:Linux", "Target:LinuxRpm"] + context
deployer_obj = build.CentosClientDeployer
else:
parser.error("Unsupported build platform: %s" % args.platform)
return deployer_obj(context=context)
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeploy(context):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.template:
template_path = args.template
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
config_orig = config_lib.CONFIG.ExportState()
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
config_lib.CONFIG.AddContext(newcontext)
context.append(newcontext)
try:
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format):
continue
deployer = GetDeployer(context)
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
config_lib.ImportConfigManger(config_orig)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def main(_):
  """Launch the appropriate builder.

  Dispatches on the selected subcommand (build, repack, deploy,
  buildanddeploy) and drives the matching builder/deployer object.
  """
  config_lib.CONFIG.AddContext(
      "ClientBuilder Context",
      "Context applied when we run the client builder script.")
  startup.ClientInit()
  # Make sure we have all the secondary configs since they may be set under the
  # ClientBuilder Context
  for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
    config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
  # Use basic console output logging so we can see what is happening.
  logger = logging.getLogger()
  handler = logging.StreamHandler()
  handler.setLevel(logging.INFO)
  logger.handlers = [handler]
  # The following is used to change the identity of the builder based on the
  # target platform.
  # NOTE(review): `args` is not defined in this function; presumably it is a
  # module-level alias for the parsed command-line flags -- confirm against
  # the full file.
  context = flags.FLAGS.context
  if args.arch == "amd64":
    context.append("Arch:amd64")
  else:
    context.append("Arch:i386")
  if args.subparser_name == "build":
    # Build a fresh client template for the selected architecture/context.
    builder_obj = GetBuilder(context)
    builder_obj.MakeExecutableTemplate()
  elif args.subparser_name == "repack":
    # Repack an existing template into an installer, optionally with plugins
    # and/or a debug build context.
    if args.plugins:
      config_lib.CONFIG.Set("Client.plugins", args.plugins)
    if args.debug_build:
      context += ["DebugClientBuild Context"]
    deployer = GetDeployer(context)
    output_filename = os.path.join(
        args.outputdir, config_lib.CONFIG.Get(
            "ClientBuilder.output_filename", context=deployer.context))
    deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
                             output_filename)
  elif args.subparser_name == "deploy":
    # Produce a deployable binary from a template; the template path falls
    # back from the flag, to a derived filename, to the config default.
    if args.plugins:
      config_lib.CONFIG.Set("Client.plugins", args.plugins)
    if args.debug_build:
      context += ["DebugClientBuild Context"]
    deployer = GetDeployer(context)
    template_path = (args.template or TemplateInputFilename(deployer.context) or
                     config_lib.CONFIG.Get("ClientBuilder.template_path",
                                           context=deployer.context))
    # If neither output filename or output directory is specified,
    # use the default location from the config file.
    output = None
    if args.output:
      output = args.output
    elif args.outputdir:
      # If output filename isn't specified, write to args.outputdir with a
      # .deployed extension so we can distinguish it from repacked binaries.
      filename = ".".join(
          (config_lib.CONFIG.Get("ClientBuilder.output_filename",
                                 context=deployer.context), "deployed"))
      output = os.path.join(args.outputdir, filename)
    deployer.MakeDeployableBinary(template_path, output)
  elif args.subparser_name == "buildanddeploy":
    BuildAndDeploy(context)
if __name__ == "__main__":
  # Hand control to the flags machinery, which parses command-line flags and
  # then invokes main().
  flags.StartMain(main)
| 35.631884 | 80 | 0.663467 |
fea677c9a939d2a74e86aae5f8b7734e53289cfd | 1,549 | py | Python | Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | [
"MIT"
] | null | null | null | Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | [
"MIT"
] | null | null | null | Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | [
"MIT"
] | null | null | null | # --------------
# Code starts here
# Create the lists
class_1 = ['geoffrey hinton', 'andrew ng', 'sebastian raschka', 'yoshu bengio']
class_2 = ['hilary mason', 'carla gentry', 'corinna cortes']
# Concatenate both the strings
new_class = class_1+class_2
print(new_class)
# Append the list
new_class.append('peter warden')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('carla gentry')
# Print the list
print(new_class)
# Create the Dictionary
courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science":60}
# Slice the dict and stores the all subjects marks in variable
total = 65+70+80+70+60
print(total)
# Store the all the subject in one variable `Total`
# Print the total
# Insert percentage formula
percentage =float(total)*(100/500)
# Print the percentage
print(percentage)
# Create the Dictionary
mathematics = {"geoffery hinton" :78, "andrew ng" :95, "sebastian raschka" :65, "yoshua benjio" :50, "hilary mason" :70, "corinna cortes" :66, "peter warden" :75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Given string
print(topper.split())
# Create variable first_name
first_name = 'andrew'
# Create variable Last_name and store last two element in the list
Last_name ='ng'
# Concatenate the string
full_name = Last_name+' '+first_name
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| 24.983871 | 163 | 0.701097 |
fea776840ba3b32f75565766babfd041aa64ab68 | 1,830 | py | Python | environments/recommenders/recsim_wrapper_test.py | jackblandin/ml-fairness-gym | dce1feaacf2588e0a2d6187e896796241a25ed81 | [
"Apache-2.0"
] | null | null | null | environments/recommenders/recsim_wrapper_test.py | jackblandin/ml-fairness-gym | dce1feaacf2588e0a2d6187e896796241a25ed81 | [
"Apache-2.0"
] | null | null | null | environments/recommenders/recsim_wrapper_test.py | jackblandin/ml-fairness-gym | dce1feaacf2588e0a2d6187e896796241a25ed81 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
if __name__ == '__main__':
absltest.main()
| 31.551724 | 74 | 0.742623 |
fea7d2eca288a3ef4c60e731703c65a5e9641808 | 3,034 | py | Python | moss_client_cli.py | mernst32/dl-searchcode-code | 504fe59df245ba123ad8ad6e45f03b17de6ef236 | [
"MIT"
] | null | null | null | moss_client_cli.py | mernst32/dl-searchcode-code | 504fe59df245ba123ad8ad6e45f03b17de6ef236 | [
"MIT"
] | null | null | null | moss_client_cli.py | mernst32/dl-searchcode-code | 504fe59df245ba123ad8ad6e45f03b17de6ef236 | [
"MIT"
] | null | null | null | import argparse
import csv
import os
from moss_client.core import submit_and_dl, parse_moss_reports
data_folder = 'data'
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="MOSS CLI client for submitting java files to the service and downloading the report from the "
"service locally. Will go through the sub folders of the given folder and submit the java files "
"for plagiarism checks and download the reports locally, creating a linking file in the process")
parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
parser.add_argument('-o', '--only-parse', action='store_true',
help="Only parses the local moss reports and does not submit files and download the reports. "
"Requires the reports and the links_to_reports html file created normally by this app.")
parser.add_argument('-j', '--join-file', nargs=1, default=[""],
help="When the parse or only-parse option is given, joins the parsed data with the parsed data.")
parser.add_argument('-b', '--batch-mode', action='store_true',
help="Only submits a 100 folders to the Moss Service, also looks for already processed folders so "
"that it does not submit those again.")
args = parser.parse_args()
handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
| 57.245283 | 123 | 0.680949 |
fea81883e0bc239697344b2c58f07b4a45f346d3 | 6,495 | py | Python | catkin_ws/src/localization/src/localization_node.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 12 | 2016-04-14T12:21:46.000Z | 2021-06-18T07:51:40.000Z | catkin_ws/src/localization/src/localization_node.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 14 | 2017-03-03T23:33:05.000Z | 2018-04-03T18:07:53.000Z | catkin_ws/src/localization/src/localization_node.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 113 | 2016-05-03T06:11:42.000Z | 2019-06-01T14:37:38.000Z | #!/usr/bin/env python
import rospy
#from apriltags_ros.msg import AprilTagDetectionArray
from duckietown_msgs.msg import AprilTagsWithInfos
import tf2_ros
from tf2_msgs.msg import TFMessage
import tf.transformations as tr
from geometry_msgs.msg import Transform, TransformStamped
import numpy as np
from localization import PoseAverage
from visualization_msgs.msg import Marker
# Localization Node
# Author: Teddy Ort
# Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame
# Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates
# pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates
if __name__ == '__main__':
    # Register this process with the ROS master as a non-anonymous node,
    # instantiate the localization node (class defined elsewhere in this
    # file), and block until ROS shuts the node down.
    rospy.init_node('localization_node', anonymous=False)
    localization_node = LocalizationNode()
    rospy.spin()
| 45.41958 | 147 | 0.652194 |
fea8219f00f084855cf10ddacc7d1729db19658a | 1,030 | py | Python | gen_data/get_teams.py | wusui/NCAA2019 | d33a69926dc2d5355f33f9b69e39475c54d03c56 | [
"MIT"
] | null | null | null | gen_data/get_teams.py | wusui/NCAA2019 | d33a69926dc2d5355f33f9b69e39475c54d03c56 | [
"MIT"
] | null | null | null | gen_data/get_teams.py | wusui/NCAA2019 | d33a69926dc2d5355f33f9b69e39475c54d03c56 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests
DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"
def check_teams():
"""
Extract a list of teams (schools)
"""
req = requests.get(DATALOC)
parser = ChkTeams()
parser.feed(req.text)
retv = parser.retval
return retv[8:]
def make_team_list():
"""
Call check_teams and stick result in text file
"""
listv = check_teams()
with open('teams.txt', 'w') as ofile:
for team in listv:
ofile.write(team + '\n')
if __name__ == '__main__':
    # Script entry point: scrape the bracket and dump the team list to disk.
    make_team_list()
| 20.196078 | 74 | 0.590291 |
fea8cf21ba50623dff52ac8ea09d727a155060be | 32,904 | py | Python | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:31:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
mscMod, mscModIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscMod", "mscModIndex")
DisplayString, RowStatus, StorageType, Unsigned32, Integer32 = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "DisplayString", "RowStatus", "StorageType", "Unsigned32", "Integer32")
DigitString, NonReplicated = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-TextualConventionsMIB", "DigitString", "NonReplicated")
mscPassportMIBs, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, Counter64, IpAddress, ObjectIdentity, Bits, iso, Unsigned32, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Counter64", "IpAddress", "ObjectIdentity", "Bits", "iso", "Unsigned32", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "TimeTicks", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
subnetInterfaceMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45))
mscModVcs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2))
mscModVcsRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1), )
if mibBuilder.loadTexts: mscModVcsRowStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsRowStatusTable.setDescription('This entry controls the addition and deletion of mscModVcs components.')
mscModVcsRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsRowStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsRowStatusEntry.setDescription('A single entry in the table represents a single mscModVcs component.')
mscModVcsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsRowStatus.setDescription('This variable is used as the basis for SNMP naming of mscModVcs components. These components can be added and deleted.')
mscModVcsComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsComponentName.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsComponentName.setDescription("This variable provides the component's string name for use with the ASCII Console Interface")
mscModVcsStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsStorageType.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsStorageType.setDescription('This variable represents the storage type value for the mscModVcs tables.')
mscModVcsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscModVcsIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsIndex.setDescription('This variable represents the index for the mscModVcs tables.')
mscModVcsAccOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10), )
if mibBuilder.loadTexts: mscModVcsAccOptTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAccOptTable.setDescription("Accounting information is owned by the Vc System; it is stored in the Vc Accounting component, which itself is considered to be a component on the switch. The Accounting Component contains a bit map indicating which of the accounting facilities are to be spooled in the accounting record - for example, bit '0' if set indicates that the accounting facility with facility code H.00 should be spooled if present in the Vc for accounting purposes. The data contained in the Vc Accounting must be identical network wide even though the component can be changed and upgraded on a module by module basis.")
mscModVcsAccOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsAccOptEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAccOptEntry.setDescription('An entry in the mscModVcsAccOptTable.')
mscModVcsSegmentSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n1", 0), ("n2", 1), ("n4", 2), ("n8", 3), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n128')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsSegmentSize.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsSegmentSize.setDescription('This attribute specifies the segment size for accounting of national calls. Minimum allowed segment size is 1. If data segment is sent which is less than segmentSize it is still counted as one segment.')
mscModVcsUnitsCounted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("segments", 0), ("frames", 1))).clone('segments')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsUnitsCounted.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsUnitsCounted.setDescription('This attribute specifies what is counted by frame services. If set to frames, frames are counted, else segments are counted.')
mscModVcsAccountingFax = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="20")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsAccountingFax.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAccountingFax.setDescription('Each value corresponds to an accounting facility code, of which there are currently 10 facility codes defined with codes H.00 to H.09, and corresponding to the above 10 facilities. Each of the above facilities may or may not be present and stored in the Vc for accounting purposes, depending on the nature of the call. For example, only those Vcs where a NUI (Network User Identifier) is used for charging or identification purposes will have a NUI stored in the Vc. Description of bits: notused0(0) notused1(1) originalCalledAddressFax(2)')
mscModVcsGenerationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bothEnds", 0), ("singleEnd", 1))).clone('singleEnd')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsGenerationMode.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsGenerationMode.setDescription('This attribute specifies part of the rules by which the network generates accounting records. If set to bothEnds, then both ends of the Vc generate accounting records. If set to singleEnd, then the charged end of the Vc generates accounting records. In single end generation mode, if the call does not clear gracefully, both ends of the Vc will generate accounting record.')
mscModVcsAddOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12), )
if mibBuilder.loadTexts: mscModVcsAddOptTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAddOptTable.setDescription('The Vc AddressingOptions group describes the addressing parameters. It is currently owned by the Vc. Most of the data contained in the Vc AddressingOptions group is identical network wide even though the group can be changed and upgraded on a module by module basis.')
mscModVcsAddOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsAddOptEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAddOptEntry.setDescription('An entry in the mscModVcsAddOptTable.')
mscModVcsDefaultNumberingPlan = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsDefaultNumberingPlan.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsDefaultNumberingPlan.setDescription('This attribute specifies the numbering plan used which determines the address format: X.121-- the international numbering plan for public packet switched data networks or E.164-- the international numbering plan for ISDN and PSTN. The default numbering plan does not need to be consistent across all of the nodes in the network.')
mscModVcsNetworkIdType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dnic", 0), ("inic", 1))).clone('dnic')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsNetworkIdType.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsNetworkIdType.setDescription('This attribute specifies whether the network uses a DNIC or INIC. It is used by X.75 Gateways to indicate whether in network the DNIC or INIC is used in various utilities. If it is DNIC it can be DNIC or DCC type. If it is INIC it can be 4 digits only.')
mscModVcsX121Type = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dnic", 0), ("dcc", 1))).clone('dnic')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121Type.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121Type.setDescription('This attribute specifies whether DNIC mode or DCC mode is used in X.121 address of international calls. If DCC is specified, then the first 3 digits of each DNA must be the Network ID Code. If this attribute is changed all Dnas in the network must start with this code. Numbering plan is affected by the change.')
mscModVcsNetworkIdCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 6), DigitString().subtype(subtypeSpec=ValueSizeConstraint(3, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsNetworkIdCode.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsNetworkIdCode.setDescription('This attribute specifies the DNIC (Data Network ID Code) of the network or DCC code.')
mscModVcsX121IntlAddresses = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121IntlAddresses.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121IntlAddresses.setDescription('This attribute indicates if any DTE is allowed to signal international addresses.')
mscModVcsX121IntllPrefixDigit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)).clone(9)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121IntllPrefixDigit.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121IntllPrefixDigit.setDescription('This attribute indicates the prefix digit to be used for X.121 international calls. When this digit is provided the call will have full international address.')
mscModVcsX121MinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121MinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121MinAddressLength.setDescription('This attribute indicates minimum length of x121 address.')
mscModVcsX121MaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121MaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121MaxAddressLength.setDescription('This attribute indicates maximum length of x121 address.')
mscModVcsX121ToE164EscapeSignificance = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121ToE164EscapeSignificance.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121ToE164EscapeSignificance.setDescription('This attribute specifies whether an X.121 to E.164 escape digit has significance in selecting an X.32 (analog) or an ISDN switched path. If two values are significant (the value 0 or the value 9) then yes is set to this attribute. If the value of the originally entered escape digit is not significant in routing the call then value of no is assigned to this attribute.')
mscModVcsE164IntlFormatAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlFormatAllowed.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlFormatAllowed.setDescription("This attribute indicates whether or not to allow national format E.164 addresses. If this attribute is set to a value of Yes (=1) then national format E.164 addresses are not allowed and international format addresses only are allowed. If this attribute is set to a value of No (=0), then national format E.164 addresses are allowed. If only international format E.164 addresses are allowed, then the 'e164NatlPrefixDigit' attribute is not required, nor is the 'e164IntlPrefixDigits' required.")
mscModVcsE164IntlPrefixDigits = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 15), DigitString().subtype(subtypeSpec=ValueSizeConstraint(0, 3)).clone(hexValue="30")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlPrefixDigits.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlPrefixDigits.setDescription("This attribute specifies the E.164 international prefix digits. If applicable, it is specified as 1 to 3 BCD digits. The 3 BCD digits are stored with the length of the international prefix in the low order nibble, nibble [0] followed by the most significant digit of the international prefix in the next low order nibble, nibble [1], etc. This attribute is not required if the corresponding attribute, 'e164IntlFormatOnly' is set to a value of Yes (=1).")
mscModVcsE164NatlPrefixDigit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164NatlPrefixDigit.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164NatlPrefixDigit.setDescription('This attribute contains the E.164 national prefix which may be added in front of E.164 local or national call. If e164IntlFormatOnly is set to 1, this attribute is not needed.')
mscModVcsE164LocalAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(4, 15)).clone(7)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164LocalAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164LocalAddressLength.setDescription('This attribute indicates the length of a local E.164 DNA on this module. This attribute is not required if the corresponding attribute, e164IntlFormatOnly is set to a value of yes. This attribute does not need to be consistent across all of the nodes in the network.')
mscModVcsE164TeleCountryCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 18), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 4)).clone(hexValue="31")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164TeleCountryCode.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164TeleCountryCode.setDescription('This attribute specifies the E.164 Telephone Country Code (TCC) for the country in which the network resides. If applicable, it is specified as 1 to 3 BCD digits. The 3 BCD digits are stored with the length of the TCC in the low order nibble, nibble [0] followed by the most significant digit of the TCC in the next low order nibble, nibble [1], etc.')
mscModVcsE164NatlMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164NatlMinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164NatlMinAddressLength.setDescription('This attribute indicates minimum length of e164 national address.')
mscModVcsE164NatlMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164NatlMaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164NatlMaxAddressLength.setDescription('This attribute indicates maximum length of e164 national address.')
mscModVcsE164IntlMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 22), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlMinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlMinAddressLength.setDescription('This attribute indicates minimum length of e164 international address.')
mscModVcsE164IntlMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlMaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlMaxAddressLength.setDescription('This attribute indicates maximum length of e164 international address.')
mscModVcsE164LocalMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164LocalMinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164LocalMinAddressLength.setDescription('This attribute indicates minimum length of e164 local address.')
mscModVcsE164LocalMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164LocalMaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164LocalMaxAddressLength.setDescription('This attribute indicates maximum length of e164 local address.')
mscModVcsIntOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13), )
if mibBuilder.loadTexts: mscModVcsIntOptTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsIntOptTable.setDescription('The Vc InterfaceOptions group defines Vc system parameters common in the network. It is owned by the Vc and is considered to be a module wide component on the switch. The data contained in the Vc InterfaceOptions group must be identical network wide even though this group can be changed and upgraded on a module by module basis.')
mscModVcsIntOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsIntOptEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsIntOptEntry.setDescription('An entry in the mscModVcsIntOptTable.')
mscModVcsHighPriorityPacketSizes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2).clone(hexValue="ff80")).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsHighPriorityPacketSizes.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsHighPriorityPacketSizes.setDescription('This attribute indicates which packet sizes are supported for high priority calls within the network. Description of bits: n16(0) n32(1) n64(2) n128(3) n256(4) n512(5) n1024(6) n2048(7) n4096(8)')
mscModVcsMaxSubnetPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n512')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsMaxSubnetPacketSize.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsMaxSubnetPacketSize.setDescription('This attribute specifies the maximum subnet packet size used for the connections originating or terminating on this module. All modules in the same network should have the same maxSubnetPacketSize. If this value is not identical throughout the network, the following points need to be considered: a) When Passport and DPN switches are connected in the same network, the maxSubnetPacketSize on a DPN switch can be at most 2048 and the DPN part of the network must be configured with hardware which supports this size: - Dedicated PE386 Network link/Trunk - Minimum measured link speed of 256Kbits/sec This hardware has to be present on every potential data path between connecting end points! b) The calling end of the connection signals the maxSubnetPacketSize value to the called end. The called end then compares this value to its own provisioned value and selects the smaller value. Note that this smaller value is not signalled back to the calling end. The calling and called ends can therefore have different maxSubnetPacketSize values.')
mscModVcsCallSetupTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(5, 100)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsCallSetupTimer.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsCallSetupTimer.setDescription('This attribute specifies the Vc callSetupTimer in units of 1 second ticks. This timer specifies how long the Vc will wait, after sending a subnet Call Request packet into the network, for a response from the remote end of the Vc (in the form of a subnet Raccept packet). If, after sending a subnet Call packet into the network, a response is not received within this time period, the Vc will time out, clearing the call in the assumption that the remote end is unreachable. This timer must be long enough to take into account the time required for routing the subnet Call Request through the Source Call Routing and the Destination Call Routing systems in order to be delivered to the final destination.')
mscModVcsCallRetryTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(30, 300)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsCallRetryTimer.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsCallRetryTimer.setDescription('This attribute specifies, for Vc implementing Direct Calls with the auto-call retry feature (including PVCs), the Vc callRetryTimer in units of 1 second ticks. This timer specifies how long the Vc will wait between unsuccessful call attempts.')
mscModVcsDelaySubnetAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsDelaySubnetAcks.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsDelaySubnetAcks.setDescription('This attribute specifies delay acknowledgment timer mechanism. If this attribute is set to no, then the Vc will automatically return acknowledgment packets without delay. If this attribute is set to yes, then the Vc will wait for one second in an attempt to piggyback the acknowledgment packet on another credit or data packet. If the Vc cannot piggyback the acknowledgment packet within this time, then the packet is returned without piggybacking.')
mscModVcsWinsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213), )
if mibBuilder.loadTexts: mscModVcsWinsTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsTable.setDescription('This is the windowSize corresponding to the given packet size and throughput class. All Vcs using the windowSize matrix support large Vc windows on both ends of the Vc, and support the signalling of the chosen Vc window size from the destination (called) end to the source (calling) end. This is the only matrix supported. The windowSize should be configured in the same way network wide, though it can be upgraded on a module by module basis. Vcs using the windowSize matrix will run properly if the matrices on different nodes differ since the Vc window is selected by the destination (called) side of the Vc.')
mscModVcsWinsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsWinsPktIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsWinsTptIndex"))
if mibBuilder.loadTexts: mscModVcsWinsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsEntry.setDescription('An entry in the mscModVcsWinsTable.')
mscModVcsWinsPktIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("n16", 0), ("n32", 1), ("n64", 2), ("n128", 3), ("n256", 4), ("n512", 5), ("n1024", 6), ("n2048", 7), ("n4096", 8), ("n8192", 9), ("n32768", 10), ("n65535", 11))))
if mibBuilder.loadTexts: mscModVcsWinsPktIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsPktIndex.setDescription('This variable represents the next to last index for the mscModVcsWinsTable.')
mscModVcsWinsTptIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15)))
if mibBuilder.loadTexts: mscModVcsWinsTptIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsTptIndex.setDescription('This variable represents the final index for the mscModVcsWinsTable.')
mscModVcsWinsValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsWinsValue.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsValue.setDescription('This variable represents an individual value for the mscModVcsWinsTable.')
subnetInterfaceGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1))
subnetInterfaceGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1))
subnetInterfaceGroupCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1, 3))
subnetInterfaceGroupCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1, 3, 2))
subnetInterfaceCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3))
subnetInterfaceCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1))
subnetInterfaceCapabilitiesCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1, 3))
subnetInterfaceCapabilitiesCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1, 3, 2))
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", mscModVcsStorageType=mscModVcsStorageType, mscModVcs=mscModVcs, mscModVcsRowStatusEntry=mscModVcsRowStatusEntry, mscModVcsX121MinAddressLength=mscModVcsX121MinAddressLength, mscModVcsRowStatus=mscModVcsRowStatus, mscModVcsE164NatlMinAddressLength=mscModVcsE164NatlMinAddressLength, mscModVcsAccOptTable=mscModVcsAccOptTable, mscModVcsE164LocalAddressLength=mscModVcsE164LocalAddressLength, mscModVcsE164IntlMinAddressLength=mscModVcsE164IntlMinAddressLength, mscModVcsE164IntlMaxAddressLength=mscModVcsE164IntlMaxAddressLength, mscModVcsE164LocalMaxAddressLength=mscModVcsE164LocalMaxAddressLength, mscModVcsWinsTptIndex=mscModVcsWinsTptIndex, mscModVcsE164IntlPrefixDigits=mscModVcsE164IntlPrefixDigits, mscModVcsComponentName=mscModVcsComponentName, mscModVcsIndex=mscModVcsIndex, subnetInterfaceGroupCA=subnetInterfaceGroupCA, mscModVcsX121IntllPrefixDigit=mscModVcsX121IntllPrefixDigit, mscModVcsDelaySubnetAcks=mscModVcsDelaySubnetAcks, mscModVcsX121Type=mscModVcsX121Type, mscModVcsWinsTable=mscModVcsWinsTable, mscModVcsE164NatlPrefixDigit=mscModVcsE164NatlPrefixDigit, subnetInterfaceMIB=subnetInterfaceMIB, mscModVcsAccountingFax=mscModVcsAccountingFax, mscModVcsMaxSubnetPacketSize=mscModVcsMaxSubnetPacketSize, mscModVcsAddOptTable=mscModVcsAddOptTable, mscModVcsWinsValue=mscModVcsWinsValue, subnetInterfaceCapabilitiesCA02A=subnetInterfaceCapabilitiesCA02A, subnetInterfaceCapabilities=subnetInterfaceCapabilities, subnetInterfaceGroupCA02=subnetInterfaceGroupCA02, subnetInterfaceCapabilitiesCA=subnetInterfaceCapabilitiesCA, mscModVcsX121MaxAddressLength=mscModVcsX121MaxAddressLength, mscModVcsE164IntlFormatAllowed=mscModVcsE164IntlFormatAllowed, subnetInterfaceGroup=subnetInterfaceGroup, mscModVcsSegmentSize=mscModVcsSegmentSize, mscModVcsX121IntlAddresses=mscModVcsX121IntlAddresses, mscModVcsGenerationMode=mscModVcsGenerationMode, mscModVcsWinsEntry=mscModVcsWinsEntry, 
mscModVcsUnitsCounted=mscModVcsUnitsCounted, mscModVcsNetworkIdType=mscModVcsNetworkIdType, mscModVcsAccOptEntry=mscModVcsAccOptEntry, mscModVcsAddOptEntry=mscModVcsAddOptEntry, mscModVcsX121ToE164EscapeSignificance=mscModVcsX121ToE164EscapeSignificance, mscModVcsDefaultNumberingPlan=mscModVcsDefaultNumberingPlan, mscModVcsIntOptTable=mscModVcsIntOptTable, mscModVcsCallRetryTimer=mscModVcsCallRetryTimer, mscModVcsWinsPktIndex=mscModVcsWinsPktIndex, mscModVcsCallSetupTimer=mscModVcsCallSetupTimer, mscModVcsE164NatlMaxAddressLength=mscModVcsE164NatlMaxAddressLength, subnetInterfaceGroupCA02A=subnetInterfaceGroupCA02A, mscModVcsNetworkIdCode=mscModVcsNetworkIdCode, mscModVcsE164TeleCountryCode=mscModVcsE164TeleCountryCode, mscModVcsIntOptEntry=mscModVcsIntOptEntry, subnetInterfaceCapabilitiesCA02=subnetInterfaceCapabilitiesCA02, mscModVcsE164LocalMinAddressLength=mscModVcsE164LocalMinAddressLength, mscModVcsRowStatusTable=mscModVcsRowStatusTable, mscModVcsHighPriorityPacketSizes=mscModVcsHighPriorityPacketSizes)
| 197.02994 | 2,993 | 0.792973 |
fea8eab09203e9965fd3c37311110a5d329a6d18 | 2,882 | py | Python | svgserver/app.py | omniscale/svgserver | a98f75ec9547fda25941129e854af046ba8f5dfe | [
"Apache-2.0"
] | 2 | 2018-10-18T07:15:58.000Z | 2020-04-09T20:42:07.000Z | svgserver/app.py | omniscale/svgserver | a98f75ec9547fda25941129e854af046ba8f5dfe | [
"Apache-2.0"
] | null | null | null | svgserver/app.py | omniscale/svgserver | a98f75ec9547fda25941129e854af046ba8f5dfe | [
"Apache-2.0"
] | 2 | 2019-06-20T01:29:59.000Z | 2021-12-01T12:18:55.000Z | import codecs
import tempfile
from contextlib import closing
from .cgi import CGIClient
from .combine import CombineSVG
from .mapserv import MapServer, InternalError
from .tree import build_tree
if __name__ == "__main__":
import os
import logging
logging.basicConfig(level=logging.DEBUG)
params = {
"service": "WMS",
"version": "1.1.1",
"request": "GetMap",
"width": 1234,
"height": 769,
"srs": "EPSG:3857",
"styles": "",
"format": "image/svg+xml",
"bbox": "775214.9923087133,6721788.224989068,776688.4414913012,6722705.993822992",
"map": os.path.abspath(os.path.dirname(__file__) + "/../tests/ms.map"),
}
with closing(layered_svg(params)) as f:
print(f.read())
| 29.408163 | 90 | 0.586051 |
feaaec4a50d5a134457fe10cd74a02481c434561 | 440 | py | Python | 11_app/script/purchase_order.py | israillaky/ERPOSAPP11 | 90dd26213fecce7f6301bfa2f2356d8f5d3a8086 | [
"MIT"
] | null | null | null | 11_app/script/purchase_order.py | israillaky/ERPOSAPP11 | 90dd26213fecce7f6301bfa2f2356d8f5d3a8086 | [
"MIT"
] | null | null | null | 11_app/script/purchase_order.py | israillaky/ERPOSAPP11 | 90dd26213fecce7f6301bfa2f2356d8f5d3a8086 | [
"MIT"
] | null | null | null | import frappe
| 44 | 117 | 0.740909 |
feab2f73df218463681f43ce0d3584c476b63adb | 925 | py | Python | src/common/bio/smiles.py | duttaprat/proteinGAN | 92b32192ab959e327e1d713d09fc9b40dc01d757 | [
"MIT"
] | 8 | 2020-12-23T21:44:47.000Z | 2021-07-09T05:46:16.000Z | src/common/bio/smiles.py | duttaprat/proteinGAN | 92b32192ab959e327e1d713d09fc9b40dc01d757 | [
"MIT"
] | null | null | null | src/common/bio/smiles.py | duttaprat/proteinGAN | 92b32192ab959e327e1d713d09fc9b40dc01d757 | [
"MIT"
] | null | null | null | from common.bio.constants import SMILES_CHARACTER_TO_ID, ID_TO_SMILES_CHARACTER
def from_smiles_to_id(data, column):
"""Converts sequences from smiles to ids
Args:
data: data that contains characters that need to be converted to ids
column: a column of the dataframe that contains characters that need to be converted to ids
Returns:
array of ids
"""
return [[SMILES_CHARACTER_TO_ID[char] for char in val] for index, val in data[column].iteritems()]
def from_id_from_smiles(data, column):
"""Converts sequences from ids to smiles characters
Args:
data: data that contains ids that need to be converted to characters
column: a column of the dataframe that contains ids that need to be converted to characters
Returns:
array of characters
"""
return [[ID_TO_SMILES_CHARACTER[id] for id in val] for index, val in data[column].iteritems()]
| 28.030303 | 102 | 0.721081 |
feab97b0913494abc7216c346f3470dd95d2e154 | 1,001 | py | Python | test/lib_config_test.py | yokoyama-flogics/ibp_monitor_2 | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | [
"BSD-2-Clause"
] | 3 | 2017-11-23T13:29:47.000Z | 2021-01-08T09:28:35.000Z | test/lib_config_test.py | yokoyama-flogics/ibp_monitor_2 | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | [
"BSD-2-Clause"
] | null | null | null | test/lib_config_test.py | yokoyama-flogics/ibp_monitor_2 | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | [
"BSD-2-Clause"
] | 2 | 2018-02-15T08:11:24.000Z | 2021-01-08T09:28:43.000Z | import os
import sys
import unittest
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *
if __name__ == "__main__":
unittest.main(buffer=True)
| 33.366667 | 70 | 0.679321 |
feac612781029aac47e6d21c85d8519de53dcb55 | 7,188 | py | Python | tests/test_installation.py | phdye/nimporter | 64eccc74950811e03efdde50649e84ca1fe87ae4 | [
"MIT"
] | null | null | null | tests/test_installation.py | phdye/nimporter | 64eccc74950811e03efdde50649e84ca1fe87ae4 | [
"MIT"
] | null | null | null | tests/test_installation.py | phdye/nimporter | 64eccc74950811e03efdde50649e84ca1fe87ae4 | [
"MIT"
] | null | null | null | """
Test to make sure that libraries built with Nimporter can be installed via Pip.
"""
import sys, os, subprocess, shutil, pkg_resources, json, warnings
from pathlib import Path
import pytest
import nimporter
PYTHON = 'python' if sys.platform == 'win32' else 'python3'
PIP = 'pip' if shutil.which('pip') else 'pip3'
| 38.854054 | 80 | 0.564969 |
feae2347f1d740037425173028bb1b3d8af9f2a3 | 153 | py | Python | hotpot_sample_dict.py | bvanaken/pytorch-pretrained-BERT | 71c1660fb082fa5ebde4afd8c7db2bc96b80bb59 | [
"Apache-2.0"
] | 1 | 2022-02-06T15:59:12.000Z | 2022-02-06T15:59:12.000Z | hotpot_sample_dict.py | bvanaken/pytorch-pretrained-BERT | 71c1660fb082fa5ebde4afd8c7db2bc96b80bb59 | [
"Apache-2.0"
] | null | null | null | hotpot_sample_dict.py | bvanaken/pytorch-pretrained-BERT | 71c1660fb082fa5ebde4afd8c7db2bc96b80bb59 | [
"Apache-2.0"
] | null | null | null | samples = {
"2_brother_plays": {
"question_parts": [range(1, 13), range(13, 17)],
"sp_parts": [range(20, 43), range(50, 60)]
}
}
| 21.857143 | 56 | 0.51634 |
feb04d32f16beda0e1b583eb23a6f47a91df44ef | 695 | py | Python | src/applications/blog/migrations/0003_post_author.py | alexander-sidorov/tms-z43 | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | [
"MIT"
] | 2 | 2020-12-17T20:19:21.000Z | 2020-12-22T12:46:43.000Z | src/applications/blog/migrations/0003_post_author.py | alexander-sidorov/tms-z43 | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | [
"MIT"
] | 4 | 2021-04-20T08:40:30.000Z | 2022-02-10T07:50:30.000Z | src/applications/blog/migrations/0003_post_author.py | alexander-sidorov/tms-z43 | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | [
"MIT"
] | 1 | 2021-02-10T06:42:19.000Z | 2021-02-10T06:42:19.000Z | # Generated by Django 3.1.7 on 2021-03-24 17:41
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
from django.db import models
| 24.821429 | 66 | 0.604317 |
feb0e950cc084ec84da234840633db92453d5121 | 16,227 | py | Python | sdk/python/pulumi_aws/cloudformation/stack_set.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudformation/stack_set.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudformation/stack_set.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['StackSet']
| 57.747331 | 403 | 0.680841 |
feb1798a65bfb807865b5bcdd876a894d5048086 | 319 | py | Python | code/config/imports.py | farioso-fernando/cover-meu-beat | b15a9c0c97086e51e42cee4dd40e7d0650130d0e | [
"MIT"
] | null | null | null | code/config/imports.py | farioso-fernando/cover-meu-beat | b15a9c0c97086e51e42cee4dd40e7d0650130d0e | [
"MIT"
] | null | null | null | code/config/imports.py | farioso-fernando/cover-meu-beat | b15a9c0c97086e51e42cee4dd40e7d0650130d0e | [
"MIT"
] | null | null | null | from kivy.uix.screenmanager import ScreenManager
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.animation import Animation
from kivy.core.window import Window
from kivymd.app import MDApp
import kivymd
import kivy
print(
)
print(
) | 16.789474 | 48 | 0.789969 |
feb1c1e0c98bd37c082895d1888d0fe15b8aaccf | 19,367 | py | Python | claripy/vsa/valueset.py | kwalberg/claripy | b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90 | [
"BSD-2-Clause"
] | null | null | null | claripy/vsa/valueset.py | kwalberg/claripy | b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90 | [
"BSD-2-Clause"
] | null | null | null | claripy/vsa/valueset.py | kwalberg/claripy | b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90 | [
"BSD-2-Clause"
] | null | null | null | import functools
import itertools
import numbers
from ..backend_object import BackendObject
from ..annotation import Annotation
vs_id_ctr = itertools.count()
#
# Overriding base methods
#
def __hash__(self):
return hash((self.region_id, self.region_base_addr, hash(self.offset)))
def __repr__(self):
return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset)
class ValueSet(BackendObject):
"""
ValueSet is a mapping between memory regions and corresponding offsets.
"""
def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None):
"""
Constructor.
:param str name: Name of this ValueSet object. Only for debugging purposes.
:param str region: Region ID.
:param int region_base_addr: Base address of the region.
:param int bits: Size of the ValueSet.
:param val: an initial offset
"""
self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name
if bits is None:
raise ClaripyVSAError('bits must be specified when creating a ValueSet.')
self._bits = bits
self._si = StridedInterval.empty(bits)
self._regions = {}
self._region_base_addrs = {}
self._reversed = False
# Shortcuts for initialization
# May not be useful though...
if region is not None and region_base_addr is not None and val is not None:
if isinstance(region_base_addr, numbers.Number):
# Convert it to a StridedInterval
region_base_addr = StridedInterval(bits=self._bits, stride=1,
lower_bound=region_base_addr,
upper_bound=region_base_addr)
if isinstance(val, numbers.Number):
val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val)
if isinstance(val, StridedInterval):
self._set_si(region, region_base_addr, val)
else:
raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val))
else:
if region is not None or val is not None:
raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.")
#
# Properties
#
#
# Private methods
#
def _set_si(self, region, region_base_addr, si):
if isinstance(si, numbers.Number):
si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si)
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
self._region_base_addrs[region] = region_base_addr
self._si = self._si.union(region_base_addr + si)
def _merge_si(self, region, region_base_addr, si):
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if region not in self._regions:
self._set_si(region, region_base_addr, si)
else:
self._regions[region] = self._regions[region].union(si)
self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr)
self._si = self._si.union(region_base_addr + si)
#
# Public methods
#
def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs
def apply_annotation(self, annotation):
"""
Apply a new annotation onto self, and return a new ValueSet object.
:param RegionAnnotation annotation: The annotation to apply.
:return: A new ValueSet object
:rtype: ValueSet
"""
vs = self.copy()
vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
return vs
#
# Arithmetic operations
#
def __eq__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
return ~ (self == other)
#
# Backend operations
#
def reverse(self):
# TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for
# TODO: now. I will implement the proper reversing support soon.
vs = self.copy()
vs._reversed = not vs._reversed
return vs
def extract(self, high_bit, low_bit):
"""
Operation extract
- A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a
ValueSet instance. Otherwise a StridedInterval is returned.
:param high_bit:
:param low_bit:
:return: A ValueSet or a StridedInterval
"""
if high_bit - low_bit + 1 == self.bits:
return self.copy()
if ('global' in self._regions and len(self._regions.keys()) > 1) or \
len(self._regions.keys()) > 0:
si_ret = StridedInterval.top(high_bit - low_bit + 1)
else:
if 'global' in self._regions:
si = self._regions['global']
si_ret = si.extract(high_bit, low_bit)
else:
si_ret = StridedInterval.empty(high_bit - low_bit + 1)
return si_ret
def concat(self, b):
new_vs = ValueSet(bits=self.bits + b.bits)
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with.
:return: True if they are exactly same, False otherwise.
"""
if self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
from ..ast.base import Base
from .strided_interval import StridedInterval
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError, ClaripyVSAError
from ..errors import ClaripyValueError
| 29.795385 | 120 | 0.58357 |
feb21c64003d71c234c911e57ed8a4baa217c7cb | 2,663 | py | Python | fardaastationapi.py | sina-cb/fardaastationapi | 0e27afe05195f346e17fd52e1c30b853c954a3b0 | [
"Apache-2.0"
] | null | null | null | fardaastationapi.py | sina-cb/fardaastationapi | 0e27afe05195f346e17fd52e1c30b853c954a3b0 | [
"Apache-2.0"
] | 1 | 2017-12-21T19:54:36.000Z | 2018-01-08T02:05:11.000Z | fardaastationapi.py | sina-cb/fardaastationapi | 0e27afe05195f346e17fd52e1c30b853c954a3b0 | [
"Apache-2.0"
] | null | null | null | import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
| 33.708861 | 119 | 0.592189 |
feb27ff41ef1690499bd0cbcb5cc15ed8e07d63d | 868 | py | Python | pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
| 26.30303 | 96 | 0.68318 |
feb49cfe9fd1f9a9e260952a3552e9f39bc9e707 | 12,199 | py | Python | catapult.py | spraakbanken/sparv-catapult | 03273985ceea6feef47a56084c595580d0338f7d | [
"MIT"
] | null | null | null | catapult.py | spraakbanken/sparv-catapult | 03273985ceea6feef47a56084c595580d0338f7d | [
"MIT"
] | 2 | 2021-12-13T19:47:29.000Z | 2021-12-15T16:14:50.000Z | catapult.py | spraakbanken/sparv-catapult | 03273985ceea6feef47a56084c595580d0338f7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# catapult: runs python scripts in already running processes to eliminate the
# python interpreter startup time.
#
# The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and
# shared between processes. See the variable annotators in handle and start.
#
# Run scripts in the catapult with the c program catalaunch.
from builtins import range, object
from multiprocessing import Process, cpu_count
from decorator import decorator
import logging
import os
import re
import runpy
import socket
import sys
import traceback
import sparv.util as util
RECV_LEN = 4096
# Important to preload all modules otherwise processes will need to do
# it upon request, introducing new delays.
#
# These imports uses the __all__ variables in the __init__ files.
from sparv.util import *
from sparv import *
logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Splits at every space that is not preceded by a backslash.
"""
splitter = re.compile('(?<!\\\\) ')
def set_last_argument(*values):
"""
Decorates a function f, setting its last argument(s) to the given value(s).
Used for setting the saldo lexicons to sparv.saldo.annotate and
sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse.
The decorator module is used to give the same signature and
docstring to the function, which is exploited in sparv.util.run.
"""
return inner
def handle(client_sock, verbose, annotators):
"""
Handle a client: parse the arguments, change to the relevant
directory, then run the script. Stdout and stderr are directed
to /dev/null or to the client socket.
"""
def chunk_send(msg):
"""
Sends a message chunk until it is totally received in the other end
"""
msg = msg.encode(util.UTF8)
while len(msg) > 0:
sent = client_sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
msg = msg[sent:]
def set_stdout_stderr():
"""
Put stdout and stderr to the client_sock, if verbose.
Returns the clean-up handler.
"""
orig_stds = sys.stdout, sys.stderr
w = Writer()
sys.stdout = w
sys.stderr = w
def cleanup():
"""
Restores stdout and stderr
"""
sys.stdout = orig_stds[0]
sys.stderr = orig_stds[1]
client_sock.close()
return cleanup
# Receive data
data = b""
new_data = None
# Message is terminated with a lone \
while new_data is None or not new_data.endswith(b'\\'):
new_data = client_sock.recv(RECV_LEN)
log.debug("Received %s", new_data)
data += new_data
if len(new_data) == 0:
log.warning("Received null!")
chunk_send("Error when receiving: got an empty message")
return
# Drop the terminating \
data = data[0:-1]
# Split arguments on spaces, and replace '\ ' to ' ' and \\ to \
args = [arg.replace('\\ ', ' ').replace('\\\\', '\\')
for arg in re.split(splitter, data.decode(util.UTF8))]
log.debug("Args: %s", args)
### PING? ###
if len(args) == 2 and args[1] == "PING":
log.info("Ping requested")
chunk_send("PONG")
return
# If the first argument is -m, the following argument is a module
# name instead of a script name
module_flag = len(args) > 2 and args[1] == '-m'
if module_flag:
args.pop(1)
if len(args) > 1:
# First argument is the pwd of the caller
old_pwd = os.getcwd()
pwd = args.pop(0)
log.info('Running %s', args[0])
log.debug('with arguments: %s', ' '.join(args[1:]))
log.debug('in directory %s', pwd)
# Set stdout and stderr, which returns the cleaup function
cleanup = set_stdout_stderr()
# Run the command
try:
sys.argv = args
os.chdir(pwd)
if module_flag:
annotator = annotators.get(args[0], None)
if not annotator:
# some of the annotators require two arguments
annotator = annotators.get((args[0], args[1]), None)
if annotator:
# skip the first argument now
sys.argv = args[0]
sys.argv.extend(args[2:])
if annotator:
util.run.main(annotator)
else:
runpy.run_module(args[0], run_name='__main__')
else:
runpy.run_path(args[0], run_name='__main__')
except (ImportError, IOError):
# If file does not exist, send the error message
chunk_send("%s\n" % sys.exc_info()[1])
cleanup()
log.exception("File does not exist")
except:
# Send other errors, and if verbose, send tracebacks
chunk_send("%s\n" % sys.exc_info()[1])
traceback.print_exception(*sys.exc_info())
cleanup()
log.exception("Unknown error")
else:
cleanup()
os.chdir(old_pwd)
# Run the cleanup function if there is one (only used with malt)
annotators.get((args[0], 'cleanup'), lambda: None)()
log.info('Completed %s', args[0])
else:
log.info('Cannot handle %s', data)
chunk_send('Cannot handle %s\n' % data)
def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None):
    """
    Workers listen to the socket server, and handle incoming requests

    Each process starts an own maltparser process, because they are
    cheap and cannot serve multiple clients at the same time.

    :param server_socket: listening UNIX socket shared by all workers.
    :param verbose: if truthy, log that this worker is running.
    :param annotators: dict of annotator callables; extended in place with
        per-worker cleanup hooks for malt/swener.
    :param malt_args: malt configuration, or None to skip starting malt.
    :param swener_args: swener configuration, or None to skip swener.
    """
    if malt_args:
        # NOTE(review): this local dict is invisible to the module-level
        # start_malt(); it looks vestigial -- confirm before removing.
        process_dict = dict(process=None, restart=True)
        start_malt()
        annotators['sparv.malt', 'cleanup'] = start_malt
    if swener_args:
        process_dict = dict(process=None, restart=True)
        start_swener()
        annotators['sparv.swener', 'cleanup'] = start_swener
    if verbose:
        log.info("Worker running!")
    while True:
        client_sock, addr = server_socket.accept()
        try:
            handle(client_sock, verbose, annotators)
        except Exception:
            # Narrowed from a bare `except`: let KeyboardInterrupt and
            # SystemExit actually stop the worker instead of being eaten.
            log.exception('Error in handling code')
            traceback.print_exception(*sys.exc_info())
        finally:
            # Always release the client socket, even if handling is
            # interrupted -- the old code could leak it on interrupt.
            client_sock.close()
def start(socket_path, processes=1, verbose='false',
          saldo_model=None, compound_model=None, stats_model=None,
          dalin_model=None, swedberg_model=None, blingbring_model=None,
          malt_jar=None, malt_model=None, malt_encoding=util.UTF8,
          sentiment_model=None, swefn_model=None, swener=False,
          swener_encoding=util.UTF8):
    """
    Starts a catapult on a socket file, using a number of processes.

    If verbose is false, all stdout and stderr programs produce is
    piped to /dev/null, otherwise it is sent to the client. The
    computation is done by the catapult processes, however.
    Regardless of what verbose is, client errors should be reported
    both in the catapult and to the client.

    The saldo model and compound model can be pre-loaded and shared in
    memory between processes.

    Start processes using catalaunch.
    """
    if os.path.exists(socket_path):
        log.error('socket %s already exists', socket_path)
        sys.exit(1)  # sys.exit instead of the REPL helper exit()
    verbose = verbose.lower() == 'true'
    log.info('Verbose: %s', verbose)
    # If processes does not contain an int, set it to the number of processors
    try:
        processes = int(processes)
    except (TypeError, ValueError):
        # Narrowed from a bare `except`: only a failed conversion should
        # trigger the cpu_count() fallback.
        processes = cpu_count()
    # Start the socket
    server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    server_socket.bind(socket_path)
    server_socket.listen(processes)
    # The dictionary of functions with saved lexica, indexed by module name strings
    annotators = {}
    # Load Saldo and older lexicons
    lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m]
    if lexicons:
        lexicon_dict = {}
        for lexicon in lexicons:
            # BUGFIX: str.rstrip(".pickle") strips *characters*, not the
            # suffix, and could eat trailing letters of the model name.
            # Remove the extension properly instead.
            name = os.path.basename(lexicon)
            if name.endswith(".pickle"):
                name = name[:-len(".pickle")]
            lexicon_dict[name] = saldo.SaldoLexicon(lexicon)
        annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate)
    if stats_model and compound_model:
        annotators['sparv.compound'] = set_last_argument(
            compound.SaldoCompLexicon(compound_model),
            compound.StatsLexicon(stats_model))(compound.annotate)
    elif compound_model:
        annotators['sparv.compound_simple'] = set_last_argument(
            compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate)
    # if blingbring_model:
    #     annotators['sparv.lexical_classes'] = set_last_argument(
    #         util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words)
    # if swefn_model:
    #     annotators['sparv.lexical_classes'] = set_last_argument(
    #         util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words)
    if sentiment_model:
        annotators['sparv.sentiment'] = set_last_argument(
            util.PickledLexicon(sentiment_model))(sentiment.sentiment)
    # if models_1700s:
    #     models = models_1700s.split()
    #     lexicons = [saldo.SaldoLexicon(lex) for lex in models]
    #     annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback)
    #     annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full)
    if verbose:
        log.info('Loaded annotators: %s', list(annotators.keys()))
    if malt_jar and malt_model:
        malt_args = dict(maltjar=malt_jar, model=malt_model,
                         encoding=malt_encoding, send_empty_sentence=True)
    else:
        malt_args = None
    if swener:
        swener_args = dict(stdin="", encoding=swener_encoding, verbose=True)
    else:
        swener_args = None
    # Start processes-1 workers
    # BUGFIX: the subprocess workers previously received no swener_args at
    # all, so only worker 0 (this thread) ever started swener.
    workers = [Process(target=worker,
                       args=[server_socket, verbose, annotators,
                             malt_args, swener_args])
               for i in range(processes - 1)]
    for p in workers:
        p.start()
    # Additionally, let this thread be worker 0
    worker(server_socket, verbose, annotators, malt_args, swener_args)
if __name__ == '__main__':
    # Hand the `start` entry point to sparv's CLI runner, which maps
    # command-line options onto its keyword arguments.
    util.run.main(start)
| 32.617647 | 111 | 0.61792 |
feb55dc64767ea42fd4dbdb633eb49cefc5afea8 | 2,445 | py | Python | tests/test_sentiments.py | rajeshkumargp/TextBlob | a8709368f2a8a8ba4d87730111f8b6675d0735cd | [
"MIT"
] | 6,608 | 2015-01-02T13:13:16.000Z | 2022-03-31T13:44:41.000Z | tests/test_sentiments.py | rajeshkumargp/TextBlob | a8709368f2a8a8ba4d87730111f8b6675d0735cd | [
"MIT"
] | 277 | 2015-01-01T15:08:55.000Z | 2022-03-28T20:00:06.000Z | tests/test_sentiments.py | rajeshkumargp/TextBlob | a8709368f2a8a8ba4d87730111f8b6675d0735cd | [
"MIT"
] | 1,110 | 2015-01-01T22:04:39.000Z | 2022-03-20T20:39:26.000Z | from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS
def assert_about_equal(first, second, places=4):
    """Assert that *first*, rounded to *places* decimals, equals *second*."""
    rounded = round(first, places)
    return assert_equal(rounded, second)
if __name__ == '__main__':
    # Allow running this test module directly (outside the nose runner).
    unittest.main()
| 35.434783 | 89 | 0.685481 |
feb57d630ade4f4d7aefdadbe2f5755982d89a54 | 127 | py | Python | src/unicef_security/apps.py | unicef/unicef-security | cc51ba52cddb845b8174cf3dc94706f0334453b2 | [
"Apache-2.0"
] | null | null | null | src/unicef_security/apps.py | unicef/unicef-security | cc51ba52cddb845b8174cf3dc94706f0334453b2 | [
"Apache-2.0"
] | 10 | 2019-04-24T14:33:49.000Z | 2020-12-19T01:07:06.000Z | src/unicef_security/apps.py | unicef/unicef-security | cc51ba52cddb845b8174cf3dc94706f0334453b2 | [
"Apache-2.0"
] | 1 | 2019-04-11T15:34:18.000Z | 2019-04-11T15:34:18.000Z | from django.apps import AppConfig
| 18.142857 | 36 | 0.740157 |
feb6feac24e99949d73380d3a6510ebf108ac24b | 229 | py | Python | utils/pretty-tests.py | isJuhn/pcsx2_ipc | 51f92d51aec05dffa82d418c97fc1d628b2ed40f | [
"MIT"
] | 7 | 2021-07-09T20:23:19.000Z | 2022-03-14T06:56:14.000Z | utils/pretty-tests.py | isJuhn/pcsx2_ipc | 51f92d51aec05dffa82d418c97fc1d628b2ed40f | [
"MIT"
] | 2 | 2021-03-07T16:14:44.000Z | 2021-03-30T07:48:05.000Z | utils/pretty-tests.py | isJuhn/pcsx2_ipc | 51f92d51aec05dffa82d418c97fc1d628b2ed40f | [
"MIT"
] | 1 | 2021-03-07T15:59:31.000Z | 2021-03-07T15:59:31.000Z | import json
import sys
import json

# Pretty-print selected fields from a JSON test report whose path is given
# as the first command-line argument.
# BUGFIX: the file handle was previously opened and never closed; use a
# context manager and json.load instead of json.loads(f.read()).
with open(sys.argv[1]) as f:
    y = json.load(f)
print("Tests results: " + str(y["result"]))
print("Tests duration: " + str(y["duration"]))
print("Tests output:\n~~~~~~~~~~~~~~~~~~~~\n" + str(y["stdout"]))
| 25.444444 | 66 | 0.576419 |
feb7b66503cd218d51059640f9914912cefb66a6 | 14,533 | py | Python | tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | [
"BSD-3-Clause"
] | 2,962 | 2016-05-11T15:06:06.000Z | 2022-03-27T20:06:16.000Z | tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | [
"BSD-3-Clause"
] | 5,899 | 2016-05-11T19:21:49.000Z | 2022-03-31T18:17:20.000Z | tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | [
"BSD-3-Clause"
] | 1,113 | 2016-05-11T15:37:42.000Z | 2022-03-31T09:37:04.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
if __name__ == "__main__":
    # Run the network-layer test suite when executed directly.
    unittest.main()
| 29.538618 | 103 | 0.718021 |
feb8045cb4a0a0c1c1b374f1a7ddff3513dfcc95 | 7,079 | py | Python | salt/modules/kernelpkg_linux_apt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | salt/modules/kernelpkg_linux_apt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | salt/modules/kernelpkg_linux_apt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
Manage Linux kernel packages on APT-based systems
"""
import functools
import logging
import re
# Guard the salt-internal imports so the module can still be loaded for
# inspection when salt's libraries are unavailable; __virtual__() checks
# the flag and refuses to load the module in that case.
try:
    from salt.utils.versions import LooseVersion as _LooseVersion
    from salt.exceptions import CommandExecutionError
    HAS_REQUIRED_LIBS = True
except ImportError:
    HAS_REQUIRED_LIBS = False
log = logging.getLogger(__name__)
# Name under which salt exposes this execution module.
__virtualname__ = "kernelpkg"
def __virtual__():
    """
    Load this module on Debian-based systems only
    """
    if not HAS_REQUIRED_LIBS:
        return (False, "Required library could not be imported")
    # Kali and Cumulus are both APT-driven Debian derivatives.
    os_family = __grains__.get("os_family", "")
    if os_family in ("Kali", "Debian", "Cumulus"):
        return __virtualname__
    return (False, "Module kernelpkg_linux_apt: no APT based system detected")
def active():
    """
    Return the version of the running kernel.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.active
    """
    release = __grains__["kernelrelease"]
    # Normalize to the package naming scheme when the pkg module offers it.
    if "pkg.normalize_name" in __salt__:
        return __salt__["pkg.normalize_name"](release)
    return release
def list_installed():
    """
    Return a list of all installed kernels.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.list_installed
    """
    pattern = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type()))
    packages = __salt__["pkg.list_pkgs"](versions_as_list=True)
    if packages is None:
        packages = []
    # Keep only kernel image packages, strip the "linux-image-" prefix and
    # sort by Debian version semantics.
    matches = [name for name in packages if pattern.match(name)]
    prefix_len = len(_package_prefix()) + 1
    return sorted(
        (name[prefix_len:] for name in matches),
        key=functools.cmp_to_key(_cmp_version),
    )
def latest_available():
    """
    Return the version of the latest kernel from the package repositories.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.latest_available
    """
    pkg_name = "{}-{}".format(_package_prefix(), _kernel_type())
    candidate = __salt__["pkg.latest_version"](pkg_name)
    if candidate == "":
        # Nothing newer in the repositories; fall back to what is on disk.
        return latest_installed()
    # Repo versions look like "A.B.C.D-..."; rebuild the kernel release
    # string as "A.B.C-D-<type>".
    match = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", candidate)
    return "{}-{}-{}".format(match.group(1), match.group(2), _kernel_type())
def latest_installed():
    """
    Return the version of the latest installed kernel.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.latest_installed

    .. note::
        This function may not return the same value as
        :py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
        has been installed and the system has not yet been rebooted.
        The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
        exists to detect this condition.
    """
    # list_installed() is sorted ascending, so the newest kernel is last.
    installed = list_installed()
    return installed[-1] if installed else None
def needs_reboot():
    """
    Detect if a new kernel version has been installed but is not running.
    Returns True if a new kernel is installed, False otherwise.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.needs_reboot
    """
    # A reboot is pending when the newest installed kernel is newer than
    # the one currently running.
    return _LooseVersion(latest_installed()) > _LooseVersion(active())
def upgrade(reboot=False, at_time=None):
    """
    Upgrade the kernel and optionally reboot the system.

    reboot : False
        Request a reboot if a new kernel is available.

    at_time : immediate
        Schedule the reboot at some point in the future. This argument
        is ignored if ``reboot=False``. See
        :py:func:`~salt.modules.system.reboot` for more details
        on this argument.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.upgrade
        salt '*' kernelpkg.upgrade reboot=True at_time=1

    .. note::
        An immediate reboot often shuts down the system before the minion has a
        chance to return, resulting in errors. A minimal delay (1 minute) is
        useful to ensure the result is delivered to the master.
    """
    target_pkg = "{}-{}".format(_package_prefix(), latest_available())
    upgrade_result = __salt__["pkg.install"](name=target_pkg)
    reboot_required = needs_reboot()
    ret = {
        "upgrades": upgrade_result,
        "active": active(),
        "latest_installed": latest_installed(),
        "reboot_requested": reboot,
        "reboot_required": reboot_required,
    }
    if reboot and reboot_required:
        log.warning("Rebooting system due to kernel upgrade")
        __salt__["system.reboot"](at_time=at_time)
    return ret
def upgrade_available():
    """
    Detect if a new kernel version is available in the repositories.
    Returns True if a new kernel is available, False otherwise.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.upgrade_available
    """
    # An upgrade exists when the repositories offer something newer than
    # the newest kernel on disk.
    return _LooseVersion(latest_installed()) < _LooseVersion(latest_available())
def remove(release):
    """
    Remove a specific version of the kernel.

    release
        The release number of an installed kernel. This must be the entire release
        number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
        not the package name.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.remove 4.4.0-70-generic
    """
    if release not in list_installed():
        raise CommandExecutionError(
            "Kernel release '{}' is not installed".format(release)
        )
    # Refuse to pull the kernel out from under the running system.
    if release == active():
        raise CommandExecutionError("Active kernel cannot be removed")
    target = "{}-{}".format(_package_prefix(), release)
    log.info("Removing kernel package %s", target)
    __salt__["pkg.purge"](target)
    return {"removed": [target]}
def cleanup(keep_latest=True):
    """
    Remove all unused kernel packages from the system.

    keep_latest : True
        In the event that the active kernel is not the latest one installed, setting this to True
        will retain the latest kernel package, in addition to the active one. If False, all kernel
        packages other than the active one will be removed.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.cleanup
    """
    removed = []
    for release in list_installed():
        # Never touch the running kernel.
        if release == active():
            continue
        # Optionally also keep the newest installed kernel.
        if keep_latest and release == latest_installed():
            continue
        removed.extend(remove(release)["removed"])
    return {"removed": removed}
def _package_prefix():
"""
Return static string for the package prefix
"""
return "linux-image"
def _kernel_type():
    """
    Parse the kernel name and return its type

    E.g. "4.4.0-70-generic" -> "generic".
    """
    match = re.match(r"^[\d.-]+-(.+)$", active())
    return match.group(1)
def _cmp_version(item1, item2):
    """
    Compare function for package version sorting

    Returns -1, 0 or 1 like a classic cmp() function.
    """
    left = _LooseVersion(item1)
    right = _LooseVersion(item2)
    # bool arithmetic yields exactly -1 / 0 / 1.
    return (left > right) - (left < right)
feb9338f0d564ca62f3ee051a6a33301b2ea1017 | 1,818 | py | Python | main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 971b911efee8f52c5950ba777b79e58a4f840024 | [
"Apache-2.0"
] | null | null | null | main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 971b911efee8f52c5950ba777b79e58a4f840024 | [
"Apache-2.0"
] | null | null | null | main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 971b911efee8f52c5950ba777b79e58a4f840024 | [
"Apache-2.0"
] | null | null | null | import json
import numpy as np
from numba import jit
from timeit import default_timer as timer
# Normalization constant sqrt(2*pi), computed once at import time so the
# per-element loop below does not recompute it.
SQRT_2PI = np.float32(np.sqrt(2 * np.pi))


# This function will run on the CPU.
def gaussian_cpu(values, mean, sigma):
    """Calculate values of the Gaussian function.

    :param values: list, function input parameters.
    :param mean: float, arithmetic mean.
    :param sigma: float, standard deviation.
    :return: list.
    """
    out = np.zeros_like(values)
    # The normalization factor is identical for every element.
    norm = 1 / (sigma * SQRT_2PI)
    for idx, sample in enumerate(values):
        out[idx] = norm * (np.e ** (-0.5 * ((sample - mean) / sigma) ** 2))
    return out
# This function will run on the GPU.
# numba JIT-compiles the same Python body into native code.
gaussian_gpu = jit(gaussian_cpu)
def write_to_file(name, values):
    """Serialize *values* as pretty-printed JSON to ``<name>.json``.

    :param name: string, file name, only prefix (".json" is appended).
    :param values: dictionary, values to write.
    """
    out_path = name + ".json"
    with open(out_path, 'w') as handle:
        json.dump(values, handle, indent=4)
if __name__ == "__main__":
    # Benchmark: time the plain-Python loop vs. the numba-compiled version
    # on the same random inputs, dumping each timing dict to a JSON file.
    # Randomly generated values.
    x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
    # Randomly generated mean.
    m = np.random.uniform(1, 10)
    # Randomly generated standard deviation.
    s = np.random.uniform(1, 10)
    # The number of rounds.
    n = 1
    # Used to store execution time.
    time_results = {}
    # CPU (interpreted) timing.
    for i in range(n):
        start = timer()
        gaussian_cpu(x, m, s)
        end = timer() - start
        time_results[i] = end
    write_to_file("cpu", time_results)
    # JIT timing; note the first call includes numba compilation overhead,
    # and the same dict is reused (entries are overwritten per round).
    for i in range(n):
        start = timer()
        gaussian_gpu(x, m, s)
        end = timer() - start
        time_results[i] = end
    write_to_file("gpu", time_results)
| 25.605634 | 98 | 0.633663 |
feb98f525f627b833eb5f7cdfb89e344a5f06574 | 103 | py | Python | src/jj_analyzer/__init__.py | ninetymiles/jj-logcat-analyzer | d4ae0fddfefc303ae9c17e6c9e08aad6a231e036 | [
"Apache-1.1"
] | null | null | null | src/jj_analyzer/__init__.py | ninetymiles/jj-logcat-analyzer | d4ae0fddfefc303ae9c17e6c9e08aad6a231e036 | [
"Apache-1.1"
] | null | null | null | src/jj_analyzer/__init__.py | ninetymiles/jj-logcat-analyzer | d4ae0fddfefc303ae9c17e6c9e08aad6a231e036 | [
"Apache-1.1"
] | null | null | null | #! /usr/bin/python
import sys
if sys.version_info[0] == 3:
from .__main__ import *
else:
pass | 12.875 | 28 | 0.640777 |
227dbc607b392dad80b7a078ce5ee4e6eb5704f6 | 5,605 | py | Python | utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | [
"Apache-2.0"
] | 1 | 2019-05-03T13:20:09.000Z | 2019-05-03T13:20:09.000Z | utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | [
"Apache-2.0"
] | 8 | 2019-05-04T17:06:21.000Z | 2020-05-29T12:37:06.000Z | utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | [
"Apache-2.0"
] | null | null | null | import datetime
def iso_extract_info(string):
    """
    Will get all of the info and return it as an array

    :param string: ISO formatted string that will be used for extraction
        (e.g. ``'2019-04-27T16:00:00-04:00'``)
    :return: list -- ``[year, month, day, minutes]`` for morning/noon times,
        ``[year, month, day, minutes, hours_12h]`` for afternoon times
    :note: year, month, day and the trailing hour are ints; minutes stays
        a two-character string
    :note: the 12-hour value is only appended when the ISO hour is > 12
        (the previous docstring described a layout the code never produced)
    """
    year = int(string[0:4])
    month = int(string[5:7])
    day = int(string[8:10])
    military_hour = int(string[11:13])
    minutes = string[14:16]

    elements = [year, month, day, minutes]
    if military_hour > 12:
        # Convert afternoon times to a 12-hour clock value.
        elements.append(military_hour - 12)
    return elements
# # Testing:
# print("[year, month, day, military_time_hour, minutes, hours]")
# print(iso_extract_info('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def iso_format_to_regular(string):
    """
    Will take a string that is an iso formatted string and make it look readable

    :param string: the iso formatted string
    :return: str -- ``"M/D/YYYY H:MMAM"`` or ``"M/D/YYYY H:MMPM"``

    BUGFIX: the old version rendered noon as "12:xxAM" and midnight as
    "0:xxAM"; both now follow the usual 12-hour convention.
    """
    year = int(string[0:4])
    month = int(string[5:7])
    day = int(string[8:10])
    military_hour = int(string[11:13])
    minutes = string[14:16]

    if military_hour >= 12:
        suffix = "PM"
        hour = (military_hour - 12) or 12  # 12:xx stays 12 PM
    else:
        suffix = "AM"
        hour = military_hour or 12         # 00:xx becomes 12 AM
    return "{month}/{day}/{year} {hour}:{minute}{suffix}".format(
        month=month, day=day, year=year, hour=hour, minute=minutes,
        suffix=suffix)
# Testing:
# print(iso_format_to_regular('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def fix_time(strange_date):
    """
    Will rearrange the strange date that Google gives and replace it with the normal string.

    :param strange_date: "YYYY-MM-DD" date Google gives when an event is marked as "all day"
    :return: str -- "M/D/YYYY"
    """
    year, month, day = (int(part) for part in strange_date.split("-"))
    return "{}/{}/{}".format(month, day, year)
# Doesn't use the "iso_extract_info" function
def multiday_checker_STRANGE(start_date, end_date):
    """
    Will check if an event is more than day long

    :param start_date: Strange Google formatted date ("YYYY-MM-DD") of the start of the event
    :param end_date: Strange Google formatted date ("YYYY-MM-DD") of the end of the event
    :return: Boolean -- True when the event spans more than one calendar day

    BUGFIX: the old implementation compared the *sums* of the date
    components, which broke across month/year boundaries (e.g. 2019-12-31
    vs. 2020-01-01 looked like a huge difference). Compare real dates.
    """
    start = datetime.date(*(int(part) for part in start_date.split("-")))
    end = datetime.date(*(int(part) for part in end_date.split("-")))
    # Google marks an all-day event's end as the *following* day, so a
    # one-day event has end - start == 1 day.
    return (end - start).days > 1
# Testing:
# print(multiday_checker_STRANGE('2019-04-21', '2019-04-22'))
# Doesn't use the "iso_extract_info" function
def STRANGE_string_weekday(string):
    """
    Will take a string that is a date formatted in the Google format and find what day of the week it is

    :param string: Google formatted string for the date ("M/D/YYYY")
    :return: string -- English weekday name

    BUGFIX: Wednesday was previously misspelled "Wendsday" in the returned
    string. date.weekday() is always 0-6, so the old unreachable "Error"
    branch was dropped.
    """
    month, day, year = (int(part) for part in string.split("/"))
    names = ["Monday", "Tuesday", "Wednesday", "Thursday",
             "Friday", "Saturday", "Sunday"]
    return names[datetime.date(year, month, day).weekday()]
# Testing:
# print(STRANGE_string_weekday("4/27/2019"))
# Doesn't use the "iso_extract_info" function
def ISO_string_weekday(string):
    """
    Will take a string that is a date formatted in the ISO format and find what day of the week it is

    :param string: ISO formatted string for the date
        (e.g. ``'2019-06-28T16:00:00-04:00'``)
    :return: string -- English weekday name

    BUGFIX: Wednesday was previously misspelled "Wendsday" in the returned
    string. date.weekday() is always 0-6, so the old unreachable "Error"
    branch was dropped.
    """
    year = int(string[0:4])
    month = int(string[5:7])
    day = int(string[8:10])
    names = ["Monday", "Tuesday", "Wednesday", "Thursday",
             "Friday", "Saturday", "Sunday"]
    return names[datetime.date(year, month, day).weekday()]
# Testing:
# print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
| 31.846591 | 106 | 0.662979 |
228079c406da2849bf07a999b9fbe4042daf4300 | 1,424 | py | Python | python/ch_06_Animatronic_Head.py | tallamjr/mbms | 6763faa870d1a16f272b3eade70b433ed3df0e51 | [
"MIT"
] | 18 | 2018-06-07T07:11:59.000Z | 2022-02-28T20:08:23.000Z | python/ch_06_Animatronic_Head.py | tallamjr/mbms | 6763faa870d1a16f272b3eade70b433ed3df0e51 | [
"MIT"
] | 1 | 2020-05-20T16:24:24.000Z | 2020-05-21T09:03:24.000Z | python/ch_06_Animatronic_Head.py | tallamjr/mbms | 6763faa870d1a16f272b3eade70b433ed3df0e51 | [
"MIT"
] | 8 | 2019-04-10T16:04:11.000Z | 2022-01-08T20:39:15.000Z | from microbit import *
import random, speech, radio
# Servo angles (degrees) for the eye positions; presumably consumed by the
# servo-driving code elsewhere in the file -- TODO confirm.
eye_angles = [50, 140, 60, 90, 140]
# The radio is unused here; switching it off frees micro:bit memory.
radio.off()
# Phrases the head can speak.
sentences = [
    "Hello my name is Mike",
    "What is your name",
    "I am looking at you",
    "Exterminate exterminate exterminate",
    "Number Five is alive",
    "I cant do that Dave",
    "daisee daisee give me your answer do"
]
# 5x5 LED bitmaps for three mouth shapes (closed / half-open / open).
lips0 = Image("00000:"
              "00000:"
              "99999:"
              "00000:"
              "00000")
lips1 = Image("00000:"
              "00900:"
              "99099:"
              "00900:"
              "00000")
lips2 = Image("00000:"
              "09990:"
              "99099:"
              "09990:"
              "00000")
lips = [lips0, lips1, lips2]
# Last observed accelerometer z magnitude; used to detect jolts.
base_z = 0
while True:
    new_z = abs(accelerometer.get_z())
    # React when the board is moved by more than 20 units on the z axis.
    if abs(new_z - base_z) > 20:
        base_z = new_z
        # NOTE(review): act() is defined elsewhere in the file (not visible
        # in this chunk).
        act()
    if random.randint(0, 1000) == 0: # say something 1 time in 1000
        act()
    # Poll the accelerometer five times a second.
    sleep(200)
| 21.575758 | 67 | 0.525281 |
22807a6716e561a1f502377b8a28eba78ad26040 | 322 | py | Python | debugtalk.py | caoyp2/HRunDemo | 41810a2fd366c780ea8f2bf9b4328fdd60aba171 | [
"Apache-2.0"
] | null | null | null | debugtalk.py | caoyp2/HRunDemo | 41810a2fd366c780ea8f2bf9b4328fdd60aba171 | [
"Apache-2.0"
] | null | null | null | debugtalk.py | caoyp2/HRunDemo | 41810a2fd366c780ea8f2bf9b4328fdd60aba171 | [
"Apache-2.0"
] | null | null | null | import datetime
import time
| 16.947368 | 44 | 0.677019 |
228122dba71ea421f33f3e5c51b862184d5fc4c8 | 205 | py | Python | hubcare/metrics/community_metrics/issue_template/urls.py | aleronupe/2019.1-hubcare-api | 3f031eac9559a10fdcf70a88ee4c548cf93e4ac2 | [
"MIT"
] | 7 | 2019-03-31T17:58:45.000Z | 2020-02-29T22:44:27.000Z | hubcare/metrics/community_metrics/issue_template/urls.py | aleronupe/2019.1-hubcare-api | 3f031eac9559a10fdcf70a88ee4c548cf93e4ac2 | [
"MIT"
] | 90 | 2019-03-26T01:14:54.000Z | 2021-06-10T21:30:25.000Z | hubcare/metrics/community_metrics/issue_template/urls.py | aleronupe/2019.1-hubcare-api | 3f031eac9559a10fdcf70a88ee4c548cf93e4ac2 | [
"MIT"
] | null | null | null | from django.urls import path
from issue_template.views import IssueTemplateView
urlpatterns = [
path(
'<str:owner>/<str:repo>/<str:token_auth>/',
IssueTemplateView.as_view()
),
]
| 18.636364 | 51 | 0.668293 |
2283023fbf32c038ed31074c2a312a5a7aa70d38 | 5,248 | py | Python | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 6099f4169a49f71cee2e24bb1052f273039505cd | [
"BSD-3-Clause"
] | 138 | 2017-08-15T18:56:55.000Z | 2022-03-29T05:23:37.000Z | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 6099f4169a49f71cee2e24bb1052f273039505cd | [
"BSD-3-Clause"
] | 444 | 2017-09-11T01:15:37.000Z | 2022-03-31T17:30:33.000Z | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 6099f4169a49f71cee2e24bb1052f273039505cd | [
"BSD-3-Clause"
] | 33 | 2017-10-30T14:23:53.000Z | 2022-03-25T01:36:13.000Z |
import os, tempfile, subprocess
from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters
from hammer_vlsi.units import VoltageValue, TemperatureValue
from hammer_tech import Library, ExtraLibrary
from typing import NamedTuple, Dict, Any, List
from abc import ABCMeta, abstractmethod
tool=SKY130SRAMGenerator
| 51.960396 | 126 | 0.582127 |
2283626d76b9fe6781848e584e29b4b24ab5e062 | 2,837 | py | Python | Section 4/nlp-4-ngrams.py | PacktPublishing/Hands-on-NLP-with-NLTK-and-scikit-learn- | 8bb2095093a822363675368a4216d30d14cac501 | [
"MIT"
] | 34 | 2018-08-14T09:59:13.000Z | 2021-11-08T13:12:50.000Z | Section 4/nlp-4-ngrams.py | anapatgl/Hands-on-NLP-with-NLTK-and-scikit-learn- | 8bb2095093a822363675368a4216d30d14cac501 | [
"MIT"
] | 1 | 2018-11-28T19:20:37.000Z | 2018-11-28T19:20:37.000Z | Section 4/nlp-4-ngrams.py | anapatgl/Hands-on-NLP-with-NLTK-and-scikit-learn- | 8bb2095093a822363675368a4216d30d14cac501 | [
"MIT"
] | 31 | 2018-08-07T07:34:33.000Z | 2022-03-15T08:50:44.000Z | import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
    """Extract TF-IDF features from corpus"""
    # Words that can flip a sentence's polarity -- keep them even though
    # NLTK's stop word list would normally drop them.
    white_list = [
        'what', 'but', 'if', 'because', 'as', 'until', 'against',
        'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
        'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
        'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
        'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
    # Standard NLTK stop words minus the white-listed ones.
    sa_stop_words = [
        word for word in nltk.corpus.stopwords.words("english")
        if word not in white_list
    ]
    # Vectorize: turn the raw documents into a sparse term-count matrix.
    count_vectorizer = feature_extraction.text.CountVectorizer(
        lowercase=True,  # for demonstration, True by default
        tokenizer=nltk.word_tokenize,  # use the NLTK tokenizer
        min_df=2,  # minimum document frequency, i.e. the word must appear more than once.
        ngram_range=(1, 2),
        stop_words=sa_stop_words
    )
    counts = count_vectorizer.fit_transform(corpus)
    # Re-weight the raw counts with TF-IDF.
    return feature_extraction.text.TfidfTransformer().fit_transform(counts)
# Load the labelled movie-review corpus (one subdirectory per class),
# vectorize it, and compare four classifiers plus a hard-voting ensemble.
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
    movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
# Hold out 30% of the documents for evaluation; fixed seed for repeatability.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
# Majority vote over the four fitted model types (refit on the same split).
voting_model = ensemble.VotingClassifier(
    estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
    voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
    voting_model.score(X_test, y_test)))
| 36.844156 | 90 | 0.70638 |
2283d1768504ac50dd9ea43fb4e940fbaf88eee6 | 649 | py | Python | code/gcd_sequence/sol_443.py | bhavinjawade/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | 2 | 2020-07-16T08:16:32.000Z | 2020-10-01T07:16:48.000Z | code/gcd_sequence/sol_443.py | Psingh12354/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | null | null | null | code/gcd_sequence/sol_443.py | Psingh12354/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | 1 | 2021-05-07T18:06:08.000Z | 2021-05-07T18:06:08.000Z |
# -*- coding: utf-8 -*-
'''
File name: code\gcd_sequence\sol_443.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #443 :: GCD sequence
#
# For more information see:
# https://projecteuler.net/problem=443
# Problem Statement
'''
Let g(n) be a sequence defined as follows:
g(4) = 13,
g(n) = g(n-1) + gcd(n, g(n-1)) for n > 4.
The first few values are:
n4567891011121314151617181920...
g(n)1314161718272829303132333451545560...
You are given that g(1000) = 2524 and g(1000000) = 2624152.
Find g(1015).
'''
# Solution
# Solution Approach
'''
'''
| 17.540541 | 62 | 0.644068 |
22849e131dffff72236a4d1d46cddf477f92bab9 | 2,823 | py | Python | src/collectors/rabbitmq/rabbitmq.py | lreed/Diamond | 2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80 | [
"MIT"
] | null | null | null | src/collectors/rabbitmq/rabbitmq.py | lreed/Diamond | 2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80 | [
"MIT"
] | null | null | null | src/collectors/rabbitmq/rabbitmq.py | lreed/Diamond | 2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Collects data from RabbitMQ through the admin interface
#### Notes
* if two vhosts have the queues with the same name, the metrics will collide
#### Dependencies
* pyrabbit
"""
import diamond.collector
try:
    from numbers import Number
    Number # workaround for pyflakes issue #13
    import pyrabbit.api
except ImportError:
    # Optional dependency: if pyrabbit (the RabbitMQ admin client) is not
    # installed, leave Number as None so the collector can detect the
    # missing dependency at runtime instead of failing at import time.
    Number = None
| 30.031915 | 78 | 0.54729 |
2284b104a47dc324bd27f42ce83e41850b152d6c | 27,170 | py | Python | nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 984fd34921e81659c4594a22ab142311808b3bb7 | [
"Apache-2.0"
] | 4,145 | 2019-09-13T08:29:43.000Z | 2022-03-31T18:31:44.000Z | nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 984fd34921e81659c4594a22ab142311808b3bb7 | [
"Apache-2.0"
] | 2,031 | 2019-09-17T16:51:39.000Z | 2022-03-31T23:52:41.000Z | nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 984fd34921e81659c4594a22ab142311808b3bb7 | [
"Apache-2.0"
] | 1,041 | 2019-09-13T10:08:21.000Z | 2022-03-30T06:37:38.000Z | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
| 42.386895 | 147 | 0.61325 |
2284c119fbaa59ef00a4dd53417eccef839221b3 | 1,140 | py | Python | anmotordesign/server.py | MarkWengSTR/ansys-maxwell-online | f9bbc535c7637d8f34abb241acfb97d1bdbe4103 | [
"MIT"
] | 8 | 2021-01-25T11:17:32.000Z | 2022-03-29T05:34:47.000Z | anmotordesign/server.py | MarkWengSTR/ansys-maxwell-online | f9bbc535c7637d8f34abb241acfb97d1bdbe4103 | [
"MIT"
] | 1 | 2021-06-14T18:40:16.000Z | 2021-08-25T14:37:21.000Z | anmotordesign/server.py | MarkWengSTR/ansys-maxwell-online | f9bbc535c7637d8f34abb241acfb97d1bdbe4103 | [
"MIT"
] | 8 | 2020-09-25T15:40:07.000Z | 2022-03-29T05:34:48.000Z | from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check
# Module-level counter of in-flight ANSYS jobs; presumably updated by route
# handlers defined elsewhere in this file -- not visible here.
ansys_processing_count = 0
# debug
# import ipdb; ipdb.set_trace()
app = Flask(__name__)
CORS(app) # local development cors
if __name__ == "__main__":
    # Development entry point: listen on all interfaces on port 5000.
    # debug=True enables the reloader/debugger and is not for production.
    app.run(host='0.0.0.0', port=5000, debug=True)
| 25.909091 | 99 | 0.62193 |
2284f5a8afa9699354bd56f97faf33c044aeae81 | 160 | py | Python | cnn/donas_utils/dataset/__init__.py | eric8607242/darts | 34c79a0956039f56a6a87bfb7f4b1ae2af615bea | [
"Apache-2.0"
] | null | null | null | cnn/donas_utils/dataset/__init__.py | eric8607242/darts | 34c79a0956039f56a6a87bfb7f4b1ae2af615bea | [
"Apache-2.0"
] | null | null | null | cnn/donas_utils/dataset/__init__.py | eric8607242/darts | 34c79a0956039f56a6a87bfb7f4b1ae2af615bea | [
"Apache-2.0"
] | null | null | null | from .dataset import get_cifar100, get_cifar10, get_imagenet_lmdb, get_imagenet
__all__ = ["get_cifar100", "get_cifar10", "get_imagenet_lmdb", "get_imagenet"]
| 40 | 79 | 0.8 |
2285470cfe61c3208efb829c668012f4eb4c042d | 196 | py | Python | classifier/cross_validation.py | ahmdrz/spam-classifier | a9cc3916a7c22545c82f0bfae7e4b95f3b36248f | [
"MIT"
] | 1 | 2019-08-05T12:02:53.000Z | 2019-08-05T12:02:53.000Z | classifier/cross_validation.py | ahmdrz/spam-classifier | a9cc3916a7c22545c82f0bfae7e4b95f3b36248f | [
"MIT"
] | null | null | null | classifier/cross_validation.py | ahmdrz/spam-classifier | a9cc3916a7c22545c82f0bfae7e4b95f3b36248f | [
"MIT"
] | null | null | null | from sklearn.model_selection import KFold | 32.666667 | 41 | 0.704082 |
2285d8fefdc5efe988f942a7eb7b3f78ecd84063 | 310 | py | Python | category/models.py | captainxavier/AutoBlog | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | [
"MIT"
] | null | null | null | category/models.py | captainxavier/AutoBlog | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | [
"MIT"
] | null | null | null | category/models.py | captainxavier/AutoBlog | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | [
"MIT"
] | null | null | null | from django.db import models
| 15.5 | 45 | 0.590323 |
228727092b8b8c1cbde1234be034bd7032daae7a | 1,488 | py | Python | admin_tools/urls.py | aucoeur/WeVoteServer | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | [
"MIT"
] | 44 | 2015-11-19T04:52:39.000Z | 2021-03-17T02:08:26.000Z | admin_tools/urls.py | aucoeur/WeVoteServer | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | [
"MIT"
] | 748 | 2015-09-03T04:18:33.000Z | 2022-03-10T14:08:10.000Z | admin_tools/urls.py | aucoeur/WeVoteServer | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | [
"MIT"
] | 145 | 2015-09-19T10:10:44.000Z | 2022-03-04T21:01:12.000Z | # admin_tools/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.admin_home_view, name='admin_home',),
re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'),
re_path(r'^data_cleanup_organization_analysis/$',
views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'),
re_path(r'^data_cleanup_organization_list_analysis/$',
views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'),
re_path(r'^data_cleanup_position_list_analysis/$',
views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'),
re_path(r'^data_cleanup_voter_hanging_data_process/$',
views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'),
re_path(r'^data_cleanup_voter_list_analysis/$',
views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'),
re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'),
re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'),
re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'),
re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'),
]
| 55.111111 | 108 | 0.78293 |
22875dd3eed7789c404cf71dae058c78660c2f50 | 3,414 | py | Python | hippynn/graphs/nodes/base/multi.py | tautomer/hippynn | df4504a5ea4680cfc61f490984dcddeac7ed99ee | [
"BSD-3-Clause"
] | 21 | 2021-11-17T00:56:35.000Z | 2022-03-22T05:57:11.000Z | hippynn/graphs/nodes/base/multi.py | tautomer/hippynn | df4504a5ea4680cfc61f490984dcddeac7ed99ee | [
"BSD-3-Clause"
] | 4 | 2021-12-17T16:16:53.000Z | 2022-03-16T23:50:38.000Z | hippynn/graphs/nodes/base/multi.py | tautomer/hippynn | df4504a5ea4680cfc61f490984dcddeac7ed99ee | [
"BSD-3-Clause"
] | 6 | 2021-11-30T21:09:31.000Z | 2022-03-18T07:07:32.000Z | """
A base node that provides several output tensors.
"""
from ....layers.algebra import Idx
from .base import SingleNode, Node
from .. import _debprint
from ...indextypes import IdxType
| 38.795455 | 110 | 0.626245 |
22881ed2f077cedcedaa10dbf83c13905a622021 | 113 | py | Python | main_module/__init__.py | JohanNicander/python-test-architecture | 2418f861cb46c3fccaa21be94ee92c5862985a15 | [
"Apache-2.0"
] | null | null | null | main_module/__init__.py | JohanNicander/python-test-architecture | 2418f861cb46c3fccaa21be94ee92c5862985a15 | [
"Apache-2.0"
] | null | null | null | main_module/__init__.py | JohanNicander/python-test-architecture | 2418f861cb46c3fccaa21be94ee92c5862985a15 | [
"Apache-2.0"
] | null | null | null | from .zero import zero
from main_module._unittester import UnitTester
test = UnitTester(__name__)
del UnitTester | 22.6 | 46 | 0.840708 |
228856c2bad586d523ebf387bffc058ae9b589d7 | 4,151 | py | Python | barber/cutter.py | LSSTDESC/barber | 9dbe69e69a078ef3b70a316807517e2a4d4e60cd | [
"MIT"
] | null | null | null | barber/cutter.py | LSSTDESC/barber | 9dbe69e69a078ef3b70a316807517e2a4d4e60cd | [
"MIT"
] | 6 | 2020-04-28T15:20:08.000Z | 2020-04-28T15:37:02.000Z | barber/cutter.py | LSSTDESC/barber | 9dbe69e69a078ef3b70a316807517e2a4d4e60cd | [
"MIT"
] | null | null | null | import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import tomo_challenge.metrics as tcm
# custom data type, could be replaced with/tie in to tree.py class
# cut_vals is (nfeat, nbins - 1) numpy array, float
# tree_ids is ((nbins,) * nfeat) numpy array, int
TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids'])
# should maybe put this function in a class so we can call TreePars.to_array
def treepars_to_array(treepars):
    """
    Flatten cut_vals and tree_ids into a single 1D array for the optimizer.

    Parameters
    ----------
    treepars: TreePars
        namedtuple with `cut_vals` ((nfeat, nbins - 1) float array) and
        `tree_ids` (((nbins,) * nfeat) int array)

    Returns
    -------
    arr: numpy.ndarray
        1D array holding the flattened cut_vals followed by the flattened
        tree_ids
    """
    # Bug fix: numpy has no top-level `flatten` function, so the original
    # `np.flatten(...)` raised AttributeError.  `np.ravel` is the
    # function-form equivalent of `ndarray.flatten`.
    cuts = np.ravel(treepars.cut_vals)
    ids = np.ravel(treepars.tree_ids)
    arr = np.concatenate((cuts, ids))
    return arr
# should maybe put this function in a class so we can call TreePars.from_array
def array_to_treepars(arr):
    """
    Converts optimizer format of 1D array back into namedtuple of arrays
    """
    # NOTE(review): `type(arr)` is the type of the whole ndarray, never
    # `float` or `int`, so both masks are the scalar False and these select
    # empty arrays.  The intent is presumably an element-wise split of the
    # float cut values vs the int tree ids (cf. the positional slicing
    # sketched in the commented lines below) -- confirm before relying on
    # this function.
    flat_cuts = arr[type(arr) == float]
    flat_ids = arr[type(arr) == int]
    nbins = len(np.unique(flat_ids))
    # NOTE(review): true division yields a float, but `reshape` below needs
    # an int dimension; use `//` once the selection above is fixed.
    nfeat = len(flat_cuts) / (nbins - 1)
    # maybe do some assert checks with these just in case types have problems
    # cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1))
    # ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat)
    cuts = flat_cuts.reshape((nfeat, nbins-1))
    ids = flat_ids.reshape((nbins,) * nfeat)
    treepars = TreePars(cuts, ids)
    return(treepars)
def get_cuts(galaxies, ival_treepars=None, nbins=3):
    """
    Obtains simplest possible bin definitions: cuts in the space of observables given number of bins
    Parameters
    ----------
    galaxies: numpy.ndarray, float
        observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies
        shape(galaxies) = (ngals, nfeat)
    ival_treepars: namedtuple, numpy.ndarray, float and int, optional
        initial values for decision tree parameters
        shape(ivals.cut_vals) = (nfeat, (nbins - 1))
        shape(tree_ids) = ((nbins,) * nfeat)
    nbins: int, optional
        number of bins for which to obtain cuts
    Returns
    -------
    assignments: numpy.ndarray, int
        bin assignment for each galaxy
        shape(assignments) = (ngals, 1)
    Notes
    -----
    `sort_gals` does the heavy lifting.
    `eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py).
    The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`.
    """
    (ngals, nfeat) = np.shape(galaxies)
    if ival_treepars is None:
        # Default initialization: per-feature quantile cuts plus random ids.
        cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1)
        # NOTE(review): `ivals` is not defined until further down, and numpy
        # has no top-level `flatten`; this assert raises if executed.
        assert(len(np.flatten(ivals)) == nbins**nfeat)
        # need structure and way of making dumb version of these
        # NOTE(review): `npr.random_integers` is deprecated and removed in
        # newer numpy; `npr.randint` is the modern equivalent.
        tree_ids = npr.random_integers(0, nbins, nbins**nfeat)
        assert(len(np.unique(tree_ids)) == nbins)
        # NOTE(review): `reshape` returns a new array; this result is
        # discarded, so tree_ids stays 1D here.
        tree_ids.reshape((nfeat, nbins))
        ival_treepars = TreePars(cut_ivals, tree_ids)
    ivals = treepars_to_array(ival_treepars)
    # NOTE(review): scipy expects extra arguments as a tuple, i.e.
    # args=(galaxies,) -- confirm before relying on this call.
    opt_res = spo.minimize(eval_metric, ivals, args=galaxies)
    treepars = array_to_treepars(opt_res.x)
    assignments = sort_gals(galaxies, treepars)
    return(assignments)
def sort_gals(galaxies, tree_pars):
    """
    Divides available galaxies into subsets according to a given decision tree on their observables
    Parameters
    ----------
    galaxies: nfeature x n_gal array
    tree: tree object
    Notes
    -----
    could be based on bisect, or maybe a sklearn object?
    """
    # NOTE(review): unimplemented stub -- implicitly returns None, so the
    # callers (`get_cuts`, `eval_metric`) currently receive None as the
    # `assignments` value.
    pass
def eval_metric(arr, galaxies):
    """
    Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API
    Notes
    -----
    Replace `tcm.metric` with actual call to one of the tomo_challenge metrics
    Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts...
    """
    # Reconstruct tree parameters from the optimizer's flat vector, then
    # score the resulting bin assignments.
    treepars = array_to_treepars(arr)
    # NOTE(review): `sort_gals` is currently a stub that returns None, so
    # `assignments` is None here until it is implemented.
    assignments = sort_gals(galaxies, treepars)
    metval = tcm.metric(assignments)
    return metval
| 35.478632 | 261 | 0.685859 |
2288f93227622fced04679bfe49afbad16de4e0a | 480 | py | Python | examples/transfer/highscore.py | coding-world/matrix_max7219 | 3126604ee400a9ec1d25797f6957a2eae8a3f33c | [
"MIT"
] | null | null | null | examples/transfer/highscore.py | coding-world/matrix_max7219 | 3126604ee400a9ec1d25797f6957a2eae8a3f33c | [
"MIT"
] | null | null | null | examples/transfer/highscore.py | coding-world/matrix_max7219 | 3126604ee400a9ec1d25797f6957a2eae8a3f33c | [
"MIT"
] | null | null | null | import shelve
regal = shelve.open('score.txt')
neuerScore = int(input("Neuer HighScore: \n"))
updateScore(neuerScore) | 20 | 46 | 0.66875 |
22896fc7355f1baa1a7f7d9e3165cdfe2c0b6611 | 165 | py | Python | src/node/ext/ldap/scope.py | enfold/node.ext.ldap | 28127057be6ba3092389f3c920575292d43d9f94 | [
"BSD-2-Clause"
] | 3 | 2016-04-22T00:37:17.000Z | 2020-04-03T07:14:54.000Z | src/node/ext/ldap/scope.py | enfold/node.ext.ldap | 28127057be6ba3092389f3c920575292d43d9f94 | [
"BSD-2-Clause"
] | 51 | 2015-02-10T11:14:01.000Z | 2021-05-05T11:06:59.000Z | src/node/ext/ldap/scope.py | enfold/node.ext.ldap | 28127057be6ba3092389f3c920575292d43d9f94 | [
"BSD-2-Clause"
] | 12 | 2016-08-09T09:39:35.000Z | 2020-04-18T14:53:56.000Z | # -*- coding: utf-8 -*-
import ldap
# Re-export python-ldap's search-scope constants under short local names so
# importers of this module do not need to depend on python-ldap directly.
BASE = ldap.SCOPE_BASE
ONELEVEL = ldap.SCOPE_ONELEVEL
SUBTREE = ldap.SCOPE_SUBTREE
# Convenience list of every supported scope.
SCOPES = [BASE, ONELEVEL, SUBTREE]
# Drop the module reference so only the constants form this module's API.
del ldap
| 16.5 | 34 | 0.727273 |
2289dcddf267c6a1a0e8cb907450531ad79de492 | 493 | py | Python | urban-sound-classification/feature_merge.py | tensorflow-korea/tfk-notebooks | 67831acce7f435500377bf03e6bd9d15fdd5f1bc | [
"MIT"
] | 50 | 2016-06-18T12:52:29.000Z | 2021-12-10T07:13:20.000Z | urban-sound-classification/feature_merge.py | tensorflow-korea/tfk-notebooks | 67831acce7f435500377bf03e6bd9d15fdd5f1bc | [
"MIT"
] | null | null | null | urban-sound-classification/feature_merge.py | tensorflow-korea/tfk-notebooks | 67831acce7f435500377bf03e6bd9d15fdd5f1bc | [
"MIT"
] | 51 | 2016-04-30T16:38:05.000Z | 2021-01-15T18:12:03.000Z | import glob
import numpy as np
# Accumulators shaped to the per-sample layout of the .npz chunks:
# 193 feature columns, 10 label columns (presumably one-hot classes --
# TODO confirm), and a single group-id column.
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
# Merge every single-digit chunk file (urban_sound_0.npz, urban_sound_1.npz, ...).
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
    print(fn)
    data = np.load(fn)
    X = np.append(X, data['X'], axis=0)
    y = np.append(y, data['y'], axis=0)
    groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
# Sanity check: print any label row whose sum exceeds 1.5 (i.e. rows that do
# not look like a single one-hot label).
for r in y:
    if np.sum(r) > 1.5:
        print(r)
# Write the merged arrays back out as a single archive.
np.savez('urban_sound', X=X, y=y, groups=groups)
| 22.409091 | 54 | 0.602434 |
228ad78fbc730707861e4c8d9c262be93d22bf72 | 485 | py | Python | program/program/trackers/TrackerCorrelation.py | JankaSvK/thesis | c440ab8242b058f580fdf9d5a1d00708a1696561 | [
"MIT"
] | 1 | 2018-11-29T14:13:47.000Z | 2018-11-29T14:13:47.000Z | program/program/trackers/TrackerCorrelation.py | JankaSvK/thesis | c440ab8242b058f580fdf9d5a1d00708a1696561 | [
"MIT"
] | 3 | 2018-04-24T18:30:00.000Z | 2018-05-11T23:25:07.000Z | program/program/trackers/TrackerCorrelation.py | JankaSvK/thesis | c440ab8242b058f580fdf9d5a1d00708a1696561 | [
"MIT"
] | null | null | null | import dlib
| 28.529412 | 96 | 0.585567 |
228b1c94896beb15138918d15679461767abdb01 | 3,238 | py | Python | examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py | rilango/NeMo | 6f23ff725c596f25fab6043d95e7c0b4a5f56331 | [
"Apache-2.0"
] | null | null | null | examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py | rilango/NeMo | 6f23ff725c596f25fab6043d95e7c0b4a5f56331 | [
"Apache-2.0"
] | null | null | null | examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py | rilango/NeMo | 6f23ff725c596f25fab6043d95e7c0b4a5f56331 | [
"Apache-2.0"
] | 1 | 2021-12-07T08:15:36.000Z | 2021-12-07T08:15:36.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch.multiprocessing as mp
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.utils import AppState, logging
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| 37.218391 | 218 | 0.734713 |
228b861994dfd3c8d5b7524f5b44ae49bacc2148 | 6,007 | py | Python | sdk/python/pulumi_aws/apigateway/api_key.py | dixler/pulumi-aws | 88838ed6d412c092717a916b0b5b154f68226c3a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/apigateway/api_key.py | dixler/pulumi-aws | 88838ed6d412c092717a916b0b5b154f68226c3a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/apigateway/api_key.py | dixler/pulumi-aws | 88838ed6d412c092717a916b0b5b154f68226c3a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
| 45.507576 | 170 | 0.662227 |
228b9e5c3d1a55dd867bb42f9e9fbbc7ed2e9fc5 | 10,684 | py | Python | SROMPy/optimize/ObjectiveFunction.py | jwarner308/SROMPy | 12007e4cd99c88446f10974a93050405c5cd925b | [
"Apache-2.0"
] | 23 | 2018-05-13T05:13:03.000Z | 2022-01-29T19:43:28.000Z | SROMPy/optimize/ObjectiveFunction.py | jwarner308/SROMPy | 12007e4cd99c88446f10974a93050405c5cd925b | [
"Apache-2.0"
] | 11 | 2018-03-28T13:13:44.000Z | 2022-03-30T18:56:57.000Z | SROMPy/optimize/ObjectiveFunction.py | jwarner308/SROMPy | 12007e4cd99c88446f10974a93050405c5cd925b | [
"Apache-2.0"
] | 19 | 2018-06-01T14:49:30.000Z | 2022-03-05T05:02:06.000Z | # Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from SROMPy.target import RandomVector
from SROMPy.target.RandomEntity import RandomEntity
| 37.356643 | 80 | 0.624579 |
228bb0a969acb617ccc7d0b12b1281bd81283a5f | 4,016 | py | Python | test/utils.py | vasili-v/distcovery | e07882d55ebe2e4fd78a720764803e6b3e8cbc7d | [
"MIT"
] | null | null | null | test/utils.py | vasili-v/distcovery | e07882d55ebe2e4fd78a720764803e6b3e8cbc7d | [
"MIT"
] | null | null | null | test/utils.py | vasili-v/distcovery | e07882d55ebe2e4fd78a720764803e6b3e8cbc7d | [
"MIT"
] | null | null | null | import os
import errno
import sys
| 36.844037 | 80 | 0.493775 |
228d76877f0d9f67ffc6dc7483c7c0a95962b0f9 | 864 | py | Python | var/spack/repos/builtin/packages/perl-ipc-run/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/perl-ipc-run/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/perl-ipc-run/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 39.272727 | 100 | 0.730324 |
228d8328feac3519c1eb966b9a43a964120c8c6c | 1,369 | py | Python | tests/test_parser_create_site_users.py | WillAyd/tabcmd | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | [
"MIT"
] | null | null | null | tests/test_parser_create_site_users.py | WillAyd/tabcmd | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | [
"MIT"
] | null | null | null | tests/test_parser_create_site_users.py | WillAyd/tabcmd | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | [
"MIT"
] | null | null | null | import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser
from .common_setup import *
commandname = 'createsiteusers'
| 37 | 90 | 0.720964 |
228e4efae17879a415faffa2bdf7cfbc08f32c9f | 1,078 | py | Python | secretsmanager_env.py | iarlyy/secretsmanager-env | 3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73 | [
"MIT"
] | 1 | 2020-02-13T17:11:29.000Z | 2020-02-13T17:11:29.000Z | secretsmanager_env.py | iarlyy/secretsmanager-env | 3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73 | [
"MIT"
] | null | null | null | secretsmanager_env.py | iarlyy/secretsmanager-env | 3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import json
import os

import boto3

parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
Output following the defined format.
Options are:
dotenv - dotenv style [default]
export - shell export style
stdout - secret plain value style'''
)
parser.add_argument(
    '--output',
    default='dotenv',
    choices=['stdout', 'dotenv', 'export'],
)
args = parser.parse_args()

# The secret's name comes from the environment; its SecretString is expected
# to be a JSON object mapping environment-variable names to values.
try:
    secret_id = os.environ.get("ENV_SECRET_NAME")
    secretsmanager = boto3.client('secretsmanager')
    secret_values = json.loads(
        secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])
except Exception:
    # Narrowed from a bare `except:`; the error is still re-raised so
    # failures stay visible to the caller.
    print('Error getting secret')
    raise

prefix = 'export ' if args.output == 'export' else ''

if args.output != 'stdout':
    # dotenv/export style: one KEY=$'value' line per entry, with escaped
    # "\n" sequences expanded into real newlines.
    for envvar in secret_values:
        print(prefix + envvar + "=$'" + secret_values[envvar].replace('\\n', '\n') + "'")
else:
    # Bug fix: `secret_values` is a dict, so the original
    # `secret_values.replace(...)` raised AttributeError.  Expand the
    # escaped newlines in each string value, then dump the mapping as JSON.
    expanded = {
        key: value.replace('\\n', '\n') if isinstance(value, str) else value
        for key, value in secret_values.items()
    }
    print(json.dumps(expanded, indent=2, sort_keys=True))
| 24.5 | 99 | 0.670686 |
228e74b0f9248fe2ef101b86260ca316c5578c5c | 1,730 | py | Python | 109.py | juandarr/ProjectEuler | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | [
"MIT"
] | null | null | null | 109.py | juandarr/ProjectEuler | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | [
"MIT"
] | null | null | null | 109.py | juandarr/ProjectEuler | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | [
"MIT"
] | null | null | null | """
Finds the number of distinct ways a player can checkout a score less than 100
Author: Juan Rios
"""
import math
def checkout_solutions(checkout, sequence, idx_sq, d):
    '''
    Count the distinct ways to finish a given checkout score: a final
    double taken from ``d`` (ascending), preceded by at most two throws
    drawn from ``sequence`` (ascending scores).  ``idx_sq`` maps a score
    to the index of its entry in ``sequence``.
    '''
    solutions = 0
    top = len(sequence) - 1
    for dbl in d:
        if dbl > checkout:
            # doubles are ascending, so no later double can fit either
            break
        remainder = checkout - dbl
        if remainder == 0:
            # the double alone closes the game
            solutions += 1
            continue
        # pick the highest index in `sequence` worth considering
        if remainder > 60:
            start = top
        elif remainder in idx_sq:
            start = idx_sq[remainder]
        else:
            start = top
            while remainder > sequence[start]:
                start -= 1
        for i in range(start, -1, -1):
            first = sequence[i]
            if first == remainder:
                # a single extra throw suffices
                solutions += 1
                continue
            for j in range(i, -1, -1):
                pair = first + sequence[j]
                if pair == remainder:
                    solutions += 1
                elif pair < remainder:
                    # entries only shrink from here, so no pair can match
                    break
    return solutions
if __name__ == "__main__":
limit_value=99
print('The number of distinct ways a player can checkout a score less than 100 is {0}'.format(darts_checkout(limit_value))) | 28.360656 | 128 | 0.540462 |
228e9262ba137f922fefb676a2a9e3eabc4bf87c | 804 | py | Python | src/tevatron/tevax/loss.py | vjeronymo2/tevatron | 7235b0823b5c3cdf1c8ce8f67cb5f1209218086a | [
"Apache-2.0"
] | 95 | 2021-09-16T00:35:17.000Z | 2022-03-31T04:59:05.000Z | src/tevatron/tevax/loss.py | vjeronymo2/tevatron | 7235b0823b5c3cdf1c8ce8f67cb5f1209218086a | [
"Apache-2.0"
] | 16 | 2021-10-05T12:29:33.000Z | 2022-03-31T17:59:20.000Z | src/tevatron/tevax/loss.py | vjeronymo2/tevatron | 7235b0823b5c3cdf1c8ce8f67cb5f1209218086a | [
"Apache-2.0"
] | 15 | 2021-09-19T02:20:03.000Z | 2022-03-10T03:00:23.000Z | import jax.numpy as jnp
from jax import lax
import optax
import chex
| 36.545455 | 108 | 0.690299 |
228eb608e052e061a5945151be48c2a98a56d133 | 1,758 | py | Python | setup.py | kinnala/gammy | 85237d424001f77f296d724c95c8dec5803a8e1e | [
"MIT"
] | null | null | null | setup.py | kinnala/gammy | 85237d424001f77f296d724c95c8dec5803a8e1e | [
"MIT"
] | null | null | null | setup.py | kinnala/gammy | 85237d424001f77f296d724c95c8dec5803a8e1e | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
import versioneer
if __name__ == "__main__":
meta = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp:
exec(fp.read(), meta)
setup(
name = "gammy",
version = versioneer.get_version(),
author = meta["__author__"],
author_email = meta["__contact__"],
description = "Generalized additive models with a Bayesian twist",
url = "https://github.com/malmgrek/Gammy",
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = [
"attrs",
"bayespy",
"h5py",
"matplotlib",
"numpy",
"scipy"
],
extras_require = {
"dev": [
"versioneer",
"pytest",
"hypothesis",
],
},
keywords = [
"Statistical modeling",
"Bayesian statistics",
"Machine learning",
],
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {0}".format(meta["__license__"]),
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
long_description = read('README.md'),
long_description_content_type = "text/markdown",
) | 30.842105 | 75 | 0.513083 |
228f917fd03d25566ca49e7918c233c48b585119 | 88 | py | Python | fast-ml/main.py | gabrielstork/fast-ml | ce93c1263970ce7b958e1c3e932c70909bcc0e31 | [
"Apache-2.0"
] | 1 | 2021-07-26T15:37:30.000Z | 2021-07-26T15:37:30.000Z | fast-ml/main.py | gabrielstork/fast-ml | ce93c1263970ce7b958e1c3e932c70909bcc0e31 | [
"Apache-2.0"
] | null | null | null | fast-ml/main.py | gabrielstork/fast-ml | ce93c1263970ce7b958e1c3e932c70909bcc0e31 | [
"Apache-2.0"
] | null | null | null | import root
if __name__ == '__main__':
window = root.Root()
window.mainloop()
| 12.571429 | 26 | 0.636364 |
2290a77719ce3ea48bd13dc7fb8b6642fe413085 | 144 | py | Python | application/recommendations/__init__.py | QualiChain/qualichain_backend | cc6dbf1ae5d09e8d01cccde94326563b25d28b58 | [
"MIT"
] | null | null | null | application/recommendations/__init__.py | QualiChain/qualichain_backend | cc6dbf1ae5d09e8d01cccde94326563b25d28b58 | [
"MIT"
] | null | null | null | application/recommendations/__init__.py | QualiChain/qualichain_backend | cc6dbf1ae5d09e8d01cccde94326563b25d28b58 | [
"MIT"
] | null | null | null | from flask import Blueprint
recommendation_blueprint = Blueprint('recommendations', __name__)
from application.recommendations import routes | 20.571429 | 65 | 0.847222 |
2290bfd1c4b65da8f41f786b9bf73bcded25e4b1 | 4,203 | py | Python | predictors/scene_predictor.py | XenonLamb/higan | 6e7b47f91df23d8d6075d95921e664c9fa4f1306 | [
"MIT"
] | 83 | 2020-03-11T21:20:59.000Z | 2022-03-17T10:08:27.000Z | predictors/scene_predictor.py | XenonLamb/higan | 6e7b47f91df23d8d6075d95921e664c9fa4f1306 | [
"MIT"
] | 8 | 2020-04-16T14:37:42.000Z | 2021-09-20T20:18:06.000Z | predictors/scene_predictor.py | billzhonggz/higan | 168f24f7e3969bc8dc580e2c997463e76644c17f | [
"MIT"
] | 19 | 2020-04-13T02:55:51.000Z | 2022-01-28T06:37:25.000Z | # python 3.7
"""Predicts the scene category, attribute."""
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from .base_predictor import BasePredictor
from .scene_wideresnet import resnet18
__all__ = ['ScenePredictor']
NUM_CATEGORIES = 365
NUM_ATTRIBUTES = 102
FEATURE_DIM = 512
| 36.232759 | 79 | 0.647395 |
22915424775bb0c1cd95df8d2deeb30cca4451ba | 1,845 | py | Python | python_test.py | jackKiZhu/mypython | 43eac97bec07338ed3b8b9473d4e4fae26f7140c | [
"MIT"
] | null | null | null | python_test.py | jackKiZhu/mypython | 43eac97bec07338ed3b8b9473d4e4fae26f7140c | [
"MIT"
] | null | null | null | python_test.py | jackKiZhu/mypython | 43eac97bec07338ed3b8b9473d4e4fae26f7140c | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in the connection URI;
# consider loading them from configuration or the environment instead.
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:mysql@127.0.0.1:3306/python_github"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
if __name__ == "__main__":
    # Development entry point: rebuild the schema from scratch (destroys
    # all existing data!) and start the debug server.
    db.drop_all()
    db.create_all()
    app.run(debug=True)
| 32.368421 | 95 | 0.614634 |
2291547d5512bbb1bda47b665f654ae2a6cde5f2 | 652 | py | Python | src/etc/gec/3.py | iml1111/algorithm-study | f21f6f9f43235248f3496f034a899f2314ab6fcc | [
"MIT"
] | null | null | null | src/etc/gec/3.py | iml1111/algorithm-study | f21f6f9f43235248f3496f034a899f2314ab6fcc | [
"MIT"
] | null | null | null | src/etc/gec/3.py | iml1111/algorithm-study | f21f6f9f43235248f3496f034a899f2314ab6fcc | [
"MIT"
] | null | null | null | from collections import deque
if __name__ == '__main__':
print(solution(
3, [[1,2],[3,3]],
)) | 27.166667 | 63 | 0.45092 |
2293c25414f578bb3829ecd6692177ce5d098784 | 1,218 | py | Python | python/tree/0103_binary_tree_zigzag_level_order_traversal.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/tree/0103_binary_tree_zigzag_level_order_traversal.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/tree/0103_binary_tree_zigzag_level_order_traversal.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z |
def test_zigzag_level_order():
    # Build the classic example tree:
    #       3
    #      / \
    #     9  20
    #        / \
    #       15  7
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    # Level order alternates direction on every level (zigzag).
    expected = [[3], [20, 9], [15, 7]]
    assert Solution().zigzagLevelOrder(root) == expected
| 21 | 46 | 0.374384 |
22941cdcf437ea8fe9f771e15f228dacff7fbb5f | 5,452 | py | Python | plaso/parsers/winreg_plugins/usbstor.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | 2 | 2020-02-09T01:11:08.000Z | 2021-09-17T04:16:31.000Z | plaso/parsers/winreg_plugins/usbstor.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | null | null | null | plaso/parsers/winreg_plugins/usbstor.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | 1 | 2021-03-17T09:47:01.000Z | 2021-03-17T09:47:01.000Z | # -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
from __future__ import unicode_literals
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
# Register the USBStor plugin with the Windows Registry parser so it is invoked
# for matching registry keys. NOTE(review): USBStorPlugin is defined earlier in
# the original module (elided from this excerpt) — confirm against upstream.
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| 37.6 | 80 | 0.716985 |
2298b7f13b630423d0c12d2422ae336ad2ea8774 | 71 | py | Python | damn_vulnerable_python/evil.py | CodyKochmann/damn_vulnerable_python | 8a90ee3b70dddae96f9f0a8500ed9ba5693f3082 | [
"MIT"
] | 1 | 2018-05-22T03:27:54.000Z | 2018-05-22T03:27:54.000Z | damn_vulnerable_python/evil.py | CodyKochmann/damn_vulnerable_python | 8a90ee3b70dddae96f9f0a8500ed9ba5693f3082 | [
"MIT"
] | 2 | 2018-05-22T02:04:39.000Z | 2018-05-22T12:46:31.000Z | damn_vulnerable_python/evil.py | CodyKochmann/damn_vulnerable_python | 8a90ee3b70dddae96f9f0a8500ed9ba5693f3082 | [
"MIT"
] | null | null | null | ''' static analyzers are annoying so lets rename eval '''
evil = eval
| 17.75 | 57 | 0.704225 |
229d03edb58694ea053e0d0cf56108a3ca34b32c | 17,257 | py | Python | rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py | MIMUW-RL/spp-rl | 86b96cdd220cc4eae86f7cfd26924c69b498dcc6 | [
"MIT"
] | 7 | 2020-06-15T12:25:53.000Z | 2021-11-03T01:08:47.000Z | rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py | MIMUW-RL/spp-rl | 86b96cdd220cc4eae86f7cfd26924c69b498dcc6 | [
"MIT"
] | null | null | null | rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py | MIMUW-RL/spp-rl | 86b96cdd220cc4eae86f7cfd26924c69b498dcc6 | [
"MIT"
] | 1 | 2020-12-21T11:21:22.000Z | 2020-12-21T11:21:22.000Z | import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
| 39.042986 | 157 | 0.589963 |
229f21bdd7be594d33b1093f3cb181d2690aa326 | 3,714 | py | Python | pyroute/poi_osm.py | ftrimble/route-grower | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | [
"Apache-2.0"
] | null | null | null | pyroute/poi_osm.py | ftrimble/route-grower | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | [
"Apache-2.0"
] | null | null | null | pyroute/poi_osm.py | ftrimble/route-grower | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#----------------------------------------------------------------
# OSM POI handler for pyroute
#
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
from xml.sax import make_parser, handler
from poi_base import *
import os
from xml.sax._exceptions import SAXParseException
import urllib
if __name__ == "__main__":
nodes = osmPoiModule(None)
nodes.sort({'valid':True,'lat':51.3,'lon':-0.2})
#nodes.report()
| 29.244094 | 74 | 0.630856 |
22a0ba4419e5d5479b0eea3b85e6ded458dffecb | 13,025 | py | Python | pelutils/logger.py | peleiden/pelutils | 9860734c0e06481aa58a9f767a4cfb5129cb48ec | [
"BSD-3-Clause"
] | 3 | 2021-02-28T13:03:12.000Z | 2022-01-01T09:53:33.000Z | pelutils/logger.py | peleiden/pelutils | 9860734c0e06481aa58a9f767a4cfb5129cb48ec | [
"BSD-3-Clause"
] | 72 | 2020-10-13T09:20:01.000Z | 2022-02-26T09:12:21.000Z | pelutils/logger.py | peleiden/pelutils | 9860734c0e06481aa58a9f767a4cfb5129cb48ec | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import os
import traceback as tb
from collections import defaultdict
from enum import IntEnum
from functools import update_wrapper
from itertools import chain
from typing import Any, Callable, DefaultDict, Generator, Iterable, Optional
from pelutils import get_timestamp, get_repo
from .format import RichString
# Levels routed to stderr instead of stdout when printing.
_STDERR_LEVELS = { LogLevels.CRITICAL, LogLevels.ERROR, LogLevels.WARNING }
# Rich color names/hex used when rendering log lines.
# https://rich.readthedocs.io/en/stable/appendix/colors.html
_TIMESTAMP_COLOR = "#72b9e0"
# Per-level color for the level tag in printed output.
_LEVEL_FORMAT = {
    LogLevels.SECTION: "bright_yellow",
    LogLevels.CRITICAL: "red1",
    LogLevels.ERROR: "red3",
    LogLevels.WARNING: "gold3",
    LogLevels.INFO: "chartreuse3",
    LogLevels.DEBUG: "deep_sky_blue1",
}
def configure(
    self,
    fpath: Optional[str] = None,  # Path to place logger. Any missing directories are created
    title: Optional[str] = None,  # Title on first line of logfile
    default_seperator = "\n",
    include_micros = False,  # Include microseconds in timestamps
    log_commit = False,  # Log commit of git repository
    logger_name = "default",  # Name of logger
    append = False,  # Set to True to append to old log file instead of overwriting it
    print_level = LogLevels.INFO,  # Highest level that will be printed. All will be logged. None for no print
):
    """ Configure a logger. If not called, the logger will act like a print statement """
    # Refuse to configure the same named logger twice.
    if logger_name in self._loggers:
        raise LoggingException("Logger '%s' already exists. Did you call log.configure(...) twice?" % logger_name)
    # Configuring while log collection is active would interleave state.
    if self._collect:
        raise LoggingException("Cannot configure a new logger while using collect_logs")
    self._selected_logger = logger_name
    self._loggers[logger_name]["fpath"] = os.path.realpath(fpath) if fpath else None
    self._loggers[logger_name]["default_sep"] = default_seperator
    self._loggers[logger_name]["include_micros"] = include_micros
    self._loggers[logger_name]["level_mgr"] = _LevelManager()
    # print_level=None disables printing by using a level above the highest defined one.
    self._loggers[logger_name]["print_level"] = print_level or len(LogLevels) + 1
    if fpath is not None:
        # Create missing parent directories, then truncate or append the log file.
        dirs = os.path.split(fpath)[0]
        if dirs:
            os.makedirs(dirs, exist_ok=True)
        exists = os.path.exists(fpath)
        with open(fpath, "a" if append else "w", encoding="utf-8") as logfile:
            # When appending to an existing file, separate runs with a blank gap.
            logfile.write("\n\n" if append and exists else "")
    if title is not None:
        self.section(title + "\n")
    if log_commit:
        # Record repository and commit for reproducibility of the run.
        repo, commit = get_repo()
        if repo is not None:
            self.debug(
                "Executing in repository %s" % repo,
                "Commit: %s\n" % commit,
            )
        else:
            self.debug("Unable to find repository that code was executed in")
def level(self, level: LogLevels):
    """ Log only at given level and above. Use with a with block """
    # Delegates to the level manager, which acts as a context manager.
    return self._level_mgr.with_level(level)
def __call__(self, *tolog, with_info=True, sep=None, with_print=None, level: LogLevels=LogLevels.INFO):
    # Calling the logger instance directly logs at INFO by default.
    self._log(*tolog, level=level, with_info=with_info, sep=sep, with_print=with_print)
def _write_to_log(self, content: RichString):
    # Append one rendered line to the log file; no-op when no file is configured.
    if self._fpath is not None:
        with open(self._fpath, "a", encoding="utf-8") as logfile:
            logfile.write(f"{content}\n")
def input(self, prompt: str | Iterable[str] = "") -> str | Generator[str]:
    """
    Get user input and log both prompt an input
    If prompt is an iterable, a generator of user inputs will be returned
    """
    # Note in the log that execution is blocked on user input (not printed).
    self._log("Waiting for user input", with_print=False)
    if isinstance(prompt, str):
        return self._input(prompt)
    else:
        # Lazy: each prompt is shown only when the generator is advanced.
        return (self._input(p) for p in prompt)
def clean(self):
    """ Resets the loggers and removes all existing logger configurations """
    # Fresh state: same defaults as at module import time.
    self._loggers = defaultdict(dict)
    self._selected_logger = "default"
# Module-level singleton logger instance used throughout the package.
log = _Logger()
| 36.080332 | 123 | 0.61666 |
22a11f4324f76cab0ee6ba121cab810e162f6104 | 10,942 | py | Python | tests/test_metrics.py | aaxelb/django-elasticsearch-metrics | 8a02ffc57f57257843834d4f84c41480f4e27fbd | [
"MIT"
] | 5 | 2018-08-21T19:48:39.000Z | 2021-04-01T22:11:31.000Z | tests/test_metrics.py | aaxelb/django-elasticsearch-metrics | 8a02ffc57f57257843834d4f84c41480f4e27fbd | [
"MIT"
] | 18 | 2018-07-26T16:04:53.000Z | 2018-08-30T19:31:30.000Z | tests/test_metrics.py | aaxelb/django-elasticsearch-metrics | 8a02ffc57f57257843834d4f84c41480f4e27fbd | [
"MIT"
] | 5 | 2019-04-01T17:47:08.000Z | 2022-01-28T17:23:11.000Z | import mock
import pytest
import datetime as dt
from django.utils import timezone
from elasticsearch_metrics import metrics
from elasticsearch_dsl import IndexTemplate
from elasticsearch_metrics import signals
from elasticsearch_metrics.exceptions import (
IndexTemplateNotFoundError,
IndexTemplateOutOfSyncError,
)
from tests.dummyapp.metrics import (
DummyMetric,
DummyMetricWithExplicitTemplateName,
DummyMetricWithExplicitTemplatePattern,
)
| 39.501805 | 88 | 0.683787 |
22a124507f9c19ec78061c640c8a18dd5ea530ee | 180 | py | Python | 6 kyu/SumFibs.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 6 kyu/SumFibs.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 6 kyu/SumFibs.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | from functools import lru_cache | 30 | 65 | 0.661111 |
22a1b8da531316fb6c21092916dd14f6945d1c1d | 1,924 | py | Python | tests/unit/test_iris_helpers.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_iris_helpers.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 2 | 2022-03-02T16:16:06.000Z | 2022-03-10T12:58:49.000Z | tests/unit/test_iris_helpers.py | valeriupredoi/ESMValCore | b46b948c47d8579d997b28501f8588f5531aa354 | [
"Apache-2.0"
] | null | null | null | """Tests for :mod:`esmvalcore.iris_helpers`."""
import datetime
import iris
import numpy as np
import pytest
from cf_units import Unit
from esmvalcore.iris_helpers import date2num, var_name_constraint
def test_var_name_constraint(cubes):
    """Test :func:`esmvalcore.iris_helpers.var_name_constraint`."""
    # 'a' matches the two cubes whose var_name is 'a' (long_names differ).
    out_cubes = cubes.extract(var_name_constraint('a'))
    assert out_cubes == iris.cube.CubeList([
        iris.cube.Cube(0.0, var_name='a', long_name='a'),
        iris.cube.Cube(0.0, var_name='a', long_name='b'),
    ])
    # 'b' only occurs as a long_name, never as a var_name -> no matches.
    out_cubes = cubes.extract(var_name_constraint('b'))
    assert out_cubes == iris.cube.CubeList([])
    out_cubes = cubes.extract(var_name_constraint('c'))
    assert out_cubes == iris.cube.CubeList([
        iris.cube.Cube(0.0, var_name='c', long_name='d'),
    ])
    # extract_cube demands exactly one match: 'a' has two, 'b' has none.
    with pytest.raises(iris.exceptions.ConstraintMismatchError):
        cubes.extract_cube(var_name_constraint('a'))
    with pytest.raises(iris.exceptions.ConstraintMismatchError):
        cubes.extract_cube(var_name_constraint('b'))
    out_cube = cubes.extract_cube(var_name_constraint('c'))
    assert out_cube == iris.cube.Cube(0.0, var_name='c', long_name='d')
| 33.172414 | 72 | 0.677755 |
22a26cac9546e3d04238eea2e14e595751d5270c | 11,429 | py | Python | geo_regions.py | saeed-moghimi-noaa/Maxelev_plot | 5bb701d8cb7d64db4c89ea9d7993a8269e57e504 | [
"CC0-1.0"
] | null | null | null | geo_regions.py | saeed-moghimi-noaa/Maxelev_plot | 5bb701d8cb7d64db4c89ea9d7993a8269e57e504 | [
"CC0-1.0"
] | null | null | null | geo_regions.py | saeed-moghimi-noaa/Maxelev_plot | 5bb701d8cb7d64db4c89ea9d7993a8269e57e504 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Geo regions for map plot
"""
__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "moghimis@gmail.com"
import matplotlib.pyplot as plt
from collections import defaultdict
# Per-variable plot defaults; defaultdict(dict) lets new variables be added
# with plain item assignment elsewhere.
defs = defaultdict(dict)
defs['elev'] = {
    'var': 'elev',           # variable key
    'vmin': -1,              # colorbar lower bound
    'vmax': 1,               # colorbar upper bound
    'label': 'Elev. [m]',    # colorbar label
    'format': '%3.1g',       # tick label format
    'cmap': plt.cm.jet_r,    # reversed jet colormap
}
| 34.116418 | 52 | 0.441683 |
22a33ada09a97d4c429f1c99f360e9ceb37d5903 | 771 | py | Python | figures/plot_log_figure_paper.py | davidADSP/deepAI_paper | f612e80aa0e8507444228940c54554a83bc16119 | [
"MIT"
] | 21 | 2017-09-09T18:41:40.000Z | 2022-03-16T06:50:00.000Z | figures/plot_log_figure_paper.py | davidADSP/deepAI_paper | f612e80aa0e8507444228940c54554a83bc16119 | [
"MIT"
] | null | null | null | figures/plot_log_figure_paper.py | davidADSP/deepAI_paper | f612e80aa0e8507444228940c54554a83bc16119 | [
"MIT"
] | 6 | 2017-09-09T18:41:53.000Z | 2022-02-25T08:11:40.000Z | import numpy
import matplotlib.pyplot as plt
fig_convergence = plt.figure(1,figsize=(12,6))
x = numpy.loadtxt('log_deepAI_paper_nonlin_action_long.txt')
plt.subplot(122)
plt.plot(x[:,0])
plt.xlim([0,500])
plt.ylim([-10,200])
plt.xlabel('Steps')
plt.ylabel('Free Action')
plt.axvline(x=230.0,linestyle=':')
plt.axvline(x=250.0,linestyle=':')
plt.axvline(x=270.0,linestyle=':')
ax = plt.subplot(121)
plt.plot(x[:,0])
plt.ylim([-10,200])
ax.axvspan(0, 500, alpha=0.3, color='red')
plt.xlim([0,30000])
plt.xlabel('Steps')
plt.ylabel('Free Action')
fig_convergence.subplots_adjust(left=0.07, bottom=0.1, right=0.95, top=0.95,
wspace=0.2, hspace=0.15)
fig_convergence.savefig('fig_convergence.pdf')
plt.show()
| 24.09375 | 76 | 0.657588 |
22a452c901b5e5a2bc4953164caa1bd099196d19 | 2,938 | py | Python | setup.py | matiasgrana/nagios_sql | 7858b852cf539da418a1a289e8c06e386b62287a | [
"MIT"
] | null | null | null | setup.py | matiasgrana/nagios_sql | 7858b852cf539da418a1a289e8c06e386b62287a | [
"MIT"
] | 4 | 2017-08-08T13:42:39.000Z | 2019-11-25T10:29:29.000Z | setup.py | matiasgrana/nagios_sql | 7858b852cf539da418a1a289e8c06e386b62287a | [
"MIT"
] | 4 | 2019-01-28T13:58:09.000Z | 2019-11-29T14:01:07.000Z | #! python3
# Help from: http://www.scotttorborg.com/python-packaging/minimal.html
# https://docs.python.org/3/distutils/commandref.html#sdist-cmd
# https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# https://docs.python.org/3.4/tutorial/modules.html
# Install it with python setup.py install
# Or use: python setup.py develop (changes to the source files will be
# immediately available)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
from setuptools import setup, find_packages
import os
from os import path
import rstcheck
# Execute src/version.py to obtain __version__ without importing the package
# (importing could fail before its dependencies are installed).
exec(open('src/version.py').read())
# __version__ comes when execution src/version.py
version = __version__
here = path.abspath(path.dirname(__file__))

# Runtime dependencies: one requirement per non-empty line of requirements.txt.
with open(os.path.join(here, 'requirements.txt')) as f:
    requires = [x.strip() for x in f if x.strip()]
def check_readme(file='README.rst'):
    """
    Checks readme rst file, to ensure it will upload to pypi and be formatted
    correctly.

    :param file: path of the reStructuredText file to validate.
    :raises SystemExit: if the file contains reST errors; the exit message
        lists every error found (the previous version reported only the
        first error even though it claimed to show "errors").
    """
    # Get the long description from the relevant file
    with open(file, encoding='utf-8') as f:
        readme_content = f.read()

    errors = list(rstcheck.check(readme_content))
    if errors:
        # Report all errors, one per line, so the author can fix them in one pass.
        details = '\n'.join(error.message for error in errors)
        msg = 'There are errors in {}, errors \n {}'.format(file, details)
        raise SystemExit(msg)
    else:
        msg = 'No errors in {}'.format(file)
        print(msg)
# Build the PyPI long_description from README.rst and validate it first.
readme_file = path.join(here, 'README.rst')
# Get the long description from the relevant file
with open(readme_file, encoding='utf-8') as f:
    long_description = f.read()

# Abort the build early if the README has reST errors (would break the PyPI page).
check_readme(readme_file)

# Define setuptools specifications
setup(name='nagios_sql',
      version=version,  # read from src/version.py above
      description='Nagios plugin with sqlchecks',
      long_description=long_description, # this is the file README.rst
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3 :: Only',
          'Programming Language :: SQL',
          'Topic :: System :: Monitoring',
          'Topic :: Database :: Database Engines/Servers',
          'Topic :: System :: Systems Administration'
      ],
      url='https://github.com/pablodav/nagios_sql',
      author='Pablo Estigarribia',
      author_email='pablodav@gmail.com',
      license='MIT',
      packages=find_packages(),
      #include_package_data=True,
      #package_data={
      #    'data': 'src/data/*',
      #},
      #data_files=[('VERSION', ['src/VERSION'])],
      # Console entry point: installs the `nagios_sql` command.
      entry_points={
          'console_scripts': [
              'nagios_sql = src.nagios_sql:main'
          ]
      },
      install_requires=requires,
      tests_require=['pytest',
                     'pytest-cov'],
      zip_safe=False)
| 32.285714 | 84 | 0.636147 |
22a4a9fee06a32718975fa561659e922ae3f756e | 1,838 | py | Python | textnn/utils/test/test_progress_iterator.py | tongr/TextNN | a0294a197d3be284177214e8f019e9fed13dff1a | [
"Apache-2.0"
] | 1 | 2019-03-08T12:12:45.000Z | 2019-03-08T12:12:45.000Z | textnn/utils/test/test_progress_iterator.py | tongr/TextNN | a0294a197d3be284177214e8f019e9fed13dff1a | [
"Apache-2.0"
] | 16 | 2019-02-14T11:51:30.000Z | 2019-06-11T08:25:53.000Z | textnn/utils/test/test_progress_iterator.py | tongr/TextNN | a0294a197d3be284177214e8f019e9fed13dff1a | [
"Apache-2.0"
] | null | null | null | import io
import sys
from textnn.utils import ProgressIterator
#inspired by https://stackoverflow.com/a/34738440
| 34.679245 | 76 | 0.541349 |
22a5a69bd0005b87e47d0ff6d4ecd35b5d2cdf15 | 159 | py | Python | reach.py | NIKH0610/class5-homework | d4cfb1b28656a37002dff6b1b20bae1253b2ae80 | [
"MIT"
] | null | null | null | reach.py | NIKH0610/class5-homework | d4cfb1b28656a37002dff6b1b20bae1253b2ae80 | [
"MIT"
] | null | null | null | reach.py | NIKH0610/class5-homework | d4cfb1b28656a37002dff6b1b20bae1253b2ae80 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
housing_df = pd.read_csv(filepath_or_buffer='~/C:\Users\nikhi\NIKH0610\class5-homework\toys-datasets\boston') | 31.8 | 109 | 0.805031 |
22a5b5de1219dd90ee90a5e573d5793e913c42ca | 379 | py | Python | queries/general_queries.py | souparvo/airflow-plugins | 0ca7fa634335145b69671054680d5d67de329644 | [
"BSD-3-Clause"
] | null | null | null | queries/general_queries.py | souparvo/airflow-plugins | 0ca7fa634335145b69671054680d5d67de329644 | [
"BSD-3-Clause"
] | null | null | null | queries/general_queries.py | souparvo/airflow-plugins | 0ca7fa634335145b69671054680d5d67de329644 | [
"BSD-3-Clause"
] | null | null | null |
def insert_metatable():
    """Return a Jinja-templated SQL statement (Airflow params/XCom placeholders)
    that records a row count for a load into the target metadata table.
    """
    statement = """
    INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }} VALUES
    ('{{ params.schema }}', '{{ params.table }}', {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }}, current_timestamp(), '{{ params.type }}');
    """
    return statement
22a5f31f1b502fe38b7dada2cca91916da3eb320 | 24,973 | py | Python | pyvisa_py/highlevel.py | Handfeger/pyvisa-py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | [
"MIT"
] | 1 | 2019-03-25T20:26:16.000Z | 2019-03-25T20:26:16.000Z | pyvisa_py/highlevel.py | Handfeger/pyvisa-py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | [
"MIT"
] | null | null | null | pyvisa_py/highlevel.py | Handfeger/pyvisa-py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Highlevel wrapper of the VISA Library.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pyvisa import constants, highlevel, rname
from pyvisa.constants import StatusCode
from pyvisa.typing import VISAEventContext, VISARMSession, VISASession
from pyvisa.util import LibraryPath
from . import sessions
from .common import logger
def _register(self, obj: object) -> VISASession:
    """Create a unique random session handle for *obj*, store it in the
    sessions dictionary, and return the handle.
    """
    # Draw 7-digit handles until one is free; collisions are extremely rare.
    handle = random.randint(1000000, 9999999)
    while handle in self.sessions:
        handle = random.randint(1000000, 9999999)
    self.sessions[handle] = obj
    return handle
def open(
    self,
    session: VISARMSession,
    resource_name: str,
    access_mode: constants.AccessModes = constants.AccessModes.no_lock,
    open_timeout: int = constants.VI_TMO_IMMEDIATE,
) -> Tuple[VISASession, StatusCode]:
    """Opens a session to the specified resource.

    Corresponds to viOpen function of the VISA library.

    Parameters
    ----------
    session : VISARMSession
        Resource Manager session (should always be a session returned from
        open_default_resource_manager()).
    resource_name : str
        Unique symbolic name of a resource.
    access_mode : constants.AccessModes, optional
        Specifies the mode by which the resource is to be accessed.
    open_timeout : int
        Specifies the maximum time period (in milliseconds) that this
        operation waits before returning an error. constants.VI_TMO_IMMEDIATE
        and constants.VI_TMO_INFINITE are used as min and max.

    Returns
    -------
    VISASession
        Unique logical identifier reference to a session
    StatusCode
        Return value of the library call.

    """
    # Coerce the timeout early so a bad value fails loudly here rather than
    # deep inside a session implementation.
    try:
        open_timeout = int(open_timeout)
    except ValueError:
        raise ValueError(
            "open_timeout (%r) must be an integer (or compatible type)"
            % open_timeout
        )

    # Parsing the resource name determines which Session subclass handles it.
    try:
        parsed = rname.parse_resource_name(resource_name)
    except rname.InvalidResourceName:
        # Invalid names yield a null session handle plus an error status.
        return (
            VISASession(0),
            self.handle_return_value(None, StatusCode.error_invalid_resource_name),
        )

    # NOTE(review): access_mode is accepted for viOpen API compatibility but is
    # not forwarded to the session constructor here — confirm upstream intent.
    cls = sessions.Session.get_session_class(
        parsed.interface_type_const, parsed.resource_class
    )

    sess = cls(session, resource_name, parsed, open_timeout)

    # Register the new session object and hand back its random handle.
    return self._register(sess), StatusCode.success
def clear(self, session: VISASession) -> StatusCode:
    """Clear a device (viClear).

    *session* is the unique logical identifier of the session; returns the
    VISA status code of the operation.
    """
    sess = self.sessions.get(session)
    if sess is None:
        # Unknown handle -> invalid-object status, per VISA semantics.
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(session, sess.clear())
def flush(
    self, session: VISASession, mask: constants.BufferOperation
) -> StatusCode:
    """Flush formatted-I/O and/or serial-communication buffers (viFlush).

    *mask* selects the buffer action(s); values may be combined with ``|``,
    but multiple operations on a single buffer cannot be combined. Returns
    the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(session, sess.flush(mask))
def gpib_command(
    self, session: VISASession, command_byte: bytes
) -> Tuple[int, StatusCode]:
    """Write GPIB command bytes on the bus (viGpibCommand).

    Returns ``(bytes_written, status_code)``; an unknown session handle
    yields ``(0, error_invalid_object)``.
    """
    if session not in self.sessions:
        return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
    written, status = self.sessions[session].gpib_command(command_byte)
    return written, self.handle_return_value(session, status)
def assert_trigger(
    self, session: VISASession, protocol: constants.TriggerProtocol
) -> StatusCode:
    """Assert a software or hardware trigger (viAssertTrigger).

    *protocol* selects the trigger protocol; returns the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(session, sess.assert_trigger(protocol))
def gpib_send_ifc(self, session: VISASession) -> StatusCode:
    """Pulse the interface clear line (IFC) for at least 100 us (viGpibSendIFC).

    Returns the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(session, sess.gpib_send_ifc())
def gpib_control_ren(
    self, session: VISASession, mode: constants.RENLineOperation
) -> StatusCode:
    """Control the GPIB Remote Enable (REN) line (viGpibControlREN).

    *mode* sets the REN line state and, optionally, the device remote/local
    state. Returns the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(session, sess.gpib_control_ren(mode))
def gpib_control_atn(
    self, session: VISASession, mode: constants.ATNLineOperation
) -> StatusCode:
    """Set the ATN line state and local active controller state (viGpibControlATN).

    Returns the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(session, sess.gpib_control_atn(mode))
def gpib_pass_control(
    self, session: VISASession, primary_address: int, secondary_address: int
) -> StatusCode:
    """Tell a GPIB device to become controller in charge (viGpibPassControl).

    Pass constants.VI_NO_SEC_ADDR as *secondary_address* when the target
    device has no secondary address. Returns the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    return self.handle_return_value(
        session, sess.gpib_pass_control(primary_address, secondary_address)
    )
def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]:
    """Read the service-request status byte (viReadSTB).

    Returns ``(status_byte, status_code)``; an unknown session handle
    yields ``(0, error_invalid_object)``.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
    stb, status = sess.read_stb()
    return stb, self.handle_return_value(session, status)
def close(
    self, session: Union[VISASession, VISAEventContext, VISARMSession]
) -> StatusCode:
    """Close the specified session, event, or find list (viClose).

    Returns the VISA status code.
    """
    sess = self.sessions.get(session)
    if sess is None:
        return self.handle_return_value(session, StatusCode.error_invalid_object)
    # The resource-manager session maps to the library object itself;
    # closing it is a successful no-op.
    if sess is self:
        return self.handle_return_value(session, StatusCode.success)
    return self.handle_return_value(session, sess.close())
def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]:
    """Return a session to the Default Resource Manager resource (viOpenDefaultRM).

    Returns ``(rm_session_handle, status_code)``.
    """
    # The resource manager is backed by the library object itself.
    rm_session = cast(VISARMSession, self._register(self))
    return rm_session, self.handle_return_value(None, StatusCode.success)
def list_resources(
    self, session: VISARMSession, query: str = "?*::INSTR"
) -> Tuple[str, ...]:
    """Return resource names of all connected devices matching *query*.

    *query* is a VISA resource regular expression; resources are gathered
    from every registered session type and then filtered.
    """
    all_resources: List[str] = []
    for _, session_cls in sessions.Session.iter_valid_session_classes():
        all_resources.extend(session_cls.list_resources())
    return rname.filter(all_resources, query)
def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
    """Read up to *count* bytes synchronously (viRead).

    Returns ``(data, status_code)``; an unknown session handle yields
    ``(b"", error_invalid_object)``.
    """
    # Dispatch to the read method of the underlying session object.
    sess = self.sessions.get(session)
    if sess is None:
        return b"", self.handle_return_value(session, StatusCode.error_invalid_object)
    data, status = sess.read(count)
    return data, self.handle_return_value(session, status)
def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
    """Write *data* to a device or interface synchronously (viWrite).

    Returns ``(bytes_transferred, status_code)``; an unknown session handle
    yields ``(0, error_invalid_object)``.
    """
    # Dispatch to the write method of the underlying session object.
    sess = self.sessions.get(session)
    if sess is None:
        return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
    written, status = sess.write(data)
    return written, self.handle_return_value(session, status)
def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
    """Read through the formatted I/O read buffer (viBufRead).

    This implementation does not maintain a separate formatted-I/O buffer,
    so it simply delegates to :meth:`read`.
    """
    return self.read(session, count)
def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
    """Write through the formatted I/O write buffer (viBufWrite).

    This implementation does not maintain a separate formatted-I/O buffer,
    so it simply delegates to :meth:`write`.
    """
    return self.write(session, data)
def get_attribute(
self,
session: Union[VISASession, VISAEventContext, VISARMSession],
attribute: Union[constants.ResourceAttribute, constants.EventAttribute],
) -> Tuple[Any, StatusCode]:
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
None,
self.handle_return_value(session, StatusCode.error_invalid_object),
)
state, status_code = sess.get_attribute(
cast(constants.ResourceAttribute, attribute)
)
return state, self.handle_return_value(session, status_code)
def set_attribute(
self,
session: VISASession,
attribute: constants.ResourceAttribute,
attribute_state: Any,
) -> StatusCode:
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].set_attribute(attribute, attribute_state),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def lock(
self,
session: VISASession,
lock_type: constants.Lock,
timeout: int,
requested_key: Optional[str] = None,
) -> Tuple[str, StatusCode]:
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
str
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
key, status_code = sess.lock(lock_type, timeout, requested_key)
return key, self.handle_return_value(session, status_code)
def unlock(self, session: VISASession) -> StatusCode:
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.unlock())
    def disable_event(
        self,
        session: VISASession,
        event_type: constants.EventType,
        mechanism: constants.EventMechanism,
    ) -> StatusCode:
        """Disable notification for an event type(s) via the specified mechanism(s).

        Corresponds to viDisableEvent function of the VISA library.

        Parameters
        ----------
        session : VISASession
            Unique logical identifier to a session.
        event_type : constants.EventType
            Event type.
        mechanism : constants.EventMechanism
            Event handling mechanisms to be disabled.

        Returns
        -------
        StatusCode
            Return value of the library call.

        """
        # NOTE(review): not implemented in this backend -- `pass` returns None
        # despite the StatusCode annotation; confirm whether callers expect a
        # status value here.
        pass
    def discard_events(
        self,
        session: VISASession,
        event_type: constants.EventType,
        mechanism: constants.EventMechanism,
    ) -> StatusCode:
        """Discard event occurrences for a given type and mechanisms in a session.

        Corresponds to viDiscardEvents function of the VISA library.

        Parameters
        ----------
        session : VISASession
            Unique logical identifier to a session.
        event_type : constants.EventType
            Logical event identifier.
        mechanism : constants.EventMechanism
            Specifies event handling mechanisms to be discarded.

        Returns
        -------
        StatusCode
            Return value of the library call.

        """
        # NOTE(review): not implemented in this backend -- `pass` returns None
        # despite the StatusCode annotation; confirm whether callers expect a
        # status value here.
        pass
| 31.893997 | 88 | 0.610019 |
22a63f951029bec63e4f61cb892764b3e55fdcae | 13,219 | py | Python | detectron/utils/webly_vis.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 23 | 2020-03-30T11:48:33.000Z | 2022-03-11T06:34:31.000Z | detectron/utils/webly_vis.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 9 | 2020-09-28T07:15:16.000Z | 2022-03-25T08:11:06.000Z | detectron/utils/webly_vis.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 10 | 2020-03-30T11:48:34.000Z | 2021-06-02T06:12:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def dump_proto_files(model, output_dir):
    """Save prototxt descriptions of the training network and parameter
    initialization network."""
    # Both nets are serialized under their own protobuf names inside
    # output_dir; the main net first, then the parameter-initialization net.
    for net in (model.net, model.param_init_net):
        proto = net.Proto()
        with open(os.path.join(output_dir, proto.name), 'w') as fid:
            fid.write(str(proto))
| 37.341808 | 87 | 0.504577 |
22a72547959131b60da1f328cdda0445ca0ed7eb | 13,740 | py | Python | salt/runner.py | StepOneInc/salt | ee210172c37bf0cee224794cd696b38e288e4073 | [
"Apache-2.0"
] | 1 | 2016-04-26T03:42:32.000Z | 2016-04-26T03:42:32.000Z | salt/runner.py | apergos/salt | 106c715d495a9c2bd747c8ca75745236b0d7fb41 | [
"Apache-2.0"
] | null | null | null | salt/runner.py | apergos/salt | 106c715d495a9c2bd747c8ca75745236b0d7fb41 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import collections
import logging
import time
import sys
import multiprocessing
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.utils
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.error import raise_error
from salt.utils.event import tagify
import salt.ext.six as six
log = logging.getLogger(__name__)
| 37.135135 | 118 | 0.527365 |
22a8b0a10c5a619e3d02f83382579627b355c5a9 | 186 | py | Python | .venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | 1 | 2020-08-07T16:09:57.000Z | 2020-08-07T16:09:57.000Z | .venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | # For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html
import os
| 31 | 110 | 0.747312 |
22a8bf88232fd22e170f70f6a4d8e344cbe114aa | 4,257 | py | Python | pong-pg.py | s-gv/pong-keras | 38a0f25ae0e628f357512d085dc957720d83ece2 | [
"0BSD"
] | null | null | null | pong-pg.py | s-gv/pong-keras | 38a0f25ae0e628f357512d085dc957720d83ece2 | [
"0BSD"
] | null | null | null | pong-pg.py | s-gv/pong-keras | 38a0f25ae0e628f357512d085dc957720d83ece2 | [
"0BSD"
] | null | null | null | # Copyright (c) 2019 Sagar Gubbi. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import numpy as np
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from tensorflow.keras.optimizers import RMSprop, Adam
import tensorflow.keras.backend as K
# Deterministic Pong environment; Atari action codes 2/3 move the paddle
# up/down.
env = gym.make('PongDeterministic-v4')
UP_ACTION = 2
DOWN_ACTION = 3
ACTIONS = [UP_ACTION, DOWN_ACTION]
# Policy network: maps a 2*6400 input vector (presumably two stacked 80x80
# preprocessed frames -- TODO confirm against the training loop) to a
# softmax probability over the two paddle actions.
model = Sequential([
    Dense(512, activation='elu', input_shape=(2*6400,)),
    Dense(len(ACTIONS), activation='softmax'),
])
# Sparse labels: the sampled action index serves as the target class.
model.compile(optimizer=RMSprop(1e-4), loss='sparse_categorical_crossentropy')
gamma = 0.99  # discount factor used by discount_rewards
# preprocess frames
def prepro(I):
    """Preprocess a 210x160x3 uint8 Atari frame into a 6400 (80x80) 1D float vector.

    Adapted from http://karpathy.github.io/2016/05/31/rl/

    Parameters
    ----------
    I : np.ndarray or None
        Raw RGB frame of shape (210, 160, 3). None yields an all-zero
        vector (useful as the "previous frame" before the first step).

    Returns
    -------
    np.ndarray
        Flat float64 vector of length 6400: 1.0 for paddles/ball, 0.0 elsewhere.
    """
    if I is None:
        return np.zeros((6400,))
    I = I[35:195]  # crop out the score bar and bottom border
    # Downsample by factor of 2 and keep one color channel. Basic slicing
    # returns a *view*, so copy before thresholding to avoid mutating the
    # caller's frame in place.
    I = I[::2, ::2, 0].copy()
    I[I == 144] = 0  # erase background (background type 1)
    I[I == 109] = 0  # erase background (background type 2)
    I[I != 0] = 1  # everything else (paddles, ball) just set to 1
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `float` (i.e. float64) is the supported spelling.
    return I.astype(float).ravel()
def discount_rewards(r):
    """ take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ """
    n = len(r)
    discounted = np.zeros((n,))
    acc = 0
    # Walk backwards so each position accumulates gamma-discounted future
    # rewards; a nonzero reward marks a game boundary (Pong specific) and
    # restarts the running sum.
    for idx in range(n - 1, -1, -1):
        if r[idx] != 0:
            acc = 0
        acc = acc * gamma + r[idx]
        discounted[idx] = acc
    return discounted
if __name__ == '__main__':
    main()  # `main` is defined elsewhere in this file (outside this chunk)
| 33.257813 | 116 | 0.597369 |
22a8ec1abea9d6f95b972cc7b4d65ddb840ef8b2 | 2,962 | py | Python | dexp/cli/dexp_commands/crop.py | JoOkuma/dexp | 6d9003384605b72f387d38b5befa29e4e2246af8 | [
"BSD-3-Clause"
] | null | null | null | dexp/cli/dexp_commands/crop.py | JoOkuma/dexp | 6d9003384605b72f387d38b5befa29e4e2246af8 | [
"BSD-3-Clause"
] | null | null | null | dexp/cli/dexp_commands/crop.py | JoOkuma/dexp | 6d9003384605b72f387d38b5befa29e4e2246af8 | [
"BSD-3-Clause"
] | null | null | null | import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
| 32.549451 | 117 | 0.660365 |
22a950c4c4a0d6a5d8ae35400f9dc583d0a56a66 | 2,287 | py | Python | morse_DMT/write_dipha_file_3d_revise.py | YinuoJin/DMT_loss | c6e66cb7997b7cd5616156faaf294e350e77c4c2 | [
"MIT"
] | 1 | 2021-12-06T13:06:55.000Z | 2021-12-06T13:06:55.000Z | morse_DMT/write_dipha_file_3d_revise.py | YinuoJin/DMT_loss | c6e66cb7997b7cd5616156faaf294e350e77c4c2 | [
"MIT"
] | null | null | null | morse_DMT/write_dipha_file_3d_revise.py | YinuoJin/DMT_loss | c6e66cb7997b7cd5616156faaf294e350e77c4c2 | [
"MIT"
] | null | null | null | import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
| 29.701299 | 119 | 0.584609 |
22a96894a0336c7d7df8e78f4c4c6ea30cbd0530 | 1,507 | py | Python | microservices/validate/tools/validates.py | clodonil/pipeline_aws_custom | 8ca517d0bad48fe528461260093f0035f606f9be | [
"Apache-2.0"
] | null | null | null | microservices/validate/tools/validates.py | clodonil/pipeline_aws_custom | 8ca517d0bad48fe528461260093f0035f606f9be | [
"Apache-2.0"
] | null | null | null | microservices/validate/tools/validates.py | clodonil/pipeline_aws_custom | 8ca517d0bad48fe528461260093f0035f606f9be | [
"Apache-2.0"
] | null | null | null | """
Tools para validar o arquivo template recebido do SQS
"""
| 25.542373 | 73 | 0.568016 |
22aabcb0f1d4d4e04e99859300806fd807e56ef4 | 1,223 | py | Python | MetropolisMCMC.py | unrealTOM/MC | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | [
"MIT"
] | 4 | 2020-04-11T09:54:27.000Z | 2021-08-18T07:06:52.000Z | MetropolisMCMC.py | unrealTOM/MC | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | [
"MIT"
] | null | null | null | MetropolisMCMC.py | unrealTOM/MC | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | [
"MIT"
] | 5 | 2019-01-22T03:47:17.000Z | 2022-02-14T18:09:07.000Z | import numpy as np
import matplotlib.pyplot as plt
import math
N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
X = np.array([])
x = 0.1 #initialize x0 to be 0.1
for j in range(N[i]):
u = np.random.rand()
x_star = np.random.normal(x,10)
A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
if u < A:
x = x_star
X=np.hstack((X,x))
ax = fig.add_subplot(2,2,i+1)
ax.hist(X,bins=100,density=True)
x = np.linspace(-10,20,5000)
#ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
ax.set_ylim(0,0.35)
ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
| 29.829268 | 71 | 0.623058 |
22ab90482878ca5263216eabd709a4a4b0c55fab | 338 | py | Python | gfwlist/gen.py | lipeijian/shadowsocks-android | ef707e4383a0d430775c8ac9b660c334e87e40ec | [
"OpenSSL",
"MIT"
] | 137 | 2016-08-04T13:34:02.000Z | 2021-05-31T12:47:10.000Z | gfwlist/gen.py | lipeijian/shadowsocks-android | ef707e4383a0d430775c8ac9b660c334e87e40ec | [
"OpenSSL",
"MIT"
] | 9 | 2016-10-16T14:43:30.000Z | 2018-04-21T11:02:39.000Z | gfwlist/gen.py | lipeijian/shadowsocks-android | ef707e4383a0d430775c8ac9b660c334e87e40ec | [
"OpenSSL",
"MIT"
] | 86 | 2016-08-30T07:22:19.000Z | 2020-10-19T05:08:22.000Z | #!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
if __name__ == "__main__":
main()
| 14.695652 | 44 | 0.60355 |
22ac34a9639b610355752302f9ba8f423e657538 | 436 | py | Python | Specialization/Personal/SortHours.py | lastralab/Statistics | 358679f2e749db2e23c655795b34382c84270704 | [
"MIT"
] | 3 | 2017-09-26T20:19:57.000Z | 2020-02-03T16:59:59.000Z | Specialization/Personal/SortHours.py | lastralab/Statistics | 358679f2e749db2e23c655795b34382c84270704 | [
"MIT"
] | 1 | 2017-09-22T13:57:04.000Z | 2017-09-26T20:03:24.000Z | Specialization/Personal/SortHours.py | lastralab/Statistics | 358679f2e749db2e23c655795b34382c84270704 | [
"MIT"
] | 3 | 2018-05-09T01:41:16.000Z | 2019-01-16T15:32:59.000Z | name = "mail.txt"
counts = dict()
handle = open(name)
for line in handle:
line = line.rstrip()
if line == '':
continue
words = line.split()
if words[0] == 'From':
counts[words[5][:2]] = counts.get(words[5][:2], 0) + 1
tlist = list()
for key, value in counts.items():
newtup = (key, value)
tlist.append(newtup)
tlist.sort()
for key, value in tlist:
print key, value
| 18.956522 | 64 | 0.548165 |
22ac5683811849c14d8a103b4887cbd79b2ac236 | 9,338 | py | Python | core/simulators/carla_scenario_simulator.py | RangiLyu/DI-drive | f7db2e7b19d70c05184d6d6edae6b7e035a324d7 | [
"Apache-2.0"
] | null | null | null | core/simulators/carla_scenario_simulator.py | RangiLyu/DI-drive | f7db2e7b19d70c05184d6d6edae6b7e035a324d7 | [
"Apache-2.0"
] | null | null | null | core/simulators/carla_scenario_simulator.py | RangiLyu/DI-drive | f7db2e7b19d70c05184d6d6edae6b7e035a324d7 | [
"Apache-2.0"
] | null | null | null | import os
from typing import Any, Dict, List, Optional
import carla
from core.simulators.carla_simulator import CarlaSimulator
from core.simulators.carla_data_provider import CarlaDataProvider
from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT
from .srunner.scenariomanager.scenario_manager import ScenarioManager
| 41.502222 | 119 | 0.624331 |
22acbc10643824eb1f53a753c9581e0e1f9b708d | 86 | py | Python | bin/run.py | Conengmo/python-empty-project | 18d275422116577d48ae4fdbe1c93501a5e6ef78 | [
"MIT"
] | null | null | null | bin/run.py | Conengmo/python-empty-project | 18d275422116577d48ae4fdbe1c93501a5e6ef78 | [
"MIT"
] | null | null | null | bin/run.py | Conengmo/python-empty-project | 18d275422116577d48ae4fdbe1c93501a5e6ef78 | [
"MIT"
] | null | null | null | import myproject
myproject.logs(show_level='debug')
myproject.mymod.do_something()
| 12.285714 | 34 | 0.802326 |
22ad01968a4a3e4e8168ccbc68b9c73d312ea977 | 709 | py | Python | development/simple_email.py | gerold-penz/python-simplemail | 9cfae298743af2b771d6d779717b602de559689b | [
"MIT"
] | 16 | 2015-04-21T19:12:26.000Z | 2021-06-04T04:38:12.000Z | development/simple_email.py | gerold-penz/python-simplemail | 9cfae298743af2b771d6d779717b602de559689b | [
"MIT"
] | 3 | 2015-04-21T22:09:55.000Z | 2021-04-27T07:04:05.000Z | development/simple_email.py | gerold-penz/python-simplemail | 9cfae298743af2b771d6d779717b602de559689b | [
"MIT"
] | 4 | 2015-07-22T11:33:28.000Z | 2019-08-06T07:27:20.000Z | #!/usr/bin/env python
# coding: utf-8
# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END
import simplemail
simplemail.Email(
smtp_server = "smtp.a1.net:25",
smtp_user = "xxx",
smtp_password = "xxx",
use_tls = False,
from_address = "xxx",
to_address = "xxx",
subject = u"Really simple test with umlauts ()",
message = u"This is the message with umlauts ()",
).send()
print "Sent"
print
| 22.870968 | 79 | 0.679831 |