| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
src/CassandraLibrary/cassandra_connection_manager.py
|
SensationS/Robotframework-CassandraLibrary
| 0
|
12784651
|
<filename>src/CassandraLibrary/cassandra_connection_manager.py<gh_stars>0
from cassandra.cluster import Cluster
class CassandraConnection(object):
"""
Connection Manager handles the connection & disconnection to the database.
"""
def __init__(self):
"""
Initializes cluster and session to None.
"""
self.cluster = None
self.session = None
def connect_to_cassandra(self, keyspaceName, hostslist=''):
"""
Connect to Cassandra host
For example:
| Connect To Cassandra | # connect to Cassandra |
"""
hosts = hostslist.replace(' ', '').split(',')
# print hosts
self.cluster = Cluster(hosts)
self.session = self.cluster.connect(keyspaceName)
def disconnect_from_cassandra(self):
"""
Disconnects from the database.
For example:
| Disconnect From Cassandra | # disconnects from current connection to the database |
"""
self.cluster.shutdown()
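# --- Usage sketch (added for illustration; not part of the original library) ---
# A minimal example of driving the two keywords above directly from Python; the
# keyspace name "demo" and the host list are hypothetical placeholders.
#
#   conn = CassandraConnection()
#   conn.connect_to_cassandra("demo", hostslist="127.0.0.1, 127.0.0.2")
#   rows = conn.session.execute("SELECT release_version FROM system.local")
#   conn.disconnect_from_cassandra()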
| 3.296875
| 3
|
tower_cli/cli/action.py
|
kedark3/tower-cli
| 363
|
12784652
|
<filename>tower_cli/cli/action.py<gh_stars>100-1000
# Copyright 2017, Ansible by Red Hat
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from click.formatting import join_options
from tower_cli.conf import SETTINGS_PARMS
class ActionSubcommand(click.Command):
"""A Command subclass that adds support for the concept that invocation
without arguments assumes `--help`.
This code is adapted from click.MultiCommand and placed
here to get just the --help functionality and nothing else.
"""
def __init__(self, name=None, no_args_is_help=True, **kwargs):
self.no_args_is_help = no_args_is_help
super(ActionSubcommand, self).__init__(name=name, **kwargs)
def parse_args(self, ctx, args):
"""Parse arguments sent to this command.
The code for this method is taken from MultiCommand:
https://github.com/mitsuhiko/click/blob/master/click/core.py
It is Copyright (c) 2014 by <NAME>.
See the license:
https://github.com/mitsuhiko/click/blob/master/LICENSE
"""
if not args and self.no_args_is_help and not ctx.resilient_parsing:
click.echo(ctx.get_help())
ctx.exit()
return super(ActionSubcommand, self).parse_args(ctx, args)
def format_options(self, ctx, formatter):
"""Monkey-patch click's format_options method to support option categorization.
"""
field_opts = []
global_opts = []
local_opts = []
other_opts = []
for param in self.params:
if param.name in SETTINGS_PARMS:
opts = global_opts
elif getattr(param, 'help', None) and param.help.startswith('[FIELD]'):
opts = field_opts
param.help = param.help[len('[FIELD]'):]
else:
opts = local_opts
rv = param.get_help_record(ctx)
if rv is None:
continue
else:
opts.append(rv)
if self.add_help_option:
help_options = self.get_help_option_names(ctx)
if help_options:
other_opts.append([join_options(help_options)[0], 'Show this message and exit.'])
if field_opts:
with formatter.section('Field Options'):
formatter.write_dl(field_opts)
if local_opts:
with formatter.section('Local Options'):
formatter.write_dl(local_opts)
if global_opts:
with formatter.section('Global Options'):
formatter.write_dl(global_opts)
if other_opts:
with formatter.section('Other Options'):
formatter.write_dl(other_opts)
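# --- Usage sketch (added for illustration; not part of tower-cli itself) ---
# format_options() above buckets an option by its help text: a help string starting
# with '[FIELD]' lands under "Field Options", option names listed in SETTINGS_PARMS
# go under "Global Options", and everything else becomes a "Local Option".
# A hypothetical command using that convention might look like:
#
#   @click.command(cls=ActionSubcommand)
#   @click.option('--name', help='[FIELD]The name field of the resource.')
#   @click.option('--monitor', is_flag=True, help='Print stdout as it happens.')
#   def launch(name, monitor):
#       """Launch a job template."""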
| 2.296875
| 2
|
hooks/pre_find_module_path/hook-distutils.py
|
sbneto/s3conf
| 2
|
12784653
|
import distutils
import os
from PyInstaller.utils.hooks import logger
# https://github.com/pyinstaller/pyinstaller/issues/4064
# https://pythonhosted.org/PyInstaller/hooks.html#the-pre-find-module-path-pfmp-api-method
def pre_find_module_path(api):
# Absolute path of the system-wide "distutils" package when run from within
# a venv or None otherwise.
distutils_dir = getattr(distutils, 'distutils_path', None)
if distutils_dir is not None:
# workaround for https://github.com/pyinstaller/pyinstaller/issues/4064
if distutils_dir.endswith('__init__.py'):
distutils_dir = os.path.dirname(distutils_dir)
# Find this package in its parent directory.
api.search_dirs = [os.path.dirname(distutils_dir)]
logger.info('>>>>>>> CUSTOM >>>>>>>>> distutils: retargeting to non-venv dir %r' % distutils_dir)
| 1.898438
| 2
|
triggered_ec2_ssm_iam_role_attachment/triggered_ec2_ssm_iam_role_attachment.py
|
affinitywaterltd/terraform-aws-lambda
| 0
|
12784654
|
<gh_stars>0
import boto3
import os
ROLE_NAME = os.environ['role_name']
ec2client = boto3.client('ec2')
stsclient = boto3.client('sts')
account_id = stsclient.get_caller_identity()['Account']
iam_role_arn = "arn:aws:iam::{}:instance-profile/{}".format(account_id, ROLE_NAME)
def lambda_handler(event, context):
ec2 = boto3.resource('ec2')
instanceid = event['detail']['instance-id']
instance = ec2.Instance(id=instanceid)
print ("Waiting for EC2 to become running - {}".format(instanceid))
instance.wait_until_running()
print ("EC2 is now running - {}".format(instanceid))
iam_role = instance.iam_instance_profile
print ('{} - Current IAM Role - {}'.format(instanceid, iam_role))
if iam_role is None:
print ('{} - Updating IAM Role to - {}'.format(instanceid, iam_role_arn))
# Set instance IAM Role
ec2client.associate_iam_instance_profile(
IamInstanceProfile={
'Arn': iam_role_arn,
'Name': ROLE_NAME
},
InstanceId=instanceid
)
print ('{} - Successfully Updated IAM Role to - {}'.format(instanceid, iam_role_arn))
return 0
else:
print ('{} - IAM Role already assigned - {}'.format(instanceid, iam_role))
return 0
#End Function
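# --- Note (added for illustration) ---
# The handler expects an EC2 instance state-change notification, i.e. an event shaped
# roughly like the following (the instance id is a placeholder):
#   {
#     "detail-type": "EC2 Instance State-change Notification",
#     "detail": {"instance-id": "i-0123456789abcdef0", "state": "running"}
#   }
# Only event['detail']['instance-id'] is read above.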
| 2.375
| 2
|
Serial/sender_serial.py
|
nikisalli/esp32-screen-mirroring
| 0
|
12784655
|
<filename>Serial/sender_serial.py
from mss import mss
import time
from PIL import Image
import serial
import io
import sys
width = 3240 # monitor width
height = 2160 # monitor height
offx = 0 # window horizontal offset
offy = 0 # window vertical offset
s = serial.Serial('/dev/ttyUSB0', 921600) #insert your serial port here
bounding_box = {'top': offy, 'left': offx, 'width': width, 'height': height} # 'top' is the vertical offset, 'left' the horizontal one
sct = mss()
while True:
sct_img = sct.grab(bounding_box)
img = Image.frombytes("RGB", sct_img.size, sct_img.bgra, "raw", "BGRX")
img.thumbnail([96,64])
b = io.BytesIO()
img.save(b, format='JPEG')
jpg = b.getvalue()
size = len(jpg)
for _ in range(10):
s.write(b'U')
s.write(((size >> 8) & 0xff).to_bytes(1, 'big'))
s.write((size & 0xff).to_bytes(1, 'big'))
s.write(jpg)
time.sleep(0.04)
string = b''
while(s.in_waiting > 0):
string += s.read()
print(string)
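# --- Protocol summary (added for illustration) ---
# Each frame written above is: ten 0x55 ('U') sync bytes, a 16-bit big-endian JPEG
# length (high byte first, then low byte), followed by the JPEG payload itself.
# A receiver therefore skips the sync bytes, reassembles the length as
# (high << 8) | low, and then reads exactly that many bytes as the JPEG image.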
| 2.578125
| 3
|
tests/test_plbvfu1.py
|
cuihantao/Andes
| 16
|
12784656
|
<filename>tests/test_plbvfu1.py
"""
Test PLBVFU1 model.
"""
import unittest
import andes
class TestPLBVFU1(unittest.TestCase):
"""
Class for testing PLBVFU1.
"""
def test_PLBVFU1(self):
"""
Test PLBVFU1 model.
"""
ss = andes.run(andes.get_case("ieee14/ieee14_plbvfu1.xlsx"),
default_config=True,
no_output=True,
)
ss.TDS.config.tf = 3.0
ss.TDS.config.criteria = 0
ss.TDS.run()
self.assertEqual(ss.exit_code, 0)
| 2.625
| 3
|
rm_imgs_without_labels.py
|
sgavela/annotations-and-mAP-sripts
| 0
|
12784657
|
import argparse
import os
from pathlib import Path
import logging
def missing_image(image_path: Path, label_path: Path):
"""
This function removes images from the train folder if the corresponding labels
are not found as .txt files.
NOTE - Make sure you have converted the labels to .txt format first.
The code performs the following steps:
- Takes the input dataset folder path and checks whether each image has label information.
- If no label is found, removes the image.
:params
image_path - The directory where the training images are present
label_path - The directory where the .txt file corresponding to each image is
present.
"""
for image in image_path.iterdir():
if image.suffix == ".jpg":
# Corresponding label file name
label = label_path / (image.stem + ".txt")
if not label.is_file():
logging.warning("Label not found: {}".format(label))
logging.warning("Deleting file: {}".format(image))
os.remove(image)
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--label_path", help="path to the label dir")
ap.add_argument("-d", "--image_path", help="directory with images")
args = ap.parse_args()
image_path = Path(args.image_path).absolute()
label_path = Path(args.label_path).absolute()
assert image_path.is_dir(), "Image directory needs to exist"
assert label_path.is_dir(), "Label directory needs to exist"
missing_image(image_path, label_path)
if __name__ == "__main__":
main()
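# --- Usage note (added for illustration) ---
# Invoked from the command line with the two directories; the paths below are placeholders:
#   python rm_imgs_without_labels.py --image_path train/images --label_path train/labels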
| 3.796875
| 4
|
km3pipe/utils/triggermap.py
|
KM3NeT/km3pipe
| 1
|
12784658
|
<gh_stars>1-10
#!/usr/bin/env python
# Filename: triggermap.py
# vim: ts=4 sw=4 et
"""
This script creates a histogram showing the trigger contribution for events.
Usage:
triggermap [options] -d DET_ID_OR_DETX FILENAMES...
triggermap --version
Options:
FILENAMES Name of the input file(s).
--offline Read offline events instead.
-u DU Only plot for the given DU.
-d DET_ID_OR_DETX Detector ID or DETX file.
-p PLOT_FILENAME The filename of the plot [default: trigger_map.png].
-s SUBTITLE Optional subtitle for the plot instead of the filename.
-h --help Show this screen.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME> and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from docopt import docopt
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg") # noqa
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import km3pipe as kp
from km3modules.common import StatusBar
import km3modules as km
import km3pipe.style
km3pipe.style.use("km3pipe")
class TriggerMap(kp.Module):
"""Creates a plot to show the number of triggered hits for each DOM."""
def configure(self):
self.det = self.require("detector")
self.plot_filename = self.require("plot_filename")
self.subtitle = self.get("subtitle", default="")
self.du = self.get("du")
if self.du is not None:
self.n_dus = 1
self.n_doms = 18
self.dus = [self.du]
else:
self.n_dus = self.det.n_dus
self.n_doms = int(self.det.n_doms / self.n_dus)
self.dus = sorted(self.det.dus)
self.n_rows = self.n_dus * self.n_doms
self.hit_counts = []
def process(self, blob):
hits = blob["Hits"]
hits = hits[hits.triggered.astype(bool)]
dom_ids = np.unique(hits.dom_id)
hit_counts = np.zeros(self.n_dus * self.n_doms)
for dom_id in dom_ids:
n_hits = np.sum(hits.dom_id == dom_id)
du, floor, _ = self.det.doms[dom_id]
if self.du is not None and du != self.du:
continue
hit_counts[self.dus.index(du) * self.n_doms + floor - 1] += n_hits  # dus.index() already gives a zero-based row block
self.hit_counts.append(hit_counts)
return blob
def finish(self):
self.create_plot()
def create_plot(self):
title_du = " - DU {}".format(self.du) if self.du else ""
title = "Trigger Map{}\n{}".format(title_du, self.subtitle)
fig, ax = plt.subplots(figsize=(16, 8))
ax.grid(True)
ax.set_axisbelow(True)
hit_mat = np.array([np.array(x) for x in self.hit_counts]).transpose()
im = ax.matshow(
hit_mat,
interpolation="nearest",
filternorm=None,
cmap="plasma",
aspect="auto",
origin="lower",
zorder=3,
norm=LogNorm(vmin=1, vmax=np.amax(hit_mat)),
)
yticks = np.arange(self.n_rows)
floors_to_label = range(self.n_doms) if self.n_dus == 1 else [1, 6, 12]
ytick_labels = [
"DU{}-DOM{}".format(du, floor) if floor in floors_to_label else ""
for (du, floor, _) in self.det.doms.values()
]
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
ax.tick_params(labelbottom=True)
ax.tick_params(labeltop=True)
ax.set_xlabel("event id")
ax.set_title(title)
cb = fig.colorbar(im, pad=0.05)
cb.set_label("number of triggered hits")
fig.tight_layout()
plt.savefig(self.plot_filename, dpi=120, bbox_inches="tight")
def main():
args = docopt(__doc__, version=kp.version)
du = int(args["-u"]) if args["-u"] else None
try:
det_id = int(args["-d"])
det = kp.hardware.Detector(det_id=det_id)
except ValueError:
detx = args["-d"]
det = kp.hardware.Detector(filename=detx)
if args["-s"] is not None:
subtitle = args["-s"]
else:
subtitle = ", ".join(args["FILENAMES"])
pipe = kp.Pipeline()
if args["--offline"]:
pipe.attach(
km.common.MultiFilePump, pump=kp.io.OfflinePump, filenames=args["FILENAMES"]
)
pipe.attach(km.io.HitsTabulator, kind="offline")
else:
pipe.attach(
km.common.MultiFilePump,
pump=kp.io.online.EventPump,
filenames=args["FILENAMES"],
)
pipe.attach(StatusBar, every=2500)
pipe.attach(
TriggerMap,
detector=det,
du=du,
plot_filename=args["-p"],
subtitle=subtitle,
)
pipe.drain()
if __name__ == "__main__":
main()
| 2.296875
| 2
|
specs/steps/calculator_with_regexes.py
|
hltbra/pycukes
| 1
|
12784659
|
from pycukes import *
@When('I sum $left and $right')
def sum_two_numbers(context, left, right):
context._sum = int(left) + int(right)
#Then I have 2 as result
| 2.859375
| 3
|
axes/signals.py
|
tabdon/django-axes
| 1
|
12784660
|
from django.dispatch import Signal, receiver
from django.contrib.auth.signals import user_logged_out
from django.core.exceptions import ObjectDoesNotExist
from axes.models import AccessLog
# django 1.4 has a new timezone aware now() use if available.
try:
from django.utils.timezone import now
except ImportError:
# fall back to a non-timezone-aware now()
from datetime import datetime
now = datetime.now
user_locked_out = Signal(providing_args=['request', 'username', 'ip_address'])
@receiver(user_logged_out)
def log_user_lockout(sender, request, user, signal, *args, **kwargs):
""" When a user logs out, update the access log"""
if not user:
return
access_log = None
access_logs = AccessLog.objects.filter(username=user.username,
logout_time__isnull=True).order_by("-attempt_time")
if len(access_logs) > 0:
access_log = access_logs[0]
if access_log:
access_log.logout_time = now()
access_log.save()
| 2.171875
| 2
|
cafe/plugins/http/tests/engine/http/test_behaviors.py
|
melissa-kam/opencafe
| 0
|
12784661
|
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from cafe.engine.http.behaviors import get_range_data
class TestHttpFunctions(unittest.TestCase):
def test_get_range_data(self):
data = '0123456789'
data_subset = get_range_data(data, '0-4')
self.assertEqual('01234', data_subset)
data_subset = get_range_data(data, '5-9')
self.assertEqual('56789', data_subset)
def test_get_range_data_with_first_byte_pos(self):
data = '0123456789'
data_subset = get_range_data(data, '7-')
self.assertEqual('789', data_subset)
def test_get_range_data_with_last_byte_pos(self):
data = '0123456789'
data_subset = get_range_data(data, '-3')
self.assertEqual('789', data_subset)
if __name__ == '__main__':
unittest.main()
| 2.171875
| 2
|
tusc/graph/distance.py
|
lucasmccabe/combinatorial-zoo
| 0
|
12784662
|
<reponame>lucasmccabe/combinatorial-zoo<gh_stars>0
import networkx as nx
import numpy as np
def shortest_path(G, u, v, method = "Dijkstra"):
"""
Calculates the shortest path between nodes u and v in G.
Parameters
----------
G : networkx graph
source graph
u : int or str - whatever type the labels of the graph are
source node
v : int or str - whatever type the labels of the graph are
destination node
method : str
the shortest path method you want to use.
Currently implemented:
- "Dijkstra"
Raises
------
NotImplementedError : if selected method is not Dijkstra's algorithm
"""
if method.lower() == "dijkstra":
distances = single_source_shortest_paths(G, u, method = "Dijkstra")
return distances[v]
else:
raise NotImplementedError
def single_source_shortest_paths(G, u, method = "Dijkstra"):
"""
Calculates shortest paths from node u to all other nodes in G.
Parameters
----------
G : networkx graph
source graph
u : int or str - whatever type the labels of the graph are
source node
method : str
the shortest path method you want to use.
Currently implemented:
- "Dijkstra"
"""
if method.lower() == "dijkstra":
return _single_source_shortest_paths_dijkstra(G, u)
else:
raise NotImplementedError
def _single_source_shortest_paths_dijkstra(G, u):
"""
Implements Dijkstra's algorithm for single-source shortest paths from u in G.
Parameters
----------
G : networkx graph
source graph
u : int or str - whatever type the labels of the graph are
source node
Notes
-----
[1] West, <NAME>. “Trees and Distance.” In Introduction to
Graph Theory, 97–97. United States: Pearson, 2018.
"""
G_old, u_old = G, u
G = nx.convert_node_labels_to_integers(G_old)
u = list(G_old.nodes).index(u)
A = nx.adjacency_matrix(G)
S = {u:0}
T = {}
for i in [j for j in G.nodes if j not in S]:
if i not in G.neighbors(u):
T[i] = np.inf
else:
T[i] = A[u, i]
while len(S) < len(G):
v = min(T, key = T.get)
S[v] = T[v]
T.pop(v)
for i in [j for j in G.neighbors(v) if j not in S]:
T[i] = min(T[i], S[v] + A[v, i])
distances = {}
for i in S:
distances[list(G_old.nodes)[i]] = S[i]
return distances
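# --- Usage sketch (added for illustration; not part of the original module) ---
# On an unweighted graph every adjacency-matrix entry is 1, so the returned
# "distance" is simply the hop count, e.g.:
#
#   >>> import networkx as nx
#   >>> G = nx.path_graph(4)                  # 0 - 1 - 2 - 3
#   >>> shortest_path(G, 0, 3)                # -> 3
#   >>> single_source_shortest_paths(G, 0)    # -> {0: 0, 1: 1, 2: 2, 3: 3}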
| 3.890625
| 4
|
Step09_Metric_Learn.py
|
LMSE/PathwayLenPred
| 0
|
12784663
|
#!/usr/bin/env python
# coding: utf-8
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
# Microsoft VS header
#--------------------------------------------------#
import os
import sys
import os.path
from sys import platform
from pathlib import Path
#--------------------------------------------------#
if os.name == 'nt' or platform == 'win32':
print("Running on Windows")
if 'ptvsd' in sys.modules:
print("Running in Visual Studio")
try:
os.chdir(os.path.dirname(__file__))
print('CurrentDir: ', os.getcwd())
except:
pass
#--------------------------------------------------#
else:
print("Running outside Visual Studio")
try:
if not 'workbookDir' in globals():
workbookDir = os.getcwd()
print('workbookDir: ' + workbookDir)
os.chdir(workbookDir)
except:
pass
#--------------------------------------------------#
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import MACCSkeys
from rdkit.Chem.AtomPairs import Pairs
from rdkit.Chem.AtomPairs import Torsions
from rdkit.Chem.Fingerprints import FingerprintMols
#--------------------------------------------------#
import ast
import copy
import pickle
import scipy.io
import subprocess
import numpy as np
import pandas as pd
from numpy import *
from tqdm import tqdm
from pathlib import Path
from random import shuffle
#--------------------------------------------------#
import seaborn as sns
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#--------------------------------------------------#
from scipy import stats
from matplotlib import pylab as pl
#--------------------------------------------------#
from AP_RDKIT_FP import *
from Step07_NetworkToDistance import *
#--------------------------------------------------#
##############################################################################################################
##############################################################################################################
loading_folder = Path("MNX_data/")
saving_folder = Path("MNX_ECFP_savings/")
##############################################################################################################
##############################################################################################################
# all_cmpds : list( ["X","X",...] ) # list
# all_ecfps : set ( ["ecfp", "ecfp", ...] ) # set
# all_pairs : [{{},{}}, {{},{}}, {{},{}},... ]
# all_info : [ [ { fr{}, fr{} }, d ], [ { fr{}, fr{} }, d ], [ { fr{}, fr{} }, d ], .... ]
##############################################################################################################
##############################################################################################################
# Args
# Select ECFP encodings
#------------------- 0 1 2 3 4 5 6
ECFP_encodings_list = ["ECFP2", "ECFP4", "ECFP6", "JTVAE", "MorganFP", "ECFP8", "ECFPX"]
ECFP_encodings = ECFP_encodings_list[1]
ECFP_type = ECFP_encodings[-1] if ECFP_encodings in ["ECFP2", "ECFP4", "ECFP6"] else "6" # 2, 4, 6
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
def list_smiles_to_ecfp_through_dict(smiles_list, all_cmpds_ecfps_dict):
ecfp_list=[]
for one_smiles in smiles_list:
ecfp_list=ecfp_list+all_cmpds_ecfps_dict[one_smiles]
return ecfp_list
#====================================================================================================#
def parse_one_pair_info(one_pair_info, all_ecfps, all_cmpds_ecfps_dict):
dimension=len(all_ecfps)
X1i=[0]*dimension
X2i=[0]*dimension
X1i_ecfp_list=list_smiles_to_ecfp_through_dict(list(list(one_pair_info[0])[0]),all_cmpds_ecfps_dict)
X2i_ecfp_list=list_smiles_to_ecfp_through_dict(list(list(one_pair_info[0])[1]),all_cmpds_ecfps_dict)
distance=one_pair_info[1]
for one_ecfp in X1i_ecfp_list:
X1i[all_ecfps.index(one_ecfp)]=X1i_ecfp_list.count(one_ecfp)
for one_ecfp in X2i_ecfp_list:
X2i[all_ecfps.index(one_ecfp)]=X2i_ecfp_list.count(one_ecfp)
Yi=distance
return (X1i,X2i,Yi)
#====================================================================================================#
def list_subtract(list_a,list_b):
list_out=[]
for i in range(len(list_a)):
list_out.append(list_a[i]-list_b[i])
return list_out
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
def Step09_main(loading_folder, saving_folder, ECFP_encodings):
#====================================================================================================#
pickle_in1=open(saving_folder / "Step07_paired_cmpds_list","rb")
paired_smiles_list=pickle.load(pickle_in1)
pickle_in1.close()
pickle_in2=open(saving_folder / "Step07_all_pairs_list","rb")
all_pairs_list=pickle.load(pickle_in2)
pickle_in2.close()
#====================================================================================================#
pickle_in1=open(saving_folder / ("Step08_all_cmpds_"+ECFP_encodings),"rb")
all_smiles=pickle.load(pickle_in1)
pickle_in1.close()
pickle_in2=open(saving_folder / ("Step08_all_ecfps_"+ECFP_encodings),"rb")
all_ecfps=pickle.load(pickle_in2)
pickle_in2.close()
pickle_in3=open(saving_folder / ("Step08_all_cmpds_ecfps_dict_"+ECFP_encodings),"rb")
all_smiles_ecfps_dict=pickle.load(pickle_in3)
pickle_in3.close()
#====================================================================================================#
for one_pair_info in list(paired_smiles_list): # iterate over a copy so .remove() below does not skip entries
if len(one_pair_info[0])!=2:
print (one_pair_info[0])
print ("wtf?")
paired_smiles_list.remove(one_pair_info)
print ("screened!")
#====================================================================================================#
all_ecfps=list(all_ecfps)
X_Diff=[]
Y_Distance=[]
for one_pair_info in tqdm(paired_smiles_list):
(X1i, X2i, Yi)=parse_one_pair_info(one_pair_info,all_ecfps,all_smiles_ecfps_dict)
X_Diff.append(list_subtract(X1i, X2i))
Y_Distance.append(Yi)
Step09_processed_data_dict = {"X_data": X_Diff, "y_data": Y_Distance}
#====================================================================================================#
pickle_out1=open(saving_folder / ("Step09_processed_data_"+ECFP_encodings),"wb") # parenthesized so the string concatenation happens before the Path join
pickle.dump(Step09_processed_data_dict, pickle_out1)
pickle_out1.close()
print("Step09_main Done!")
return
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
if __name__ == '__main__':
Step09_main(loading_folder, saving_folder, ECFP_encodings)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
| 2.34375
| 2
|
codegen_sources/test_generation/test_runners/python_test_runner.py
|
AlexShypula/CodeGen
| 241
|
12784664
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import subprocess
import sys
import uuid
from pathlib import Path, PosixPath
from subprocess import Popen
from .evosuite_test_runners import (
EvosuiteTestRunner,
TestRuntimeError,
CompilationError,
InvalidTest,
clean_firejail,
FIREJAIL_PROFILE,
)
from ...model.src.utils import (
TREE_SITTER_ROOT,
limit_virtual_memory,
MAX_VIRTUAL_MEMORY,
)
from ...preprocessing.lang_processors.lang_processor import LangProcessor
sys.path.append(str(Path(__file__).parents[3]))
print("adding to path", str(Path(__file__).parents[3]))
python_processor = LangProcessor.processors["python"](root_folder=TREE_SITTER_ROOT)
class PythonTestRunner(EvosuiteTestRunner):
def __init__(
self,
tmp_folder=Path(
Path.home().joinpath("data/CodeGen/automatic_tests/tmp_tests_folder/python")
),
timeout=15,
):
super().__init__(tmp_folder=tmp_folder, timeout=timeout)
def _run_tests(
self,
function: str,
test: str,
tmp_path: PosixPath,
classname: str = None,
scaffolding: str = None,
):
assert (
scaffolding is None
), f"Scaffolding should be None for python tests, was {scaffolding}"
if "#TOFILL" not in test:
raise InvalidTest("Missing #TOFILL")
try:
f_name = python_processor.get_function_name(function)
except (ValueError, IndexError):
raise CompilationError("No function definition")
function = python_processor.detokenize_code(
function.replace(f" {f_name.strip()} ", " f_filled ")
)
filled_test = test.replace("#TOFILL", function)
test_path = self.write_test(filled_test, classname, tmp_path)
assert test_path.is_file()
test_cmd = f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; firejail --profile={FIREJAIL_PROFILE} python {test_path}"
test_proc = Popen(
test_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
preexec_fn=os.setsid,
)
return test_proc, tmp_path
def _eval_proc_state(self, out, err):
stderr = err.decode("utf-8", errors="replace")
stderr = clean_firejail(stderr)
res_line = stderr.splitlines()
if len(res_line) <= 2 or not (
res_line[-1].startswith("OK") or res_line[-1].startswith("FAILED")
):
raise TestRuntimeError(stderr)
assert res_line[-3].startswith("Ran ")
number_of_tests = int(res_line[-3].replace("Ran ", "").split(" ")[0])
res_line = res_line[-1]
if res_line.startswith("OK"):
return "success", number_of_tests, 0
else:
assert res_line.startswith("FAILED (errors=") or res_line.startswith(
"FAILED (failures="
)
number_failures = int(res_line.split("=")[-1].replace(")", ""))
return "failure", number_of_tests, number_failures
@staticmethod
def write_test(test, classname, out_folder):
if classname is None:
classname = "a"
test_path = out_folder.joinpath(f"python_test_{classname}.py")
with open(test_path, "w", encoding="utf-8") as o:
o.write(test)
return test_path
| 1.898438
| 2
|
toutiao-backend/toutiao/main.py
|
MarioKarting/Flask_toutiao
| 5
|
12784665
|
<gh_stars>1-10
import sys
import os
#BASE_DIR -->toutiao-backend
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# paths where the Python interpreter searches for packages
sys.path.insert(0, os.path.join(BASE_DIR, 'common'))
sys.path.insert(0, os.path.join(BASE_DIR))
from flask import jsonify
from . import create_app
from settings.default import DefaultConfig
# factory function, configured as needed
app = create_app(DefaultConfig, enable_config_file=True)
@app.route('/')
def route_map():
"""
Main view: returns the URLs of all registered views.
"""
rules_iterator = app.url_map.iter_rules()
return jsonify(
{rule.endpoint: rule.rule for rule in rules_iterator if rule.endpoint not in ('route_map', 'static')})
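# --- Note (added for illustration) ---
# route_map() returns a JSON object mapping endpoint names to URL rules, roughly:
#   {"user.profile": "/v1_0/user", "article.detail": "/v1_0/articles/<int:article_id>"}
# The endpoints shown are placeholders; the real keys depend on the blueprints
# registered inside create_app().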
| 2.328125
| 2
|
movies/urls.py
|
Kaushiksekar/movie_base
| 0
|
12784666
|
from django.urls import path
from .views import call_json_to_db, get_all_movies, add_movie, edit_movie, remove_movie
urlpatterns = [
path('load-db/', call_json_to_db, name='load_db'),
path('list/', get_all_movies, name='all_movies'),
path('add/', add_movie, name='add_movie'),
path('edit/', edit_movie, name='edit_movie'),
path('remove/', remove_movie, name='remove_movie'),
]
| 1.75
| 2
|
octicons16px/check_circle_fill.py
|
andrewp-as-is/octicons16px.py
| 1
|
12784667
|
OCTICON_CHECK_CIRCLE_FILL = """
<svg class="octicon octicon-circle-fill" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M8 16A8 8 0 108 0a8 8 0 000 16zm3.78-9.72a.75.75 0 00-1.06-1.06L6.75 9.19 5.28 7.72a.75.75 0 00-1.06 1.06l2 2a.75.75 0 001.06 0l4.5-4.5z"></path></svg>
"""
| 1.382813
| 1
|
torch_tools/data/datasets/util/dataset_to_h5.py
|
gregunz/TorchTools
| 0
|
12784668
|
from itertools import tee
from pathlib import Path
from typing import Union, List, Tuple
import h5py
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm.auto import tqdm
T = List[Tuple[torch.Tensor, torch.Tensor]]
def dataset_to_h5_file(dataset: Union[Dataset, T], filepath: Union[str, Path], inputs_type=None,
targets_type=None, inputs_name: str = 'inputs', targets_name: str = 'targets'):
n = len(dataset)
x, y = next(tee(iter(dataset))[1])
assert isinstance(x, torch.Tensor), f'input should be a torch tensor, not {type(x)}'
assert isinstance(y, torch.Tensor), f'target should be a torch tensor, not {type(y)}'
inputs_shape = (n,) + x.size()
targets_shape = (n,) + y.size()
if inputs_type is None:
inputs_type = x.numpy().dtype
if targets_type is None:
targets_type = y.numpy().dtype
with h5py.File(name=filepath, mode='w', libver='latest', swmr=True) as h5_file:
inputs = h5_file.create_dataset(inputs_name, shape=inputs_shape, dtype=inputs_type, fillvalue=0)
targets = h5_file.create_dataset(targets_name, shape=targets_shape, dtype=targets_type, fillvalue=0)
dloader = DataLoader(dataset, batch_size=1, num_workers=8)
for i, (x, y) in enumerate(tqdm(dloader, desc=str(filepath))):
inputs[i] = x
targets[i] = y
assert i == n - 1
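# --- Usage sketch (added for illustration; not part of the original module) ---
# Converting an in-memory TensorDataset to an HDF5 file; the file name is a placeholder.
#
#   from torch.utils.data import TensorDataset
#   xs = torch.randn(100, 3, 32, 32)
#   ys = torch.randint(0, 10, (100,))
#   dataset_to_h5_file(TensorDataset(xs, ys), 'toy_dataset.h5')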
| 2.140625
| 2
|
tests/test_similarity_matrix_filters.py
|
jo-mueller/biapol-utilities
| 4
|
12784669
|
# -*- coding: utf-8 -*-
import biapol_utilities as biau
import numpy as np
def test_suppression():
a = np.random.rand(100).reshape(10, -1)
threshold = 0.5
a_sup = biau.label.suppressed_similarity(a, threshold=threshold)
assert(all(a_sup[a < threshold].ravel() == 0))
if __name__ == "__main__":
test_suppression()
| 2.640625
| 3
|
nPYc/reports/_generateBasicPCAReport.py
|
ghaggart/nPYc-Toolbox
| 14
|
12784670
|
<reponame>ghaggart/nPYc-Toolbox
import os
import re
from collections import OrderedDict
from ..plotting import plotScores, plotLoadings
from pyChemometrics.ChemometricsPCA import ChemometricsPCA
def generateBasicPCAReport(pcaModel, dataset, figureCounter=1, destinationPath=None, fileNamePrefix=''):
"""
Visualise a PCA model by plotting scores plots coloured by sample type and loadings plots.
:param ChemometricsPCA pcaModel: ChemometricsPCA model of **dataset**
:param Dataset dataset: Dataset object
:param int figureCounter: Start numbering figures from this value
:param destinationPath: If not ``None`` save to the path specified
:type destinationPath: None or str
:param str fileNamePrefix: Additional prefix to add to filenames if saving
:returns: Dictionary of the paths figures were saved to if destinationPath was not ``None``
:rtype: dict
"""
if not isinstance(pcaModel, ChemometricsPCA):
raise TypeError('pcaModel must be a ChemometricsPCA object')
returnDict = dict()
figuresQCscores = OrderedDict()
if destinationPath is not None:
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if not os.path.exists(os.path.join(destinationPath, 'graphics')):
os.makedirs(os.path.join(destinationPath, 'graphics'))
graphicsPath = os.path.join(destinationPath, 'graphics', 'report_finalSummary')
if not os.path.exists(graphicsPath):
os.makedirs(graphicsPath)
else:
graphicsPath = None
##
# Scores Plot
##
if destinationPath:
saveAs = os.path.join(graphicsPath, dataset.name + '_PCAscoresPlot_')
else:
print('Figure %i: PCA scores plots coloured by sample type.' % (figureCounter))
saveAs = None
figuresQCscores = plotScores(pcaModel,
classes=dataset.sampleMetadata['Plot Sample Type'],
classType='Plot Sample Type',
title='Sample Type',
savePath=saveAs,
figures=figuresQCscores,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
for keyS in figuresQCscores:
if 'graphics' in str(graphicsPath): # expect graphics to have been already passed in the previous path
figuresQCscores[keyS] = re.sub('.*graphics', 'graphics', figuresQCscores[keyS])
returnDict['QCscores'] = figuresQCscores
returnDict['PCAcount'] = figureCounter
figuresLoadings = OrderedDict()
##
# Loadings plot
##
if destinationPath:
saveAs = os.path.join(graphicsPath, dataset.name + '_PCAloadingsPlot_')
else:
print('\n\nFigure %i: PCA loadings plots.' % (figureCounter + 1))
saveAs = None
figuresLoadings = plotLoadings(pcaModel,
dataset,
title='',
figures=figuresLoadings,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
for keyL in figuresLoadings:
if 'graphics' in str(graphicsPath):
figuresLoadings[keyL] = re.sub('.*graphics', 'graphics', figuresLoadings[keyL])
returnDict['loadings'] = figuresLoadings
if destinationPath:
return returnDict
else:
return None
| 2.1875
| 2
|
bin/generate_sitemap.py
|
gaybro8777/CiteSeerX
| 108
|
12784671
|
#!/usr/bin/python
# Script to generate sitemaps
# <NAME>
# Requires mysql-python
import MySQLdb
import argparse
import logging
import os
import sys
import subprocess
from config import db
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("sitemapdir")
args = parser.parse_args()
try:
sitemapdir = args.sitemapdir
except:
logging.error("sitemap dir not set. run python generate_sitemap.py -h")
sys.exit(0)
# clear sitemapdir if it is there already
if os.path.exists(sitemapdir):
subprocess.call('rm -rfv %s/*' % sitemapdir, shell=True) # a shell is needed for the glob to expand
else:
os.makedirs(sitemapdir)
MAX_PER_FILE = 49999
db = MySQLdb.connect(host=db["dbhost"], user=db["dbuser"], passwd=db["dbpass"], db="citeseerx")
cur = db.cursor()
i = 0
file = 1
header = '<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n'
cur.execute("SELECT id FROM papers WHERE public = 1")
for row in cur.fetchall():
if i == 0:
f = open(os.path.join(sitemapdir,"sitemap%d.xml" % file), 'w+')
f.write(header)
f.write('<url>\n\t<loc>http://citeseerx.ist.psu.edu/viewdoc/download?doi=%s&amp;rep=rep1&amp;type=pdf</loc>\n</url>\n' % row[0]) # '&' must be escaped as '&amp;' inside XML
i = i + 1
if i == MAX_PER_FILE:
file = file + 1
i = 0
f.write('</urlset>')
f.close()
logging.info("sitemap generated: {}".format(f.name))
if not f.closed:
f.write('</urlset>')
f.close()
logging.info("sitemap generated: {}".format(f.name))
f = open(os.path.join(sitemapdir,'sitemap_index.xml'), 'w+')
f.write('<?xml version="1.0" encoding="UTF-8"?>\n<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
for i in range(1, file+1):
f.write('<sitemap>\n\t<loc>http://citeseerx.ist.psu.edu/sitemap%d.xml</loc>\n</sitemap>\n' % i)
f.write('</sitemapindex>');
f.close()
logging.info("sitemap index file: {}".format(f.name))
| 2.71875
| 3
|
tests/test_upload_download_local.py
|
anand870/aiocloudstorage
| 2
|
12784672
|
<reponame>anand870/aiocloudstorage<filename>tests/test_upload_download_local.py
import os
import pytest
import io
from aiocloudstorage.drivers.local import LocalDriver
from tests.settings import *
from tests.helpers import random_container_name, uri_validator,binary_iostreams
from aiocloudstorage.exceptions import CloudStorageError,NotFoundError,InvalidFileURLError
from aiocloudstorage import configure,upload,bulk_upload,download,bulk_download
from aiocloudstorage.helpers import file_checksum,parse_file_url
@pytest.fixture()
async def storage():
storage = LocalDriver(
LOCAL_ENDPOINT,
LOCAL_SECRET
)
yield storage
async for container in storage.get_containers():
if container.name.startswith(CONTAINER_PREFIX):
async for blob in container.get_blobs():
await blob.delete()
await container.delete()
@pytest.fixture()
async def container(request,storage,store_config):
param = None if not hasattr(request,'param') else request.param
print(param)
if param == 'nds': #no default store
store_config.pop('DEFAULT_STORE')
elif param == 'ndc': #no default container
store_config.pop('DEFAULT_CONTAINER')
elif param=='nd': #no default
store_config.pop('DEFAULT_CONTAINER')
store_config.pop('DEFAULT_STORE')
elif param == 'sd': #store disabled
store_config['STORAGE_ENABLED']=False
conf = await configure(store_config)
container_name = store_config.get('DEFAULT_CONTAINER',random_container_name())
container = await storage.create_container(container_name)
return container
@pytest.mark.asyncio
async def test_upload_path(container,text_filename):
blob = await upload(text_filename,'auto')
assert blob.checksum == TEXT_MD5_CHECKSUM
assert blob.name == TEXT_FILENAME
assert blob.file_url == FILE_URL%(LOCAL_NAME,container.name,TEXT_FILENAME)
@pytest.mark.asyncio
async def test_upload_nested_path(container,text_filename):
blob = await upload(text_filename,'auto',destpath=TEXT_NESTED_UPLOAD_PATH)
assert blob.checksum == TEXT_MD5_CHECKSUM
assert blob.name == TEXT_NESTED_UPLOAD_NAME
@pytest.mark.asyncio
async def test_upload_auto_name(container,text_filename):
blob = await upload(text_filename,'auto')
assert blob.checksum == TEXT_MD5_CHECKSUM
assert blob.name == TEXT_FILENAME
assert blob.file_url == FILE_URL%(LOCAL_NAME,container.name,TEXT_FILENAME)
@pytest.mark.asyncio
async def test_upload_random_name(container,text_filename):
blob = await upload(text_filename,'random')
assert blob.checksum == TEXT_MD5_CHECKSUM
assert blob.name != TEXT_FILENAME
assert blob.file_url == FILE_URL%(LOCAL_NAME,container.name,blob.name)
@pytest.mark.asyncio
async def test_upload_specified_name(container,text_filename):
name = random_container_name()+TEXT_FILENAME
blob = await upload(text_filename,name)
assert blob.checksum == TEXT_MD5_CHECKSUM
assert blob.name == name
assert blob.file_url == FILE_URL%(LOCAL_NAME,container.name,name)
@pytest.mark.asyncio
async def test_upload_stream(container,binary_stream):
blob = await upload(binary_stream,destfilename=BINARY_STREAM_FILENAME)
assert blob.name == BINARY_STREAM_FILENAME
assert blob.checksum == BINARY_MD5_CHECKSUM
@pytest.mark.asyncio
@pytest.mark.parametrize('container',['sd'],indirect=True)
async def test_upload_store_disabled(container,text_filename):
with pytest.raises(CloudStorageError) as err:
blob = await upload(text_filename,store_name=LOCAL_NAME,container_name=container.name)
@pytest.mark.asyncio
async def test_upload_container_invalid(container,text_filename):
with pytest.raises(NotFoundError) as err:
blob = await upload(text_filename,container_name=random_container_name())
@pytest.mark.asyncio
@pytest.mark.parametrize('container',['nd','nds','ndc','sd'],indirect=True)
async def test_upload_no_default(container,text_filename):
with pytest.raises(CloudStorageError) as err:
blob = await upload(text_filename)
@pytest.mark.asyncio
@pytest.mark.parametrize('container',['ndc'],indirect=True)
async def test_upload_no_default_container(container,text_filename):
blob = await upload(text_filename,container_name=container.name)
assert blob.checksum == TEXT_MD5_CHECKSUM
assert blob.file_url == FILE_URL%(LOCAL_NAME,container.name,blob.name)
@pytest.mark.asyncio
async def test_bulk_upload_no_file(container):
files = await bulk_upload({})
assert files== {}
@pytest.mark.asyncio
async def test_bulk_upload(container):
filecount = 10
iostreams = binary_iostreams(filecount)
destpath = random_container_name()
files = await bulk_upload(iostreams,destpath='/'+destpath)
assert isinstance(files,dict)
assert len(files) == filecount
hash_type = container.driver.hash_type
for key,fileurl in files.items():
iostreams[key].seek(0)
download_hash = file_checksum(iostreams[key],hash_type=hash_type)
blob = await container.get_blob(fileurl)
assert blob.name.startswith(destpath)
assert blob.checksum == download_hash.hexdigest()
@pytest.mark.asyncio
async def test_bulk_upload_with_one_invalid(container):
filecount = 10
iostreams = binary_iostreams(filecount)
iostreams[filecount+1] = io.BytesIO(b'')
destpath = random_container_name()
files = await bulk_upload(iostreams,destpath='/'+destpath)
assert isinstance(files,dict)
assert len(files) == filecount+1
hash_type = container.driver.hash_type
for key,fileurl in files.items():
if key==(filecount+1):
assert isinstance(fileurl,Exception)
continue
iostreams[key].seek(0)
download_hash = file_checksum(iostreams[key],hash_type=hash_type)
blob = await container.get_blob(fileurl)
assert blob.name.startswith(destpath)
assert blob.checksum == download_hash.hexdigest()
@pytest.mark.asyncio
async def test_download_invalid_file_url():
with pytest.raises(InvalidFileURLError) as err:
filepath = await download('http://www.google.com/myfile.json')
@pytest.mark.asyncio
@pytest.mark.parametrize('container',['sd'],indirect=True)
async def test_download_storage_disabled(binary_blob):
with pytest.raises(CloudStorageError) as err:
filepath = await download(binary_blob.file_url)
@pytest.mark.asyncio
async def test_download_container_in_url_invalid(binary_blob):
with pytest.raises(NotFoundError) as err:
parsed = parse_file_url(binary_blob.file_url)
invalid_url = "%s://%s/%s"%(parsed['store'],random_container_name(),parsed['blob'])
filepath = await download(invalid_url)
@pytest.mark.asyncio
async def test_download_no_destpath_auto(binary_blob,random_filepath):
try:
filepath = await download(binary_blob.file_url,destfilename=random_filepath)
except:
pytest.fail("Should have treated destfilename as relative or absolute path")
@pytest.mark.asyncio
async def test_download_destpath_not_exist(binary_blob,random_dirpath):
try:
filepath = await download(binary_blob.file_url,destpath=random_dirpath)
except:
pytest.fail("Should auto create destination if not exist")
assert os.path.isdir(random_dirpath)
@pytest.mark.asyncio
async def test_download_file_path(binary_blob,temp_file):
filepath = await download(binary_blob.file_url,destfilename=temp_file)
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(filepath, hash_type=hash_type)
assert download_hash.hexdigest() == binary_blob.checksum
@pytest.mark.asyncio
async def test_download_stream(binary_blob,temp_file):
with open(temp_file,'wb') as download_file:
await download(binary_blob.file_url,download_file)
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(temp_file, hash_type=hash_type)
assert download_hash.hexdigest() == binary_blob.checksum
@pytest.mark.asyncio
async def test_download_without_destination(binary_blob):
download_file = await download(binary_blob.file_url)
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(download_file, hash_type=hash_type)
assert download_hash.hexdigest() == binary_blob.checksum
@pytest.mark.asyncio
async def test_bulk_download_no_file(container):
path_dict = await bulk_download({})
assert path_dict == {}
@pytest.mark.asyncio
async def test_bulk_download(binary_blob_list):
blobs_dict = {k:v for k,v in enumerate(binary_blob_list)}
fileurls_dict = {k:v.file_url for k,v in enumerate(binary_blob_list)}
count = len(blobs_dict)
path_dict = await bulk_download(fileurls_dict)
assert len(path_dict) == count
for key,blob in blobs_dict.items():
hash_type = blob.driver.hash_type
assert os.path.isfile(path_dict[key])
download_hash = file_checksum(path_dict[key], hash_type=hash_type)
assert download_hash.hexdigest() == blob.checksum
@pytest.mark.asyncio
async def test_bulk_download_multi_container(random_blob_list):
blobs_dict = {k:v for k,v in enumerate(random_blob_list)}
fileurls_dict = {k:v.file_url for k,v in enumerate(random_blob_list)}
count = len(blobs_dict)
path_dict = await bulk_download(fileurls_dict)
assert len(path_dict) == count
for key,blob in blobs_dict.items():
hash_type = blob.driver.hash_type
assert os.path.isfile(path_dict[key])
download_hash = file_checksum(path_dict[key], hash_type=hash_type)
assert download_hash.hexdigest() == blob.checksum
| 2.09375
| 2
|
test/controllers/test_delete_debt_by_id_route.py
|
JVGC/MyFinancesPython
| 0
|
12784673
|
<reponame>JVGC/MyFinancesPython<gh_stars>0
import unittest
from uuid import uuid4
from domain.entities import Date, Debt
from infra.controllers.contracts.http import HttpRequest
from infra.controllers.errors import NotFoundError
from infra.controllers.operators.debt import DeleteDebtByIdOperator
from infra.repositories import DebtRepositoryMongo
from pymongo import response
class TestDeleteDebtByIdRoute(unittest.TestCase):
def test_success(self):
debt_repository_mongo = DebtRepositoryMongo()
delete_debt_by_id_operator = DeleteDebtByIdOperator(
debt_repository_mongo)
_id = str(uuid4())
debt_or_err = Debt.create(_id,
'testando',
10.5,
5,
Date.create(year=2020, month=10).ok(),
8)
new_debt = debt_or_err.ok()
_ = debt_repository_mongo.add(new_debt.id,
new_debt.description,
new_debt.part_value,
new_debt.total_parts,
new_debt.start_date.to_dict(),
new_debt.total_value,
new_debt.paid_parts,
new_debt.remaining_parts,
new_debt.remaining_value)
request = HttpRequest(params={'debt_id': _id})
http_response = delete_debt_by_id_operator.operate(request)
self.assertEqual(http_response.status_code, 200)
self.assertEqual(http_response.body['debt_id'], _id)
def test_debt_not_found(self):
debt_repository_mongo = DebtRepositoryMongo()
delete_debt_by_id_operator = DeleteDebtByIdOperator(
debt_repository_mongo)
request = HttpRequest(params={'debt_id': '123'})
http_response = delete_debt_by_id_operator.operate(request)
self.assertEqual(http_response.status_code, 404)
self.assertIsInstance(http_response, NotFoundError)
| 2.390625
| 2
|
fHDHR/versions/__init__.py
|
deathbybandaid/fHDHR
| 0
|
12784674
|
import os
import sys
import platform
from fHDHR import fHDHR_VERSION
from fHDHR.tools import is_docker
class Versions():
"""
fHDHR versioning management system.
"""
def __init__(self, settings, fHDHR_web, logger, web, db, scheduler):
self.fHDHR_web = fHDHR_web
self.logger = logger
self.web = web
self.db = db
self.scheduler = scheduler
self.github_org_list_url = "https://api.github.com/orgs/fHDHR/repos?type=all"
self.github_fhdhr_core_info_url = "https://raw.githubusercontent.com/fHDHR/fHDHR/main/version.json"
self.dict = {}
self.official_plugins = self.db.get_fhdhr_value("versions", "dict") or {}
self.register_fhdhr()
self.register_env()
self.get_online_versions()
self.update_url = "/api/versions?method=check"
def sched_init(self, fhdhr):
"""
The Scheduled update method.
"""
self.api = fhdhr.api
self.scheduler.every(2).to(3).hours.do(self.sched_update)
def sched_update(self):
"""
Use an API thread to update Versions listing.
"""
self.api.threadget(self.update_url)
def get_online_versions(self):
"""
Update the online versions listing.
"""
self.logger.debug("Checking for Online Plugin Information")
official_plugins = {}
try:
github_org_json = self.web.session.get(self.github_org_list_url).json()
except self.web.exceptions.ReadTimeout as err:
self.logger.error("Online Plugin Information Check Failed: %s" % err)
return
online_plugin_names = [x["name"] for x in github_org_json if x["name"].startswith("fHDHR_plugin_")]
for plugin_name in online_plugin_names:
plugin_version_check_success = 0
for branch in ["main", "master", "dev"]:
if not plugin_version_check_success:
self.logger.debug("Attempting Online Plugin Information for %s %s branch" % (plugin_name, branch))
plugin_json_url = "https://raw.githubusercontent.com/fHDHR/%s/%s/plugin.json" % (plugin_name, branch)
try:
plugin_json = self.web.session.get(plugin_json_url)
if plugin_json.status_code == 200:
plugin_json = plugin_json.json()
official_plugins[plugin_name] = plugin_json
plugin_version_check_success = 1
except self.web.exceptions.ReadTimeout as err:
self.logger.error("Online Plugin Information Check Failed for %s %s branch: %s" % (plugin_name, branch, err))
self.official_plugins = official_plugins
core_json = self.web.session.get(self.github_fhdhr_core_info_url).json()
for key in list(core_json.keys()):
self.official_plugins[key] = {"name": key, "version": core_json[key], "type": "core"}
self.db.set_fhdhr_value("versions", "dict", official_plugins)
def register_version(self, item_name, item_version, item_type):
"""
Register a version item.
"""
self.logger.debug("Registering %s item: %s %s" % (item_type, item_name, item_version))
self.dict[item_name] = {
"name": item_name,
"version": item_version,
"type": item_type
}
def register_fhdhr(self):
"""
Register core version items.
"""
self.register_version("fHDHR", fHDHR_VERSION, "fHDHR")
self.register_version("fHDHR_web", self.fHDHR_web.fHDHR_web_VERSION, "fHDHR")
def register_env(self):
"""
Register env version items.
"""
self.register_version("Python", sys.version, "env")
if sys.version_info.major == 2 or sys.version_info < (3, 7):
self.logger.error('Error: fHDHR requires python 3.7+. Do NOT expect support for older versions of python.')
opersystem = platform.system()
self.register_version("Operating System", opersystem, "env")
if opersystem in ["Linux", "Darwin"]:
# Linux/Mac
if os.getuid() == 0 or os.geteuid() == 0:
self.logger.warning('Do not run fHDHR with root privileges.')
elif opersystem in ["Windows"]:
# Windows
if os.environ.get("USERNAME") == "Administrator":
self.logger.warning('Do not run fHDHR as Administrator.')
else:
self.logger.warning("Uncommon Operating System, use at your own risk.")
cpu_type = platform.machine()
self.register_version("CPU Type", cpu_type, "env")
isdocker = is_docker()
self.register_version("Docker", isdocker, "env")
def register_plugins(self, plugins):
"""
Register plugin version items.
"""
self.logger.info("Scanning Plugins for Version Information.")
self.plugins = plugins
plugin_names = []
for plugin in list(self.plugins.plugins.keys()):
if self.plugins.plugins[plugin].plugin_name not in plugin_names:
plugin_names.append(self.plugins.plugins[plugin].plugin_name)
self.register_version(self.plugins.plugins[plugin].plugin_name, self.plugins.plugins[plugin].manifest["version"], "plugin")
| 2.359375
| 2
|
saved_scores/mwob/pop_dir.py
|
nathanShepherd/Intelligent-Interface
| 3
|
12784675
|
click_games = ['BisectAngle',
'ChaseCircle',
'ChooseDate',
'ChooseList',
'CircleCenter',
'ClickButton',
'ClickButtonSequence',
'ClickCheckboxes',
'ClickCollapsible',
'ClickCollapsible2',
'ClickColor',
'ClickDialog',
'ClickDialog2',
'ClickLink',
'ClickMenu2',
'ClickOption',
'ClickPie',
'ClickShades',
'ClickShape',
'ClickTab',
'ClickTab2',
'ClickTest',
'ClickTest2',
'ClickWidget',
'CountShape',
'CountSides',
'FindMidpoint',
'FocusText',
'FocusText2',
'GridCoordinate',
'GuessNumber',
'IdentifyShape',
'NavigateTree',
'NumberCheckboxes',
'RightAngle',
'SimonSays',
'TicTacToe',
'UseColorwheel',
'UseColorwheel2',
'UseSlider',
'UseSlider2',
'UseSpinner',
]
for env_name in click_games:
f = open(env_name+'/README.txt','w')
f.write( str(env_name) )
f.close()
| 2.1875
| 2
|
OpenDataCatalog/contest/views.py
|
runonthespot/Open-Data-Catalog
| 105
|
12784676
|
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.mail import send_mail, mail_managers, EmailMessage
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from OpenDataCatalog.contest.models import *
from datetime import datetime
def get_entries(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True)
if 'sort' not in request.GET:
entries = entries.order_by('-vote_count')
return render_to_response('contest/entries.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_entries_table(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest)
if 'sort' not in request.GET:
entries = entries.order_by('-vote_count')
return render_to_response('contest/entry_table.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_winners(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True).order_by('-vote_count')
return render_to_response('contest/winners.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_rules(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
return render_to_response('contest/rules.html', {'contest': contest}, context_instance=RequestContext(request))
def get_entry(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
return render_to_response('contest/entry.html', {'contest': entry.contest, 'entry': entry}, context_instance=RequestContext(request))
#@login_required
def add_entry(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
if request.method == 'POST':
form = EntryForm(request.POST)
form.contest = contest_id
if form.is_valid():
data = {
#"submitter": request.user.username,
"submit_date": datetime.now(),
"org_name": form.cleaned_data.get("org_name"),
"org_url": form.cleaned_data.get("org_url"),
"contact_person": form.cleaned_data.get("contact_person"),
"contact_phone": form.cleaned_data.get("contact_phone"),
"contact_email": form.cleaned_data.get("contact_email"),
"data_set": form.cleaned_data.get("data_set"),
"data_use": form.cleaned_data.get("data_use"),
"data_mission": form.cleaned_data.get("data_mission")
}
subject = 'OpenDataPhilly - Contest Submission'
user_email = form.cleaned_data.get("contact_email")
text_content = render_to_string('contest/submit_email.txt', data)
text_content_copy = render_to_string('contest/submit_email_copy.txt', data)
mail_managers(subject, text_content)
msg = EmailMessage(subject, text_content_copy, to=[user_email])
msg.send()
return render_to_response('contest/thanks.html', {'contest': contest}, context_instance=RequestContext(request))
else:
form = EntryForm()
return render_to_response('contest/submit_entry.html', {'contest': contest, 'form': form}, context_instance=RequestContext(request))
@login_required
def add_vote(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
contest = entry.contest
user = User.objects.get(username=request.user)
if contest.user_can_vote(user):
new_vote = Vote(user=user, entry=entry)
new_vote.save()
entry.vote_count = entry.vote_set.count()
entry.save()
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>Thank you for your vote! You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
return redirect('/contest/?sort=vote_count')
| 1.945313
| 2
|
result/migrations/mig/0027_auto_20190626_0838.py
|
0Jihad/uqhs
| 0
|
12784677
|
<reponame>0Jihad/uqhs<filename>result/migrations/mig/0027_auto_20190626_0838.py
# Generated by Django 2.1.3 on 2019-06-25 19:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('result', '0026_auto_20190626_0835'),
]
operations = [
migrations.RenameField(
model_name='all_subject',
old_name='Agg',
new_name='agr',
),
]
| 1.507813
| 2
|
LogisticRegression_Firth.py
|
LukeLB/LogisticRegression_Firth
| 1
|
12784678
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 6 09:59:14 2021
@author: ll17354
"""
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model._logistic import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
import sys
import warnings
import math
import statsmodels
import numpy as np
from scipy import stats
import statsmodels.api as smf
def firth_likelihood(beta, logit):
return -(logit.loglike(beta) + 0.5*np.log(np.linalg.det(-logit.hessian(beta))))
def null_fit_firth(y, X, start_vec = None, step_limit=1000, convergence_limit=0.0001):
"""
Computes the null model in the likelihood ratio test
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. Make sure X has an intercept
term (column of ones).
y : array-like of shape (n_samples,)
Target vector relative to X. Please note this function only currently works for
binomial regression so output values of {0, 1} will work while
{0, 1, 2} will not.
    start_vec : ndarray or None, optional
        Starting vector. The default is None.
    step_limit : int, optional
        Max number of steps before MLE termination. The default is 1000.
    convergence_limit : float, optional
        Minimum difference between successive MLEs. The default is 0.0001.
Returns
-------
    return_fit :
        intercept: intercept coefficient
        beta: list of beta coefficients
        bse: coefficient standard errors
        fitll: fit log-likelihood
"""
logit_model = smf.Logit(y, X)
if start_vec is None:
start_vec = np.zeros(X.shape[1])
beta_iterations = []
beta_iterations.append(start_vec)
for i in range(0, step_limit):
pi = logit_model.predict(beta_iterations[i])
W = np.diagflat(np.multiply(pi, 1-pi))
var_covar_mat = np.linalg.pinv(-logit_model.hessian(beta_iterations[i]))
# build hat matrix
rootW = np.sqrt(W)
H = np.dot(np.transpose(X), np.transpose(rootW))
H = np.matmul(var_covar_mat, H)
H = np.matmul(np.dot(rootW, X), H)
# penalised score
U = np.matmul(np.transpose(X), y - pi + np.multiply(np.diagonal(H), 0.5 - pi))
new_beta = beta_iterations[i] + np.matmul(var_covar_mat, U)
# step halving
j = 0
while firth_likelihood(new_beta, logit_model) > firth_likelihood(beta_iterations[i], logit_model):
new_beta = beta_iterations[i] + 0.5*(new_beta - beta_iterations[i])
j = j + 1
if (j > step_limit):
sys.stderr.write('Firth regression failed. Try increasing step limit.\n')
return None
beta_iterations.append(new_beta)
if i > 0 and (np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) < convergence_limit):
break
return_fit = None
if np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) >= convergence_limit:
sys.stderr.write('Firth regression failed to converge.\n')
else:
# Calculate stats
fitll = -firth_likelihood(beta_iterations[-1], logit_model)
intercept = beta_iterations[-1][0]
beta = beta_iterations[-1][1:].tolist()
bse = np.sqrt(np.diagonal(np.linalg.pinv(-logit_model.hessian(beta_iterations[-1]))))
return_fit = intercept, beta, bse, fitll
return return_fit
class Firth_LogisticRegression(LogisticRegression,
ClassifierMixin,
BaseEstimator):
"""
    This class rewrites the Firth regression originally implemented
    by John Lees (https://gist.github.com/johnlees/3e06380965f367e4894ea20fbae2b90d)
    as a class which can interact with the scikit-learn ecosystem.
To use the fit function make sure X has an intercept term (column of ones).
When using validation functions make sure to not include this 'dummy' column
of ones.
Please note: This estimator class does not currently pass the check_estimator test
in sklearn. This is because it cannot perform the multinomial classification task that
check_estimator attempts to pass it.
Parameters
----------
start_vec : ndarray of shape (n_features, 1). Default set to None in which
case the zero vector is used.
step_limit : int.
convergence_limit : float.
    multi_class : string. Default is set to 'ovr' to let this class integrate
        with the LogisticRegression parent class and pass the _check_multi_class
        function. A bit hacky but works.
    Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
beta_ : list of size n_features. This is used in the wald and likelihood
ratio test functions.
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
"""
def __init__(self, start_vec = None,
step_limit = 1000,
convergence_limit = 0.0001,
multi_class = 'ovr'):
self.start_vec = start_vec
self.step_limit = step_limit
self.convergence_limit = convergence_limit
self.multi_class = multi_class # multiclass should not be changed from 'ovr'
def fit(self, X = None, y = None):
"""
        Fits the model according to the given training data. This fit function
        has been changed to work in accordance with the sklearn estimator
        documentation. The major change is that, rather than returning specific
        variables, fit() returns an instance of itself, allowing other functions
        to be run from it.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. Make sure X has an intercept
term (column of ones).
y : array-like of shape (n_samples,)
Target vector relative to X. Please note this function only currently works for
binomial regression so output values of {0, 1} will work while
{0, 1, 2} will not.
Returns
-------
self
Fitted estimator.
self.fitll_ : fit log-likelihood
self.intercept_ : intercept
            self.coef_ : coefficients not including intercept (used in all other sklearn classes)
            self.beta_ : coefficients including intercept (used in Wald and LR tests)
self.bse_ : standard errors
"""
X, y = check_X_y(X, y)
self.n_features_in = X.shape[1]-1
self.classes_ = np.unique(y)
logit_model = smf.Logit(y, X)
        if self.start_vec is None:
            start_vec = np.zeros(X.shape[1])
        else:
            start_vec = self.start_vec
beta_iterations = []
beta_iterations.append(start_vec)
for i in range(0, self.step_limit):
pi = logit_model.predict(beta_iterations[i])
W = np.diagflat(np.multiply(pi, 1-pi))
var_covar_mat = np.linalg.pinv(-logit_model.hessian(beta_iterations[i]))
# build hat matrix
rootW = np.sqrt(W)
H = np.dot(np.transpose(X), np.transpose(rootW))
H = np.matmul(var_covar_mat, H)
H = np.matmul(np.dot(rootW, X), H)
# penalised score
U = np.matmul(np.transpose(X), y - pi + np.multiply(np.diagonal(H), 0.5 - pi))
new_beta = beta_iterations[i] + np.matmul(var_covar_mat, U)
# step halving
j = 0
while firth_likelihood(new_beta, logit_model) > firth_likelihood(beta_iterations[i], logit_model):
new_beta = beta_iterations[i] + 0.5*(new_beta - beta_iterations[i])
j = j + 1
if (j > self.step_limit):
sys.stderr.write('Firth regression failed. Try increasing step limit.\n')
return None
beta_iterations.append(new_beta)
if i > 0 and (np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) < self.convergence_limit):
break
if np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) >= self.convergence_limit:
sys.stderr.write('Firth regression failed to converge\n')
else:
# Calculate stats
self.fitll_ = -firth_likelihood(beta_iterations[-1], logit_model)
self.intercept_ = beta_iterations[-1][0]
self.coef_ = np.array(beta_iterations[-1][1:].tolist()).reshape((1, self.n_features_in)) #for other sklearn functions
self.beta_ = [self.intercept_] + beta_iterations[-1][1:].tolist() #used by Wald and LR test
self.bse_ = np.sqrt(np.diagonal(np.linalg.pinv(-logit_model.hessian(beta_iterations[-1]))))
return self
def test_wald(self):
'''
        Implementation of the Wald test
Returns
-------
waldp : list
            A list of p-values from the Wald test.
'''
check_is_fitted(self)
waldp = []
for beta_val, bse_val in zip(self.beta_, self.bse_):
waldp.append(2 * (1 - stats.norm.cdf(abs(beta_val/bse_val))))
return waldp
def test_likelihoodratio(self, X, y, start_vec = None, step_limit=1000, convergence_limit=0.0001):
"""
Implementation of the likelihood ratio test. An external function,
null_fit_firth(), is used to refit the null-estimator.
Parameters
----------
X : {array-like} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. Make sure to include the dummy column
of ones.
y : array-like of shape (n_samples,)
Target vector relative to X.
Returns
-------
lrtp : List
List of p-values from the likelihood ratio test.
"""
check_is_fitted(self)
X_np = X.values
lrtp = []
for beta_idx, (beta_val, bse_val) in enumerate(zip(self.beta_, self.bse_)):
null_X = np.delete(X_np, beta_idx, axis=1)
(null_intercept, null_beta, null_bse, null_fitll) = null_fit_firth(y, null_X, start_vec, step_limit, convergence_limit)
lrstat = -2*(null_fitll - self.fitll_)
lrt_pvalue = 1
            if lrstat > 0:  # lrstat <= 0 indicates the null fit failed to converge; keep p-value at 1
lrt_pvalue = stats.chi2.sf(lrstat, 1)
lrtp.append(lrt_pvalue)
return lrtp
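# Illustrative usage sketch added for clarity; it is not part of the original module.
# It fits the estimator above on a small synthetic, non-separable binary dataset.
# The data, seed and variable names below are assumptions made purely for demonstration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_raw = rng.normal(size=(50, 2))
    noise = rng.normal(scale=0.5, size=50)
    y_demo = (X_raw[:, 0] + 0.5 * X_raw[:, 1] + noise > 0).astype(int)
    X_demo = np.hstack([np.ones((50, 1)), X_raw])  # fit() expects an explicit intercept column
    clf = Firth_LogisticRegression()
    clf.fit(X_demo, y_demo)
    print("intercept:", clf.intercept_)
    print("coefficients:", clf.coef_)
    print("Wald p-values:", clf.test_wald())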
| 2.96875
| 3
|
tests/query/test_base_query.py
|
m-housh/flask_open_directory
| 0
|
12784679
|
import pytest
from flask_open_directory import BaseQuery, User
from flask_open_directory.query.base_query import _quote_if_str
import ldap3
@pytest.fixture
def base_query(open_directory):
return BaseQuery(open_directory=open_directory)
@pytest.fixture
def connection():
return ldap3.Connection(ldap3.Server('localhost'))
@pytest.fixture
def custom_model():
class CustomModel(object):
@classmethod
def ldap_attribute_map(cls): pass
@classmethod
def attribute_name_for(cls, key): pass
@classmethod
def query_cn(cls):
return 'cn=custom'
@classmethod
def from_entry(cls, entry): pass
@classmethod
def ldap_keys(cls): pass
return CustomModel
def test_quote_if_str():
assert _quote_if_str('a') == "'a'"
assert _quote_if_str(None) is None
obj = object()
assert _quote_if_str(obj) == obj
def test_BaseQuery_model(base_query):
base_query.model = User
assert base_query.model == User
base_query.model = User()
assert base_query.model == User
with pytest.raises(TypeError):
base_query.model = object
def test_BaseQuery_open_directory(base_query):
assert base_query.open_directory is not None
with pytest.raises(TypeError):
base_query.open_directory = object()
def test_BaseQuery_search_base(base_query, open_directory, custom_model):
assert base_query.search_base == open_directory.base_dn
base_query.model = User
assert base_query.search_base == 'cn=users,' + open_directory.base_dn
no_open_directory = BaseQuery()
assert no_open_directory.search_base is None
explicit = BaseQuery(search_base='dc=example,dc=com')
assert explicit.search_base == 'dc=example,dc=com'
custom = BaseQuery(open_directory=open_directory, model=custom_model)
assert custom.model is not None
assert custom.search_base == 'cn=custom,{}'.format(open_directory.base_dn)
def test_BaseQuery_search_filter(base_query):
assert base_query.search_filter == base_query._default_search_filter
q = BaseQuery(search_filter='(objectClass=apple-user)')
assert q.search_filter == '(objectClass=apple-user)'
def test_BaseQuery_ldap_attributes(base_query):
assert base_query.ldap_attributes is None
base_query.ldap_attributes = ['a', 'b', object()]
assert base_query.ldap_attributes == ('a', 'b')
base_query.ldap_attributes = 'c'
assert base_query.ldap_attributes == ('c', )
def test_BaseQuery_connection(base_query, connection):
assert base_query.connection is None
base_query.connection = connection
assert base_query.connection == connection
with pytest.raises(TypeError):
base_query.connection = object()
def test_BaseQuery_connection_ctx(base_query, connection):
with base_query.connection_ctx() as ctx:
assert isinstance(ctx, ldap3.Connection)
assert ctx != connection
base_query.connection = connection
with base_query.connection_ctx() as ctx:
assert ctx == connection
no_ctx = BaseQuery()
with no_ctx.connection_ctx() as ctx:
assert ctx is None
def test_BaseQuery_first(base_query, open_directory):
base_query.model = User
user = base_query.first()
assert isinstance(user, User)
entry = base_query.first(convert=False)
assert isinstance(entry, ldap3.Entry)
with base_query.connection_ctx() as conn:
user = base_query.first(conn)
assert isinstance(user, User)
invalid = BaseQuery()
item = invalid.first()
assert item is None
def test_BaseQuery_all(base_query):
base_query.model = User
users = base_query.all()
assert isinstance(users, tuple)
for u in users:
assert isinstance(u, User)
entries = base_query.all(convert=False)
for e in entries:
assert isinstance(e, ldap3.Entry)
with base_query.connection_ctx() as conn:
users = base_query.all(conn)
assert len(users) > 0
invalid = BaseQuery()
items = invalid.all()
assert isinstance(items, tuple)
assert len(items) == 0
def test_BaseQuery_repr(base_query, open_directory):
r = repr(base_query)
assert "model=None" in r
assert "search_base='{}'".format(open_directory.base_dn) in r
assert "search_filter='(objectClass=*)'" in r
assert "open_directory={}".format(repr(open_directory)) in r
assert "connection={}".format(base_query.connection) in r
assert "ldap_attributes=None" in r
assert "BaseQuery(" in r
assert ")" in r
| 2.109375
| 2
|
avgn/custom_parsing/mobysound_humpback.py
|
xingjeffrey/avgn_paper
| 0
|
12784680
|
from avgn.utils.audio import get_samplerate
from avgn.utils.json import NoIndent, NoIndentEncoder
import numpy as np
from avgn.utils.paths import DATA_DIR
import librosa
from datetime import datetime
import pandas as pd
import avgn
import json
DATASET_ID = 'mobysound_humpback_whale'
def load_labs(labels):
all_labels = []
for label_file in labels:
label_df = pd.DataFrame(
[line.split() for line in open(label_file, "r")],
columns=["start_time", "end_time", "low_freq", "high_freq", "SNR"],
)
label_df['file'] = label_file.stem
all_labels.append(label_df)
all_labels = pd.concat(all_labels).reset_index()
for lab in ['start_time', 'end_time', 'low_freq', 'high_freq', 'SNR']:
all_labels[lab] = all_labels[lab].values.astype('float32')
return all_labels
def find_longest_nonvocal_stretch(file_df, wav_duration):
""" An ugly function to find the longest stretch of nonvocal behavior in a syllable dataframe
"""
## find the longest stretch of non-vocal behavior in this wav
max_break = np.argmax(file_df.start_time.values[1:] - file_df.end_time.values[:-1])
noise_end_time = file_df.start_time.values[1:][max_break]
noise_start_time = file_df.end_time.values[:-1][max_break]
start_noise = file_df.start_time.values[0]
end_noise = wav_duration - file_df.end_time.values[-1]
noise_lens = np.array([noise_end_time - noise_start_time, start_noise, end_noise])
noise_start_ends = np.array(
[
[noise_start_time, noise_end_time],
[0, start_noise],
[file_df.end_time.values[-1], wav_duration],
]
)
noise_start, noise_end = noise_start_ends[np.argmax(noise_lens)]
return noise_start, noise_end
def generate_noise_and_json(bout_number, fn, DT_ID, wavloc, file_df):
# location of wav
#wavloc = np.array(wavs)[np.array([i.stem for i in wavs]) == fn][0]
# wav time
wavdate = datetime.strptime(fn, "%y%m%d-%H%M")
wav_date = wavdate.strftime("%Y-%m-%d_%H-%M-%S")
# wav samplerate and duration
sr = get_samplerate(wavloc.as_posix())
wav_duration = librosa.get_duration(filename=wavloc)
# df of syllables in file
#file_df = label_df[label_df.file == fn].sort_values(by="start_time")
## find the longest stretch of non-vocal behavior in this wav
noise_start, noise_end = find_longest_nonvocal_stretch(file_df, wav_duration)
bout_start_string = avgn.utils.general.seconds_to_str(noise_start)
# determine save locations
noise_out = (
DATA_DIR
/ "processed"
/ DATASET_ID
/ DT_ID
/ "NOISE"
/ (fn + "__" + bout_start_string + ".WAV")
)
json_out = DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (fn + ".JSON")
# wav general information
json_dict = {}
json_dict["bout_number"] = bout_number
json_dict["species"] = "Megaptera novaengliae"
json_dict["common_name"] = "Humpback whale"
json_dict["datetime"] = wav_date
json_dict["samplerate_hz"] = sr
json_dict["length_s"] = wav_duration
json_dict["wav_loc"] = wavloc.as_posix()
json_dict["noise_loc"] = noise_out.as_posix()
json_dict["indvs"] = {
"UNK": {
"syllables": {
"start_times": NoIndent(
list(file_df.start_time.values.astype("float"))
),
"end_times": NoIndent(list(file_df.end_time.astype("float"))),
"high_freq": NoIndent(list(file_df.high_freq.astype("float"))),
"low_freq": NoIndent(list(file_df.low_freq.astype("float"))),
"SNR": NoIndent(list(file_df.SNR.astype("float"))),
}
}
}
json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
# save wav file
noise_wav, sr = librosa.load(
wavloc, sr=None, mono=True, offset=noise_start, duration=noise_end - noise_start
)
avgn.utils.paths.ensure_dir(noise_out)
librosa.output.write_wav(noise_out, y=noise_wav, sr=sr, norm=True)
# save json
avgn.utils.paths.ensure_dir(json_out.as_posix())
print(json_txt, file=open(json_out.as_posix(), "w"))
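# Illustrative sketch added for clarity; it is not part of the original module.
# It exercises find_longest_nonvocal_stretch() on a toy syllable table whose values are
# made up for demonstration; only the column names mirror those produced by load_labs().
if __name__ == "__main__":
    toy_df = pd.DataFrame({"start_time": [1.0, 5.0, 20.0], "end_time": [2.0, 6.0, 21.0]})
    noise_start, noise_end = find_longest_nonvocal_stretch(toy_df, wav_duration=30.0)
    print("longest non-vocal stretch:", noise_start, "to", noise_end)  # 6.0 to 20.0 for this toy table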
| 2.53125
| 3
|
federatedml/ftl/test/functional_test/hetero_ftl_plain_guest_test.py
|
chenlongzhen/FATE-0.1
| 1
|
12784681
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
from federatedml.ftl.plain_ftl import PlainFTLHostModel
from federatedml.ftl.hetero_ftl.hetero_ftl_guest import HeteroPlainFTLGuest, HeteroFTLGuest
from federatedml.ftl.plain_ftl import PlainFTLGuestModel
from federatedml.feature.instance import Instance
from federatedml.ftl.common.data_util import create_table
from federatedml.ftl.test.fake_models import FakeAutoencoder, FakeDiffConverge
from federatedml.param.param import FTLModelParam
from federatedml.util.transfer_variable import HeteroFTLTransferVariable
from arch.api.eggroll import init
class TestHeteroFTLGuest(HeteroPlainFTLGuest):
def __init__(self, guest, model_param, transfer_variable):
super(TestHeteroFTLGuest, self).__init__(guest, model_param, transfer_variable)
U_B = np.array([[4, 2, 3, 1, 2],
[6, 5, 1, 4, 5],
[7, 4, 1, 9, 10],
[6, 5, 1, 4, 5]])
overlap_indexes = [1, 2]
Wh = np.ones((5, U_B.shape[1]))
bh = np.zeros(U_B.shape[1])
autoencoderB = FakeAutoencoder(1)
autoencoderB.build(U_B.shape[1], Wh, bh)
self.host = PlainFTLHostModel(autoencoderB, self.model_param)
self.host.set_batch(U_B, overlap_indexes)
def _do_remote(self, value=None, name=None, tag=None, role=None, idx=None):
print("@_do_remote", value, name, tag, role, idx)
def _do_get(self, name=None, tag=None, idx=None):
print("@_do_get", name, tag, idx)
if tag == "HeteroFTLTransferVariable.host_sample_indexes.0":
return [np.array([1, 2, 4, 5])]
elif tag == "HeteroFTLTransferVariable.host_component_list.0.0":
return self.host.send_components()
return None
class TestCreateGuestHostEggrollTable(unittest.TestCase):
def test_hetero_plain_guest_prepare_table(self):
U_A = np.array([[1, 2, 3, 4, 5],
[4, 5, 6, 7, 8],
[7, 8, 9, 10, 11],
[4, 5, 6, 7, 8]])
y = np.array([[1], [-1], [1], [-1]])
Wh = np.ones((5, U_A.shape[1]))
bh = np.zeros(U_A.shape[1])
model_param = FTLModelParam(alpha=1, max_iteration=1)
autoencoderA = FakeAutoencoder(0)
autoencoderA.build(U_A.shape[1], Wh, bh)
guest = PlainFTLGuestModel(autoencoderA, model_param)
converge_func = FakeDiffConverge(None)
ftl_guest = TestHeteroFTLGuest(guest, model_param, HeteroFTLTransferVariable())
ftl_guest.set_converge_function(converge_func)
guest_sample_indexes = np.array([0, 1, 2, 3])
guest_x_dict = {}
guest_label_dict = {}
instance_dict = {}
instance_list = []
np.random.seed(100)
for i, feature, label, in zip(guest_sample_indexes, U_A, y):
instance = Instance(inst_id=i, features=feature, label=label[0])
guest_x_dict[i] = feature
guest_label_dict[i] = label[0]
instance_dict[i] = instance
instance_list.append(instance)
guest_x = create_table(instance_list, indexes=guest_sample_indexes)
guest_x, overlap_indexes, non_overlap_indexes, guest_y = ftl_guest.prepare_data(guest_x)
print("guest_x", guest_x)
print("overlap_indexes", overlap_indexes)
print("non_overlap_indexes", non_overlap_indexes)
print("guest_y", guest_y)
if __name__ == '__main__':
init()
unittest.main()
| 1.851563
| 2
|
evap/evaluation/migrations/0125_use_lists_for_ordering.py
|
felixrindt/EvaP
| 29
|
12784682
|
<gh_stars>10-100
# Generated by Django 3.1.8 on 2021-07-24 17:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0124_add_html_content'),
]
operations = [
migrations.AlterModelOptions(
name='logentry',
options={'ordering': ['-datetime', '-id']},
),
migrations.AlterModelOptions(
name='questionnaire',
options={'ordering': ['type', 'order', 'pk'], 'verbose_name': 'questionnaire', 'verbose_name_plural': 'questionnaires'},
),
migrations.AlterModelOptions(
name='semester',
options={'ordering': ['-created_at', 'pk'], 'verbose_name': 'semester', 'verbose_name_plural': 'semesters'},
),
migrations.AlterModelOptions(
name='userprofile',
options={'ordering': ['last_name', 'first_name', 'email'], 'verbose_name': 'user', 'verbose_name_plural': 'users'},
),
]
| 1.710938
| 2
|
server/mlabns/db/nagios_config_wrapper.py
|
mtlynch/m-lab.ns
| 10
|
12784683
|
import logging
from google.appengine.api import memcache
from mlabns.db import model
from mlabns.util import constants
def get_nagios_config():
"""Retrieves nagios config info. First checks memcache, then datastore.
Returns:
Nagios model instance
"""
nagios = memcache.get(constants.DEFAULT_NAGIOS_ENTRY)
if not nagios:
nagios = model.Nagios.get_by_key_name(constants.DEFAULT_NAGIOS_ENTRY)
if nagios:
memcache.set(constants.DEFAULT_NAGIOS_ENTRY, nagios)
else:
logging.error('Datastore does not have the Nagios credentials.')
return nagios
| 2.359375
| 2
|
l5kit/l5kit/tests/planning/common_test.py
|
cdicle-motional/l5kit
| 1
|
12784684
|
<reponame>cdicle-motional/l5kit
import torch
def mock_vectorizer_data(batch_size: int, num_steps: int, num_history: int, num_agents: int, num_lanes: int,
num_crosswalks: int, num_points_per_element: int, TYPE_MAX: int) -> dict:
return {
"extent": torch.rand(batch_size, 3),
"type": torch.randint(0, TYPE_MAX, (batch_size,)),
"world_to_image": torch.rand(batch_size, 3, 3),
"raster_from_agent": torch.rand(batch_size, 3, 3),
"raster_from_world": torch.rand(batch_size, 3, 3),
"agent_from_world": torch.rand(batch_size, 3, 3),
"world_from_agent": torch.rand(batch_size, 3, 3),
"target_positions": torch.rand(batch_size, num_steps, 2),
"target_yaws": torch.rand(batch_size, num_steps, 1),
"target_extents": torch.rand(batch_size, num_steps, 2),
"target_availabilities": torch.rand(batch_size, num_steps) > 0.5,
"history_positions": torch.rand(batch_size, num_history + 1, 2),
"history_yaws": torch.rand(batch_size, num_history + 1, 1),
"history_extents": torch.rand(batch_size, num_history + 1, 2),
"history_availabilities": torch.rand(batch_size, num_history + 1) > 0.5,
"centroid": torch.rand(batch_size, 2),
"yaw": torch.rand(batch_size),
"speed": torch.rand(batch_size),
"all_other_agents_history_positions": torch.rand(batch_size, num_agents, num_history + 1, 2),
"all_other_agents_history_yaws": torch.rand(batch_size, num_agents, num_history + 1, 1),
"all_other_agents_history_extents": torch.rand(batch_size, num_agents, num_history + 1, 2),
"all_other_agents_history_availability": torch.rand(batch_size, num_agents, num_history + 1) > 0.5,
"all_other_agents_future_positions": torch.rand(batch_size, num_agents, num_steps, 2),
"all_other_agents_future_yaws": torch.rand(batch_size, num_agents, num_steps, 1),
"all_other_agents_future_availability": torch.rand(batch_size, num_agents, num_steps) > 0.5,
"all_other_agents_types": torch.randint(0, TYPE_MAX, (batch_size, num_agents)),
"agent_trajectory_polyline": torch.rand(batch_size, num_history + 1, 3),
"agent_polyline_availability": torch.rand(batch_size, num_history + 1) > 0.5,
"other_agents_polyline": torch.rand(batch_size, num_agents, num_history + 1, 3),
"other_agents_polyline_availability": torch.rand(batch_size, num_agents, num_history + 1) > 0.5,
"lanes": torch.rand(batch_size, num_lanes, num_points_per_element, 3),
"lanes_availability": torch.rand(batch_size, num_lanes, num_points_per_element) > 0.5,
"lanes_mid": torch.rand(batch_size, num_lanes, num_points_per_element, 3),
"lanes_mid_availabilities": torch.rand(batch_size, num_lanes, num_points_per_element) > 0.5,
"crosswalks": torch.rand(batch_size, num_crosswalks, num_points_per_element, 3),
"crosswalks_availabilities": torch.rand(batch_size, num_crosswalks, num_points_per_element) > 0.5,
"scene_index": torch.rand(batch_size),
"host_id": torch.rand(batch_size, 70),
"timestamp": torch.rand(batch_size),
"track_id": torch.rand(batch_size),
}
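# Illustrative sketch added for clarity; it is not part of the original test module.
# It builds one fake batch with assumed sizes and sanity-checks a few expected tensor shapes.
if __name__ == "__main__":
    batch = mock_vectorizer_data(batch_size=2, num_steps=12, num_history=3, num_agents=5,
                                 num_lanes=4, num_crosswalks=2, num_points_per_element=10,
                                 TYPE_MAX=17)
    assert batch["target_positions"].shape == (2, 12, 2)
    assert batch["lanes"].shape == (2, 4, 10, 3)
    assert batch["history_yaws"].shape == (2, 3 + 1, 1)  # num_history + 1 time steps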
| 2.125
| 2
|
gs15_py/signature_RSA.py
|
Jajajzhh/Blockchain_EncryptionKasumi
| 0
|
12784685
|
<gh_stars>0
from Crypto.PublicKey import RSA
import errno
from Crypto.Hash import SHA256
from hashlib import sha512
from os import mkdir
from os import _exists
#RSA signature
class Sign_hash:
def __init__(self, usernum):
#Using Crypto Library to generate, export and import key pair instead of using the generated key in #3
dirpriv = 'key/' + usernum +'/privkey.pem'
dirpub = 'key/' + usernum +'/pubkey.pem'
try:
with open(dirpriv, 'r') as f:
self.keyPair = RSA.importKey(f.read())
except IOError as e:
if e.errno != errno.ENOENT:
raise
# No private key, generate a new one. This can take a few seconds.
            if not _exists('key/' + usernum + '/'):
mkdir('key/' + usernum + '/')
self.keyPair = RSA.generate(bits=1024)
with open(dirpriv, 'wb') as f:
f.write(self.keyPair.exportKey('PEM'))
with open(dirpub, 'wb') as f:
f.write(self.keyPair.publickey().exportKey('PEM'))
def sign(self, message):
#keyPair = RSA.generate(bits=1024)
print(f"Public key: (n={hex(self.keyPair.n)}, e={hex(self.keyPair.e)})")
print(f"Private key: (n={hex(self.keyPair.n)}, d={hex(self.keyPair.d)})")
# RSA sign the message
msg = message
from hashlib import sha512
hash = int.from_bytes(sha512(msg).digest(), byteorder='big')
signature = pow(hash, self.keyPair.d, self.keyPair.n)
print("Signature:", hex(signature))
return signature
def verify(self, message, signature):
#message in bytes
msg = message
hash = int.from_bytes(sha512(msg).digest(), byteorder='big')
hashFromSignature = pow(signature, self.keyPair.e, self.keyPair.n)
print('Hash1:', hex(hash))
print('Hash2:', hex(hashFromSignature))
print("Signature valid:", hash == hashFromSignature)
        return hash == hashFromSignature
##################
####################
if __name__ == '__main__':
signature = Sign_hash('1')
signed = signature.sign(b'A message for sign')
verify = signature.verify(b'A message for sign',signed)
print(hex(signed))
#keyPair = RSA.generate(bits=1024)
#print(f"Public key: (n={hex(keyPair.n)}, e={hex(keyPair.e)})")
#print(f"Private key: (n={hex(keyPair.n)}, d={hex(keyPair.d)})")
# RSA sign the message
#msg = b'A message for signing'
#from hashlib import sha512
#hash = int.from_bytes(sha512(msg).digest(), byteorder='big')
#signature = pow(hash, keyPair.d, keyPair.n)
#print("Signature:", hex(signature))
# RSA verify signature
#msg = b'A message for signing'
#hash = int.from_bytes(sha512(msg).digest(), byteorder='big')
##hashFromSignature = pow(signature, keyPair.e, keyPair.n)
#print("Signature valid:", hash == hashFromSignature)
# RSA verify signature (tampered msg)
#msg = b'A message for signing (tampered)'
#hash = int.from_bytes(sha512(msg).digest(), byteorder='big')
#hashFromSignature = pow(signature, keyPair.e, keyPair.n)
#print("Signature valid (tampered):", hash == hashFromSignature)
| 2.828125
| 3
|
game/entities/voxel.py
|
eivaremir/TheLearningDoors
| 2
|
12784686
|
from ursina import *
from ..textures import textures
class Voxel(Button):
def __init__(self, position = (0,0,0), texture = textures["grass"]):
super().__init__(
parent = scene,
position = position,
model = 'assets/block',
origin_y = .5,
texture =texture,
color = color.color(0,0,random.uniform(.9,1)),
scale = .5
)
| 2.46875
| 2
|
d17.py
|
theShmoo/adventofcode2019
| 0
|
12784687
|
import intcode
import utils
class Droid(object):
def __init__(self):
super(Droid, self).__init__()
self.grid = []
self.current_line = []
self.width = 0
self.height = 0
self.neighborhoods = ((1, 0), (0, 1), (-1, 0), (0, -1))
def printGrid(self):
s = ""
for line in self.grid:
s += str(''.join(line)) + "\n"
print(s)
def getNumNeighbors(self, x, y):
num = 0
if self.grid[y][x] != '#':
return 0
for n in self.neighborhoods:
check_x = x + n[0]
check_y = y + n[1]
if check_x > 0 and check_x < self.width:
if check_y > 0 and check_y < self.height:
if self.grid[check_y][check_x] == '#':
num += 1
return num
def setOutput(self, v):
if v > 127:
print(f"{v} dust collected")
if v == 10:
self.grid.append(self.current_line)
if len(self.current_line) > 0:
self.width = len(self.current_line)
self.height += 1
self.current_line = []
else:
self.current_line.append(chr(v))
def getInput(self):
return 0
data = [int(x) for x in utils.get_input(2019, 17).split(',')]
data[0] = 2
droid = Droid()
pc = intcode.Intcode(data, droid.getInput, droid.setOutput)
pc.run()
droid.printGrid()
intersections = []
for x in range(droid.width):
for y in range(droid.height):
if droid.getNumNeighbors(x, y) > 2:
intersections.append((x, y))
print(intersections)
s = [i[0] * i[1] for i in intersections]
print(sum(s))
| 3.09375
| 3
|
terrascript/kubernetes/__init__.py
|
amlodzianowski/python-terrascript
| 0
|
12784688
|
<reponame>amlodzianowski/python-terrascript<filename>terrascript/kubernetes/__init__.py
# terrascript/kubernetes/__init__.py
import terrascript
class kubernetes(terrascript.Provider):
pass
| 1.296875
| 1
|
vote.py
|
evan82/dash-voter
| 1
|
12784689
|
<gh_stars>1-10
#!/usr/bin/env python
"""
Dash-Voter
----
Mass vote on a proposal semi-anonymously
"""
import subprocess
import argparse
import json
from config import dashd_path, datadir, masternodes
from random import randint, shuffle
from time import sleep
from sys import exit, argv
print dashd_path, datadir
if len(argv) < 3:
print "proposal yes|no required"
exit()
#vote yes somewhere between 70 and 100% of the time. This will average 85% the direction you want.
p = randint(700, 1000)
#sleep multiple, sleep between 10 and 30 seconds per proposal
s = randint(1, 3)
# Called when a client sends a message
def vote(proposal, yes_no, masternode):
"""
./dash-cli --datadir=/Users/evan/.dash mnbudget vote-alias 1e477007d555f9f8919ecbe3b4c457b6f269184924771c0117fbb48751bf23d6 no flare_024
"""
r = randint(0, 1000)
a, b = "yes", "no"
if yes_no == "no": a, b = b, a
a = yes_no
b = yes_no
print proposal, (a if r<p else b)
print dashd_path + " --datadir=" + datadir + " mnbudget vote-alias " + proposal + " " + (a if r<p else b) + " " + masternode
subprocess.call(dashd_path + " --datadir=" + datadir + " mnbudget vote-alias " + proposal + " " + (a if r<p else b) + " " + masternode, shell=True)
#vote anonymously
shuffle(masternodes)
for masternode in masternodes:
vote(argv[1], argv[2], masternode)
sleep(randint(1, 10)*s)
| 2.5625
| 3
|
mllogger.py
|
ok1zjf/LBAE
| 15
|
12784690
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__= '1.8'
__status__ = "Research"
__date__ = "2/1/2020"
__license__= "MIT License"
import os
import sys
import numpy as np
import time
import glob
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
import imageio
import skimage.transform
from parameters import Params
from sys_utils import tohms
from image_utils import save_image
#========================================================================================
class TeePipe(object):
#source: https://stackoverflow.com/q/616645
def __init__(self, filename="Red.Wood", mode="a", buff=0):
self.stdout = sys.stdout
# self.file = open(filename, mode, buff)
self.file = open(filename, mode)
sys.stdout = self
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, message):
self.stdout.write(message)
self.file.write(message)
def flush(self):
self.stdout.flush()
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
if self.stdout != None:
sys.stdout = self.stdout
self.stdout = None
if self.file != None:
self.file.close()
self.file = None
# ========================================================================================
class MLLogger():
def __init__(self, hps):
self.hps = hps
self.logf = None
self.im_size = hps.img_size
self.epoch_num = hps.epochs_max # Total number of epochs
self.iter_num = {} # Iterations per epoch
# self.iter_epoch = 0
self.batch_size = hps.batch_size
self.data = []
self.dkeys_id=['ts', 'epoch', 'iter', 'stage'] # Key lookup by ID
self.dkeys = {} # ID lookup by key
self.m_first = {} # stage_name -> position of first record
self.m_last = {} # stage_name -> position of last record
        self.start_time = None # global start timestamp
self.iter_global = 0 # Total iteration since the begining of the training
self.print_header = True
self.data_format_changed = True
self.last_report_pos={} # stage_name -> Position in self.data of the last report
# Tensorboard
self.writer = None
self.log_id = None
return
def load_config(self):
logdir = self.exp_path+'/log/'
cfg_filename = os.path.join(logdir, 'cfg-'+str(self.log_id-1)+'-*.json')
cfg_files = glob.glob(cfg_filename)
cfg_files.sort(reverse=True)
if len(cfg_files) == 0 or not os.path.isfile(cfg_files[0]):
return None
p = Params()
if not p.load(cfg_files[0]):
return None
return p
def save_config(self, epoch=None):
logdir = self.exp_path+'/log/'
cfg_filename = os.path.join(logdir, 'cfg-'+str(self.log_id)+'-'+str(epoch)+'.json')
self.hps.save(cfg_filename)
return
def open_experiment(self, experiment_name='m1'):
"""
Creates sub-directory structure
- Creates new log file
-
"""
self.experiment_name = experiment_name
self.exp_path = os.path.join(experiment_name)
os.makedirs(self.exp_path, exist_ok=True)
if not self.hps.eval and self.experiment_name != '.':
# Backup source code & configs
os.system('cp *.py ' + self.exp_path+ '/')
logdir = self.exp_path+'/tblog/'
os.makedirs(logdir, exist_ok=True)
self.writer = SummaryWriter(logdir)
logdir = self.exp_path+'/log/'
os.makedirs(logdir, exist_ok=True)
self.model_path =os.path.join(self.exp_path, 'models')
os.makedirs(self.model_path, exist_ok=True)
# Create new log files
prefix = 'eval-' if self.hps.eval else 'train-'
log_id = 0
while True:
log_filename = prefix+'log-'+str(log_id)+'.txt'
log_path = os.path.join(logdir, log_filename)
if not os.path.isfile(log_path):
break
log_id += 1
if self.hps.log_stdout:
stdout_log_filename = prefix+'stdout-'+str(log_id)+'.txt'
stdout_log_filename = os.path.join(logdir, stdout_log_filename)
self.stdout_logger = TeePipe(stdout_log_filename)
print("Creating new log file:",log_path)
self.logf = open(log_path, 'wt')
self.log_id = log_id
return
def set_samples_num(self, stage_name, samples_num):
self.iter_num[stage_name] = self.hps.batch_size * int(np.floor(samples_num / self.hps.batch_size))
def start_epoch(self, stage_name, epoch):
"""
Creates a null record with a current timestamp
"""
if self.start_time is None:
self.start_time = time.time()
# Stored the position of the first epoch record
# There can be one start per stage
self.m_first[stage_name] = len(self.data)
self.m_last[stage_name] = len(self.data)
self.last_report_pos[stage_name] = len(self.data)
rec = [0]*len(self.dkeys_id)
rec[0] = time.time() - self.start_time
rec[1] = epoch
rec[2] = self.iter_global
rec[3] = stage_name
self.data.append(rec)
self.print_header = True
return
def log_loss(self, epoch, iter, losses, stage_name):
"""
Args:
epoch (int): current epoch starting from 0
iter (int): sample iteration within the epoch
stage_name (str): 'train', 'val', 'test'
losses (dict): dictionary of loss_name->loss_val
"""
if iter is not None:
self.iter_global = iter
# Collect new value keys
for key, val in losses.items():
if key not in self.dkeys_id:
# Add new key=val
self.dkeys_id.append(key)
self.data_format_changed = True
# Update the key-index lookup table
if self.data_format_changed:
self.dkeys = {}
for i, key in enumerate(self.dkeys_id):
self.dkeys[key] = i
# Store new data
rec = [0]*len(self.dkeys_id)
rec[0] = time.time() - self.start_time
rec[1] = epoch
rec[2] = self.iter_global # Global iteration
rec[3] = stage_name
        # Generate tensorboard record
tboard_losses = {}
for key, val in losses.items():
id = self.dkeys[key]
rec[id] = val
key = stage_name+'_'+key
tboard_losses[key] = val
self.data.append(rec)
# Append log to the file
if self.logf is not None:
if self.data_format_changed:
# Insert data format header
header_str = [str(v) for v in self.dkeys_id]
self.logf.write('\n'+' '.join(header_str)+'\n')
line = [str(v) for v in rec]
self.logf.write(' '.join(line)+'\n')
self.logf.flush()
# Update tensorboard
# {'d_loss': d_loss, 'grad_penalty': grad_penalty}
self.writer.add_scalars('losses', tboard_losses, self.iter_global)
self.m_last[stage_name] = len(self.data)-1
self.data_format_changed= False
return
def print_table(self, name, data, header=None):
"""
max_iter = self.iter_num*self.epoch_num
epoch_str = str(rec[-1][1])+" ("+str(int(done))+"%)"
header = ['T', 'e('+str(self.epoch_num)+')', 'iter('+str(max_iter//1000)+'k)', 'batch (ms)']
data = [[rec[-1][3], epoch_str, str(last_iter), batch_took_avg*1000.0]]
"""
# Print table
table_width = 0
if header is not None:
self.col_width = []
line = ""
for i, hv in enumerate(header):
line += '{:<{c0}}'.format(hv, c0=len(hv))
self.col_width.append(len(hv))
print('')
if name is not None:
print(name)
print(line)
head_len = len(line)
print('-'*head_len )
table_width = head_len
# Print data
for r, rv in enumerate(data):
line = ""
for c, cv in enumerate(rv):
line += '{:<{c0}}'.format(cv, c0=self.col_width[c])
print(line, flush=True)
if len(line) > table_width:
table_width = len(line)
return table_width
def get_avg(self, begin, end, cols=[]):
rec = self.data[begin:end]
# Get the max number of stored value in this run
mx = 0
for val in rec:
if len(val)>mx: mx = len(val)
# Create numpy vector for the averages
        rec = np.asarray([x+[0]*(mx-len(x)) for x in rec], dtype=object)
# Get only the records with loss values
rec_avg = rec.copy()
rec_avg[:,:4] = 0
        rec_avg = rec_avg.astype(float)
rec_avg = rec_avg.mean(0)
return rec_avg
def print_batch_stat(self, stage_name='t'):
last_epoch_pos = self.m_last.get(stage_name, 0)
last_report_pos = self.last_report_pos.get(stage_name, 0)
if last_report_pos == last_epoch_pos:
# Already reported
return
# Get averages since the last report
rec_avg = self.get_avg(last_report_pos+1, last_epoch_pos+1)
rec_last = self.data[last_epoch_pos]
time_now, last_epoch, last_iter, last_stage_name = rec_last[:4]
iter = last_iter - self.data[self.m_first[stage_name]][2]
done = round(100*iter/self.iter_num.get(stage_name), 2) if stage_name in self.iter_num else 0
batch_took_avg = float(time_now) - float(self.data[last_report_pos+1][0])
if self.batch_size is not None:
batch_took_avg /= self.batch_size
self.last_report_pos[stage_name] = last_epoch_pos
# Print table
header = None
if self.print_header:
max_iter = self.iter_num.get(stage_name, 0)*self.epoch_num
header = ['Time ',
'E('+str(self.epoch_num)+') ',
'Iter('+str(max_iter//1000)+'k) ',
'Batch (ms) ']
for key in self.dkeys_id[4:]:
header.append(key+' '*(15-len(key)))
self.print_header=False
data = [tohms(time_now), str(last_epoch)+' ('+str(done)+'%)', str(last_iter), round(batch_took_avg*1000.0, 3)]
for key in self.dkeys_id[4:]:
data.append(round(rec_avg[self.dkeys[key]], 4))
table_width = self.print_table(last_stage_name, [data], header)
return
def print_epoch_stat(self, stage_name, **kwargs):
"""
Batch train log format
Epoch train log format
Test log format
"""
first_epoch_pos = self.m_first.get(stage_name, 0)
last_epoch_pos = self.m_last.get(stage_name, 0)
rec_avg = self.get_avg(first_epoch_pos+1, last_epoch_pos+1)
rec_last = self.data[last_epoch_pos]
time_now, last_epoch, last_iter, last_stage_name = rec_last[:4]
epoch_took = tohms(time_now - self.data[first_epoch_pos][0])
# Print table
        max_iter = self.iter_num.get(stage_name, 0)*self.epoch_num  # iter_num is a dict keyed by stage
header = ['Time ',
'E('+str(self.epoch_num)+') ',
'Iter('+str(max_iter//1000)+'k) ',
'Epoch (H:M:S) ']
for key in self.dkeys_id[4:]:
header.append(key)
data = [tohms(time_now), str(last_epoch), str(last_iter), epoch_took]
for key in self.dkeys_id[4:]:
data.append(round(rec_avg[self.dkeys[key]], 4))
table_width = self.print_table(last_stage_name, [data], header)
print("-"*table_width)
return
def log_images(self, x, epoch, name_suffix, name, channels=3, nrow=8):
img_path = os.path.join(self.experiment_name, name)
os.makedirs(img_path, exist_ok=True)
img_size = self.im_size
if img_size < 1:
img_size2 = x.nelement() / x.size(0) / channels
img_size = int(np.sqrt(img_size2))
x = x.view(-1, channels, img_size, img_size) # * 0.5 + 0.5
grid = save_image(x,
img_path+'/sample_' + str(epoch) + "_" + str(name_suffix) + '.jpg',
nrow = nrow, normalize=True, scale_each=True)
img_grid = make_grid(x, normalize=True, scale_each=True, nrow=nrow)
self.writer.add_image(name, img_grid , self.iter_global)
return
def _merge(self, images, size, labels=[], strike=[]):
h, w = images.shape[1], images.shape[2]
resize_factor=1.0
h_ = int(h * resize_factor)
w_ = int(w * resize_factor)
img = np.zeros((h_ * size[0], w_ * size[1]))
for idx, image in enumerate(images):
i = int(idx % size[1])
j = int(idx / size[1])
image_ = skimage.transform.resize(image, output_shape=(w_, h_))
img[j * h_:j * h_ + h_, i * w_:i * w_ + w_] = image_
if len(labels) == len(images):
if labels[idx] == 1:
img[j * h_:j * h_ + 2, i * w_:i * w_ + w_-4] = np.ones((2, w_-4))
if len(strike) == len(images):
if strike[idx] == 1:
img[j * h_+h_//2:j * h_ + h_//2+1, i * w_:i * w_ + w_-4] = np.ones((1, w_-4))
return img
def save_images(self, images, img_size=(28,28), labels=[], strike=[], name='result.jpg'):
n_img_y = 16
n_img_x = 32
images = images.reshape(n_img_x * n_img_y, img_size[0], img_size[1])
imageio.imsave(name, self._merge(images, [n_img_y, n_img_x], labels, strike))
#=================================================================================
if __name__ == "__main__":
print("NOT AN EXECUTABLE!")
| 2.171875
| 2
|
core/app/utils/tests/test_date.py
|
EmixMaxime/mx-home-security
| 2
|
12784691
|
from datetime import timedelta
from unittest import TestCase
from django.utils import timezone
from freezegun import freeze_time
from utils.date import is_time_newer_than
class UtilsDateTestCase(TestCase):
def setUp(self) -> None:
pass
@freeze_time("2020-12-21 03:21:00")
def test_is_less_old_than(self):
t = timezone.now() - timedelta(seconds=60)
self.assertFalse(is_time_newer_than(t, 50))
t = timezone.now() - timedelta(seconds=40)
self.assertTrue(is_time_newer_than(t, 50))
| 2.828125
| 3
|
Frozen/script.py
|
killua4564/2021-Crypto-CTF
| 0
|
12784692
|
from pwn import remote
from gmpy2 import next_prime
from Crypto.Util.number import *
conn = remote("03.cr.yp.toc.tf", "25010")
def get_params():
conn.recvuntil("[Q]uit")
conn.sendline("s")
conn.recvuntil(" = ")
p = int(conn.recvuntil("\n").decode())
conn.recvuntil(" = ")
r = int(conn.recvuntil("\n").decode())
return (p, r)
def get_public():
conn.recvuntil("[Q]uit")
conn.sendline("p")
conn.recvuntil("[")
return list(map(int, conn.recvuntil("]").decode().strip("]").split(", ")))
def get_example():
conn.recvuntil("[Q]uit")
conn.sendline("e")
conn.recvuntil("\"")
randstr = conn.recvuntil("\"").decode().strip("\"")
conn.recvuntil("[")
return randstr, list(map(int, conn.recvuntil("]").decode().strip("]").split(", ")))
p, r = get_params()
pubkey = get_public()
randstr, sign = get_example()
M = [
bytes_to_long(randstr[4*i:4*(i+1)].encode())
for i in range(len(randstr) // 4)
]
q = int(next_prime(max(M)))
privkey = [sig * inverse(m, q) % q for m, sig in zip(M, sign)]
inv_r = inverse(r, p)
s_list = (
(pubkey[0] + privkey[0]) * inv_r % p,
(pubkey[0] + privkey[0] + q) * inv_r % p
)
key = True
for idx in range(1, len(privkey)):
ts = (pubkey[idx] + privkey[idx]) * pow(inv_r, idx+1, p) % p
if ts not in s_list:
privkey[idx] += q
elif key:
key = False
if ts == s_list[1]:
privkey[0] += q
conn.recvuntil("[Q]uit")
conn.sendline("f")
conn.recvuntil(": ")
randmsg = conn.recvuntil("\n").decode().strip("\n")
MM = [
bytes_to_long(randmsg[4*i:4*(i+1)].encode())
for i in range(len(randmsg) // 4)
]
qq = int(next_prime(max(MM)))
conn.sendline(",".join(
map(str, (mm * priv % qq for mm, priv in zip(MM, privkey)))
))
conn.recvuntil("'")
print(conn.recvuntil("'").decode().strip("'"))
| 2.25
| 2
|
tools/ops.py
|
imatge-upc/unsupervised-2017-cvprw
| 25
|
12784693
|
<gh_stars>10-100
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
def _variable_with_weight_decay(name, shape, wd=1e-3):
with tf.device("/cpu:0"): # store all weights in CPU to optimize weights sharing among GPUs
var = tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def max_pool3d(input_, k, name='max_pool3d'):
return tf.nn.max_pool3d(input_, ksize=[1, k, 2, 2, 1], strides=[1, k, 2, 2, 1], padding='SAME', name=name)
def conv2d(input_, output_dim, k_h=3, k_w=3, d_h=1, d_w=1, padding='SAME', name="conv2d"):
with tf.variable_scope(name):
w = _variable_with_weight_decay('w', [k_h, k_w, input_.get_shape()[-1], output_dim])
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
b = _variable_with_weight_decay('b', [output_dim])
return tf.nn.bias_add(conv, b)
def cross_conv2d(input_, kernel, d_h=1, d_w=1, padding='SAME', name="cross_conv2d"):
with tf.variable_scope(name):
output_dim = kernel.get_shape()[4]
batch_size = input_.get_shape().as_list()[0]
b = _variable_with_weight_decay('b', [output_dim])
output = []
input_list = tf.unstack(input_)
kernel_list = tf.unstack(kernel)
for i in range(batch_size):
conv = tf.nn.conv2d(tf.expand_dims(input_list[i],0), kernel_list[i], strides=[1, d_h, d_w, 1], padding=padding)
conv = tf.nn.bias_add(conv, b)
output.append(conv)
return tf.concat(output, 0)
def conv3d(input_, output_dim, k_t=3, k_h=3, k_w=3, d_t=1, d_h=1, d_w=1, padding='SAME', name="conv3d"):
with tf.variable_scope(name):
w = _variable_with_weight_decay('w', [k_t, k_h, k_w, input_.get_shape()[-1], output_dim])
conv = tf.nn.conv3d(input_, w, strides=[1, d_t, d_h, d_w, 1], padding=padding)
b = _variable_with_weight_decay('b', [output_dim])
return tf.nn.bias_add(conv, b)
def relu(x):
return tf.nn.relu(x)
def fc(input_, output_dim, name='fc'):
with tf.variable_scope(name):
w = _variable_with_weight_decay('w', [input_.get_shape()[-1], output_dim])
b = _variable_with_weight_decay('b', [output_dim])
return tf.matmul(input_, w) + b
def deconv2d(input_, output_shape, k_h=3, k_w=3, d_h=1, d_w=1, padding='SAME', name="deconv2d"):
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
        w = _variable_with_weight_decay('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]])
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1], padding=padding)
b = _variable_with_weight_decay('b', [output_shape[-1]])
return tf.nn.bias_add(deconv, b)
def deconv3d(input_, output_shape, k_t=3, k_h=3, k_w=3, d_t=1, d_h=1, d_w=1, padding='SAME', name="deconv3d"):
with tf.variable_scope(name):
# filter : [depth, height, width, output_channels, in_channels]
        w = _variable_with_weight_decay('w', [k_t, k_h, k_w, output_shape[-1], input_.get_shape()[-1]])
deconv = tf.nn.conv3d_transpose(input_, w, output_shape=output_shape, strides=[1, d_t, d_h, d_w, 1], padding=padding)
b = _variable_with_weight_decay('b', [output_shape[-1]])
return tf.nn.bias_add(deconv, b)
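# Illustrative usage sketch added for clarity; it is not part of the original module.
# It assumes TensorFlow 1.x graph mode, matching the tf.contrib/variable_scope calls above;
# the placeholder shape and layer name are arbitrary demonstration values.
if __name__ == "__main__":
    images = tf.placeholder(tf.float32, [8, 64, 64, 3], name="demo_images")
    features = relu(conv2d(images, output_dim=16, name="demo_conv2d"))
    print(features.get_shape())  # expected static shape: (8, 64, 64, 16)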
| 2.53125
| 3
|
fsutil/test/write_with_config.py
|
wenbobuaa/pykit
| 13
|
12784694
|
import os
import sys
from pykit import fsutil
fn = sys.argv[1]
fsutil.write_file(fn, 'boo')
stat = os.stat(fn)
os.write(1, '{uid},{gid}'.format(uid=stat.st_uid, gid=stat.st_gid))
| 2.34375
| 2
|
config/__init__.py
|
haoyu-x/robot-learning
| 1
|
12784695
|
""" Define parameters for algorithms. """
import argparse
def str2bool(v):
return v.lower() == "true"
def str2intlist(value):
if not value:
return value
else:
return [int(num) for num in value.split(",")]
def str2list(value):
if not value:
return value
else:
return [num for num in value.split(",")]
def create_parser():
"""
Creates the argparser. Use this to add additional arguments
to the parser later.
"""
parser = argparse.ArgumentParser(
"Robot Learning Algorithms",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# environment
parser.add_argument(
"--env",
type=str,
default="Hopper-v2",
help="environment name",
)
parser.add_argument("--seed", type=int, default=123)
add_method_arguments(parser)
return parser
def add_method_arguments(parser):
# algorithm
parser.add_argument(
"--algo",
type=str,
default="sac",
choices=[
"sac",
"ppo",
"ddpg",
"td3",
"bc",
"gail",
"dac",
],
)
# training
parser.add_argument("--is_train", type=str2bool, default=True)
parser.add_argument("--resume", type=str2bool, default=True)
parser.add_argument("--init_ckpt_path", type=str, default=None)
parser.add_argument("--gpu", type=int, default=None)
# evaluation
parser.add_argument("--ckpt_num", type=int, default=None)
parser.add_argument("--num_eval",
type=int,
default=1,
help="number of episodes for evaluation"
)
# environment
try:
parser.add_argument("--screen_width", type=int, default=480)
parser.add_argument("--screen_height", type=int, default=480)
except:
pass
parser.add_argument("--action_repeat", type=int, default=1)
# misc
parser.add_argument("--run_prefix", type=str, default=None)
parser.add_argument("--notes", type=str, default="")
# log
parser.add_argument("--log_interval", type=int, default=1)
parser.add_argument("--evaluate_interval", type=int, default=10)
parser.add_argument("--ckpt_interval", type=int, default=200)
parser.add_argument("--log_root_dir", type=str, default="log")
parser.add_argument(
"--wandb",
type=str2bool,
default=False,
help="set it True if you want to use wandb",
)
parser.add_argument("--wandb_entity", type=str, default="clvr")
parser.add_argument("--wandb_project", type=str, default="robot-learning")
parser.add_argument("--record_video", type=str2bool, default=True)
parser.add_argument("--record_video_caption", type=str2bool, default=True)
try:
parser.add_argument("--record_demo", type=str2bool, default=False)
except:
pass
# observation normalization
parser.add_argument("--ob_norm", type=str2bool, default=False)
parser.add_argument("--max_ob_norm_step", type=int, default=int(1e6))
parser.add_argument(
"--clip_obs", type=float, default=200, help="the clip range of observation"
)
parser.add_argument(
"--clip_range",
type=float,
default=5,
help="the clip range after normalization of observation",
)
parser.add_argument("--max_global_step", type=int, default=int(1e6))
parser.add_argument(
"--batch_size", type=int, default=128, help="the sample batch size"
)
add_policy_arguments(parser)
# arguments specific to algorithms
args, unparsed = parser.parse_known_args()
if args.algo == "sac":
add_sac_arguments(parser)
elif args.algo == "ddpg":
add_ddpg_arguments(parser)
elif args.algo == "td3":
add_td3_arguments(parser)
elif args.algo == "ppo":
add_ppo_arguments(parser)
elif args.algo == "bc":
add_il_arguments(parser)
add_bc_arguments(parser)
elif args.algo in ["gail", "gaifo", "gaifo-s"]:
add_il_arguments(parser)
add_ppo_arguments(parser)
add_gail_arguments(parser)
elif args.algo in ["dac"]:
add_il_arguments(parser)
add_gail_arguments(parser)
add_dac_arguments(parser)
return parser
def add_policy_arguments(parser):
# network
parser.add_argument("--policy_mlp_dim", type=str2intlist, default=[256, 256])
parser.add_argument("--critic_mlp_dim", type=str2intlist, default=[256, 256])
parser.add_argument("--critic_ensemble", type=int, default=1)
parser.add_argument(
"--policy_activation", type=str, default="relu", choices=["relu", "elu", "tanh"]
)
parser.add_argument("--tanh_policy", type=str2bool, default=True)
parser.add_argument("--gaussian_policy", type=str2bool, default=True)
# encoder
parser.add_argument(
"--encoder_type", type=str, default="mlp", choices=["mlp", "cnn"]
)
parser.add_argument("--encoder_image_size", type=int, default=84)
parser.add_argument("--encoder_conv_dim", type=int, default=32)
parser.add_argument("--encoder_kernel_size", type=str2intlist, default=[3, 3, 3, 3])
parser.add_argument("--encoder_stride", type=str2intlist, default=[2, 1, 1, 1])
parser.add_argument("--encoder_conv_output_dim", type=int, default=50)
parser.add_argument("--encoder_soft_update_weight", type=float, default=0.95)
args, unparsed = parser.parse_known_args()
if args.encoder_type == "cnn":
parser.set_defaults(screen_width=100, screen_height=100)
parser.set_defaults(policy_mlp_dim=[1024, 1024])
parser.set_defaults(critic_mlp_dim=[1024, 1024])
# actor-critic
parser.add_argument(
"--actor_lr", type=float, default=3e-4, help="the learning rate of the actor"
)
parser.add_argument(
"--critic_lr", type=float, default=3e-4, help="the learning rate of the critic"
)
parser.add_argument(
"--critic_soft_update_weight", type=float, default=0.995, help="the average coefficient"
)
# absorbing state
parser.add_argument("--absorbing_state", type=str2bool, default=False)
def add_rl_arguments(parser):
parser.add_argument(
"--rl_discount_factor", type=float, default=0.99, help="the discount factor"
)
parser.add_argument("--warm_up_steps", type=int, default=0)
def add_on_policy_arguments(parser):
parser.add_argument("--rollout_length", type=int, default=2000)
parser.add_argument("--gae_lambda", type=float, default=0.95)
def add_off_policy_arguments(parser):
parser.add_argument(
"--buffer_size", type=int, default=int(1e6), help="the size of the buffer"
)
parser.set_defaults(warm_up_steps=1000)
def add_sac_arguments(parser):
add_rl_arguments(parser)
add_off_policy_arguments(parser)
parser.add_argument("--reward_scale", type=float, default=1.0, help="reward scale")
parser.add_argument("--actor_update_freq", type=int, default=2)
parser.add_argument("--critic_target_update_freq", type=int, default=2)
parser.add_argument("--alpha_init_temperature", type=float, default=0.1)
parser.add_argument(
"--alpha_lr", type=float, default=1e-4, help="the learning rate of the actor"
)
parser.set_defaults(actor_lr=1e-3)
parser.set_defaults(critic_lr=1e-3)
parser.set_defaults(evaluate_interval=5000)
parser.set_defaults(ckpt_interval=10000)
parser.set_defaults(log_interval=500)
parser.set_defaults(critic_soft_update_weight=0.99)
parser.set_defaults(buffer_size=100000)
parser.set_defaults(critic_ensemble=2)
def add_ppo_arguments(parser):
add_rl_arguments(parser)
add_on_policy_arguments(parser)
parser.add_argument("--ppo_clip", type=float, default=0.2)
parser.add_argument("--value_loss_coeff", type=float, default=0.5)
parser.add_argument("--action_loss_coeff", type=float, default=1.0)
parser.add_argument("--entropy_loss_coeff", type=float, default=1e-4)
parser.add_argument("--ppo_epoch", type=int, default=5)
parser.add_argument("--max_grad_norm", type=float, default=100)
parser.set_defaults(critic_soft_update_weight=0.995)
parser.set_defaults(evaluate_interval=20)
parser.set_defaults(ckpt_interval=20)
def add_ddpg_arguments(parser):
add_rl_arguments(parser)
add_off_policy_arguments(parser)
parser.add_argument("--actor_update_delay", type=int, default=2000)
parser.add_argument("--actor_update_freq", type=int, default=2)
parser.add_argument("--actor_target_update_freq", type=int, default=2)
parser.add_argument("--critic_target_update_freq", type=int, default=2)
parser.add_argument(
"--actor_soft_update_weight", type=float, default=0.995, help="the average coefficient"
)
parser.set_defaults(critic_soft_update_weight=0.995)
# epsilon greedy
parser.add_argument("--epsilon_greedy", type=str2bool, default=False)
parser.add_argument("--epsilon_greedy_eps", type=float, default=0.3)
parser.add_argument("--policy_exploration_noise", type=float, default=0.1)
parser.set_defaults(gaussian_policy=False)
parser.set_defaults(evaluate_interval=10000)
parser.set_defaults(ckpt_interval=50000)
parser.set_defaults(log_interval=1000)
def add_td3_arguments(parser):
add_ddpg_arguments(parser)
parser.set_defaults(critic_ensemble=2)
parser.add_argument("--policy_noise", type=float, default=0.2)
parser.add_argument("--policy_noise_clip", type=float, default=0.5)
def add_il_arguments(parser):
parser.add_argument("--demo_path", type=str, default=None, help="path to demos")
parser.add_argument(
"--demo_subsample_interval",
type=int,
default=1,
# default=20, # used in GAIL
help="subsample interval of expert transitions",
)
def add_bc_arguments(parser):
parser.set_defaults(gaussian_policy=False)
parser.set_defaults(max_global_step=100)
parser.add_argument(
"--bc_lr", type=float, default=1e-3, help="learning rate for bc"
)
parser.add_argument(
"--val_split", type=float, default=0, help="how much of dataset to leave for validation set"
)
def add_gail_arguments(parser):
parser.add_argument("--gail_entropy_loss_coeff", type=float, default=0.0)
parser.add_argument("--gail_vanilla_reward", type=str2bool, default=True)
parser.add_argument("--discriminator_lr", type=float, default=1e-4)
parser.add_argument("--discriminator_mlp_dim", type=str2intlist, default=[256, 256])
parser.add_argument(
"--discriminator_activation", type=str, default="tanh", choices=["relu", "elu", "tanh"]
)
parser.add_argument("--discriminator_update_freq", type=int, default=4)
parser.add_argument("--gail_no_action", type=str2bool, default=False)
parser.add_argument("--gail_env_reward", type=float, default=0.0)
def add_dac_arguments(parser):
parser.add_argument("--dac_rl_algo", type=str, default="td3", choices=["sac", "td3"])
args, unparsed = parser.parse_known_args()
if args.dac_rl_algo == "sac":
add_sac_arguments(parser)
elif args.dac_rl_algo == "td3":
add_td3_arguments(parser)
def argparser():
""" Directly parses the arguments. """
parser = create_parser()
args, unparsed = parser.parse_known_args()
return args, unparsed
| 3.484375
| 3
|
tools/python/boutiques/__version__.py
|
glatard/boutiques
| 2
|
12784696
|
VERSION = "0.5.23"
| 1.117188
| 1
|
python/test/test_feature_extraction.py
|
spongezhang/vlb
| 11
|
12784697
|
<reponame>spongezhang/vlb
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ===========================================================
# File Name: test_feature_extraction.py
# Author: <NAME>, Columbia University
# Creation Date: 01-25-2019
# Last Modified: Sat Feb 9 11:13:09 2019
#
# Usage: python test_feature_extraction.py
# Description: Test feature extraction
#
# Copyright (C) 2018 <NAME>
# All rights reserved.
#
# This file is made available under
# the terms of the BSD license (see the COPYING file).
# ===========================================================
import sys
sys.path.insert(0, '/Users/Xu/program/Image_Genealogy/code/vlb/python/dset/')
sys.path.insert(
0, '/Users/Xu/program/Image_Genealogy/code/vlb/python/features/')
import feature_utils
import vlsift_matlab
import vlsift
import vgg_dataset
if __name__ == "__main__":
a = vgg_dataset.vggh_Dataset()
image = a.get_image('graf', '1')
#vlsift_all = vlsift.vlsift()
vlsift_all = vlsift_matlab.vlsift_matlab()
feature, descriptor = vlsift_all.extract_all(image)
print(feature.shape, descriptor.shape)
| 1.984375
| 2
|
tests/conftest.py
|
paulperegud/small-1toN-transactions
| 0
|
12784698
|
import os
import pytest
from ethereum import utils
from ethereum.tools import tester
from ethereum.abi import ContractTranslator
from ethereum.config import config_metropolis
from solc_simple import Builder
GAS_LIMIT = 8000000
START_GAS = GAS_LIMIT - 1000000
config_metropolis['BLOCK_GAS_LIMIT'] = GAS_LIMIT
# Compile contracts before testing
OWN_DIR = os.path.dirname(os.path.realpath(__file__))
CONTRACTS_DIR = os.path.abspath(os.path.realpath(os.path.join(OWN_DIR, '../contracts')))
OUTPUT_DIR = os.path.abspath(os.path.realpath(os.path.join(OWN_DIR, '../build')))
builder = Builder(CONTRACTS_DIR, OUTPUT_DIR)
builder.compile_all()
@pytest.fixture
def ethtester():
tester.chain = tester.Chain()
return tester
@pytest.fixture
def ethutils():
return utils
@pytest.fixture
def get_contract(ethtester, ethutils):
def create_contract(path, args=(), sender=ethtester.k0):
abi, hexcode = builder.get_contract_data(path)
bytecode = ethutils.decode_hex(hexcode)
encoded_args = (ContractTranslator(abi).encode_constructor_arguments(args) if args else b'')
code = bytecode + encoded_args
address = ethtester.chain.tx(sender=sender, to=b'', startgas=START_GAS, data=code)
return ethtester.ABIContract(ethtester.chain, abi, address)
return create_contract
@pytest.fixture
def tree(ethtester, get_contract):
contract = get_contract('PercentTrees')
ethtester.chain.mine()
return contract
@pytest.fixture
def dummy(ethtester, get_contract):
contract = get_contract('Dummy')
ethtester.chain.mine()
return contract
@pytest.fixture
def p2pk(ethtester, get_contract):
contract = get_contract('P2PK')
ethtester.chain.mine()
return contract
@pytest.fixture
def treetest(ethtester, get_contract):
contract = get_contract('TreeTest')
ethtester.chain.mine()
return contract
| 1.921875
| 2
|
src/NER.py
|
HaritzPuerto/Entity_Extractor
| 2
|
12784699
|
<reponame>HaritzPuerto/Entity_Extractor
import spacy
from tqdm import tqdm
class Entity_model():
def __init__(self, spacy_model='en_core_web_sm'):
self.nlp = spacy.load(spacy_model)
def __clean_input(self, s):
'''
Remove duplicated whitespaces.
        There are several instances in SQuAD with double spaces that make the alignment tricky during tokenization.
'''
return " ".join(s.split())
def get_entities(self, list_sentences):
'''
Input:
- list_sentences
Output:
- dict_sent_idx2entities: dictionary that maps each sentence idx to a list of entities
'''
dict_sent_idx2entities = dict() # this will be the final output
# for each sentence
for sent_idx, sent in enumerate(tqdm(list_sentences)):
sent = self.__clean_input(sent)
# initialize the list of entities for the current sentence
dict_sent_idx2entities[sent_idx] = []
# use spacy processor on the question | context sentence
spacy_sent = self.nlp(sent)
# get the entities of the two sentences concatenated (query and context)
for e in spacy_sent.ents:
# create the dictionary to store the metadata of the entity
dict_ent = {'char_idx': (e.start_char, e.end_char), # eg: (0,3)
'word_idx': (e.start, e.end), # eg: (0,1)
'ent_type': e.label_, # eg: "PERSON"
'text': e.text, # eg: "John"
}
dict_sent_idx2entities[sent_idx].append(dict_ent)
return dict_sent_idx2entities
def get_entities_from_sentence(self, sent):
list_entities = []
sent = self.__clean_input(sent)
spacy_sent = self.nlp(sent)
# get the entities of the two sentences concatenated (query and context)
for e in spacy_sent.ents:
# create the dictionary to store the metadata of the entity
dict_ent = {'char_idx': (e.start_char, e.end_char), # eg: (0,3)
'word_idx': (e.start, e.end), # eg: (0,1)
'ent_type': e.label_, # eg: "PERSON"
'text': e.text, # eg: "John"
}
list_entities.append(dict_ent)
return list_entities, spacy_sent
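# Illustrative usage sketch (assumes the spaCy model 'en_core_web_sm' is installed):
if __name__ == "__main__":
    ner = Entity_model()
    dict_entities = ner.get_entities(["John lives in New York."])
    print(dict_entities[0])  # list of entity dicts with 'char_idx', 'word_idx', 'ent_type', 'text'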
| 2.8125
| 3
|
src/userservice/tests/test_db.py
|
Budget-Web-App/budget-web-app
| 0
|
12784700
|
<reponame>Budget-Web-App/budget-web-app
"""
Tests for db module
"""
import unittest
from unittest.mock import patch
from sqlalchemy.exc import IntegrityError
from db import UserDb
from tests.constants import EXAMPLE_USER
class TestDb(unittest.TestCase):
"""
Test cases for db module
"""
def setUp(self):
"""Init db and create table before each test"""
# init SQLAlchemy with sqllite in mem
self.db = UserDb('sqlite:///:memory:')
# create users table in mem
self.db.users_table.create(self.db.engine)
def test_add_user_returns_none_no_exception(self):
"""test if a user can be added"""
user = EXAMPLE_USER.copy()
# create a user with username foo
user['username'] = 'foo'
user['userid'] = '1'
# add user to db
self.db.add_user(user)
def test_add_same_user_raises_exception(self):
"""test if one user can be added twice"""
user = EXAMPLE_USER.copy()
# create a user with username bar
user['username'] = 'bar'
user['userid'] = '2'
# add bar_user to db
self.db.add_user(user)
# try to add same user again
self.assertRaises(IntegrityError, self.db.add_user, user)
def test_get_user_returns_existing_user(self):
"""test getting a user"""
user = EXAMPLE_USER.copy()
# create a user with username baz
user['username'] = 'baz'
user['userid'] = '3'
# add baz_user to db
self.db.add_user(user)
# get baz_user from db
db_user = self.db.get_user(user['username'])
# assert both user objects are equal
self.assertEqual(user, db_user)
def test_get_non_existent_user_returns_none(self):
"""test getting a user that does not exist"""
# assert None when user does not exist
self.assertIsNone(self.db.get_user('user1'))
# mock random.randint to produce 4,5,6 on each invocation
@patch('random.randint', side_effect=[4, 5, 6])
def test_generate_account_id_ignores_existing_id_generates_new_id(self, mock_rand):
"""test generating account id"""
user = EXAMPLE_USER.copy()
# create a user with username qux
user['username'] = 'qux'
user['userid'] = '4'
# add qux_user to db
# generate_account_id should return 5 now as 4 exists
self.db.add_user(user)
self.assertEqual('5', self.db.generate_userid())
# mock_rand was called twice, first generating 4, then 5
self.assertEqual(2, mock_rand.call_count)
| 3.078125
| 3
|
src/manager/om/script/gspylib/inspection/items/network/CheckMTU.py
|
wotchin/openGauss-server
| 1
|
12784701
|
<reponame>wotchin/openGauss-server<filename>src/manager/om/script/gspylib/inspection/items/network/CheckMTU.py
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.common.ErrorCode import ErrorCode
from gspylib.os.gsnetwork import g_network
networkCardNum = ""
class CheckMTU(BaseItem):
def __init__(self):
super(CheckMTU, self).__init__(self.__class__.__name__)
self.expectMTU1 = None
self.expectMTU2 = None
def preCheck(self):
# check current node contains cn instances if not raise exception
super(CheckMTU, self).preCheck()
# check the threshold was set correctly
if (not self.threshold.__contains__('expectMTU1')):
raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"]
% "threshold expectMTU1")
self.expectMTU1 = self.threshold['expectMTU1']
if (not self.threshold.__contains__('expectMTU2')):
raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"]
% "threshold expectMTU2")
self.expectMTU2 = self.threshold['expectMTU2']
def doCheck(self):
global networkCardNum
if self.cluster:
# Get node information
LocalNodeInfo = self.cluster.getDbNodeByName(self.host)
# Get the IP address
backIP = LocalNodeInfo.backIps[0]
else:
backIP = SharedFuncs.getIpByHostName(self.host)
# Get the network card number
        networkCards = g_network.getAllNetworkInfo()
        networkMTU = ""
for network in networkCards:
if network.ipAddress == backIP:
networkCardNum = network.NICNum
networkMTU = network.MTUValue
break
if not networkCardNum or not networkMTU:
raise Exception(ErrorCode.GAUSS_506["GAUSS_50619"])
# Check the mtu value obtained is not a number
if not str(networkMTU).isdigit():
raise Exception(ErrorCode.GAUSS_506["GAUSS_50612"]
% (networkCardNum + " " + "MTU"))
self.result.val = str(networkMTU)
# Compare the acquired MTU with the threshold
if (int(networkMTU) != int(self.expectMTU1) and int(
networkMTU) != int(self.expectMTU2)):
self.result.rst = ResultStatus.WARNING
self.result.raw = "Warning MTU value[%s]: RealValue '%s' " \
"ExpectedValue '%s' or '%s'.\n" \
% (networkCardNum, int(networkMTU),
self.expectMTU1, self.expectMTU2)
else:
self.result.rst = ResultStatus.OK
self.result.raw = "[%s]MTU: %s" \
% (networkCardNum, str(networkMTU))
def doSet(self):
resultStr = ""
(THPFile, initFile) = SharedFuncs.getTHPandOSInitFile()
cmd = "ifconfig %s mtu 1500;" % networkCardNum
cmd += "echo ifconfig %s mtu 1500 >> %s" % (networkCardNum, initFile)
(status, output) = subprocess.getstatusoutput(cmd)
if (status != 0):
resultStr += "Set MTU Failed.Error : %s." % output
resultStr += "The cmd is %s " % cmd
if (len(resultStr) > 0):
self.result.val = resultStr
else:
self.result.val = "Set MTU successfully."
| 2.03125
| 2
|
Q7.py
|
devasheeshG/Project-file-XII_21-22
| 0
|
12784702
|
<filename>Q7.py
# Q7. Write a program to find the sum of the following series:
# a) 1/x + 1/x**2 + 1/x**3 + ... + 1/x**n
# b) 1 + 1/2! + 1/3! + ... + 1/n!
# c) 1 - 2 + 3 - 4 + 5 - ... n
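# A minimal illustrative sketch of one possible solution (not in the original file);
# it assumes x and n are read from standard input.
import math
def series_a(x, n):
    # 1/x + 1/x**2 + ... + 1/x**n
    return sum(1 / x ** i for i in range(1, n + 1))
def series_b(n):
    # 1 + 1/2! + 1/3! + ... + 1/n!
    return sum(1 / math.factorial(i) for i in range(1, n + 1))
def series_c(n):
    # 1 - 2 + 3 - 4 + 5 - ... n (alternating signs)
    return sum(i if i % 2 else -i for i in range(1, n + 1))
if __name__ == "__main__":
    x = float(input("Enter x: "))
    n = int(input("Enter n: "))
    print("a)", series_a(x, n))
    print("b)", series_b(n))
    print("c)", series_c(n))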
| 2.703125
| 3
|
lab-taxi/main.py
|
aopina1/DRLND-Course
| 0
|
12784703
|
from agent import Agent
from monitor import interact
import gym
import numpy as np
env = gym.make('Taxi-v3')
agent = Agent()
avg_rewards, best_avg_reward = interact(env, agent)
| 1.773438
| 2
|
scripts/twint2json.py
|
code2k13/nlppipe
| 9
|
12784704
|
#!/usr/bin/env python3
import sys
import json
for line in sys.stdin:
s = line.split(" ")
obj = {}
obj['id'] = s[0]
obj['date'] = s[1] + " " + s[2] + " " + s[3]
obj['user'] = s[4]
obj['text'] = " ".join(s[5:])
print(json.dumps(obj,ensure_ascii=False))
| 2.65625
| 3
|
tests/test_console_script.py
|
MapleCCC/importall
| 0
|
12784705
|
from runpy import run_module
import pytest
from .subtest import _test_stdlib_symbols_in_namespace
# TODO do some research about how to test an interactive CLI application
@pytest.mark.xfail
def test_console_script() -> None:
_test_stdlib_symbols_in_namespace(run_module("importall"))
| 1.476563
| 1
|
test/config_test.py
|
jamesbeyond/mbed-fastmodel-agent-1
| 0
|
12784706
|
from unittest import TestCase
from fm_agent.fm_config import FastmodelConfig
from fm_agent.utils import SimulatorError
class TestFastmodelConfig(TestCase):
def test_Setting_File(self):
        self.assertEqual(FastmodelConfig.SETTINGS_FILE, "settings.json")
def test_parse_params_file_failed(self):
c=FastmodelConfig()
try:
c.parse_params_file("FILE_NOT_EXIST")
except SimulatorError as e:
pass
else:
self.fail("failed to catch the exception")
def test_parse_params_file(self):
c=FastmodelConfig()
try:
c.parse_params_file("DEFAULT.conf")
except SimulatorError as e:
self.fail("caught an SimulatorError exception")
def test_get_configs_none(self):
c=FastmodelConfig()
self.assertIsNone(c.get_configs("NOT_A_MODEL"))
def test_get_configs(self):
c=FastmodelConfig()
self.assertIsNotNone(c.get_configs("FVP_MPS2_M3"))
def test_get_all_configs(self):
c=FastmodelConfig()
self.assertIsNotNone(c.get_all_configs())
| 2.59375
| 3
|
vuakhter/base/requests_log.py
|
best-doctor/vuakhter
| 0
|
12784707
|
<reponame>best-doctor/vuakhter<filename>vuakhter/base/requests_log.py
from __future__ import annotations
import typing
from vuakhter.base.base_log import BaseLog
if typing.TYPE_CHECKING:
from vuakhter.utils.types import RequestEntry, TimestampRange
class RequestsLog(BaseLog):
def get_records(self, ts_range: TimestampRange = None, **kwargs: typing.Any) -> typing.Iterator[RequestEntry]:
raise NotImplementedError()
| 2.046875
| 2
|
test/test_filters.py
|
ecometrica/grandfatherson
| 15
|
12784708
|
from datetime import datetime, date
import unittest
from grandfatherson import (FRIDAY, SATURDAY, SUNDAY)
from grandfatherson.filters import (Seconds, Minutes, Hours, Days, Weeks,
Months, Years, UTC)
def utcdatetime(*args):
return datetime(*args, tzinfo=UTC())
class TestSeconds(unittest.TestCase):
def setUp(self):
self.now = datetime(2000, 1, 1, 0, 0, 1, 1)
self.datetimes = [
datetime(2000, 1, 1, 0, 0, 1, 0),
datetime(2000, 1, 1, 0, 0, 0, 1),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1999, 12, 31, 23, 59, 57, 0),
]
def test_mask(self):
self.assertEqual(
Seconds.mask(datetime(1999, 12, 31, 23, 59, 59, 999999)),
datetime(1999, 12, 31, 23, 59, 59, 0)
)
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Seconds.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Seconds.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Seconds.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Seconds.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Seconds.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Seconds.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Seconds.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Seconds.filter(self.datetimes, number=0,
now=self.now),
set())
def test_current(self):
self.assertEqual(Seconds.filter(self.datetimes, number=1,
now=self.now),
set([datetime(2000, 1, 1, 0, 0, 1, 0)]))
def test_duplicates(self):
# Ensure we get the oldest per-second datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 0, 0, 0, 1)
self.assertEqual(Seconds.filter(self.datetimes, number=2,
now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 1, 0)]))
def test_microseconds(self):
self.assertEqual(Seconds.filter(self.datetimes, number=3,
now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 1, 0)]))
def test_before_start(self):
# datetime(1999, 12, 31, 23, 59, 57, 0) is too old to show up
# in the results
self.assertEqual(Seconds.filter(self.datetimes, number=4,
now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 1, 0)]))
def test_all_input(self):
self.assertEqual(Seconds.filter(self.datetimes, number=5,
now=self.now),
set([datetime(1999, 12, 31, 23, 59, 57, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 1, 0)]))
self.assertEqual(Seconds.filter(self.datetimes, number=6,
now=self.now),
set([datetime(1999, 12, 31, 23, 59, 57, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 1, 0)]))
def test_with_tzinfo(self):
utcnow = utcdatetime(2000, 1, 1, 0, 0, 1, 1)
tzinfo_datetimes = [
utcdatetime(2000, 1, 1, 0, 0, 1, 0),
utcdatetime(2000, 1, 1, 0, 0, 0, 1),
utcdatetime(2000, 1, 1, 0, 0, 0, 0),
utcdatetime(1999, 12, 31, 23, 59, 59, 999999),
utcdatetime(1999, 12, 31, 23, 59, 57, 0),
]
self.assertEqual(Seconds.filter(tzinfo_datetimes, number=5,
now=utcnow),
set([utcdatetime(1999, 12, 31, 23, 59, 57, 0),
utcdatetime(1999, 12, 31, 23, 59, 59, 999999),
utcdatetime(2000, 1, 1, 0, 0, 0, 0),
utcdatetime(2000, 1, 1, 0, 0, 1, 0)]))
self.assertEqual(Seconds.filter(tzinfo_datetimes, number=6,
now=utcnow),
set([utcdatetime(1999, 12, 31, 23, 59, 57, 0),
utcdatetime(1999, 12, 31, 23, 59, 59, 999999),
utcdatetime(2000, 1, 1, 0, 0, 0, 0),
utcdatetime(2000, 1, 1, 0, 0, 1, 0)]))
class TestMinutes(unittest.TestCase):
def setUp(self):
self.now = datetime(2000, 1, 1, 0, 1, 1, 1)
self.datetimes = [
datetime(2000, 1, 1, 0, 1, 0, 0),
datetime(2000, 1, 1, 0, 0, 1, 0),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1999, 12, 31, 23, 57, 0, 0),
]
def test_mask(self):
self.assertEqual(
Minutes.mask(datetime(1999, 12, 31, 23, 59, 59, 999999)),
datetime(1999, 12, 31, 23, 59, 0, 0)
)
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Minutes.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Minutes.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Minutes.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Minutes.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Minutes.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Minutes.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Minutes.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Minutes.filter(self.datetimes, number=0,
now=self.now),
set())
def test_current(self):
self.assertEqual(Minutes.filter(self.datetimes, number=1,
now=self.now),
set([datetime(2000, 1, 1, 0, 1, 0, 0)]))
def test_duplicates(self):
# Ensure we get the oldest per-minute datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 0, 0, 1, 0)
self.assertEqual(Minutes.filter(self.datetimes, number=2,
now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 1, 0, 0)]))
def test_microseconds(self):
self.assertEqual(Minutes.filter(self.datetimes, number=3,
now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 1, 0, 0)]))
def test_before_start(self):
# datetime(1999, 12, 31, 23, 57, 0, 0) is too old to show up
# in the results
self.assertEqual(Minutes.filter(self.datetimes, number=4,
now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 1, 0, 0)]))
def test_all_input(self):
self.assertEqual(Minutes.filter(self.datetimes, number=5,
now=self.now),
set([datetime(1999, 12, 31, 23, 57, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 1, 0, 0)]))
self.assertEqual(Minutes.filter(self.datetimes, number=6,
now=self.now),
set([datetime(1999, 12, 31, 23, 57, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 1, 0, 0)]))
class TestHours(unittest.TestCase):
def setUp(self):
self.now = datetime(2000, 1, 1, 1, 1, 1, 1)
self.datetimes = [
datetime(2000, 1, 1, 1, 0, 0, 0),
datetime(2000, 1, 1, 0, 1, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1999, 12, 31, 21, 0, 0, 0),
]
def test_mask(self):
self.assertEqual(
Hours.mask(datetime(1999, 12, 31, 23, 59, 59, 999999)),
datetime(1999, 12, 31, 23, 0, 0, 0)
)
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Hours.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Hours.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Hours.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Hours.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Hours.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Hours.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Hours.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Hours.filter(self.datetimes, number=0, now=self.now),
set())
def test_current(self):
self.assertEqual(Hours.filter(self.datetimes, number=1, now=self.now),
set([datetime(2000, 1, 1, 1, 0, 0, 0)]))
def test_duplicates(self):
# Ensure we get the oldest per-hour datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 0, 1, 0, 0)
self.assertEqual(Hours.filter(self.datetimes, number=2,
now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 1, 0, 0, 0)]))
def test_microseconds(self):
self.assertEqual(Hours.filter(self.datetimes, number=3, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 1, 0, 0, 0)]))
def test_before_start(self):
# datetime(1999, 12, 31, 21, 0, 0, 0) is too old to show up
# in the results
self.assertEqual(Hours.filter(self.datetimes, number=4, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 1, 0, 0, 0)]))
def test_all_input(self):
self.assertEqual(Hours.filter(self.datetimes, number=5, now=self.now),
set([datetime(1999, 12, 31, 21, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 1, 0, 0, 0)]))
self.assertEqual(Hours.filter(self.datetimes, number=6, now=self.now),
set([datetime(1999, 12, 31, 21, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 1, 0, 0, 0)]))
class TestDays(unittest.TestCase):
def setUp(self):
self.now = datetime(2000, 1, 1, 1, 1, 1, 1)
self.datetimes = [
datetime(2000, 1, 1, 1, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1999, 12, 30, 0, 0, 0, 0),
datetime(1999, 12, 28, 0, 0, 0, 0),
]
def test_mask(self):
self.assertEqual(
Days.mask(datetime(1999, 12, 31, 23, 59, 59, 999999)),
datetime(1999, 12, 31, 0, 0, 0, 0)
)
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Days.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Days.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Days.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Days.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Days.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Days.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Days.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Days.filter(self.datetimes, number=0, now=self.now),
set())
def test_current(self):
self.assertEqual(Days.filter(self.datetimes, number=1, now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_duplicates(self):
# Ensure we get the oldest per-day datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 1, 0, 0, 0)
self.assertEqual(Days.filter(self.datetimes, number=2, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_before_start(self):
# datetime(1999, 12, 28, 0, 0, 0, 0) is too old to show up
# in the results
self.assertEqual(Days.filter(self.datetimes, number=4, now=self.now),
set([datetime(1999, 12, 30, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_all_input(self):
self.assertEqual(Days.filter(self.datetimes, number=5, now=self.now),
set([datetime(1999, 12, 28, 0, 0, 0, 0),
datetime(1999, 12, 30, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
self.assertEqual(Days.filter(self.datetimes, number=6, now=self.now),
set([datetime(1999, 12, 28, 0, 0, 0, 0),
datetime(1999, 12, 30, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_leap_year(self):
# 2004 is a leap year, because it is divisible by 4
now = datetime(2004, 3, 1, 0, 0, 0, 0)
datetimes_2004 = [
datetime(2004, 3, 1, 0, 0, 0, 0),
datetime(2004, 2, 29, 0, 0, 0, 0),
datetime(2004, 2, 28, 0, 0, 0, 0),
datetime(2004, 2, 27, 0, 0, 0, 0),
]
self.assertEqual(Days.filter(datetimes_2004, number=1, now=now),
set([datetime(2004, 3, 1, 0, 0, 0, 0)]))
self.assertEqual(Days.filter(datetimes_2004, number=2, now=now),
set([datetime(2004, 2, 29, 0, 0, 0, 0),
datetime(2004, 3, 1, 0, 0, 0, 0)]))
self.assertEqual(Days.filter(datetimes_2004, number=3, now=now),
set([datetime(2004, 2, 28, 0, 0, 0, 0),
datetime(2004, 2, 29, 0, 0, 0, 0),
datetime(2004, 3, 1, 0, 0, 0, 0)]))
def test_not_leap_year(self):
        # 1900 was not a leap year, because it is divisible by 100 but not by 400
now = datetime(1900, 3, 1, 0, 0, 0, 0)
datetimes_1900 = [
datetime(1900, 3, 1, 0, 0, 0, 0),
datetime(1900, 2, 28, 0, 0, 0, 0),
datetime(1900, 2, 27, 0, 0, 0, 0),
]
self.assertEqual(Days.filter(datetimes_1900, number=1, now=now),
set([datetime(1900, 3, 1, 0, 0, 0, 0)]))
self.assertEqual(Days.filter(datetimes_1900, number=2, now=now),
set([datetime(1900, 2, 28, 0, 0, 0, 0),
datetime(1900, 3, 1, 0, 0, 0, 0)]))
self.assertEqual(Days.filter(datetimes_1900, number=3, now=now),
set([datetime(1900, 2, 27, 0, 0, 0, 0),
datetime(1900, 2, 28, 0, 0, 0, 0),
datetime(1900, 3, 1, 0, 0, 0, 0)]))
def test_with_tzinfo_and_date(self):
tzinfo_datetimes = [
utcdatetime(2000, 1, 1, 1, 0, 0, 0),
utcdatetime(2000, 1, 1, 0, 0, 0, 0),
utcdatetime(1999, 12, 31, 23, 59, 59, 999999),
utcdatetime(1999, 12, 30, 0, 0, 0, 0),
utcdatetime(1999, 12, 28, 0, 0, 0, 0),
]
today = date(2000, 1, 1)
self.assertEqual(Days.filter(tzinfo_datetimes, number=5, now=today),
set([utcdatetime(1999, 12, 28, 0, 0, 0, 0),
utcdatetime(1999, 12, 30, 0, 0, 0, 0),
utcdatetime(1999, 12, 31, 23, 59, 59, 999999),
utcdatetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_with_date(self):
today = date(2000, 1, 1)
self.assertEqual(Days.filter(self.datetimes, number=5, now=today),
set([datetime(1999, 12, 28, 0, 0, 0, 0),
datetime(1999, 12, 30, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
class TestWeeks(unittest.TestCase):
def setUp(self):
# 1 January 2000 is a Saturday
self.now = datetime(2000, 1, 1, 1, 1, 1, 1)
self.datetimes = [
datetime(2000, 1, 1, 1, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1999, 12, 18, 0, 0, 0, 0),
datetime(1999, 12, 4, 0, 0, 0, 0),
]
def test_mask(self):
# 31 December 1999 is a Friday.
dt = datetime(1999, 12, 31, 23, 59, 59, 999999)
self.assertEqual(dt.weekday(), FRIDAY)
# Default firstweekday is Saturday
self.assertEqual(Weeks.mask(dt),
Weeks.mask(dt, firstweekday=SATURDAY))
self.assertEqual(Weeks.mask(dt),
datetime(1999, 12, 25, 0, 0, 0, 0))
# Sunday
self.assertEqual(Weeks.mask(dt, firstweekday=SUNDAY),
datetime(1999, 12, 26, 0, 0, 0, 0))
# If firstweekday is the same as dt.weekday, then it should return
# the same day.
self.assertEqual(Weeks.mask(dt, firstweekday=dt.weekday()),
Days.mask(dt))
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Weeks.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Weeks.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Weeks.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Weeks.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Weeks.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Weeks.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Weeks.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Weeks.filter(self.datetimes, number=0, now=self.now),
set())
def test_current(self):
self.assertEqual(Weeks.filter(self.datetimes, number=1, now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_duplicates(self):
        # Ensure we get the oldest per-week datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 1, 0, 0, 0)
self.assertEqual(Weeks.filter(self.datetimes, number=2, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_before_start(self):
# datetime(1999, 12, 4, 0, 0, 0, 0) is too old to show up
# in the results
self.assertEqual(Weeks.filter(self.datetimes, number=4, now=self.now),
set([datetime(1999, 12, 18, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_all_input(self):
self.assertEqual(Weeks.filter(self.datetimes, number=5, now=self.now),
set([datetime(1999, 12, 4, 0, 0, 0, 0),
datetime(1999, 12, 18, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
self.assertEqual(Weeks.filter(self.datetimes, number=6, now=self.now),
set([datetime(1999, 12, 4, 0, 0, 0, 0),
datetime(1999, 12, 18, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_different_firstweekday(self):
self.assertEqual(
Weeks.filter(
self.datetimes, number=3, firstweekday=3, now=self.now
),
set([datetime(1999, 12, 18, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999)])
)
filtered = Weeks.filter(
self.datetimes, number=5, firstweekday=3, now=self.now
)
self.assertEqual(
Weeks.filter(
self.datetimes, number=5, firstweekday=3, now=self.now
),
set([datetime(1999, 12, 18, 0, 0, 0, 0),
datetime(1999, 12, 4, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999)])
)
class TestMonths(unittest.TestCase):
def setUp(self):
self.now = datetime(2000, 2, 1, 1, 1, 1, 1)
self.datetimes = [
datetime(2000, 2, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 1, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1999, 10, 1, 0, 0, 0, 0),
]
def test_mask(self):
self.assertEqual(
Months.mask(datetime(1999, 12, 31, 23, 59, 59, 999999)),
datetime(1999, 12, 1, 0, 0, 0, 0)
)
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Months.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Months.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Months.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Months.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Months.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Months.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Months.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Months.filter(self.datetimes, number=0, now=self.now),
set())
def test_current(self):
self.assertEqual(Months.filter(self.datetimes, number=1, now=self.now),
set([datetime(2000, 2, 1, 0, 0, 0, 0)]))
def test_duplicates(self):
# Ensure we get the oldest per-month datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 1, 0, 0, 0)
self.assertEqual(Months.filter(self.datetimes, number=2, now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 2, 1, 0, 0, 0, 0)]))
def test_new_year(self):
self.assertEqual(Months.filter(self.datetimes, number=3, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 2, 1, 0, 0, 0, 0)]))
def test_before_start(self):
# datetime(1999, 10, 1, 0, 0, 0, 0) is too old to show up
# in the results
self.assertEqual(Months.filter(self.datetimes, number=4, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 2, 1, 0, 0, 0, 0)]))
def test_all_input(self):
self.assertEqual(Months.filter(self.datetimes, number=5, now=self.now),
set([datetime(1999, 10, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 2, 1, 0, 0, 0, 0)]))
self.assertEqual(Months.filter(self.datetimes, number=6, now=self.now),
set([datetime(1999, 10, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(2000, 2, 1, 0, 0, 0, 0)]))
def test_multiple_years(self):
now = datetime(2000, 1, 1, 0, 0, 0, 0)
datetimes = [
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 1, 0, 0, 0, 0),
datetime(1999, 1, 1, 0, 0, 0, 0),
datetime(1998, 12, 1, 0, 0, 0, 0),
datetime(1997, 12, 1, 0, 0, 0, 0),
]
# 12 months back ignores datetime(1999, 1, 1, 0, 0, 0, 0)
self.assertEqual(Months.filter(datetimes, number=12, now=now),
set([datetime(1999, 12, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
# But 13 months back gets it
self.assertEqual(Months.filter(datetimes, number=13, now=now),
set([datetime(1999, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
# But 14 months back gets datetime(1998, 12, 1, 0, 0, 0, 0)
self.assertEqual(Months.filter(datetimes, number=14, now=now),
set([datetime(1998, 12, 1, 0, 0, 0, 0),
datetime(1999, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
# As does 24 months back
self.assertEqual(Months.filter(datetimes, number=24, now=now),
set([datetime(1998, 12, 1, 0, 0, 0, 0),
datetime(1999, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
# 36 months back should get datetime(1997, 12, 1, 0, 0, 0, 0)
self.assertEqual(Months.filter(datetimes, number=36, now=now),
set([datetime(1997, 12, 1, 0, 0, 0, 0),
datetime(1998, 12, 1, 0, 0, 0, 0),
datetime(1999, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 1, 0, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
class TestYears(unittest.TestCase):
def setUp(self):
self.now = datetime(2000, 1, 1, 1, 1, 1, 1)
self.datetimes = [
datetime(2000, 1, 1, 1, 0, 0, 0),
datetime(2000, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(1998, 1, 1, 0, 0, 0, 0),
datetime(1996, 1, 1, 0, 0, 0, 0),
]
def test_mask(self):
self.assertEqual(
Years.mask(datetime(1999, 12, 31, 23, 59, 59, 999999)),
datetime(1999, 1, 1, 0, 0, 0, 0)
)
def test_future(self):
datetimes = [datetime(2010, 1, 15, 0, 0, 0, 0)] # Wikipedia
self.assertEqual(Years.filter(datetimes, number=0, now=self.now),
set(datetimes))
self.assertEqual(Years.filter(datetimes, number=1, now=self.now),
set(datetimes))
def test_invalid_number(self):
self.assertRaises(ValueError,
Years.filter, [], number=-1, now=self.now)
self.assertRaises(ValueError,
Years.filter, [], number=0.1, now=self.now)
self.assertRaises(ValueError,
Years.filter, [], number='1', now=self.now)
def test_no_input(self):
self.assertEqual(Years.filter([], number=1, now=self.now),
set())
def test_no_results(self):
self.assertEqual(Years.filter([self.now], number=0, now=self.now),
set())
self.assertEqual(Years.filter(self.datetimes, number=0, now=self.now),
set())
def test_current(self):
self.assertEqual(Years.filter(self.datetimes, number=1, now=self.now),
set([datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_duplicates(self):
        # Ensure we get the oldest per-year datetime when there are
# duplicates: i.e. not datetime(2000, 1, 1, 1, 0, 0, 0)
self.assertEqual(Years.filter(self.datetimes, number=2, now=self.now),
set([datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_before_start(self):
# datetime(1996, 1, 1, 0, 0, 0, 0) is too old to show up
# in the results
self.assertEqual(Years.filter(self.datetimes, number=4, now=self.now),
set([datetime(1998, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
def test_all_input(self):
self.assertEqual(Years.filter(self.datetimes, number=5, now=self.now),
set([datetime(1996, 1, 1, 0, 0, 0, 0),
datetime(1998, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
self.assertEqual(Years.filter(self.datetimes, number=6, now=self.now),
set([datetime(1996, 1, 1, 0, 0, 0, 0),
datetime(1998, 1, 1, 0, 0, 0, 0),
datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime(2000, 1, 1, 0, 0, 0, 0)]))
| 3.125
| 3
|
mugicli/pytail.py
|
mugiseyebrows/mugi-cli
| 0
|
12784709
|
from . import head_tail_main, T_TAIL
def main():
head_tail_main(T_TAIL)
if __name__ == "__main__":
main()
| 1.4375
| 1
|
plugins/helpers/table_dictionaries.py
|
rmmoreira/udacity-dend-capstone
| 1
|
12784710
|
""" Dictionary file for tables """
staging_tables = {
'immigration': 'immigration_table',
'temperature': 'temperature_table',
'airport': 'airport_table',
'demographics': 'demographics_table'
}
fact_dimension_tables = {
'city_data': 'fact_city_data_table',
'demographic': 'dim_demographic_table',
'airport': 'dim_airport_table',
'visitor': 'dim_visitor_table'
}
fact_dimension_insert = {
'city_data': 'fact_city_table_insert',
'demographic': 'dim_demographic_table_insert',
'airport': 'dim_airport_table_insert',
'visitor': 'dim_visitor_table_insert'
}
| 2.140625
| 2
|
players.py
|
aroxby-kinnek/machine-learning
| 0
|
12784711
|
"""
Players!
"""
import random
from readchar import readchar
class Player(object):
"""
Base player object
"""
def next_move(self, game):
"""
Retrieve the next move for this player in this game
"""
raise NotImplementedError
class HumanPlayer(Player):
"""
Human player input
"""
def next_move(self, game):
return readchar().upper()
class BotPlayer(Player):
"""
Base bot player
"""
def reproduce(self, allowed_moves, min_mutations, max_mutations):
"""
Create a new bot based on this bot
"""
raise NotImplementedError
class PlannedBot(BotPlayer):
"""
Bot is "born" with a fixed set of moves
"""
def __init__(self, moves=None):
self.moves = moves or []
self.games = {}
def next_move(self, game):
idx = self.games.get(game, 0)
self.games[game] = idx + 1
if idx < len(self.moves):
return self.moves[idx]
# HACK
return 'Q'
def reproduce(self, allowed_moves, min_mutations, max_mutations):
"""
Create a new bot based on this bot
"""
mutations = random.randint(min_mutations, max_mutations)
new_moves = self.moves[:]
for _ in xrange(mutations):
new_moves.append(random.choice(allowed_moves))
return self.__class__(new_moves)
class NeatBot(BotPlayer):
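    # NOTE: GameNetwork is not defined or imported in this file; it is assumed to come from elsewhere in the project.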
network_factory = GameNetwork()
def __init__(self, network):
self.games = {}
self.network = network or self.network_factory()
def next_move(self, game):
moves = self.network.eval_game(game)
# HACK
preference = 'wasd'
for move in preference:
if move in moves:
return move
# HACK
return 'Q'
def reproduce(self, allowed_moves, min_mutations, max_mutations):
mutations = random.randint(min_mutations, max_mutations)
new_network = self.network.deep_copy()
mutators = [ # FIXME: Add weights
new_network.add_random_neuron,
new_network.add_random_connection
]
for _ in xrange(mutations):
mutator = random.choice(mutators)
mutator()
return self.__class__(new_network)
| 3.59375
| 4
|
temperature/temperature.py
|
Harry-Lees/TemperaturePy
| 1
|
12784712
|
from __future__ import annotations
from numbers import Real
ABSOLUTE_ZERO: float = 0.0 # in Kelvin
ABSOLUTE_HOT: float = 1.416785e32 # in Kelvin
class Temperature:
'''
Simple Temperature class for converting between different units of temperature.
Default temperature unit is Kelvin.
'''
def __init__(self, temp: Real) -> None:
if not isinstance(temp, Real):
raise TypeError('Temperature must be a Real number')
if temp < ABSOLUTE_ZERO:
raise ValueError('Temperature cannot be below Absolute Zero')
elif temp > ABSOLUTE_HOT: # type: ignore
raise ValueError('Temperature cannot be above Absolute Hot')
self._t: Real = temp
def __repr__(self) -> str:
return f'Temperature(kelvin={round(self.kelvin, 2)}, celsius={round(self.celsius, 2)}, farenheit={round(self.farenheit, 2)}, rankine={round(self.rankine, 2)})'
def __eq__(self, other: object) -> bool:
if not isinstance(other, Temperature):
return NotImplemented
return self._t == other._t
def __lt__(self, other: Temperature) -> bool:
return self._t < other._t
def __add__(self, other: Temperature) -> Temperature:
return Temperature(self._t + other._t)
def __sub__(self, other: Temperature) -> Temperature:
return Temperature(self._t - other._t)
def __mul__(self, other: Temperature) -> Temperature:
return Temperature(self._t * other._t)
@classmethod
def fromfarenheit(cls, temp: Real) -> Temperature:
return cls(Temperature.ftok(temp))
@classmethod
def fromcelsius(cls, temp: Real) -> Temperature:
return cls(Temperature.ctok(temp))
@classmethod
def fromrankine(cls, temp: Real) -> Temperature:
return cls(Temperature.rtok(temp))
@property
def kelvin(self) -> Real:
'''Return the temperature in Kelvin'''
return self._t
@kelvin.setter
def kelvin(self, temp: Real) -> None:
if temp < ABSOLUTE_ZERO:
raise ValueError('Temperature cannot be below Absolute Zero')
elif temp > ABSOLUTE_HOT: # type: ignore
raise ValueError('Temperature cannot be above Absolute Hot')
self._t = temp
@property
def celsius(self) -> Real:
'''temperature in celsius'''
return self.ktoc(self.kelvin)
@celsius.setter
def celsius(self, temp: Real) -> None:
self.kelvin = self.ctok(temp)
@property
def farenheit(self) -> Real:
'''temperature in farenheit'''
return self.ktof(self.kelvin)
@farenheit.setter
def farenheit(self, temp: Real) -> None:
self.kelvin = self.ftok(temp)
@property
def rankine(self) -> Real:
'''temperature in Rankine'''
return self.ktor(self.kelvin)
@rankine.setter
def rankine(self, temp) -> None:
self.kelvin = self.rtok(temp)
@staticmethod
def ctok(c: Real) -> Real:
'''convert Celsius to Kelvin'''
return c + 273.15
@staticmethod
def ktoc(k: Real) -> Real:
'''convert Kelvin to Celsius'''
return k - 273.15
@staticmethod
def ktof(k: Real) -> Real:
'''convert Kelvin to Farenheit'''
return (k - 273.15) * 9/5 + 32
@staticmethod
def ftok(f: Real) -> Real:
'''convert Farenheit to Kelvin'''
return (f - 32) * 5/9 + 273.15
@staticmethod
def ktor(k: Real) -> Real:
        '''convert Kelvin to Rankine'''
return k * 1.8
@staticmethod
def rtok(r: Real) -> Real:
        '''convert Rankine to Kelvin'''
return r * 5/9
@staticmethod
def ftoc(f: Real) -> Real:
'''convert Farenheit to Celsius'''
return (f - 32) / 1.8
@staticmethod
def ctof(c: Real) -> Real:
'''convert Celsius to Farenheit'''
return 1.8 * c + 32
@staticmethod
def ctor(c: Real) -> Real:
'''convert Celsius to Rankine'''
return c * 9/5 + 491.67
@staticmethod
def rtoc(r: Real) -> Real:
'''convert Rankine to Celsius'''
return (r - 491.67) * 5/9
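# A brief, illustrative usage sketch (not part of the original module):
if __name__ == '__main__':
    body_temp = Temperature.fromcelsius(37)
    print(body_temp.kelvin)     # ~310.15
    print(body_temp.farenheit)  # ~98.6
    print(body_temp.rankine)    # ~558.27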
| 3.609375
| 4
|
ticket.py
|
qhgongzi/xilinTicketV2
| 5
|
12784713
|
__author__ = 'Administrator'
import requests
import xlstr
import time
class Ticket:
    # Ticket information
    train_no = ''
    station_train_code = ''  # train number, e.g. K540
from_station_telecode = ''
from_station_name = ''
to_station_telecode = ''
to_station_name = ''
    yp_info = ''  # unknown information
location_code = ''
secret_str = ''
    start_train_date = ''  # travel date, e.g. 20140127
    # Trip information
train_date = ''
train_date_utc=''
seat_type = ''
def __init__(self, ticket_obj, buy_type):
self.train_no = ticket_obj['queryLeftNewDTO']['train_no']
self.from_station_telecode = ticket_obj['queryLeftNewDTO']['from_station_telecode']
self.from_station_name = ticket_obj['queryLeftNewDTO']['from_station_name']
self.to_station_telecode = ticket_obj['queryLeftNewDTO']['to_station_telecode']
self.to_station_name = ticket_obj['queryLeftNewDTO']['to_station_name']
self.yp_info = ticket_obj['queryLeftNewDTO']['yp_info']
self.start_train_date = ticket_obj['queryLeftNewDTO']['start_train_date']
self.location_code = ticket_obj['queryLeftNewDTO']['location_code']
self.secret_str = ticket_obj['secretStr']
self.station_train_code = ticket_obj['queryLeftNewDTO']['station_train_code']
trainTime = time.strptime(self.start_train_date, '%Y%m%d')
self.train_date = time.strftime('%Y-%m-%d', trainTime)
self.train_date_utc=time.strftime('%a %b %d %H:%M:%S UTC+0800 %Y',trainTime)
self.seat_type = buy_type
SeatType={'M':'一等座','O':'二等座','4':'软卧','3':'硬卧','1':'硬座'}
| 2.5
| 2
|
tests/unit/config/api/test_settings.py
|
antonku/ncssl_api_client
| 8
|
12784714
|
import mock
from unittest import TestCase
from ncssl_api_client.config.api import settings
try:
reload # Python 2.7
except NameError:
try:
from importlib import reload # Python 3.4+
except ImportError:
from imp import reload # Python 3.0 - 3.3
try:
__import__('__builtin__')
open_reference = "__builtin__.open" # Python 2.7
except ImportError:
open_reference = "builtins.open" # Python 3.x
DATA = 'API_SETTINGS_TEST: TEST'
class ApiSettingsLoadingTest(TestCase):
@mock.patch(open_reference, mock.mock_open(read_data=DATA))
def test_update_locals(self):
reload(settings)
self.assertEqual(settings.API_SETTINGS_TEST, 'TEST')
| 2.375
| 2
|
core/preprocessing/voice.py
|
ArnolFokam/dna-gate-backend
| 0
|
12784715
|
import noisereduce as nr
def remove_noise(data, rate):
return nr.reduce_noise(y=data,
sr=rate,
thresh_n_mult_nonstationary=2,
stationary=False)
| 2.46875
| 2
|
validator/sawtooth_validator/journal/chain_commit_state.py
|
askmish/sawtooth-core
| 0
|
12784716
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.protobuf.block_pb2 import BlockHeader
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader
class MissingDependency(Exception):
def __init__(self, txn_id):
super().__init__("Missing dependency: {}".format(txn_id))
self.transaction_id = txn_id
class DuplicateTransaction(Exception):
def __init__(self, txn_id):
super().__init__("Duplicate transaction: {}".format(txn_id))
self.transaction_id = txn_id
class DuplicateBatch(Exception):
def __init__(self, batch_id):
super().__init__("Duplicate batch: {}".format(batch_id))
self.batch_id = batch_id
class BlockStoreUpdated(Exception):
pass
class ChainCommitState:
"""Checking to see if a batch or transaction in a block has already been
committed is somewhat difficult because of the presence of forks. While
the block store is the definitive source for all batches and transactions
that have been committed on the current chain, validation of blocks on
    another fork requires determining what blocks would actually be in the chain
if that block were to be committed and only checking the batches and
transactions contained within. ChainCommitState abstracts this process.
"""
def __init__(self, head_id, block_manager, block_store):
"""The constructor should be passed the previous block id of the block
being validated."""
uncommitted_block_ids = list()
uncommitted_batch_ids = set()
uncommitted_txn_ids = set()
# Find the most recent ancestor of this block that is in the block
# store. Batches and transactions that are in a block that is in the
# block store and that has a greater block number than this block must
# be ignored.
if head_id != NULL_BLOCK_IDENTIFIER:
head = next(block_manager.get([head_id]))
ancestor = head
while ancestor.header_signature not in block_store:
# For every block not in the block store, we need to track all
# its batch ids and transaction ids separately to ensure there
# are no duplicates.
for batch in ancestor.batches:
uncommitted_batch_ids.add(batch.header_signature)
for txn in batch.transactions:
uncommitted_txn_ids.add(txn.header_signature)
uncommitted_block_ids.append(ancestor.header_signature)
ancestor_header = BlockHeader()
ancestor_header.ParseFromString(ancestor.header)
previous_block_id = ancestor_header.previous_block_id
if previous_block_id == NULL_BLOCK_IDENTIFIER:
break
ancestor = next(block_manager.get([previous_block_id]))
else:
ancestor = None
self.block_store = block_store
ancestor_header = None
if ancestor:
ancestor_header = BlockHeader()
ancestor_header.ParseFromString(ancestor.header)
self.common_ancestor = ancestor_header
self.uncommitted_block_ids = uncommitted_block_ids
self.uncommitted_batch_ids = uncommitted_batch_ids
self.uncommitted_txn_ids = uncommitted_txn_ids
def _block_in_chain(self, block):
if self.common_ancestor is not None:
return block.block_num <= self.common_ancestor.block_num
return False
@staticmethod
def _check_for_duplicates_within(key_fn, items):
"""Checks that for any two items in `items`, calling `key_fn` on both
does not return equal values."""
for i, item_i in enumerate(items):
for item_j in items[i + 1:]:
if key_fn(item_i) == key_fn(item_j):
return key_fn(item_i)
return None
def check_for_duplicate_transactions(self, transactions):
"""Check that none of the transactions passed in have already been
committed in the chain. Also checks that the list of transactions
passed contains no duplicates."""
# Same as for batches
duplicate = self._check_for_duplicates_within(
lambda txn: txn.header_signature, transactions)
if duplicate is not None:
raise DuplicateTransaction(duplicate)
for txn in transactions:
txn_id = txn.header_signature
if txn_id in self.uncommitted_txn_ids:
raise DuplicateTransaction(txn_id)
if self.block_store.has_transaction(txn_id):
try:
committed_block =\
self.block_store.get_block_by_transaction_id(txn_id)
except ValueError:
raise BlockStoreUpdated(
"The BlockStore updated while checking for duplicate"
" transactions."
)
if self._block_in_chain(committed_block):
raise DuplicateTransaction(txn_id)
def check_for_duplicate_batches(self, batches):
"""Check that none of the batches passed in have already been committed
in the chain. Also checks that the list of batches passed contains no
duplicates."""
# Check for duplicates within the given list
duplicate = self._check_for_duplicates_within(
lambda batch: batch.header_signature, batches)
if duplicate is not None:
raise DuplicateBatch(duplicate)
for batch in batches:
batch_id = batch.header_signature
            # Make sure the batch isn't in one of the uncommitted blocks
if batch_id in self.uncommitted_batch_ids:
raise DuplicateBatch(batch_id)
# Check if the batch is in one of the committed blocks
if self.block_store.has_batch(batch_id):
try:
committed_block =\
self.block_store.get_block_by_batch_id(batch_id)
except ValueError:
raise BlockStoreUpdated(
"The BlockStore updated while checking for duplicate"
" transactions."
)
# This is only a duplicate batch if the batch is in a block
# that would stay committed if this block were committed. This
# is equivalent to asking if the number of the block that this
# batch is in is less than or equal to the number of the common
# ancestor block.
if self._block_in_chain(committed_block):
raise DuplicateBatch(batch_id)
def check_for_transaction_dependencies(self, transactions):
"""Check that all explicit dependencies in all transactions passed have
been satisfied."""
dependencies = []
txn_ids = []
for txn in transactions:
txn_ids.append(txn.header_signature)
txn_hdr = TransactionHeader()
txn_hdr.ParseFromString(txn.header)
dependencies.extend(txn_hdr.dependencies)
for dep in dependencies:
# Check for dependency within the given block's batches
if dep in txn_ids:
continue
# Check for dependency in the uncommitted blocks
if dep in self.uncommitted_txn_ids:
continue
            # Check for dependency in the committed blocks
if self.block_store.has_transaction(dep):
committed_block =\
self.block_store.get_block_by_transaction_id(dep)
                # Make sure the block wouldn't be uncommitted if the given block
# were uncommitted
if self._block_in_chain(committed_block):
continue
raise MissingDependency(dep)
class _CommitCache:
"""Tracks the commit status of a set of identifiers and these identifiers
    are either explicitly committed, or explicitly uncommitted. If they fall
    into neither of these cases, the fallback is to look in the BlockStore to
see if they are there. Explicit committed ids take priority over
uncommitted since one of the common use cases we have is to simulate the
committed state at a previous state of the BlockStore and we allow for the
identifiers to be re-committed.
"""
def __init__(self, block_store_check):
self.block_store_check = block_store_check
self._committed = set() # the set of items
# committed by this chain
self._uncommitted = set() # the set of items
# uncommitted by the current chain when it is rolled back.
def add(self, identifier):
self._committed.add(identifier)
def remove(self, identifier):
self._committed.discard(identifier)
def uncommit(self, identifier):
self._uncommitted.add(identifier)
def __contains__(self, identifier):
if identifier in self._committed:
return True
if identifier in self._uncommitted:
return False
return self.block_store_check(identifier)
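# Illustrative sketch (added, not part of the original module): shows the intended
# precedence of _CommitCache -- explicitly committed ids win over explicitly
# uncommitted ones, and unknown ids fall back to the block-store check.
def _commit_cache_example():
    cache = _CommitCache(block_store_check=lambda _id: False)
    cache.uncommit("txn-1")
    assert "txn-1" not in cache  # explicitly uncommitted
    cache.add("txn-1")
    assert "txn-1" in cache  # re-committed ids take priority over uncommitted ones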
class TransactionCommitCache(_CommitCache):
"""Tracks the set of Transactions that are committed to a hypothetical
blockchain. This is used to detect duplicate transactions or missing
dependencies when building a block.
"""
def __init__(self, block_store):
super(TransactionCommitCache, self).__init__(
block_store.has_transaction)
def add_batch(self, batch):
for txn in batch.transactions:
self._committed.add(txn.header_signature)
def remove_batch(self, batch):
for txn in batch.transactions:
self._committed.discard(txn.header_signature)
| 2.078125
| 2
|
sip/tango_control/tango_interactive_client/check_tango_host.py
|
SKA-ScienceDataProcessor/integration-prototype
| 3
|
12784717
|
# coding: utf-8
"""."""
from tango import Database
# Get reference to tango database
DB = Database()
print('=' * 80)
print('Database info:')
print('=' * 80)
print(DB.get_info())
print('=' * 80)
print('Server list:')
print('=' * 80)
print(DB.get_server_list().value_string)
print('')
| 2.0625
| 2
|
tests/test_validator.py
|
ens-lgil/gwas-sumstats-validator
| 0
|
12784718
|
import unittest
import shutil
import os
import sys
import tests.prep_tests as prep
import validate.validator as v
from validate.common_constants import *
import tests.test_values as test_arrays
class BasicTestCase(unittest.TestCase):
def setUp(self):
self.test_storepath = "./tests/data"
os.makedirs(self.test_storepath, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.test_storepath)
def test_validate_good_file_extension(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv.gz")
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_ext = validator.validate_file_extension()
self.assertTrue(valid_ext)
# alternative
test_filepath = os.path.join(self.test_storepath, "test_file.csv.gz")
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_ext = validator.validate_file_extension()
self.assertTrue(valid_ext)
def test_validate_bad_file_extension(self):
test_filepath = os.path.join(self.test_storepath, "test_file.zip")
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_ext = validator.validate_file_extension()
self.assertFalse(valid_ext)
def test_validate_good_file_headers(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
setup_file = prep.SSTestFile()
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_headers = validator.validate_headers()
self.assertTrue(valid_headers)
def test_validate_file_headers_missing_snp(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
setup_file = prep.SSTestFile()
setup_file.set_test_data_dict()
setup_file.test_data_dict.pop(SNP_DSET) # remove a snp field
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_headers = validator.validate_headers()
self.assertTrue(valid_headers)
def test_validate_file_headers_missing_pos(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
setup_file = prep.SSTestFile()
setup_file.set_test_data_dict()
setup_file.test_data_dict.pop(CHR_DSET) # remove the chr field
setup_file.test_data_dict.pop(BP_DSET) # remove the pos field
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_headers = validator.validate_headers()
self.assertTrue(valid_headers)
def test_validate_file_headers_missing_snp_and_pos(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
setup_file = prep.SSTestFile()
setup_file.set_test_data_dict()
setup_file.test_data_dict.pop(SNP_DSET) # remove a snp field
setup_file.test_data_dict.pop(CHR_DSET) # remove the chr field
setup_file.test_data_dict.pop(BP_DSET) # remove the pos field
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_headers = validator.validate_headers()
self.assertFalse(valid_headers)
def test_validate_bad_file_headers(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
setup_file = prep.SSTestFile()
setup_file.set_test_data_dict()
setup_file.test_data_dict.pop(EFFECT_DSET) # remove a mandatory field
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_headers = validator.validate_headers()
self.assertFalse(valid_headers)
def test_validate_bad_file_headers_missing_effect(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
setup_file = prep.SSTestFile()
setup_file.set_test_data_dict()
setup_file.test_data_dict.pop(EFFECT_WEIGHT_DSET) # remove effect_weight field
setup_file.test_data_dict.pop(OR_DSET) # remove odds ratio field
setup_file.test_data_dict.pop(HR_DSET) # remove hazard ratio field
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=test_filepath + ".LOG")
valid_headers = validator.validate_headers()
self.assertFalse(valid_headers)
def test_validate_good_file_data(self):
test_filepath = os.path.join(self.test_storepath, "test_file.tsv")
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile()
setup_file.prep_test_file()
validator = v.Validator(test_filepath, "pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertTrue(valid_data)
def test_validate_bad_snp_file_data(self):
test_filename = "bad_snp.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[SNP_DSET] = ["invalid", 123, "1_1234_A_G", "ss151232"] # set bad snps
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 4)
self.assertFalse(valid_data)
def test_validate_bad_snp_and_no_pos_file_data(self):
test_filename = "bad_snp_no_pos.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[SNP_DSET] = ["invalid", "rs123", "1_1234_A_G", "ss151232"] # set bad snps
setup_file.test_data_dict[BP_DSET] = [None, 123, "NA", None] # only one good row
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 3)
self.assertFalse(valid_data)
def test_validate_bad_chr_file_data(self):
test_filename = "bad_chr.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[CHR_DSET] = [1, 123, "CHR1", "X"] # set 2 bad chrs
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 2)
self.assertFalse(valid_data)
def test_validate_bad_chr_and_no_snp_file_data(self):
test_filename = "bad_chr_no_snp.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[CHR_DSET] = [1, 123, "CHR1", "X"] # set 2 bad chrs
setup_file.test_data_dict[SNP_DSET] = ["invalid", 123, "rs1234", "rs151"] # set only one good row
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 3)
self.assertFalse(valid_data)
def test_validate_bad_bp_file_data(self):
test_filename = "bad_bp.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[BP_DSET] = [1, 1234567890, "CHR1_122334", 123245] # set 2 bad bps
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 2)
self.assertFalse(valid_data)
def test_validate_bad_bp_and_no_snp_file_data(self):
test_filename = "bad_bp_no_snp.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[BP_DSET] = [1, 1234567890, "CHR1_122334", 123245] # set 2 bad bps
setup_file.test_data_dict[SNP_DSET] = ["invalid", 123, None, "rs1234"] # set so only one good row
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 3)
self.assertFalse(valid_data)
def test_validate_bad_optional_effect_weight_file_data(self):
test_filename = "bad_weight.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
        setup_file.test_data_dict[EFFECT_WEIGHT_DSET] = [1.1232e-23, "invalid", 0.123, .3245] # set 1 bad effect weight
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 1)
self.assertFalse(valid_data)
def test_validate_bad_optional_odds_ratio_file_data(self):
test_filename = "bad_odds.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
        setup_file.test_data_dict[OR_DSET] = [1.1232e-23, "invalid", 0.123, .3245] # set 1 bad odds ratio
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 1)
self.assertFalse(valid_data)
def test_validate_bad_optional_hazard_ratio_file_data(self):
test_filename = "bad_hazard.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
        setup_file.test_data_dict[HR_DSET] = [1.1232e-23, "invalid", 0.123, .3245] # set 1 bad hazard ratio
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 1)
self.assertFalse(valid_data)
def test_validate_bad_effect_allele_file_data(self):
test_filename = "bad_effect.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[EFFECT_DSET] = ['A', 'AGG', 'INS:T', 'd'] # set 2 bad alleles
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 2)
self.assertFalse(valid_data)
def test_validate_empty_snp_file_data(self):
test_filename = "empty_snp.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[SNP_DSET] = ["NA", None, None, None] # set bad snps
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 4)
self.assertFalse(valid_data)
def test_validate_empty_snp_no_pos_file_data(self):
test_filename = "empty_snp_no_pos.tsv"
test_filepath = os.path.join(self.test_storepath, test_filename)
logfile=test_filepath.replace('tsv', 'LOG')
setup_file = prep.SSTestFile(filename=test_filename)
setup_file.set_test_data_dict()
setup_file.test_data_dict[SNP_DSET] = ["NA", None, "1234", "rs1"] # set bad snps
setup_file.test_data_dict[BP_DSET] = [None, 123, "NA", None] # only one good bp
setup_file.prep_test_file()
validator = v.Validator(file=test_filepath, filetype="pgs-upload", logfile=logfile)
valid_data = validator.validate_data()
self.assertEqual(len(validator.bad_rows), 4)
self.assertFalse(valid_data)
if __name__ == '__main__':
unittest.main()
| 2.546875
| 3
|
utils/resource_path.py
|
MAPL-UFU/palms-ftp
| 0
|
12784719
|
# Use this helper every time you rebuild the PyQt5 MainWindow.py with PyInstaller.
#-------------------------------------------------------------
import os
import sys
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
#--------------------------------------------------------------------
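# Illustrative usage (added; the asset name is a made-up example): the same call
# resolves the file both when running from source and from a PyInstaller bundle.
# icon_path = resource_path(os.path.join("assets", "icon.png"))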
| 2.109375
| 2
|
presets/keywords.py
|
monotony113/feedly-link-aggregator
| 5
|
12784720
|
PRIORITIZED_KEYWORDS = {
10: ['cats', 'kitties'],
5: ['dogs', 'puppies'],
-5: ['goldfish'],
-float('inf'): ['rat'],
}
| 1.46875
| 1
|
app/lab/handle/handleMaterial.py
|
jerry0chu/Experiment
| 0
|
12784721
|
from app.models import Material
from app import db
import json
def handleGetMaterials(page,per_page):
materials = Material.query.paginate(page=page, per_page=per_page, error_out=False)
res = db.engine.execute("select count(*) from material")
count = [r[0] for r in res][0]
materialInfo = {
'materials': [a.to_json() for a in materials.items],
'count': count
}
return json.dumps(materialInfo)
def handleSubmitMaterialEditForm(material):
mater = Material.query.filter_by(mid=material['mid']).first()
if mater:
mater.name=material['name']
mater.purity=material['purity']
mater.manufacturer=material['manufacturer']
mater.note=material['note']
db.session.commit()
return "success"
else:
return "failure"
def HnadleSubmitMaterialAddForm(material):
mater = Material(name=material["name"], purity=material["purity"], manufacturer=material["manufacturer"],
note=material["note"])
db.session.add(mater)
db.session.commit()
return "success"
def handleRemoveMaterial(mid):
material = Material.query.filter_by(mid=mid).first()
if material:
db.session.delete(material)
db.session.commit()
return "success"
else:
return "failure"
def handleMaterialBatchDelete(midList):
for mid in midList:
material = Material.query.filter_by(mid=mid).first()
if material:
db.session.delete(material)
db.session.commit()
return "success"
def handleMaterialQueryContent(selectType,content,page,per_page):
    # Build the filter on the requested column directly instead of eval()-ing code strings
    column = getattr(Material, selectType)
    query = db.session.query(Material).filter(column.like('%' + content + '%'))
    count = query.count()
    materials = query.paginate(page=int(page), per_page=int(per_page), error_out=False)
    materialInfo = {
        'materials': [a.to_json() for a in materials.items],
        'count': count
    }
    return json.dumps(materialInfo)
| 2.40625
| 2
|
catalyst/utils/scripts.py
|
162/catalyst
| 0
|
12784722
|
<reponame>162/catalyst
import os
import sys
import shutil
import pathlib
from importlib.util import spec_from_file_location, module_from_spec
def import_module(expdir: pathlib.Path):
# @TODO: better PYTHONPATH handling
if not isinstance(expdir, pathlib.Path):
expdir = pathlib.Path(expdir)
sys.path.insert(0, str(expdir.absolute()))
sys.path.insert(0, os.path.dirname(str(expdir.absolute())))
s = spec_from_file_location(
expdir.name,
str(expdir.absolute() / "__init__.py"),
submodule_search_locations=[expdir.absolute()]
)
m = module_from_spec(s)
s.loader.exec_module(m)
sys.modules[expdir.name] = m
return m
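# Illustrative usage (added; the experiment directory is hypothetical): imports an
# experiment package the way the CLI entry points would, registering it in sys.modules.
# exp_module = import_module(pathlib.Path("experiments/mnist"))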
def _tricky_dir_copy(dir_from, dir_to):
os.makedirs(dir_to, exist_ok=True)
shutil.rmtree(dir_to)
shutil.copytree(dir_from, dir_to)
def dump_code(expdir, logdir):
expdir = expdir[:-1] if expdir.endswith("/") else expdir
    new_src_dir = "code"
# @TODO: hardcoded
old_pro_dir = os.path.dirname(os.path.abspath(__file__)) + "/../"
new_pro_dir = os.path.join(logdir, new_src_dir, "catalyst")
_tricky_dir_copy(old_pro_dir, new_pro_dir)
old_expdir = os.path.abspath(expdir)
expdir_ = os.path.basename(os.path.dirname(expdir))
new_expdir = os.path.join(logdir, new_src_dir, expdir_)
_tricky_dir_copy(old_expdir, new_expdir)
__all__ = ["import_module", "dump_code"]
| 2.0625
| 2
|
protocol_scripts/Test_custom_labware.py
|
Microbiaki-Lab/Opentrons
| 0
|
12784723
|
<gh_stars>0
#!/usr/bin/env python
#test custom pcr plates, move water from first row to second
from opentrons import protocol_api
metadata = {'apiLevel': '2.11'}
def run(protocol: protocol_api.ProtocolContext):
protocol.home()
plate = protocol.load_labware('tethystestrun2', 1) #our_well_plate
tiprack_multi = protocol.load_labware('opentrons_96_tiprack_300ul', 2)
pipette_multi = protocol.load_instrument('p300_multi', mount = 'right', tip_racks=[tiprack_multi])
#This loads a Well Plate in slot 1 and an Opentrons 300 µL Tiprack in slot 2
    #and uses a P300 Multi pipette. Can be modified to add more pipettes/tips etc.
#basic_transfer with multichannel pipette. #moves first row 100ul to second row
pipette_multi.transfer(100, plate.wells_by_name()['A1'], plate.wells_by_name()['A2'])
| 2.390625
| 2
|
client_script.py
|
Cr0wTom/Block-SSL-Smart-Contract
| 1
|
12784724
|
<filename>client_script.py
#!/usr/bin/python
import sys
import string
import hashlib
import os
import random
import struct
import getpass
import datetime
import json
import requests #pip install requests
import traceback
import subprocess
from datetime import timedelta
from Crypto.Cipher import AES
from pybitcoin import BitcoinPrivateKey
from OpenSSL import crypto, SSL
from ecdsa import SigningKey
import sha3 #pip install pysha3 - assumed dependency providing keccak_256 for the address derivation below
#For pybitcoin download and install from:
#https://github.com/blockstack/pybitcoin.git
art = r'''
____ _ _ _____ _____ _
| _ \| | | | / ____/ ____| |Smart
| |_) | | ___ ___| | _______| (___| (___ | |Contract
| _ <| |/ _ \ / __| |/ /______\___ \\___ \| |Client script
| |_) | | (_) | (__| < ____) |___) | |v0.1
|____/|_|\___/ \___|_|\_\ |_____/_____/|______|
Block-SSL - SSL/TLS Certificate Authority Replacement
through the Ethereum Smart Contracts
Thesis Project - Aristotle University of Thessaloniki
By Cr0wTom
------------------------------------------------------
'''
def clientscript():
# create a key pair
print "Creating a new key pair:"
print "Warning: This is a pseudo-random generation.\n"
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 4096)
priv = raw_input("Which is your private key? (in hexadecimal format)\n") #Private Key of the owner
priv = BitcoinPrivateKey(priv)
pub = priv.get_verifying_key()
    pub = pub.to_string()
    keccak = sha3.keccak_256() #the original used keccak without defining it; pysha3's keccak_256 is assumed here
    keccak.update(pub)
    address = keccak.hexdigest()[24:]
open("Address.txt", "w").write(address)
# create a self-signed cert
cert = crypto.X509()
createCert(k, cert)
open("certificate.crt", "wt").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open("keys.key", "wt").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
print "\nCertificate created in file: certificate.crt"
print "\nKeys saved in file: keys.key\n"
#Hashing of the certificate
f = open("certificate.crt", "rb") #read file in binary mode
fr = f.read()
cert_hash = hashlib.sha256() #use the SHA256 hashing algorithm
cert_hash.update(fr)
data = cert_hash.hexdigest()
print "\nYour Certificate hash is: ", data
subprocess.Popen(["geth"])
os.system("BZZKEY=" + address)
subprocess.Popen(["$GOPATH/bin/swarm --bzzaccount $BZZKEY"])
os.system("curl -H \"Content-Type: text/plain\" --data-binary \"some-data\" http://localhost:8500/bzz:/") #todo: find swarm gateways
#todo: print Certificate expiration date
#todo: print the expiration date and the days that left
print "Please open the BlockSSL Smart Contract and paste this info in the required fields."
sys.exit()
def createCert(k, cert):
# create a self-signed cert
country = raw_input("Country Name (2 letter code): ")
cert.get_subject().C = country
state = raw_input("State or Province Name (full name): ")
cert.get_subject().ST = state
local = raw_input("Locality Name (eg, city): ")
cert.get_subject().L = local
org = raw_input("Organization Name (eg, company): ")
cert.get_subject().O = org
orgu = raw_input("Organizational Unit Name (eg, section): ")
cert.get_subject().OU = orgu
cn = raw_input("Common Name (eg, fully qualified host name): ")
cert.get_subject().CN = cn
email = raw_input("email Address: ")
cert.get_subject().emailAddress = email
cert.set_serial_number(1000) #Actual serial number added in the contract
cert.gmtime_adj_notBefore(0)
now = datetime.datetime.now() #setting the time right now
tr = 0
while tr == 0:
an = int(raw_input("For how long do you need to update the certificate in days? (maximum: 365)\n"))
if an < 366 and an > 0:
cert.gmtime_adj_notAfter(60*60*24*an)
tr += 1
else:
print "Please give a number smaller than 366.\n"
tr = 0
diff = datetime.timedelta(an)
future = now + diff
print future.strftime("\nYour certificate expires on %m/%d/%Y") #print the expiration date
print "\nAdding the GE and RV signatures to the issuer field..."
message_gen = open("Address.txt", "rb").read()
message_gen = message_gen.strip('\n')
m1 = hashlib.sha256()
m1.update(message_gen)
m1 = m1.hexdigest()
cert.get_issuer().CN = m1 #Ethereum address at the CN issuer field
cert.set_pubkey(k)
cert.sign(k, 'sha256')
return cert
def checkforSwarn():
name = "$GOPATH/bin/swarm"
    try: #check if swarm exists
        devnull = open(os.devnull)
        subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
        print "\tSwarm exists.\n"
    except OSError as e: #install swarm - os specific
        if e.errno == os.errno.ENOENT:
            if sys.platform == "linux" or sys.platform == "linux2":
                print "Installing Swarm: \n"
os.system("sudo apt install golang git")
os.system("mkdir ~/go")
os.system("export GOPATH=\"$HOME/go\"")
os.system("echo \'export GOPATH=\"$HOME/go\"\' >> ~/.profile")
os.system("mkdir -p $GOPATH/src/github.com/ethereum | cd $GOPATH/src/github.com/ethereum | git clone https://github.com/ethereum/go-ethereum | cd go-ethereum | git checkout master | go get github.com/ethereum/go-ethereum")
os.system("go install -v ./cmd/geth")
os.system("go install -v ./cmd/swarm")
os.system("$GOPATH/bin/swarm version")
elif sys.platform == "win32": #all Windows versions
print "Swarn is not supported on Windows, please use Linux or Mac.\n"
elif sys.platform == "darwin": #all OSX versions
print "Installing Swarn: \n"
os.system("brew install go git")
os.system("mkdir ~/go")
os.system("export GOPATH=\"$HOME/go\"")
os.system("echo \'export GOPATH=\"$HOME/go\"\' >> ~/.profile")
os.system("mkdir -p $GOPATH/src/github.com/ethereum | cd $GOPATH/src/github.com/ethereum | git clone https://github.com/ethereum/go-ethereum | cd go-ethereum | git checkout master | go get github.com/ethereum/go-ethereum")
os.system("go install -v ./cmd/geth")
os.system("go install -v ./cmd/swarm")
os.system("$GOPATH/bin/swarm version")
def main(argu):
try:
if argu[1] == "--help" or argu[1] == "-h":
            #Option to help with the usage of the script
print art
print "Usage: \"client_script.py -c\""
print "\n"
elif argu[1] == "-c":
print art
clientscript()
except IndexError:
print art
print "Usage: \"client_script.py -c\""
print "\nFor help use the --help or -h option."
print "\n"
if __name__ == "__main__":
main(sys.argv)
| 2.546875
| 3
|
9-dars-uyishi.py
|
thejorabek/python
| 1
|
12784725
|
<filename>9-dars-uyishi.py
#%% 1
class Hayvon():
def __init__(self,nomi):
self.name=nomi
class Yirtqich(Hayvon):
def ovlash(self):
print(self.name,"yashash uchun ovlaydi")
def yugurish(self):
print(self.name,"o'ljaga yetib olish uchun yuguradi")
def uxlash(self):
print(self.name,"kuch yig'ish uchun uxlaydi")
class Otxor(Hayvon):
def yugurish(self):
print(self.name,"yirtqichdan qochish uchun yuguradi")
def avlodQoldirish(self):
print(self.name,"qirilib ketmaslik uchun avlod qoldiradi")
def eshitish(self):
print(self.name,"yirtqichlarni eshitadi")
sher=Yirtqich("Alex")
sher.ovlash()
sher.uxlash()
sher.yugurish()
kiyik=Otxor("Bambi")
kiyik.avlodQoldirish()
kiyik.eshitish()
kiyik.yugurish()
#%% 2
class Shape():
def __init__(self,name):
self.name=name
class Line(Shape):
def show(self):
print("* "*10)
class Triangle(Shape):
def show(self):
n=int(input("n="))
for i in range(n):
for j in range(i):
if i==0 or i==n-1 or j==i-1 or j==0:
print("* ",end="")
else:
print(end=" ")
print()
class Rectangle(Shape):
def show(self):
n=int(input("n="))
for i in range(n):
for j in range(n):
if i==0 or i==n-1 or j==n-1 or j==0:
print("* ",end="")
else:
print(end=" ")
print()
class NullShape(Shape):
def show(self):
print("Bo'sh shakl")
ob=Line("Chiziq")
ob.show()
ob1=Triangle("Uchburchak")
ob1.show()
ob2=Rectangle("To'rtburchak")
ob2.show()
ob3=NullShape("NullShape")
ob3.show()
lst=[ob,ob1,ob2]
k=0
nomi=input("Shakl nomini kiriting: ")
for i in lst:
if i.name==nomi:
i.show()
k=0
break
else:
k=1
if k==1:
ob3.show()
#%%
new=[]
class Int():
def __init__(self):
new.append(int(input("Butun sonni kiriting: ")))
class Float(Int):
def __init__(self):
print("1-int yoki float 2-bool")
a=int(input("a="))
if a==1:
super().__init__()
if a==2:
new.append(float(input("Haqiqiy sonni kiriting: ")))
class Float(Int):
def __init__(self):
print("1-int 2-float")
a=int(input("a="))
if a==1:
super().__init__()
if a==2:
new.append(float(input("Haqiqiy sonni kiriting: ")))
else:
| 3.5625
| 4
|
model/create_landscape.py
|
FelixNoessler/Buffers-or-corridors-for-great-crested-newts
| 0
|
12784726
|
import scipy.spatial as sci_spatial
import skimage.draw as ski_draw
import shapely.geometry as shapely_geom
import numpy as np
import os, sys
def create_landscape(no_of_circles, radius):
# create the middle points of the ponds (the ponds should not overlap)
x,y = np.random.randint(0,400), np.random.randint(0,400)
list_of_points = [(x + 400, y + 400),
(x + 400, y),
(x + 800, y + 400),
(x + 400, y + 800),
(x, y + 400)]
for i in range(no_of_circles-1):
new_point_found = False
trials = 0
while not new_point_found and trials < 500:
x,y = np.random.randint(0,400), np.random.randint(0,400)
new_point = shapely_geom.Point((x + 400, y + 400))
trials += 1
if not new_point.buffer(radius * 2 + 50).intersects(shapely_geom.MultiPoint(list_of_points)):
new_point_found = True
list_of_points.append((x + 400, y + 400))
list_of_points.append((x + 400, y))
list_of_points.append((x + 800, y + 400))
list_of_points.append((x + 400, y + 800))
list_of_points.append((x, y + 400))
# landscape with ponds
ponds_img = np.full((1200 + 2*radius, 1200 + 2*radius), 55)
# draw the ponds
for point_i in list_of_points:
rr, cc = ski_draw.disk(point_i, radius)
ponds_img[rr + radius, cc + radius] = 105
ponds_img = ponds_img[400+radius : 800+radius, 400+radius : 800+radius]
# pond-id
ponds_id_img = np.full((1200 + 2*radius, 1200 + 2*radius), -999)
# draw the ponds
id_i = 0
for point_i, id_i in zip(list_of_points, np.repeat(np.arange(len(list_of_points)/5), 5)):
rr, cc = ski_draw.disk(point_i, radius)
ponds_id_img[rr + radius, cc + radius] = id_i
ponds_id_img = ponds_id_img[400+radius : 800+radius, 400+radius : 800+radius]
    # create a raster image with the middle points marked
is_center_img = np.zeros_like(ponds_img)
boundary = shapely_geom.Polygon([(399, 399), (799, 399), (799, 799), (399, 799)])
selection = [shapely_geom.Point(point_i).intersects(boundary) for point_i in list_of_points]
x,y = np.array(list_of_points)[selection].T
x -= 400
y -= 400
is_center_img[x, y] = 1
return is_center_img, ponds_img, ponds_id_img
def make_corridors(is_center, ponds):
without_boundaries = np.zeros((400*3, 400*3))
without_boundaries[0:400, 400:800] = is_center
without_boundaries[400:800, 0:400] = is_center
without_boundaries[400:800, 400:800] = is_center
without_boundaries[800:1200, 400:800] = is_center
without_boundaries[400:800, 800:1200] = is_center
loc = np.where(without_boundaries == 1)
center_points = np.swapaxes(loc, 0, 1)
result = sci_spatial.distance.cdist(center_points, center_points)
new_img = np.full_like(without_boundaries, 55) # 55 --> green in netlogo
points_with_corridors = np.where(np.logical_and( result != 0, result < 170)) #mean(result[result != 0]) * 0.3
for i in np.arange(0, np.shape(points_with_corridors)[1]):
index_from = points_with_corridors[0][i]
index_to = points_with_corridors[1][i]
x = [loc[1][index_from], loc[1][index_to]]
y = [loc[0][index_from], loc[0][index_to]]
x_corr, y_corr = shapely_geom.LineString([(x[0], y[0]), (x[1], y[1])]).buffer(4.5).exterior.coords.xy
rr, cc = ski_draw.polygon(y_corr, x_corr, without_boundaries.shape)
new_img[rr, cc] = 35 # 35 --> brown in netlogo
final_img = new_img[400:800, 400:800]
final_img[np.where(ponds == 105)] = 105 # 105 --> blue in netlogo
return final_img
def make_buffers(corridor_img, is_center_img):
radius = 15
corridor_area = np.sum(corridor_img == 35)
no_of_ponds = np.sum(is_center_img)
buffer_radius = np.sqrt( ( (corridor_area / no_of_ponds) + np.pi *radius **2) / np.pi )
without_boundaries = np.zeros((400*3, 400*3))
without_boundaries[0:400, 400:800] = is_center_img
without_boundaries[400:800, 0:400] = is_center_img
without_boundaries[400:800, 400:800] = is_center_img
without_boundaries[800:1200, 400:800] = is_center_img
without_boundaries[400:800, 800:1200] = is_center_img
x,y = np.where(without_boundaries == 1)
new_img = np.full_like(without_boundaries, 55) # 55 --> green in netlogo
# make buffers
for x_i, y_i in zip(x,y):
rr, cc = ski_draw.disk((x_i, y_i), buffer_radius)
filter_1 = (rr >= 0) & (rr <= 1199)
filter_2 = (cc >= 0) & (cc <= 1199)
rr = rr[filter_1 & filter_2]
cc = cc[filter_1 & filter_2]
new_img[rr, cc] = 35
# make ponds
for x_i, y_i in zip(x,y):
rr, cc = ski_draw.disk((x_i, y_i), radius)
filter_1 = (rr >= 0) & (rr <= 1199)
filter_2 = (cc >= 0) & (cc <= 1199)
rr = rr[filter_1 & filter_2]
cc = cc[filter_1 & filter_2]
new_img[rr, cc] = 105
return new_img[400:800, 400:800]
if __name__ == "__main__":
#print('Scenario-Number:', sys.argv[1])
os.makedirs('gis_output/' + sys.argv[1])
os.chdir('gis_output/' + sys.argv[1])
is_center_of_pond, pond, pond_id = create_landscape(no_of_circles=int(sys.argv[2]), radius=int(sys.argv[3]))
corridors = make_corridors(is_center_of_pond, pond)
buffers = make_buffers(corridors, is_center_of_pond)
with open("../pcolor.asc") as myfile:
head = [next(myfile) for x in range(6)]
np.savetxt('corridors.asc',corridors, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
np.savetxt('buffers.asc',buffers, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
np.savetxt('center.asc',is_center_of_pond, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
np.savetxt('id.asc',pond_id, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
| 2.859375
| 3
|
pdip/integrator/integration/types/base/integration_adapter.py
|
ahmetcagriakca/pdip
| 2
|
12784727
|
from abc import ABC, abstractmethod
from ...domain.base import IntegrationBase
from ....operation.domain.operation import OperationIntegrationBase
from ....pubsub.base import ChannelQueue
class IntegrationAdapter(ABC):
@abstractmethod
def execute(
self,
operation_integration: OperationIntegrationBase,
channel: ChannelQueue
):
pass
@abstractmethod
def get_start_message(self, integration: IntegrationBase):
pass
@abstractmethod
def get_finish_message(self, integration: IntegrationBase, data_count: int):
pass
@abstractmethod
def get_error_message(self, integration: IntegrationBase):
pass
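# Minimal illustrative subclass (added, not part of the pdip package): shows what a
# concrete adapter has to implement; the message strings are placeholders.
class _NoopIntegrationAdapter(IntegrationAdapter):
    def execute(self, operation_integration: OperationIntegrationBase, channel: ChannelQueue):
        # a real adapter would move data here and publish progress on the channel
        pass
    def get_start_message(self, integration: IntegrationBase):
        return "integration started"
    def get_finish_message(self, integration: IntegrationBase, data_count: int):
        return f"integration finished ({data_count} rows)"
    def get_error_message(self, integration: IntegrationBase):
        return "integration failed"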
| 2.390625
| 2
|
student/urls.py
|
ashkantaravati/TDRRS
| 2
|
12784728
|
<filename>student/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.get_dashboard, name='dashboard'),
url(r'^dashboard',views.get_dashboard,name='dashboard'),
url(r'^login',views.do_login, name='login'),
url(r'^defense-times',views.defense_times, name='defense_times'),
url(r'^logout$',views.do_logout, name='logout'),
url(r'^change-password',views.do_change_password, name='change_password'),
url(r'^cancel-reservation',views.do_submit_cancellation, name='cancel_reservation'),
url(r'^denied',views.denied,name='denied')
]
| 1.929688
| 2
|
barrel_reaktor/search/models.py
|
txtr/barrel-reaktor
| 0
|
12784729
|
from barrel import Store, Field, FloatField, EmbeddedStoreField
from barrel.rpc import RpcMixin
from barrel_reaktor.document.models import Document
from . import get_search_sources
class Stat(Store):
"""The reaktor always passes in `name` as the value to use for the search
facet. Since it's a value, let's rename it. Some fields also provide a
label, which we keep untouched.
"""
count = Field(target="count")
value = Field(target="name")
label = Field(target="label")
class CategoryStat(Store):
"""Category searching facet is inconsistent with other facets.
This model is there as an attempt to normalize that.
"""
count = Field(target="count")
value = Field(target="id")
label = Field(target="name")
class DocumentResult(Store):
"""Search result object wrapping search itemsalongside search info
like pagination information.
"""
class DocumentItem(Store):
"""Search result item wrapping a document alongside search info like
item relevance.
"""
document = EmbeddedStoreField(target="searchResult", store_class=Document)
relevance = FloatField(target="relevance")
class Stats(Store):
"""Represents stats about a search result, e.g. how many books for
this language, how many books available as pdf, ...
"""
category = EmbeddedStoreField(target="category", store_class=CategoryStat, is_array=True)
collection_title = EmbeddedStoreField(target="collectionTitle", store_class=Stat, is_array=True)
drm = EmbeddedStoreField(target="drmType", store_class=Stat, is_array=True)
format = EmbeddedStoreField(target="format", store_class=Stat, is_array=True)
language = EmbeddedStoreField(target="language", store_class=Stat, is_array=True)
price = EmbeddedStoreField(target="price", store_class=Stat, is_array=True)
pub_date = EmbeddedStoreField(target="publication_date", store_class=Stat, is_array=True)
rating = EmbeddedStoreField(target="rating", store_class=Stat, is_array=True)
source = EmbeddedStoreField(target="source", store_class=Stat, is_array=True)
tag = EmbeddedStoreField(target="tag", store_class=Stat, is_array=True)
    # Without blocking search, the other fields don't make sense anymore, so
    # they are just ignored.
count = Field(target="numberOfResults")
has_less = Field(target="hasLess")
has_more = Field(target="hasMore")
items = EmbeddedStoreField(target='results', store_class=DocumentItem, is_array=True)
offset = Field(target="offset")
stats = EmbeddedStoreField(target='relatedObjects', store_class=Stats)
total_count = Field(target="totalNumberOfResults")
class Search(RpcMixin):
"""Interface to various API search endpoints. Beware that this one is not
a `Store`, which means that when calling its class methods,
expect different types.
"""
interface = 'WSSearchDocument'
@classmethod
def documents(cls, token, search_string, offset, number_of_results, sort=None, direction=None, include_search_fields=None, source=None, related=None, options=None):
"""Returns documents for a given string."""
invert = direction == 'desc'
if not options:
options = {'resultType': 'Object'}
        # default to no source restriction to avoid an unbound name below
        sources = get_search_sources(source) if source else None
return cls.signature(method='searchDocuments', data_converter=DocumentResult,
args=[token, search_string, sources, offset, number_of_results, sort, invert, related, include_search_fields, options])
@classmethod
def suggestions(cls, token, search_string, number_of_results, sources=None, highlight=None):
"""Returns document suggestions for a given string."""
args = [token, search_string, sources, number_of_results]
method = 'getSuggestionObjects'
if highlight:
method = 'getSuggestionObjectsWithHighlights'
args.append(highlight)
return cls.signature(method=method, data_converter=Document, args=args)
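    # Illustrative call (added; the argument values are made up): fetch the first ten
    # matching documents with whatever reaktor session token the caller already holds.
    # result = Search.documents(token, "some query", offset=0, number_of_results=10)
    # for item in result.items:
    #     print(item.relevance, item.document)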
| 2.421875
| 2
|
NumberTheoryAssigment3.py
|
esraagamal23/RSA-CryptoSystem-
| 0
|
12784730
|
import math
from random import randint
from numpy import sqrt
def GCD(a, b):
if b == 0:
return a
return GCD(b, a % b)
#######################################
def ExtendedEuclid(a, b):
if b == 0:
return (1, 0)
(x, y) = ExtendedEuclid(b, a % b)
k = a // b
return (y, x - k * y)
def InvertModulo(a, n):
(b, x) = ExtendedEuclid(a, n)
if b < 0:
        b = (b % n + n) % n # we don't want a negative result
return b
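# Small worked example (added): 7 * 103 = 721 = 2*360 + 1, so 103 is the inverse of 7 mod 360.
assert InvertModulo(7, 360) == 103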
##################################
def PowMod(a, n, mod):
if n == 0:
return 1 % mod
elif n == 1:
return a % mod
else:
b = PowMod(a, n // 2, mod)
b = b * b % mod
if n % 2 == 0:
return b
else:
return b * a % mod
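# Quick sanity check (added): 2**10 = 1024 and 1024 mod 1000 = 24.
assert PowMod(2, 10, 1000) == 24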
def ConvertToInt( message_str):
res = 0
for i in range(len(message_str)):
res = res * 256 + ord(message_str[i])
return res
#####################################
def ConvertToStr(n):
res = ""
while n > 0:
res += chr(n % 256)
n //= 256
return res[::-1]
#question1
def Encrypt(m, n, e):
m=ConvertToInt(m)
c=PowMod(m,e,n)
return c
#############################
def Decrypt(c, p, q, e):
euler=(p-1)*(q-1)
d=InvertModulo(e,euler)
n=p*q
m=PowMod(c,d,n)
m=ConvertToStr(m)
return m
chiper_message=Encrypt("attack", 1000000007*1000000009,23917)
print(Decrypt(chiper_message, 1000000007,1000000009,23917))
#question2
def DecipherSimple(c, n, e, potential_messages):
decipheredtext=''
for i in potential_messages:
if Encrypt(i,n,e)==c:
decipheredtext=i
return decipheredtext
modulo = 101
exponent = 12
ciphertext = Encrypt("attack", modulo, exponent)
print(DecipherSimple(ciphertext, modulo, exponent, ["attack", "don't attack", "wait"]))
# return the smallest divisor of n in the range [i, j), or 0 if none is found
def get_prime_number(i,j,n):
for i in range(i,j):
if(n%i==0):
return i
return 0
##question3
def DecipherSmallPrime(c, n, e):
p=get_prime_number(2,1000000,n)
decipheredtext=Decrypt(c,p,n//p,e)
return decipheredtext
modulo = 101 *18298970732541109011012304219376080251334480295537316123696052970419466495220522723330315111017831737980079504337868198011077274303193766040393009648852841770668239779097280026631944319501437547002412556176186750790476901358334138818777298389724049250700606462316428106882097210008142941838672676714188593227684360287806974345181893018133710957167334490627178666071809992955566020058374505477745993383434501768887090900283569055646901291270870833498474402084748161755197005050874785474707550376333429671113753137201128897550014524209754619355308207537703754006699795711188492048286436285518105948050401762394690148387
exponent = 239
ciphertext = Encrypt("attack", modulo, exponent)
print(DecipherSmallPrime(ciphertext, modulo, exponent))
#question4
def DecipherSmallDiff(c, n, e):
p=get_prime_number(int(sqrt(n)-5000),int(sqrt(n)),n)
decipheredtext=Decrypt(c,p,n//p,e)
return decipheredtext
p = 1000000007
q = 1000000009
n = p * q
e = 239
ciphertext = Encrypt("attack", n, e)
message = DecipherSmallDiff(ciphertext, n, e)
print(message)
#question5
def DecipherCommonDivisor(c1, n1, e1, c2, n2, e2):
p=GCD(n1,n2)
first_decipheredtext= Decrypt(c1,p,n1//p,e1)
second_decipheredtext=Decrypt(c2,p,n2//p,e2)
return first_decipheredtext, second_decipheredtext
p = 101
q1 = 18298970732541109011012304219376080251334480295537316123696052970419466495220522723330315111017831737980079504337868198011077274303193766040393009648852841770668239779097280026631944319501437547002412556176186750790476901358334138818777298389724049250700606462316428106882097210008142941838672676714188593227684360287806974345181893018133710957167334490627178666071809992955566020058374505477745993383434501768887090900283569055646901291270870833498474402084748161755197005050874785474707550376333429671113753137201128897550014524209754619355308207537703754006699795711188492048286436285518105948050401762394690148387
q2 = 1000000007
first_modulo = p * q1
second_modulo = p * q2
first_exponent = 239
second_exponent = 17
first_ciphertext = Encrypt("attack", first_modulo, first_exponent)
second_ciphertext = Encrypt("wait", second_modulo, second_exponent)
print(DecipherCommonDivisor(first_ciphertext, first_modulo, first_exponent, second_ciphertext, second_modulo, second_exponent))
#question6
def DecipherHastad(c1, n1, c2, n2, e):
N1=(n1*n2)//n1
N2=(n1*n2)//n2
x1=InvertModulo(N1,n1)
x2=InvertModulo(N2,n2)
c_square=(c1*N1*x1+c2*N2*x2)%(n1*n2)
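    # Why this works (added note): with e = 2 and a plaintext small enough that
    # m**2 < n1*n2, the CRT combination above equals m**2 exactly, so taking a
    # square root recovers m (the float sqrt below is precise enough only because m is small).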
c=int(round(sqrt(float(c_square))))
broadcast_message=ConvertToStr(c)
# m1= int(round(sqrt(float(c1))))
#m2= int(round(sqrt(float(c2))))
#if(m1==m2):
# broadcast_message=ConvertToStr(m1)
return broadcast_message
p1 = 790383132652258876190399065097
q1 = 662503581792812531719955475509
p2 = 656917682542437675078478868539
q2 = 1263581691331332127259083713503
n1 = p1 * q1
n2 = p2 * q2
e = 2
ciphertext1 = Encrypt("attack", n1, e)
ciphertext2 = Encrypt("attack", n2, e)
message = DecipherHastad(ciphertext1, n1, ciphertext2, n2, e)
print(message)
| 3.40625
| 3
|
Finance/jpy2ntd.1m.py
|
wwwins/MyBitbarPlugins
| 0
|
12784731
|
#!/Users/isobar/.virtualenvs/py3/bin/python
# -*- coding: utf-8 -*-
# <bitbar.title>JPY to NTD</bitbar.title>
# <bitbar.version>1.0</bitbar.version>
# <bitbar.author>wwwins</bitbar.author>
# <bitbar.author.github>wwwins</bitbar.author.github>
# <bitbar.desc>Japanese Yen to Taiwan New Dollar Rate</bitbar.desc>
# <bitbar.image></bitbar.image>
import time
import requests
from lxml import html
# Setting your currency buying/selling rate
BUY_RATE = 0.270
color = "cadetblue"
if (int(time.strftime("%H")) > 17):
print ('🈚️')
exit()
if (int(time.strftime("%H")) < 9):
print ('🈚️')
exit()
r = requests.get("https://rate.bot.com.tw/xrt?Lang=zh-TW")
doc = html.fromstring(r.text)
content = doc.cssselect("td.rate-content-cash")
jpy = content[15].text
if (float(jpy) < BUY_RATE):
color = "red"
print ('JPY:'+jpy+'| color='+color)
| 2.640625
| 3
|
dingtalk/python/alibabacloud_dingtalk/trade_1_0/models.py
|
aliyun/dingtalk-sdk
| 15
|
12784732
|
<filename>dingtalk/python/alibabacloud_dingtalk/trade_1_0/models.py
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict
class QueryTradeOrderHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryTradeOrderRequest(TeaModel):
def __init__(
self,
outer_order_id: str = None,
order_id: int = None,
ding_isv_org_id: int = None,
ding_suite_key: str = None,
):
        # External order ID
        self.outer_order_id = outer_order_id
        # Internal order ID
self.order_id = order_id
self.ding_isv_org_id = ding_isv_org_id
self.ding_suite_key = ding_suite_key
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.outer_order_id is not None:
result['outerOrderId'] = self.outer_order_id
if self.order_id is not None:
result['orderId'] = self.order_id
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('outerOrderId') is not None:
self.outer_order_id = m.get('outerOrderId')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
return self
class QueryTradeOrderResponseBody(TeaModel):
def __init__(
self,
isv_crop_id: str = None,
article_name: str = None,
article_code: str = None,
article_item_name: str = None,
article_item_code: str = None,
quantity: int = None,
outer_order_id: str = None,
order_id: int = None,
fee: int = None,
pay_fee: int = None,
create_time: int = None,
refund_time: int = None,
close_time: int = None,
pay_time: int = None,
status: int = None,
):
        # ISV organization ID
        self.isv_crop_id = isv_crop_id
        # Item name
        self.article_name = article_name
        # Item code
        self.article_code = article_code
        # Specification (SKU) name
        self.article_item_name = article_item_name
        # Specification (SKU) code
        self.article_item_code = article_item_code
        # Item quantity
        self.quantity = quantity
        # External order ID
        self.outer_order_id = outer_order_id
        # Internal order ID
        self.order_id = order_id
        # Original price (unit: cents)
        self.fee = fee
        # Amount actually paid (unit: cents)
        self.pay_fee = pay_fee
        # Order creation time (unit: ms)
        self.create_time = create_time
        # Order refund time (unit: ms)
        self.refund_time = refund_time
        # Order close time (unit: ms)
        self.close_time = close_time
        # Order payment time (unit: ms)
        self.pay_time = pay_time
        # Order status: -1 closed, 0 unpaid, 1 paid, 2 refunded
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.isv_crop_id is not None:
result['isvCropId'] = self.isv_crop_id
if self.article_name is not None:
result['articleName'] = self.article_name
if self.article_code is not None:
result['articleCode'] = self.article_code
if self.article_item_name is not None:
result['articleItemName'] = self.article_item_name
if self.article_item_code is not None:
result['articleItemCode'] = self.article_item_code
if self.quantity is not None:
result['quantity'] = self.quantity
if self.outer_order_id is not None:
result['outerOrderId'] = self.outer_order_id
if self.order_id is not None:
result['orderId'] = self.order_id
if self.fee is not None:
result['fee'] = self.fee
if self.pay_fee is not None:
result['payFee'] = self.pay_fee
if self.create_time is not None:
result['createTime'] = self.create_time
if self.refund_time is not None:
result['refundTime'] = self.refund_time
if self.close_time is not None:
result['closeTime'] = self.close_time
if self.pay_time is not None:
result['payTime'] = self.pay_time
if self.status is not None:
result['status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('isvCropId') is not None:
self.isv_crop_id = m.get('isvCropId')
if m.get('articleName') is not None:
self.article_name = m.get('articleName')
if m.get('articleCode') is not None:
self.article_code = m.get('articleCode')
if m.get('articleItemName') is not None:
self.article_item_name = m.get('articleItemName')
if m.get('articleItemCode') is not None:
self.article_item_code = m.get('articleItemCode')
if m.get('quantity') is not None:
self.quantity = m.get('quantity')
if m.get('outerOrderId') is not None:
self.outer_order_id = m.get('outerOrderId')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('fee') is not None:
self.fee = m.get('fee')
if m.get('payFee') is not None:
self.pay_fee = m.get('payFee')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
if m.get('refundTime') is not None:
self.refund_time = m.get('refundTime')
if m.get('closeTime') is not None:
self.close_time = m.get('closeTime')
if m.get('payTime') is not None:
self.pay_time = m.get('payTime')
if m.get('status') is not None:
self.status = m.get('status')
return self
class QueryTradeOrderResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryTradeOrderResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryTradeOrderResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class CreateOpportunityHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class CreateOpportunityRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
belong_to_phone_num: str = None,
contact_phone_num: str = None,
dept_id: int = None,
market_code: str = None,
ding_isv_org_id: int = None,
):
        # Enterprise corpId
        self.corp_id = corp_id
        # Phone number of the record owner
        self.belong_to_phone_num = belong_to_phone_num
        # Contact phone number
        self.contact_phone_num = contact_phone_num
        # Department ID
        self.dept_id = dept_id
        # Product (market) code
self.market_code = market_code
self.ding_isv_org_id = ding_isv_org_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.belong_to_phone_num is not None:
result['belongToPhoneNum'] = self.belong_to_phone_num
if self.contact_phone_num is not None:
result['contactPhoneNum'] = self.contact_phone_num
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.market_code is not None:
result['marketCode'] = self.market_code
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('belongToPhoneNum') is not None:
self.belong_to_phone_num = m.get('belongToPhoneNum')
if m.get('contactPhoneNum') is not None:
self.contact_phone_num = m.get('contactPhoneNum')
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('marketCode') is not None:
self.market_code = m.get('marketCode')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
return self
class CreateOpportunityResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
class CheckOpportunityResultHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class CheckOpportunityResultRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
belong_to_phone_num: str = None,
contact_phone_num: str = None,
dept_id: int = None,
market_code: str = None,
):
# corpId
self.corp_id = corp_id
# belongToPhoneNum
self.belong_to_phone_num = belong_to_phone_num
# contactPhoneNum
self.contact_phone_num = contact_phone_num
# deptId
self.dept_id = dept_id
# marketCode
self.market_code = market_code
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.belong_to_phone_num is not None:
result['belongToPhoneNum'] = self.belong_to_phone_num
if self.contact_phone_num is not None:
result['contactPhoneNum'] = self.contact_phone_num
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.market_code is not None:
result['marketCode'] = self.market_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('belongToPhoneNum') is not None:
self.belong_to_phone_num = m.get('belongToPhoneNum')
if m.get('contactPhoneNum') is not None:
self.contact_phone_num = m.get('contactPhoneNum')
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('marketCode') is not None:
self.market_code = m.get('marketCode')
return self
class CheckOpportunityResultResponseBody(TeaModel):
def __init__(
self,
biz_success: bool = None,
):
# success
self.biz_success = biz_success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.biz_success is not None:
result['bizSuccess'] = self.biz_success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('bizSuccess') is not None:
self.biz_success = m.get('bizSuccess')
return self
class CheckOpportunityResultResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: CheckOpportunityResultResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = CheckOpportunityResultResponseBody()
self.body = temp_model.from_map(m['body'])
return self
| 2.265625
| 2
|
gitdir/testsave.py
|
guyKN/voiceswap
| 1
|
12784733
|
import tensorflow as tf
import os
X = tf.placeholder(tf.float32,[None,1])
W = tf.Variable(tf.zeros([1,1]))
B = tf.Variable(tf.zeros([1]))
y = tf.matmul(X,W) + B
y_ = tf.placeholder(tf.float32,[None,1])
cost = tf.reduce_mean(tf.square(y-y_))
opt = tf.train.AdagradOptimizer(1).minimize(cost)
feed = {X:[[1],[4]], y_:[[3],[1]]}
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as sess:
#sess.run(init)
saver.restore(sess,"testDir/model.ckpt-97")
for i in range(100):
_, cost_ = sess.run([opt,cost], feed_dict=feed)
print cost_
saver.save(sess, os.path.join("testDir", 'model.ckpt'), global_step=i)
| 2.4375
| 2
|
laboratorios/1-recursion/E/problem.py
|
MatiwsxD/ayed-2019-1
| 0
|
12784734
|
import json
def supe(digit):
digit = str(digit)
if len(digit)==1:
return digit
else:
cont = 0
for i in range(len(digit)):
cont+= int(digit[i])
return supe(cont)
# TODO Complete!
def super_digit(n, k):
    digit = str(n) * k
return int(supe(digit))
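# Worked example (added): n=148, k=3 -> "148148148"; digit sums 39 -> 12 -> 3.
assert super_digit(148, 3) == 3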
if __name__ == '__main__':
with open('./data.json') as f:
tests = json.load(f)
for i, test in enumerate(tests):
n = test["n"]
k = test["k"]
actual = super_digit(n, k)
expected = test['result']
assert actual == expected, f'Test {i} | n: {n} | k: {k} | expected: {expected}, actual: {actual}'
print('OK!')
| 3.578125
| 4
|
mtbf_driver/MtbfTestCase.py
|
Mozilla-GitHub-Standards/0c166c201f85d9bdef4d4ad1ce985515176672819d41406b9977f54b7ac091dc
| 0
|
12784735
|
<filename>mtbf_driver/MtbfTestCase.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from gaiatest import GaiaTestCase
from gaiatest.apps.homescreen.app import Homescreen
from marionette_driver.by import By
class GaiaMtbfTestCase(GaiaTestCase):
def launch_by_touch(self, app):
# Parameter: GaiaApp
Homescreen(self.marionette).launch()
icon = self.marionette.find_element(By.CSS_SELECTOR, 'gaia-app-icon[data-identifier="' + app.manifest_url + '"]')
self.marionette.execute_script("arguments[0].scrollIntoView(false);", [icon])
        # Sleep because the homescreen blocks touch events while it is scrolling
time.sleep(1)
icon.tap()
time.sleep(3)
self.apps.switch_to_displayed_app()
def cleanup_storage(self):
pass
def cleanup_gaia(self, full_reset=True):
# Turn on screen
if not self.device.is_screen_enabled:
self.device.turn_screen_on()
# unlock
if self.data_layer.get_setting('lockscreen.enabled'):
self.device.unlock()
# kill FTU if possible
if self.apps.displayed_app.name.upper() == "FTU":
self.apps.kill(self.apps.displayed_app)
if full_reset:
# disable passcode
self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111')
self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False)
# change language back to english
self.data_layer.set_setting("language.current", "en-US")
# reset keyboard to default values
self.data_layer.set_setting("keyboard.enabled-layouts",
"{'app://keyboard.gaiamobile.org/manifest.webapp': {'en': True, 'number': True}}")
# reset do not track
self.data_layer.set_setting('privacy.donottrackheader.value', '-1')
# don't change status of airplane mode
# if self.data_layer.get_setting('airplaneMode.enabled'):
# # enable the device radio, disable airplane mode
# self.data_layer.set_setting('airplaneMode.enabled', False)
# Re-set edge gestures pref to False
self.data_layer.set_setting('edgesgesture.enabled', False)
# disable carrier data connection
if self.device.has_mobile_connection:
self.data_layer.disable_cell_data()
self.data_layer.disable_cell_roaming()
## TODO: Disable wifi operation since Bug 1064800
# if self.device.has_wifi:
# # Bug 908553 - B2G Emulator: support wifi emulation
# if not self.device.is_emulator:
# self.data_layer.enable_wifi()
# self.data_layer.forget_all_networks()
# self.data_layer.disable_wifi()
# don't remove contact data
# self.data_layer.remove_all_contacts()
# reset to home screen
self.device.touch_home_button()
# disable sound completely
self.data_layer.set_volume(0)
# disable auto-correction of keyboard
self.data_layer.set_setting('keyboard.autocorrect', False)
# restore settings from testvars
[self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()]
# restore prefs from testvars
for name, value in self.testvars.get('prefs', {}).items():
if type(value) is int:
self.data_layer.set_int_pref(name, value)
elif type(value) is bool:
self.data_layer.set_bool_pref(name, value)
else:
self.data_layer.set_char_pref(name, value)
def tearDown(self):
time.sleep(1)
self.device.touch_home_button()
time.sleep(1)
GaiaTestCase.tearDown(self)
| 2.21875
| 2
|
spec2scl/transformers/perl.py
|
pombredanne/bitbucket.org-bkabrda-spec2scl
| 0
|
12784736
|
from spec2scl import settings
from spec2scl import transformer
from spec2scl.decorators import matches
@transformer.Transformer.register_transformer
class PerlTransformer(transformer.Transformer):
def __init__(self, options={}):
super(PerlTransformer, self).__init__(options)
@matches(r'^[^\n]*%{__perl}\s+', one_line=False, sections=settings.RUNTIME_SECTIONS)
@matches(r'^\s*perl\s+', one_line=False, sections=settings.RUNTIME_SECTIONS) # carefully here, "perl" will occur often in the specfile
@matches(r'./Build', one_line=False)
def handle_perl_specific_commands(self, original_spec, pattern, text):
return self.sclize_all_commands(pattern, text)
| 2.359375
| 2
|
autogalaxy/analysis/aggregator/aggregator.py
|
jonathanfrawley/PyAutoGalaxy
| 0
|
12784737
|
<reponame>jonathanfrawley/PyAutoGalaxy
from autofit.database.model.fit import Fit
import autogalaxy as ag
from typing import Optional
from functools import partial
def plane_gen_from(aggregator):
"""
Returns a generator of `Plane` objects from an input aggregator, which generates a list of the `Plane` objects
for every set of results loaded in the aggregator.
    This is performed by mapping the `plane_via_database_from` function with the aggregator, which sets up each plane using only
generators ensuring that manipulating the planes of large sets of results is done in a memory efficient way.
Parameters
----------
aggregator : af.Aggregator
A PyAutoFit aggregator object containing the results of PyAutoGalaxy model-fits."""
return aggregator.map(func=plane_via_database_from)
def plane_via_database_from(fit: Fit):
"""
Returns a `Plane` object from an aggregator's `SearchOutput` class, which we call an 'agg_obj' to describe that
it acts as the aggregator object for one result in the `Aggregator`. This uses the aggregator's generator outputs
    such that the function can use the `Aggregator`'s map function to create a `Plane` generator.
The `Plane` is created following the same method as the PyAutoGalaxy `Search` classes using an instance of the
maximum log likelihood model's galaxies. These galaxies have their hyper-images added (if they were used in the
fit) and passed into a Plane object.
Parameters
----------
fit : af.SearchOutput
A PyAutoFit aggregator's SearchOutput object containing the generators of the results of PyAutoGalaxy model-fits.
"""
galaxies = fit.instance.galaxies
hyper_model_image = fit.value(name="hyper_model_image")
hyper_galaxy_image_path_dict = fit.value(name="hyper_galaxy_image_path_dict")
if hyper_galaxy_image_path_dict is not None:
for (galaxy_path, galaxy) in fit.instance.path_instance_tuples_for_class(
ag.Galaxy
):
if galaxy_path in hyper_galaxy_image_path_dict:
galaxy.hyper_model_image = hyper_model_image
galaxy.hyper_galaxy_image = hyper_galaxy_image_path_dict[galaxy_path]
return ag.Plane(galaxies=galaxies)
def imaging_gen_from(aggregator, settings_imaging: Optional[ag.SettingsImaging] = None):
"""
Returns a generator of `Imaging` objects from an input aggregator, which generates a list of the
`Imaging` objects for every set of results loaded in the aggregator.
    This is performed by mapping the `imaging_via_database_from` function with the aggregator, which sets up each
imaging using only generators ensuring that manipulating the imaging of large sets of results is done in a
memory efficient way.
Parameters
----------
aggregator : af.Aggregator
A PyAutoFit aggregator object containing the results of PyAutoGalaxy model-fits."""
func = partial(imaging_via_database_from, settings_imaging=settings_imaging)
return aggregator.map(func=func)
def imaging_via_database_from(
fit: Fit, settings_imaging: Optional[ag.SettingsImaging] = None
):
"""
    Returns an `Imaging` object from an aggregator's `SearchOutput` class, which we call an 'agg_obj' to describe
    that it acts as the aggregator object for one result in the `Aggregator`. This uses the aggregator's generator
    outputs such that the function can use the `Aggregator`'s map function to create an `Imaging` generator.
The `Imaging` is created following the same method as the PyAutoGalaxy `Search` classes, including using the
`SettingsImaging` instance output by the Search to load inputs of the `Imaging` (e.g. psf_shape_2d).
Parameters
----------
fit : af.SearchOutput
A PyAutoFit aggregator's SearchOutput object containing the generators of the results of PyAutoGalaxy model-fits.
"""
data = fit.value(name="data")
noise_map = fit.value(name="noise_map")
psf = fit.value(name="psf")
settings_imaging = settings_imaging or fit.value(name="settings_dataset")
imaging = ag.Imaging(
image=data,
noise_map=noise_map,
psf=psf,
settings=settings_imaging,
setup_convolver=True,
)
imaging.apply_settings(settings=settings_imaging)
return imaging
def fit_imaging_gen_from(
aggregator,
settings_imaging: Optional[ag.SettingsImaging] = None,
settings_pixelization: Optional[ag.SettingsPixelization] = None,
settings_inversion: Optional[ag.SettingsInversion] = None,
):
"""
Returns a generator of `FitImaging` objects from an input aggregator, which generates a list of the
`FitImaging` objects for every set of results loaded in the aggregator.
    This is performed by mapping the `fit_imaging_via_database_from` function with the aggregator, which sets up each fit using
only generators ensuring that manipulating the fits of large sets of results is done in a memory efficient way.
Parameters
----------
aggregator : af.Aggregator
A PyAutoFit aggregator object containing the results of PyAutoGalaxy model-fits."""
func = partial(
fit_imaging_via_database_from,
settings_imaging=settings_imaging,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
)
return aggregator.map(func=func)
def fit_imaging_via_database_from(
fit: Fit,
settings_imaging: Optional[ag.SettingsImaging] = None,
settings_pixelization: Optional[ag.SettingsPixelization] = None,
settings_inversion: Optional[ag.SettingsInversion] = None,
):
"""
Returns a `FitImaging` object from an aggregator's `SearchOutput` class, which we call an 'agg_obj' to describe
that it acts as the aggregator object for one result in the `Aggregator`. This uses the aggregator's generator
    outputs such that the function can use the `Aggregator`'s map function to create a `FitImaging` generator.
The `FitImaging` is created following the same method as the PyAutoGalaxy `Search` classes.
Parameters
----------
fit : af.SearchOutput
A PyAutoFit aggregator's SearchOutput object containing the generators of the results of PyAutoGalaxy model-fits.
"""
imaging = imaging_via_database_from(fit=fit, settings_imaging=settings_imaging)
plane = plane_via_database_from(fit=fit)
settings_pixelization = settings_pixelization or fit.value(
name="settings_pixelization"
)
settings_inversion = settings_inversion or fit.value(name="settings_inversion")
return ag.FitImaging(
imaging=imaging,
plane=plane,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
)
def interferometer_gen_from(
aggregator,
real_space_mask: Optional[ag.Mask2D] = None,
settings_interferometer: Optional[ag.SettingsInterferometer] = None,
):
"""
Returns a generator of `Interferometer` objects from an input aggregator, which generates a list of the
`Interferometer` objects for every set of results loaded in the aggregator.
    This is performed by mapping the `interferometer_via_database_from` function with the aggregator, which sets up each
interferometer object using only generators ensuring that manipulating the interferometer objects of large
sets of results is done in a memory efficient way.
Parameters
----------
aggregator : af.Aggregator
A PyAutoFit aggregator object containing the results of PyAutoGalaxy model-fits."""
func = partial(
interferometer_via_database_from,
real_space_mask=real_space_mask,
settings_interferometer=settings_interferometer,
)
return aggregator.map(func=func)
def interferometer_via_database_from(
fit: Fit,
real_space_mask: Optional[ag.Mask2D] = None,
settings_interferometer: Optional[ag.SettingsInterferometer] = None,
):
"""
    Returns an `Interferometer` object from an aggregator's `SearchOutput` class, which we call an 'agg_obj' to
describe that it acts as the aggregator object for one result in the `Aggregator`. This uses the aggregator's
    generator outputs such that the function can use the `Aggregator`'s map function to create an
`Interferometer` generator.
The `Interferometer` is created following the same method as the PyAutoGalaxy `Search` classes, including
using the `SettingsInterferometer` instance output by the Search to load inputs of the `Interferometer`
(e.g. psf_shape_2d).
Parameters
----------
fit : af.SearchOutput
A PyAutoFit aggregator's SearchOutput object containing the generators of the results of PyAutoGalaxy
model-fits.
"""
data = fit.value(name="data")
noise_map = fit.value(name="noise_map")
uv_wavelengths = fit.value(name="uv_wavelengths")
real_space_mask = real_space_mask or fit.value(name="real_space_mask")
settings_interferometer = settings_interferometer or fit.value(
name="settings_dataset"
)
interferometer = ag.Interferometer(
visibilities=data,
noise_map=noise_map,
uv_wavelengths=uv_wavelengths,
real_space_mask=real_space_mask,
)
interferometer = interferometer.apply_settings(settings=settings_interferometer)
return interferometer
def fit_interferometer_gen_from(
aggregator,
real_space_mask: Optional[ag.Mask2D] = None,
settings_interferometer: Optional[ag.SettingsInterferometer] = None,
settings_pixelization: Optional[ag.SettingsPixelization] = None,
settings_inversion: Optional[ag.SettingsInversion] = None,
):
"""
    Returns a generator of `FitInterferometer` objects from an input aggregator, which generates a list of the
    `FitInterferometer` objects for every set of results loaded in the aggregator.
    This is performed by mapping the `fit_interferometer_via_database_from` function with the aggregator, which sets
    up each fit using only generators, ensuring that manipulating the fits of large sets of results is done in a
    memory efficient way.
    Parameters
    ----------
    aggregator : af.Aggregator
        A PyAutoFit aggregator object containing the results of PyAutoGalaxy model-fits.
"""
func = partial(
fit_interferometer_via_database_from,
real_space_mask=real_space_mask,
settings_interferometer=settings_interferometer,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
)
return aggregator.map(func=func)
def fit_interferometer_via_database_from(
fit: Fit,
real_space_mask: Optional[ag.Mask2D] = None,
settings_interferometer: Optional[ag.SettingsInterferometer] = None,
settings_pixelization: Optional[ag.SettingsPixelization] = None,
settings_inversion: Optional[ag.SettingsInversion] = None,
):
"""
    Returns a `FitInterferometer` object from an aggregator's `SearchOutput` class, which we call an 'agg_obj' to
    describe that it acts as the aggregator object for one result in the `Aggregator`. This uses the aggregator's
    generator outputs such that the function can use the `Aggregator`'s map function to create a `FitInterferometer`
    generator.
    The `FitInterferometer` is created following the same method as the PyAutoGalaxy `Search` classes.
    Parameters
    ----------
    fit : af.SearchOutput
        A PyAutoFit aggregator's SearchOutput object containing the generators of the results of PyAutoGalaxy model-fits.
"""
settings_pixelization = settings_pixelization or fit.value(
name="settings_pixelization"
)
settings_inversion = settings_inversion or fit.value(name="settings_inversion")
interferometer = interferometer_via_database_from(
fit=fit,
real_space_mask=real_space_mask,
settings_interferometer=settings_interferometer,
)
plane = plane_via_database_from(fit=fit)
return ag.FitInterferometer(
interferometer=interferometer,
plane=plane,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
)
| 3.015625
| 3
|
send_email.py
|
PoojaJain30/Daily-HackerNews-Email
| 0
|
12784738
|
import smtplib
from email.message import EmailMessage
# function to send email to listed email address
def send_email(info,news):
email = EmailMessage()
email['From'] = '< Sender Name >'
email['To'] = info[1]
email['Subject'] = 'Hello '+info[0]
email.set_content(news,'html')
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.login('<Sender Email>','<Sender Password>')
smtp.send_message(email)
smtp.quit()
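# A minimal usage sketch (illustrative only): "info" is assumed to be a
# (name, email-address) pair and "news" an HTML string, matching how the
# function indexes its arguments above. The recipient below is a placeholder.
if __name__ == '__main__':
    sample_info = ('<Recipient Name>', '<Recipient Email>')
    sample_news = '<h1>Top Hacker News stories</h1><p>...</p>'
    send_email(sample_info, sample_news)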
| 3.40625
| 3
|
drlnd/core/common/path.py
|
yycho0108/DRLND_Core
| 0
|
12784739
|
#!/usr/bin/env python3
from pathlib import Path
def get_project_root() -> Path:
"""
Get project root directory with assumed structure as:
${PACKAGE_ROOT}/core/common/path.py
"""
return Path(__file__).resolve().parent.parent.parent
def get_config_file() -> Path:
"""
Get default config file.
"""
return get_project_root()/'data/config/config.yaml'
def main():
print(get_project_root())
if __name__ == '__main__':
main()
| 2.84375
| 3
|
riscof/utils.py
|
haseebazaz-10xe/riscof
| 0
|
12784740
|
# See LICENSE.incore for details
import pathlib
import logging
import argparse
import os
import sys
import subprocess
import operator
import shlex
import ruamel
from ruamel.yaml import YAML
#from riscof.log import logger
yaml = YAML(typ="rt")
yaml.default_flow_style = False
yaml.allow_unicode = True
logger = logging.getLogger(__name__)
def dump_yaml(foo, outfile):
yaml.dump(foo, outfile)
def load_yaml(foo):
try:
with open(foo, "r") as file:
return dict(yaml.load(file))
except ruamel.yaml.constructor.DuplicateKeyError as msg:
logger = logging.getLogger(__name__)
error = "\n".join(str(msg).split("\n")[2:-7])
logger.error(error)
raise SystemExit
def absolute_path(config_dir, entry_path):
"""
Create an absolute path based on the config's file directory location and a
path value from a configuration entry.
"""
# Allow entries relative to user home.
entry_path = os.path.expanduser(entry_path)
if os.path.exists(entry_path):
# If the entry is already a valid path, return the absolute value of it.
logger.debug("Path entry found: " + str(entry_path))
abs_entry_path = os.path.abspath(entry_path)
else:
# Assume that the entry is relative to the location of the config file.
logger.debug("Path entry '{}' not found. Combine it with config file "\
"location '{}'.".format(entry_path, config_dir))
abs_entry_path = os.path.abspath(os.path.join(config_dir, entry_path))
logger.debug("Using the path: " +str(abs_entry_path))
return abs_entry_path
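# For example (illustrative paths): absolute_path("/home/user/project", "env/ref.yaml")
# returns "/home/user/project/env/ref.yaml" when "env/ref.yaml" does not already
# exist relative to the current working directory.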
class makeUtil():
"""
Utility for ease of use of make commands like `make` and `pmake`.
Supports automatic addition and execution of targets. Uses the class
:py:class:`shellCommand` to execute commands.
"""
def __init__(self,makeCommand='make',makefilePath="./Makefile"):
""" Constructor.
:param makeCommand: The variant of make to be used with optional arguments.
Ex - `pmake -j 8`
:type makeCommand: str
:param makefilePath: The path to the makefile to be used.
:type makefilePath: str
"""
self.makeCommand=makeCommand
self.makefilePath = makefilePath
self.targets = []
def add_target(self,command,tname=""):
"""
Function to add a target to the makefile.
:param command: The command to be executed when the target is run.
:type command: str
:param tname: The name of the target to be used. If not specified, TARGET<num> is used as the name.
:type tname: str
"""
if tname == "":
tname = "TARGET"+str(len(self.targets))
with open(self.makefilePath,"a") as makefile:
makefile.write("\n\n.PHONY : " + tname + "\n" + tname + " :\n\t"+command.replace("\n","\n\t"))
self.targets.append(tname)
def execute_target(self,tname,cwd="./"):
"""
Function to execute a particular target only.
:param tname: Name of the target to execute.
:type tname: str
:param cwd: The working directory to be set while executing the make command.
:type cwd: str
:raise AssertionError: If target name is not present in the list of defined targets.
"""
assert tname in self.targets, "Target does not exist."
return shellCommand(self.makeCommand+" -f "+self.makefilePath+" "+tname).run(cwd=cwd)
def execute_all(self,cwd):
"""
Function to execute all the defined targets.
:param cwd: The working directory to be set while executing the make command.
:type cwd: str
"""
return shellCommand(self.makeCommand+" -f "+self.makefilePath+" "+" ".join(self.targets)).run(cwd=cwd)
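# A usage sketch for makeUtil (illustrative; the Makefile path, target name and
# command below are placeholders, not part of riscof's shipped configuration):
#
#   make = makeUtil(makefilePath="/tmp/Makefile")
#   make.add_target("echo hello", tname="hello")
#   make.execute_target("hello")        # runs `make -f /tmp/Makefile hello`
#   make.execute_all(cwd="/tmp")        # runs every target added so far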
class Command():
"""
    Class for building commands in a form supported
    by the :py:mod:`subprocess` module. Supports automatic
    conversion of :py:class:`pathlib.Path` instances to
    a valid format for :py:mod:`subprocess` functions.
"""
def __init__(self, *args, pathstyle='auto', ensure_absolute_paths=False):
"""Constructor.
:param pathstyle: Determine the path style when adding instance of
:py:class:`pathlib.Path`. Path style determines the slash type
which separates the path components. If pathstyle is `auto`, then
on Windows backslashes are used and on Linux forward slashes are used.
When backslashes should be prevented on all systems, the pathstyle
should be `posix`. No other values are allowed.
:param ensure_absolute_paths: If true, then any passed path will be
converted to absolute path.
:param args: Initial command.
:type pathstyle: str
:type ensure_absolute_paths: bool
"""
self.ensure_absolute_paths = ensure_absolute_paths
self.pathstyle = pathstyle
self.args = []
for arg in args:
self.append(arg)
def append(self, arg):
"""Add new argument to command.
:param arg: Argument to be added. It may be list, tuple,
:py:class:`Command` instance or any instance which
supports :py:func:`str`.
"""
to_add = []
if type(arg) is list:
to_add = arg
elif type(arg) is tuple:
to_add = list(arg)
elif isinstance(arg, type(self)):
to_add = arg.args
elif isinstance(arg, str) and not self._is_shell_command():
to_add = shlex.split(arg)
else:
# any object which will be converted into str.
to_add.append(arg)
# Convert all arguments to its string representation.
# pathlib.Path instances
to_add = [
self._path2str(el) if isinstance(el, pathlib.Path) else str(el)
for el in to_add
]
self.args.extend(to_add)
def clear(self):
"""Clear arguments."""
self.args = []
def run(self, **kwargs):
"""Execute the current command.
Uses :py:class:`subprocess.Popen` to execute the command.
:return: The return code of the process .
:raise subprocess.CalledProcessError: If `check` is set
to true in `kwargs` and the process returns
non-zero value.
"""
kwargs.setdefault('shell', self._is_shell_command())
cwd = self._path2str(kwargs.get(
'cwd')) if not kwargs.get('cwd') is None else self._path2str(
os.getcwd())
kwargs.update({'cwd': cwd})
logger.debug(cwd)
        # When running as a shell command, subprocess expects
        # the arguments to be a single string.
logger.debug(str(self))
cmd = str(self) if kwargs['shell'] else self
x = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
out, err = x.communicate()
out = out.rstrip()
err = err.rstrip()
if x.returncode != 0:
if out:
logger.error(out.decode("ascii"))
if err:
logger.error(err.decode("ascii"))
else:
if out:
logger.warning(out.decode("ascii"))
if err:
logger.warning(err.decode("ascii"))
return x.returncode
def _is_shell_command(self):
"""
Return true if current command is supposed to be executed
as shell script otherwise false.
"""
return any('|' in arg for arg in self.args)
def _path2str(self, path):
"""Convert :py:class:`pathlib.Path` to string.
The final form of the string is determined by the
configuration of `Command` instance.
:param path: Path-like object which will be converted
into string.
:return: String representation of `path`
"""
path = pathlib.Path(path)
if self.ensure_absolute_paths and not path.is_absolute():
path = path.resolve()
if self.pathstyle == 'posix':
return path.as_posix()
elif self.pathstyle == 'auto':
return str(path)
else:
raise ValueError(f"Invalid pathstyle {self.pathstyle}")
def __add__(self, other):
cmd = Command(self,
pathstyle=self.pathstyle,
ensure_absolute_paths=self.ensure_absolute_paths)
cmd += other
return cmd
def __iadd__(self, other):
self.append(other)
return self
def __iter__(self):
"""
Support iteration so functions from :py:mod:`subprocess` module
support `Command` instance.
"""
return iter(self.args)
def __repr__(self):
return f'<{self.__class__.__name__} args={self.args}>'
def __str__(self):
return ' '.join(self.args)
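# A usage sketch for Command (illustrative; the toolchain and paths are placeholders):
#
#   cmd = Command("riscv64-unknown-elf-gcc", "-o", pathlib.Path("out.elf"), "test.S")
#   cmd += "-march=rv64i"               # __iadd__ appends; plain strings are shlex-split
#   print(str(cmd))                     # space-joined argument string
#   returncode = cmd.run(cwd="/tmp")    # executed via subprocess.Popen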
class shellCommand(Command):
"""
Sub Class of the command class which always executes commands as shell commands.
"""
def __init__(self, *args, pathstyle='auto', ensure_absolute_paths=False):
"""
:param pathstyle: Determine the path style when adding instance of
:py:class:`pathlib.Path`. Path style determines the slash type
which separates the path components. If pathstyle is `auto`, then
on Windows backslashes are used and on Linux forward slashes are used.
When backslashes should be prevented on all systems, the pathstyle
should be `posix`. No other values are allowed.
:param ensure_absolute_paths: If true, then any passed path will be
converted to absolute path.
:param args: Initial command.
:type pathstyle: str
:type ensure_absolute_paths: bool
"""
return super().__init__(*args,
pathstyle=pathstyle,
ensure_absolute_paths=ensure_absolute_paths)
def _is_shell_command(self):
return True
class ColoredFormatter(logging.Formatter):
"""
Class to create a log output which is colored based on level.
"""
def __init__(self, *args, **kwargs):
super(ColoredFormatter, self).__init__(*args, **kwargs)
self.colors = {
'DEBUG': '\033[94m',
'INFO': '\033[92m',
'WARNING': '\033[93m',
'ERROR': '\033[91m',
}
self.reset = '\033[0m'
def format(self, record):
msg = str(record.msg)
level_name = str(record.levelname)
name = str(record.name)
color_prefix = self.colors[level_name]
return '{0}{1:>9s} | [--{2}--]: {3}{4}'.format(color_prefix,
level_name, name, msg,
self.reset)
def setup_logging(log_level):
"""Setup logging
Verbosity decided on user input
:param log_level: User defined log level
:type log_level: str
"""
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
print(
"\033[91mInvalid log level passed. Please select from debug | info | warning | error\033[0m"
)
raise ValueError("{}-Invalid log level.".format(log_level))
logging.basicConfig(level=numeric_level)
class SortingHelpFormatter(argparse.HelpFormatter):
def add_arguments(self, actions):
actions = sorted(actions, key=operator.attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
raise SystemExit
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def print_help(self,file=None):
if file is None:
file = sys.stdout
self._print_message(self.format_help(), file)
subparsers_actions = [
action for action in self._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
self._print_message("Action '{}'\n\n".format(choice),file)
self._print_message("\t"+(subparser.format_help()).replace("\n","\n\t")+"\n",file)
class CustomAction(argparse.Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
argparse.Action.__init__(self,
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
return
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, list):
values = [ str(pathlib.Path(v).absolute()) for v in values ]
else:
values = [str(pathlib.Path(values).absolute())]
existing_val = getattr(namespace, self.dest, None)
if existing_val:
setattr(namespace, self.dest, existing_val + values)
else:
setattr(namespace, self.dest, values)
def riscof_cmdline_args():
parser = MyParser(
formatter_class=SortingHelpFormatter,
prog="riscof",
description="RISCOF is a framework used to run the Architectural Tests on a DUT and check compatibility with the RISC-V ISA")
parser.add_argument('--version','-v',
help='Print version of RISCOF being used',
action='store_true')
parser.add_argument('--verbose',
action='store',
default='info',
choices = ['debug','info','warning','error'],
help='[Default=info]',
metavar="")
subparsers = parser.add_subparsers(dest='command',title="Action",description="The action to be performed by riscof.",help="List of actions supported by riscof.")
coverage = subparsers.add_parser('coverage',help='Generate Coverage Report for the given YAML spec.',formatter_class=SortingHelpFormatter)
coverage.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
coverage.add_argument('--cgf',
action=CustomAction,
# required=True,
help='The Path to the cgf file(s). Multiple allowed',
metavar= 'PATH')
coverage.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
coverage.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
coverage.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
coverage.add_argument('--no-browser',action='store_true',
help="Do not open the browser for showing the test report.")
generatedb = subparsers.add_parser('gendb',help='Generate Database for the standard suite.',formatter_class=SortingHelpFormatter)
generatedb.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
generatedb.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
generatedb.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
setup = subparsers.add_parser('setup',help='Initiate setup for riscof.',formatter_class=SortingHelpFormatter)
setup.add_argument('--dutname',
action='store',
help='Name of DUT plugin. [Default=spike]',
default='spike',
metavar= 'NAME')
setup.add_argument('--refname',
action='store',
help='Name of Reference plugin. [Default=sail_cSim]',
default='sail_cSim',
metavar= 'NAME')
setup.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
validate = subparsers.add_parser('validateyaml',
help='Validate the Input YAMLs using riscv-config.',formatter_class=SortingHelpFormatter)
validate.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
validate.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
run = subparsers.add_parser('run',
help='Run the tests on DUT and reference and compare signatures.',formatter_class=SortingHelpFormatter)
run.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
run.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
run.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
run.add_argument('--no-browser',action='store_true',
help="Do not open the browser for showing the test report.")
run.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
testlist = subparsers.add_parser('testlist',
help='Generate the test list for the given DUT and suite.',formatter_class=SortingHelpFormatter)
testlist.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
testlist.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
testlist.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
testlist.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
return parser
| 2.375
| 2
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotBooleanPart_t.py
|
htlcnn/ironpython-stubs
| 182
|
12784741
|
<filename>release/stubs.min/Tekla/Structures/ModelInternal_parts/dotBooleanPart_t.py
class dotBooleanPart_t(object):
# no doc
Boolean=None
OperativePart=None
Type=None
| 1.359375
| 1
|
tests/unit/test_index_class.py
|
griggheo/cheesecake
| 12
|
12784742
|
"""
Prepare environment.
>>> import _path_cheesecake
>>> from cheesecake.cheesecake_index import Index
>>> from _helper_cheesecake import Glutton
*****
Default maximum value for an index should be 0.
>>> index = Index()
>>> index.max_value
0
To learn a class name, ask for its representation.
>>> Index
<Index class: unnamed>
>>> class NamedIndex(Index):
... pass
>>> NamedIndex
<Index class: NamedIndex>
*****
Create two indices.
>>> big_index = Index()
>>> index = Index()
>>> index.name = 'small_index'
Add one index to another.
>>> big_index.add_subindex(index)
>>> index in big_index.subindices
True
Try to add non-Index object as a subindex.
>>> big_index.add_subindex(42)
Traceback (most recent call last):
...
ValueError: subindex has to be instance of Index
Now remove subindex.
>>> big_index.remove_subindex('small_index')
>>> index in big_index.subindices
False
*****
Test passing subindices to index constructor.
>>> def create_index(name):
... idx = Index()
... idx.name = name
... return idx
>>> index_one = create_index('one')
>>> index_two = create_index('two')
>>> index_three = create_index('three')
>>> index = Index(index_one, index_two, index_three)
>>> def get_names(indices):
... return map(lambda idx: idx.name, indices)
>>> get_names(index.subindices)
['one', 'two', 'three']
>>> index.remove_subindex('one')
>>> get_names(index.subindices)
['two', 'three']
*****
Test requirements.
>>> class NewIndex(Index):
... def compute(self, one, two, three):
... pass
>>> new = NewIndex()
>>> new.requirements
['one', 'two', 'three']
Now create other index and add it to the NewIndex.
>>> class OtherIndex(Index):
... def compute(self, four):
... pass
>>> other = OtherIndex()
>>> other.requirements
['four']
>>> new.add_subindex(other)
>>> new.requirements
['one', 'two', 'three', 'four']
*****
Index which throws an Exception during computation will
get removed from the list of subindices.
>>> class BadIndex(Index):
... max_value = 10
... def compute(self):
... raise Exception("No reason.")
>>> bad_index = BadIndex()
>>> index = Index(bad_index)
>>> bad_index in index.subindices
True
>>> index.compute_with(Glutton())
0
>>> bad_index in index.subindices
False
>>> index.max_value
0
"""
| 3.34375
| 3
|
nlplingo/nn/extraction_model.py
|
BBN-E/nlplingo
| 3
|
12784743
|
from __future__ import absolute_import
from __future__ import division
from __future__ import with_statement
import abc
import json
import logging
import numpy as np
import os
import keras
from keras.optimizers import Adadelta, SGD, RMSprop, Adam
from nlplingo.nn.constants import supported_pytorch_models
from nlplingo.nn.keras_models.common import keras_custom_objects
import time
from datetime import datetime
from shutil import copyfile
import random
import math
from nlplingo.nn.framework.sentence_re import SentenceRETrain
logger = logging.getLogger(__name__)
class ExtractionModel(abc.ABC):
verbosity = 0
def __init__(self, params, extractor_params, event_domain, embeddings, hyper_params, features):
"""
:type event_domain: nlplingo.tasks.event_domain.EventDomain
:type embeddings: dict[str : nlplingo.embeddings.word_embeddings.WordEmbedding]
:type model_name: str
:type features: object containing a 'feature_strings' attribute
"""
self.hyper_params = hyper_params
self.params = params
self.extractor_params = extractor_params
self.event_domain = event_domain
self.num_event_types = len(event_domain.event_types)
self.num_role_types = len(event_domain.event_roles)
self.num_ne_types = len(event_domain.entity_types)
self.num_ne_bio_types = None
self.num_entity_relation_types = len(event_domain.entity_relation_types)
self.num_eer_types = len(event_domain.eer_types)
self.word_vec_length = 1 # because we use word vector index
self.embeddings_vector_size = None
if 'embeddings' in extractor_params:
self.embeddings_vector_size = extractor_params['embeddings']['vector_size']
self.word_embeddings = None
if embeddings is not None and 'word_embeddings' in embeddings:
self.word_embeddings = embeddings['word_embeddings'].word_vec
""":type: numpy.ndarray"""
self.model_type = extractor_params['model_type']
self.optimizer = self._configure_optimizer(extractor_params)
self.model_file = extractor_params['model_file']
self.data_keys = []
self.num_output = None
self.model_dir = None
self.model = None
self.id2label = dict([(v, k) for k, v in self.event_domain.event_roles.items()])
self.trained_model = None
self.features = features
if 'engine' in extractor_params and (extractor_params['engine'] == 'pytorch'):
import torch
import random
torch.manual_seed(extractor_params['seed'])
np.random.seed(extractor_params['seed'])
random.seed(1234)
self.extractor_params['cuda'] = torch.cuda.is_available()
if extractor_params.get('cpu', False):
self.extractor_params['cuda'] = False
elif extractor_params.get('cuda', False):
torch.cuda.manual_seed(extractor_params['seed'])
self.layers = None
def _get_framework_class(self):
if self.model_type in supported_pytorch_models:
return SentenceRETrain
else:
raise Exception('model type ' + self.model_type + ' is not supported')
def fit_txt(self, train_path, dev_path, test_path):
# uses framework (with distinct initialization args)
framework_class = self._get_framework_class()
framework = framework_class(self.model, train_path, dev_path, test_path, self.extractor_params, self.hyper_params, self.features, self.event_domain)
framework.train_model()
def fit_model(self, train_data_list, train_label, test_data_list, test_label):
# uses framework
if self.extractor_params.get('engine') == 'pytorch':
framework_class = self._get_framework_class()
framework = framework_class(self.model, train_data_list, train_label, test_data_list, test_label, self.extractor_params, self.hyper_params, self.features, self.event_domain)
framework.train_model()
elif 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
raise IOError(
"Extractor engine in {'keras', None} but KerasExtractionModel "
"should have implemented its own fit method overriding "
"ExtractionModel.fit_model. This error should no longer exist "
"once KerasExtractionModel is part of framework_class system.")
else:
raise Exception('Only Keras or PyTorch engines are supported.')
def _configure_optimizer(self, params):
optimizer_params = params.get('optimizer', dict())
tunable_params = {}
if 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
if optimizer_params.get('name') == 'SGD':
tunable_params = {
'name': 'SGD',
'lr': optimizer_params.get('lr', 0.01),
'momentum': optimizer_params.get('momentum', 0.0),
'decay': optimizer_params.get('decay', 0.0),
'nesterov': optimizer_params.get('nesterov', False)
}
optimizer = SGD(
lr=tunable_params['lr'],
momentum=tunable_params['momentum'],
decay=tunable_params['decay'],
nesterov=tunable_params['nesterov']
)
elif optimizer_params.get('name') == 'RMSprop':
tunable_params = {
'name': 'RMSprop',
'lr': optimizer_params.get('lr', 0.001),
'rho': optimizer_params.get('rho', 0.9),
'epsilon': optimizer_params.get('epsilon', None),
'decay': optimizer_params.get('decay', 0.0)
}
optimizer = RMSprop(
lr=tunable_params['lr'],
rho=tunable_params['rho'],
epsilon=tunable_params['epsilon'],
decay=tunable_params['decay']
)
elif optimizer_params.get('name') == 'Adam':
tunable_params = {
'name': 'Adam',
'lr': optimizer_params.get('lr', 0.001)
}
optimizer = Adam(
lr=tunable_params['lr']
)
else:
tunable_params = {
'name': 'Adadelta',
'lr': optimizer_params.get('lr', 0.1),
'rho': optimizer_params.get('rho', 0.95),
'epsilon': optimizer_params.get('epsilon', 1e-6),
'decay': optimizer_params.get('decay', 0.0)
}
# Default Adadelta
optimizer = Adadelta(
lr=tunable_params['lr'],
rho=tunable_params['rho'],
epsilon=tunable_params['epsilon']
)
print('=== Optimization parameters ===')
print(json.dumps(tunable_params, sort_keys=True, indent=4))
print('=== Optimization parameters ===')
return optimizer
elif self.extractor_params['engine'] == 'pytorch':
# TODO: make optimizer more configurable
optimizer_params['name'] = optimizer_params.get('name', 'sgd')
optimizer_params['lr'] = optimizer_params.get('lr', 0.3)
optimizer_params['lr_decay'] = optimizer_params.get('lr_decay', 0.9)
optimizer_params['decay_epoch'] = optimizer_params.get('decay_epoch', 5)
return optimizer_params
elif self.extractor_params['engine'] == 'transformers':
pass
else:
raise Exception('Only Keras or PyTorch engines are supported.')
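    # An illustrative "optimizer" block in the extractor params (keys mirror the
    # defaults read above; the values shown are placeholders):
    #   "optimizer": {"name": "Adam", "lr": 0.001}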
def create_model(self):
pass
def __getstate__(self):
u"""Defines what is to be pickled.
Keras models cannot be pickled. Should call save_keras_model() and load_keras_model() separately.
The sequence is :
obj.save_keras_model('kerasFilename')
pickle.dump(obj, fileHandle)
...
obj = pickle.load(fileHandle)
obj.load_keras_model()"""
# Create state without self.keras_model
state = dict(self.__dict__)
#state.pop(u'keras_model') # probably not needed anymore, now that we've made keras_model global
return state
def __setstate__(self, state):
# Reload state for unpickling
self.__dict__ = state
def load_keras_model(self, filename=None):
self.model = keras.models.load_model(filename, keras_custom_objects)
def save_keras_model(self, filename):
self.model.save(filename)
print(self.model.summary())
def predict(self, test_data_list):
if 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
return self.model.predict(test_data_list)
elif self.extractor_params['engine'] == 'pytorch':
from data.loader import DataLoader as BatchDataLoader
print("Evaluating on test set...")
predictions = []
test_batch = BatchDataLoader(test_data_list, self.features.feature_strings, None, self.hyper_params.dict['batch_size'], self.hyper_params.dict, self.event_domain.event_roles, evaluation=True, test_mode=True)
for i, batch in enumerate(test_batch):
preds, _ = self.trained_model.predict(batch, compute_loss=False, compute_logits=True)
predictions.append(preds)
return np.vstack(predictions)
else:
raise Exception('Only Keras or PyTorch engines are supported.')
| 2.140625
| 2
|
netdev/vendors/cisco/cisco_asa.py
|
maliciousgroup/netdev
| 199
|
12784744
|
<reponame>maliciousgroup/netdev
"""Subclass specific to Cisco ASA"""
import re
from netdev.logger import logger
from netdev.vendors.ios_like import IOSLikeDevice
class CiscoASA(IOSLikeDevice):
"""Class for working with Cisco ASA"""
def __init__(self, *args, **kwargs):
"""
Initialize class for asynchronous working with network devices
:param str host: device hostname or ip address for connection
:param str username: username for logging to device
        :param str password: password for logging to device
:param str secret: secret password for privilege mode
:param int port: ssh port for connection. Default is 22
:param str device_type: network device type
:param known_hosts: file with known hosts. Default is None (no policy). With () it will use default file
:param str local_addr: local address for binding source of tcp connection
:param client_keys: path for client keys. Default in None. With () it will use default file in OS
:param str passphrase: password for encrypted client keys
:param float timeout: timeout in second for getting information from channel
:param loop: asyncio loop object
"""
super().__init__(*args, **kwargs)
self._multiple_mode = False
_disable_paging_command = "terminal pager 0"
@property
def multiple_mode(self):
""" Returning Bool True if ASA in multiple mode"""
return self._multiple_mode
async def connect(self):
"""
Async Connection method
Using 5 functions:
* _establish_connection() for connecting to device
* _set_base_prompt() for finding and setting device prompt
* _enable() for getting privilege exec mode
* _disable_paging() for non interact output in commands
* _check_multiple_mode() for checking multiple mode in ASA
"""
logger.info("Host {}: trying to connect to the device".format(self._host))
await self._establish_connection()
await self._set_base_prompt()
await self.enable_mode()
await self._disable_paging()
await self._check_multiple_mode()
logger.info("Host {}: Has connected to the device".format(self._host))
async def _set_base_prompt(self):
"""
Setting two important vars for ASA
base_prompt - textual prompt in CLI (usually hostname)
base_pattern - regexp for finding the end of command. IT's platform specific parameter
For ASA devices base_pattern is "prompt([\/\w]+)?(\(.*?\))?[#|>]
"""
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
# Cut off prompt from "prompt/context/other" if it exists
# If not we get all prompt
prompt = prompt[:-1].split("/")
prompt = prompt[0]
self._base_prompt = prompt
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(prompt=base_prompt, delimiters=delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
async def _check_multiple_mode(self):
"""Check mode multiple. If mode is multiple we adding info about contexts"""
logger.info("Host {}:Checking multiple mode".format(self._host))
out = await self.send_command("show mode")
if "multiple" in out:
self._multiple_mode = True
logger.debug(
"Host {}: Multiple mode: {}".format(self._host, self._multiple_mode)
)
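# A connection sketch (illustrative; the host and credentials are placeholders and
# this mirrors the generic asyncio usage pattern rather than a tested script):
#
#   import asyncio
#
#   async def main():
#       asa = CiscoASA(host="192.0.2.1", username="admin", password="secret")
#       await asa.connect()
#       print(await asa.send_command("show version"))
#
#   asyncio.get_event_loop().run_until_complete(main())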
| 2.4375
| 2
|
jumpscale/packages/threebot_deployer/models/user_solutions.py
|
zaibon/js-sdk
| 0
|
12784745
|
from jumpscale.core.base import Base, fields
from enum import Enum
import hashlib
class ThreebotState(Enum):
RUNNING = "RUNNING" # the workloads are deployed and running
DELETED = "DELETED" # workloads and backups deleted
STOPPED = "STOPPED" # expired or manually stoped (delete workloads only)
class UserThreebot(Base):
# instance name is the f"threebot_{solution uuid}"
solution_uuid = fields.String()
identity_tid = fields.Integer()
name = fields.String()
owner_tname = fields.String() # owner's tname in Threefold Connect after cleaning
farm_name = fields.String()
state = fields.Enum(ThreebotState)
continent = fields.String()
explorer_url = fields.String()
threebot_container_wid = fields.Integer()
trc_container_wid = fields.Integer()
reverse_proxy_wid = fields.Integer()
subdomain_wid = fields.Integer()
secret_hash = fields.String()
def verify_secret(self, secret):
if not self.secret_hash:
return True
return self.secret_hash == hashlib.md5(secret.encode()).hexdigest()
def hash_secret(self, secret):
self.secret_hash = hashlib.md5(secret.encode()).hexdigest()
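    # Illustrative behaviour of the two helpers above (the secret is a placeholder):
    #   bot.hash_secret("s3cret")         # stores md5("s3cret") in secret_hash
    #   bot.verify_secret("s3cret")       # -> True
    #   bot.verify_secret("wrong")        # -> False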
| 2.296875
| 2
|
clean_architecture_helper_gql_extension/connections.py
|
HerlanAssis/django-clean-architecture-helper-gql-extension
| 0
|
12784746
|
import graphene
from graphene import Int
class TotalItemsConnection(graphene.relay.Connection):
class Meta:
abstract = True
total = Int()
def resolve_total(self, info, **kwargs):
return len(self.iterable)
class BaseConnectionField(graphene.relay.ConnectionField):
def __init__(self, type, *args, **kwargs):
filters = type._meta.node._meta.filter_class
if filters is not None:
for key, value in vars(filters()).items():
kwargs.setdefault(key, value)
super(BaseConnectionField, self).__init__(type, *args, **kwargs)
| 2.296875
| 2
|
three/lights.py
|
jzitelli/three.py
| 12
|
12784747
|
<reponame>jzitelli/three.py
from . import *
class Light(Object3D):
def __init__(self, color=0xffffff, intensity=None, distance=None, shadowCameraNear=None, shadowCameraFar=None, shadowCameraFov=None, **kwargs):
Object3D.__init__(self, **kwargs)
self.color = color
self.intensity = intensity
self.distance = distance
self.shadowCameraNear = shadowCameraNear
self.shadowCameraFar = shadowCameraFar
self.shadowCameraFov = shadowCameraFov
class AmbientLight(Light):
pass
class PointLight(Light):
pass
class DirectionalLight(Light):
# TODO: specifying direction
def __init__(self, target=None, **kwargs):
Light.__init__(self, **kwargs)
self.target = target
class SpotLight(Light):
# TODO: set target (ObjectLoader does not support)
def __init__(self, angle=None, exponent=None, decay=None, target=None, **kwargs):
Light.__init__(self, **kwargs)
self.angle = angle
self.exponent = exponent
self.decay = decay
self.target = target
| 2.75
| 3
|
options/base_options.py
|
csqiangwen/Deep-Unsupervised-Pixelization
| 42
|
12784748
|
<gh_stars>10-100
import argparse
import os
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, etc)')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=256, help='input resolution')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints_pixelization', help='models are saved here')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
self.mkdirs(self.opt.checkpoints_dir)
file_name = os.path.join(self.opt.checkpoints_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
def mkdir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(self, paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
self.mkdir(path)
else:
self.mkdir(paths)
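# A subclassing sketch (illustrative; a pix2pix/CycleGAN-style TrainOptions is
# assumed here because parse() reads self.isTrain from the subclass):
#
#   class TrainOptions(BaseOptions):
#       def initialize(self):
#           BaseOptions.initialize(self)
#           self.parser.add_argument('--niter', type=int, default=100)
#           self.isTrain = True
#
#   opt = TrainOptions().parse()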
| 2.21875
| 2
|
FileSize/Non_newline_file_size.py
|
JennyChi/Script-helper
| 0
|
12784749
|
with open('100MB.log', 'r') as f:
    content = f.read()
totalbytes = 0
for line in content.split("\n"):
    totalbytes = len(line.rstrip("\n")) + totalbytes
'''
method 2: count every character that is not "\n"
for counter in content:
    if counter != "\n":
        totalbytes = totalbytes + 1
'''
print("Non-newline bytes:", totalbytes)
| 2.8125
| 3
|
server/swagger_server/models/preferences.py
|
fabric-testbed/UserInformationService
| 0
|
12784750
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Preferences(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, settings: object=None, permissions: object=None, interests: object=None): # noqa: E501
"""Preferences - a model defined in Swagger
:param settings: The settings of this Preferences. # noqa: E501
:type settings: object
:param permissions: The permissions of this Preferences. # noqa: E501
:type permissions: object
:param interests: The interests of this Preferences. # noqa: E501
:type interests: object
"""
self.swagger_types = {
'settings': object,
'permissions': object,
'interests': object
}
self.attribute_map = {
'settings': 'settings',
'permissions': 'permissions',
'interests': 'interests'
}
self._settings = settings
self._permissions = permissions
self._interests = interests
@classmethod
def from_dict(cls, dikt) -> 'Preferences':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Preferences of this Preferences. # noqa: E501
:rtype: Preferences
"""
return util.deserialize_model(dikt, cls)
@property
def settings(self) -> object:
"""Gets the settings of this Preferences.
:return: The settings of this Preferences.
:rtype: object
"""
return self._settings
@settings.setter
def settings(self, settings: object):
"""Sets the settings of this Preferences.
:param settings: The settings of this Preferences.
:type settings: object
"""
self._settings = settings
@property
def permissions(self) -> object:
"""Gets the permissions of this Preferences.
:return: The permissions of this Preferences.
:rtype: object
"""
return self._permissions
@permissions.setter
def permissions(self, permissions: object):
"""Sets the permissions of this Preferences.
:param permissions: The permissions of this Preferences.
:type permissions: object
"""
self._permissions = permissions
@property
def interests(self) -> object:
"""Gets the interests of this Preferences.
:return: The interests of this Preferences.
:rtype: object
"""
return self._interests
@interests.setter
def interests(self, interests: object):
"""Sets the interests of this Preferences.
:param interests: The interests of this Preferences.
:type interests: object
"""
self._interests = interests
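    # An illustrative round trip (keys mirror attribute_map above; values are placeholders):
    #   prefs = Preferences.from_dict({'settings': {}, 'permissions': {}, 'interests': {}})
    #   prefs.settings      # -> the 'settings' object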
| 2.203125
| 2
|