max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/unit/test_resultlib.py | boxingbeetle/softfab | 20 | 12771951 | # SPDX-License-Identifier: BSD-3-Clause
"""Test result storage and processing functionality."""
from pytest import fixture, raises
from softfab.resultlib import ResultStorage
@fixture
def resultStorage(tmp_path):
    # Fresh ResultStorage rooted in pytest's per-test temporary directory,
    # so every test starts with an empty store.
    return ResultStorage(tmp_path)
# Test data that can be used by various test cases:
TASK_NAME = 'testtask'  # task definition name under which results are stored
RUN_ID = 'faster'  # single run ID used by the put/get tests
KEY = 'dawn'  # result key used when only one key is needed
NR_RUNS = 50  # number of runs generated by the bulk tests
def testResultsPutGet(resultStorage):
    """Test whether data can be stored and retrieved."""

    def valueFunc(index):
        return f'value{index:02d}'

    # Store one value per run.
    runIds = [f'run{index:02d}' for index in range(NR_RUNS)]
    for index, runId in enumerate(runIds):
        resultStorage.putData(TASK_NAME, runId, {KEY: valueFunc(index)})

    # Retrieve and check every (runId, value) pair.
    foundIds = []
    for runId, value in resultStorage.getCustomData(TASK_NAME, runIds, KEY):
        assert runId.startswith('run')
        index = int(runId[3:])
        assert 0 <= index < NR_RUNS
        assert value == valueFunc(index)
        foundIds.append(runId)
    assert sorted(foundIds) == sorted(runIds)
def testResultsInvalidKey(resultStorage):
    """Test treatment of invalid keys."""
    # TODO: Maybe we need more thought about what should be valid keys.
    for badKey in ('../abc', ''):
        # Storing under an invalid key must be rejected...
        with raises(KeyError):
            resultStorage.putData(TASK_NAME, RUN_ID, {badKey: 'dummy'})
        # ...and querying that key must yield no results.
        assert list(resultStorage.getCustomData(TASK_NAME, [RUN_ID], badKey)) == []
def testResultsReplace(resultStorage):
    """Check that new data replaces old data."""
    # Write twice under the same key; the second write must win.
    for value in ('old', 'new'):
        resultStorage.putData(TASK_NAME, RUN_ID, {KEY: value})
    results = resultStorage.getCustomData(TASK_NAME, [RUN_ID], KEY)
    assert list(results) == [(RUN_ID, 'new')]
def testResultsAdd(resultStorage):
    """Check that new data with different keys is added to old data."""
    resultStorage.putData(TASK_NAME, RUN_ID, {'oldkey': 'old'})
    resultStorage.putData(TASK_NAME, RUN_ID, {'newkey': 'new'})
    # Both keys must be retrievable after the second put.
    for key, value in (('oldkey', 'old'), ('newkey', 'new')):
        results = resultStorage.getCustomData(TASK_NAME, [RUN_ID], key)
        assert list(results) == [(RUN_ID, value)]
def testResultsListKeys(resultStorage):
    """Tests listing the keys that exist for a task name."""
    # Each run stores the keys that are multiples of its index.
    for index in range(2, NR_RUNS):
        data = {
            f'key{key:02d}': 'dummy'
            for key in range(2, NR_RUNS)
            if key % index == 0
        }
        resultStorage.putData(TASK_NAME, f'run{index:02d}', data)
    # For every N, N % N == 0 is true, so every key in [2, NR_RUNS)
    # should be present in the union.
    expected = {f'key{key:02d}' for key in range(2, NR_RUNS)}
    assert resultStorage.getCustomKeys(TASK_NAME) == expected
def testResultsListKeysNone(resultStorage):
    """Tests listing the keys if no data is stored for a task name."""
    storedKeys = resultStorage.getCustomKeys(TASK_NAME)
    assert storedKeys == set()
| 2.296875 | 2 |
01_Language/01_Functions/python/fileinode.py | cliff363825/TwentyFour | 3 | 12771952 | # coding: utf-8
import os
def fileinode(filename):
    """Return the inode number of *filename* (mirrors PHP's fileinode())."""
    stat_result = os.stat(filename)
    return stat_result.st_ino
if __name__ == '__main__':
    # Demo: print the inode of a local file; requires 'test.txt' to exist
    # in the current working directory.
    print(fileinode("test.txt"))
| 2.3125 | 2 |
sustainableCityManagement/tests/ML_Models/test_bikes_uasge_prediction.py | Josh-repository/Dashboard-CityManager- | 0 | 12771953 | <reponame>Josh-repository/Dashboard-CityManager-
from main_project.ML_models.bikes_usage_prediction import *
from django.test import TestCase
from unittest.mock import MagicMock
from main_project.Config.config_handler import read_config
import datetime
import unittest
class TestBikeUsagePrediction(TestCase):
    """Unit tests for the bikes-usage prediction ML model."""

    @classmethod
    def setUpTestData(cls):
        # No shared fixtures are needed for these tests.
        pass

    def test_predict_bikes_usage(self):
        """The prediction function works for a given input array of data."""
        daily_usage = [10, 20, 30, 15]
        predicted = predict_bikes_usage(
            daily_usage, predictDays=1,
            previous_days_to_consider=config_vals["days_to_consider_for_prediction"])
        assert predicted == 23
| 2.671875 | 3 |
pypeit/pypeit.py | joshwalawender/PypeIt | 0 | 12771954 | <gh_stars>0
"""
Main driver class for PypeIt run
"""
import time
import os
import numpy as np
from collections import OrderedDict
from astropy.io import fits
from pypeit import msgs
from pypeit import calibrations
from pypeit.images import scienceimage
from pypeit import ginga
from pypeit import reduce
from pypeit.core import qa
from pypeit.core import wave
from pypeit.core import save
from pypeit import specobjs
from pypeit.core import pixels
from pypeit.spectrographs.util import load_spectrograph
from configobj import ConfigObj
from pypeit.par.util import parse_pypeit_file
from pypeit.par import PypeItPar
from pypeit.metadata import PypeItMetaData
from IPython import embed
class PypeIt(object):
"""
This class runs the primary calibration and extraction in PypeIt
.. todo::
Fill in list of attributes!
Args:
pypeit_file (:obj:`str`):
PypeIt filename.
verbosity (:obj:`int`, optional):
Verbosity level of system output. Can be:
- 0: No output
- 1: Minimal output (default)
- 2: All output
overwrite (:obj:`bool`, optional):
Flag to overwrite any existing files/directories.
reuse_masters (:obj:`bool`, optional):
Reuse any pre-existing calibration files
logname (:obj:`str`, optional):
The name of an ascii log file with the details of the
reduction.
show: (:obj:`bool`, optional):
Show reduction steps via plots (which will block further
execution until clicked on) and outputs to ginga. Requires
remote control ginga session via ``ginga --modules=RC &``
redux_path (:obj:`str`, optional):
Over-ride reduction path in PypeIt file (e.g. Notebook usage)
Attributes:
pypeit_file (:obj:`str`):
Name of the pypeit file to read. PypeIt files have a
specific set of valid formats. A description can be found
:ref:`pypeit_file`.
fitstbl (:obj:`pypit.metadata.PypeItMetaData`): holds the meta info
"""
# __metaclass__ = ABCMeta
def __init__(self, pypeit_file, verbosity=2, overwrite=True, reuse_masters=False, logname=None,
             show=False, redux_path=None):
    """Parse the pypeit file, build the parameter set and metadata table,
    and instantiate the calibrations object. See the class docstring for
    argument descriptions."""

    # Load
    cfg_lines, data_files, frametype, usrdata, setups \
            = parse_pypeit_file(pypeit_file, runtime=True)
    self.pypeit_file = pypeit_file

    # Spectrograph
    cfg = ConfigObj(cfg_lines)
    spectrograph_name = cfg['rdx']['spectrograph']
    self.spectrograph = load_spectrograph(spectrograph_name, ifile=data_files[0])
    msgs.info('Loaded spectrograph {0}'.format(self.spectrograph.spectrograph))

    # --------------------------------------------------------------
    # Get the full set of PypeIt parameters
    #   - Grab a science or standard file for configuration specific
    #     parameters
    scistd_file = None
    for idx, row in enumerate(usrdata):
        if ('science' in row['frametype']) or ('standard' in row['frametype']):
            scistd_file = data_files[idx]
            break
    #   - Configuration specific parameters for the spectrograph
    # BUGFIX: initialize so the name is always defined. Previously this
    # was only assigned inside the `if` below, so a pypeit file with no
    # science or standard frames raised an UnboundLocalError when
    # building the parameter set. PypeItPar.from_cfg_lines accepts
    # cfg_lines=None and falls back to the defaults.
    spectrograph_cfg_lines = None
    if scistd_file is not None:
        msgs.info('Setting configuration-specific parameters using {0}'.format(
                  os.path.split(scistd_file)[1]))
        spectrograph_cfg_lines = self.spectrograph.config_specific_par(scistd_file).to_config()
    else:
        msgs.warn('No science or standard frames found; '
                  'using default spectrograph parameters.')
    #   - Build the full set, merging with any user-provided parameters
    self.par = PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines, merge_with=cfg_lines)
    msgs.info('Built full PypeIt parameter set.')

    # Check the output paths are ready
    if redux_path is not None:
        self.par['rdx']['redux_path'] = redux_path

    # TODO: Write the full parameter set here?
    # --------------------------------------------------------------

    # --------------------------------------------------------------
    # Build the meta data
    #   - Re-initilize based on the file data
    msgs.info('Compiling metadata')
    self.fitstbl = PypeItMetaData(self.spectrograph, self.par, files=data_files,
                                  usrdata=usrdata, strict=True)
    #   - Interpret automated or user-provided data from the PypeIt file
    self.fitstbl.finalize_usr_build(frametype, setups[0])
    # --------------------------------------------------------------

    #   - Write .calib file (For QA naming amongst other things)
    calib_file = pypeit_file.replace('.pypeit', '.calib')
    self.fitstbl.write_calib(calib_file)

    # Other Internals
    self.logname = logname
    self.overwrite = overwrite
    # Currently the runtime argument determines the behavior for
    # reuse_masters.
    self.reuse_masters = reuse_masters
    self.show = show

    # Set paths
    if self.par['calibrations']['caldir'] == 'default':
        self.calibrations_path = os.path.join(self.par['rdx']['redux_path'], 'Masters')
    else:
        self.calibrations_path = self.par['calibrations']['caldir']

    # Report paths
    msgs.info('Setting reduction path to {0}'.format(self.par['rdx']['redux_path']))
    msgs.info('Master calibration data output to: {0}'.format(self.calibrations_path))
    msgs.info('Science data output to: {0}'.format(self.science_path))
    msgs.info('Quality assessment plots output to: {0}'.format(self.qa_path))
    # TODO: Is anything written to the qa dir or only to qa/PNGs?
    # Should we have separate calibration and science QA directories?

    # Instantiate Calibrations class
    self.caliBrate \
            = calibrations.MultiSlitCalibrations(self.fitstbl, self.par['calibrations'],
                                                 self.spectrograph,
                                                 caldir=self.calibrations_path,
                                                 qadir=self.qa_path,
                                                 reuse_masters=self.reuse_masters,
                                                 show=self.show)
    # Init
    self.verbosity = verbosity
    # TODO: I don't think this is ever used
    self.frame = None
    self.det = None
    self.tstart = None
    self.basename = None
    self.sciI = None
    self.obstime = None
@property
def science_path(self):
"""Return the path to the science directory."""
return os.path.join(self.par['rdx']['redux_path'], self.par['rdx']['scidir'])
@property
def qa_path(self):
"""Return the path to the top-level QA directory."""
return os.path.join(self.par['rdx']['redux_path'], self.par['rdx']['qadir'])
def build_qa(self):
    """
    Generate QA wrappers
    """
    # Build the master-frame QA HTML and then the per-exposure QA HTML.
    qa.gen_mf_html(self.pypeit_file, self.qa_path)
    qa.gen_exp_html()
# TODO: This should go in a more relevant place
def spec_output_file(self, frame, twod=False):
"""
Return the path to the spectral output data file.
Args:
frame (:obj:`int`):
Frame index from :attr:`fitstbl`.
twod (:obj:`bool`):
Name for the 2D output file; 1D file otherwise.
Returns:
:obj:`str`: The path for the output file
"""
return os.path.join(self.science_path, 'spec{0}d_{1}.fits'.format('2' if twod else '1',
self.fitstbl.construct_basename(frame)))
def outfile_exists(self, frame):
"""
Check whether the 2D outfile of a given frame already exists
Args:
frame (int): Frame index from fitstbl
Returns:
bool: True if the 2d file exists, False if it does not exist
"""
return os.path.isfile(self.spec_output_file(frame, twod=True))
def get_std_outfile(self, standard_frames):
"""
Grab the output filename from an input list of standard_frame indices
If more than one index is provided, the first is taken
Args:
standard_frames (list): List of indices corresponding to standard stars
Returns:
str: Full path to the standard spec1d output file
"""
# TODO: Need to decide how to associate standards with
# science frames in the case where there is more than one
# standard associated with a given science frame. Below, I
# just use the first standard
std_outfile = None
std_frame = None if len(standard_frames) == 0 else standard_frames[0]
# Prepare to load up standard?
if std_frame is not None:
std_outfile = self.spec_output_file(std_frame) \
if isinstance(std_frame, (int,np.integer)) else None
if std_outfile is not None and not os.path.isfile(std_outfile):
msgs.error('Could not find standard file: {0}'.format(std_outfile))
return std_outfile
def reduce_all(self):
    """
    Main driver of the entire reduction

    Calibration and extraction via a series of calls to
    reduce_exposure(): first all standards, then all science frames,
    grouped by calibration group and unique combination ID.
    """
    # Validate the parameter set
    required = ['rdx', 'calibrations', 'scienceframe', 'scienceimage', 'flexure', 'fluxcalib']
    can_be_None = ['flexure', 'fluxcalib']
    self.par.validate_keys(required=required, can_be_None=can_be_None)

    # Record the start time so print_end_time() can report elapsed time.
    self.tstart = time.time()

    # Find the standard frames
    is_standard = self.fitstbl.find_frames('standard')
    # Find the science frames
    is_science = self.fitstbl.find_frames('science')
    # Frame indices
    frame_indx = np.arange(len(self.fitstbl))

    # Iterate over each calibration group and reduce the standards
    for i in range(self.fitstbl.n_calib_groups):
        # Find all the frames in this calibration group
        in_grp = self.fitstbl.find_calib_group(i)
        # Find the indices of the standard frames in this calibration group:
        grp_standards = frame_indx[is_standard & in_grp]
        # Reduce all the standard frames, loop on unique comb_id
        u_combid_std = np.unique(self.fitstbl['comb_id'][grp_standards])
        for j, comb_id in enumerate(u_combid_std):
            frames = np.where(self.fitstbl['comb_id'] == comb_id)[0]
            bg_frames = np.where(self.fitstbl['bkg_id'] == comb_id)[0]
            if not self.outfile_exists(frames[0]) or self.overwrite:
                std_dict = self.reduce_exposure(frames, bg_frames=bg_frames)
                # TODO come up with sensible naming convention for save_exposure for combined files
                self.save_exposure(frames[0], std_dict, self.basename)
            else:
                msgs.info('Output file: {:s} already exists'.format(self.fitstbl.construct_basename(frames[0])) +
                          '. Set overwrite=True to recreate and overwrite.')

    # Iterate over each calibration group again and reduce the science frames
    for i in range(self.fitstbl.n_calib_groups):
        # Find all the frames in this calibration group
        in_grp = self.fitstbl.find_calib_group(i)
        # Find the indices of the science frames in this calibration group:
        grp_science = frame_indx[is_science & in_grp]
        # Associate standards (previously reduced above) for this setup
        std_outfile = self.get_std_outfile(frame_indx[is_standard])
        # Reduce all the science frames; keep the basenames of the
        # science frames for use in flux calibration
        science_basename = [None]*len(grp_science)
        # Loop on unique comb_id
        u_combid = np.unique(self.fitstbl['comb_id'][grp_science])
        for j, comb_id in enumerate(u_combid):
            frames = np.where(self.fitstbl['comb_id'] == comb_id)[0]
            # Find all frames whose comb_id matches the current frames bkg_id.
            bg_frames = np.where((self.fitstbl['comb_id'] == self.fitstbl['bkg_id'][frames][0]) &
                                 (self.fitstbl['comb_id'] >= 0))[0]
            # JFH changed the syntax below to that above, which allows frames to be used more than once
            # as a background image. The syntax below would require that we could somehow list multiple
            # numbers for the bkg_id which is impossible without a comma separated list
            # bg_frames = np.where(self.fitstbl['bkg_id'] == comb_id)[0]
            if not self.outfile_exists(frames[0]) or self.overwrite:
                sci_dict = self.reduce_exposure(frames, bg_frames=bg_frames,
                                                std_outfile=std_outfile)
                science_basename[j] = self.basename
                # TODO come up with sensible naming convention for save_exposure for combined files
                self.save_exposure(frames[0], sci_dict, self.basename)
            else:
                msgs.warn('Output file: {:s} already exists'.format(self.fitstbl.construct_basename(frames[0])) +
                          '. Set overwrite=True to recreate and overwrite.')

        msgs.info('Finished calibration group {0}'.format(i))

    # Finish
    self.print_end_time()
# This is a static method to allow for use in coadding script
@staticmethod
def select_detectors(detnum=None, ndet=1):
"""
Return the 1-indexed list of detectors to reduce.
Args:
detnum (:obj:`int`, :obj:`list`, optional):
One or more detectors to reduce. If None, return the
full list for the provided number of detectors (`ndet`).
ndet (:obj:`int`, optional):
The number of detectors for this instrument. Only used
if `detnum is None`.
Returns:
list: List of detectors to be reduced
"""
if detnum is None:
return np.arange(1, ndet+1).tolist()
return [detnum] if isinstance(detnum, int) else detnum
def reduce_exposure(self, frames, bg_frames=None, std_outfile=None):
    """
    Reduce a single exposure

    Args:
        frames (:obj:`list`):
            0-indexed rows in :attr:`fitstbl` with the frames to
            combine and reduce.
        bg_frames (:obj:`list`, optional):
            List of frame indices for the background.
        std_outfile (:obj:`str`, optional):
            File with a previously reduced standard spectrum from
            PypeIt.

    Returns:
        dict: The dictionary containing the primary outputs of
        extraction, keyed by detector number plus a 'meta' entry.
    """
    # TODO:
    # - bg_frames should be None by default
    # - change doc string to reflect that more than one frame can be
    #   provided

    # if show is set, clear the ginga channels at the start of each new sci_ID
    if self.show:
        # TODO: Put this in a try/except block?
        ginga.clear_all()

    has_bg = True if bg_frames is not None and len(bg_frames) > 0 else False
    # Is this an IR reduction?
    # TODO: Why specific to IR?
    self.ir_redux = True if has_bg else False

    # TODO: JFH Why does this need to be ordered?
    sci_dict = OrderedDict()  # This needs to be ordered
    sci_dict['meta'] = {}
    sci_dict['meta']['ir_redux'] = self.ir_redux

    # Print status message
    msgs_string = 'Reducing target {:s}'.format(self.fitstbl['target'][frames[0]]) + msgs.newline()
    # TODO: Print these when the frames are actually combined,
    # backgrounds are used, etc?
    msgs_string += 'Combining frames:' + msgs.newline()
    for iframe in frames:
        msgs_string += '{0:s}'.format(self.fitstbl['filename'][iframe]) + msgs.newline()
    msgs.info(msgs_string)
    if has_bg:
        bg_msgs_string = ''
        for iframe in bg_frames:
            bg_msgs_string += '{0:s}'.format(self.fitstbl['filename'][iframe]) + msgs.newline()
        bg_msgs_string = msgs.newline() + 'Using background from frames:' + msgs.newline() + bg_msgs_string
        msgs.info(bg_msgs_string)

    # Find the detectors to reduce
    detectors = PypeIt.select_detectors(detnum=self.par['rdx']['detnum'],
                                        ndet=self.spectrograph.ndet)
    if len(detectors) != self.spectrograph.ndet:
        # NOTE(review): np.arange(ndet) is 0-indexed while `detectors`
        # is 1-indexed, so the detector numbers reported here look off
        # by one -- confirm and fix upstream if so.
        msgs.warn('Not reducing detectors: {0}'.format(' '.join([str(d) for d in
                  set(np.arange(self.spectrograph.ndet))-set(detectors)])))

    # Loop on Detectors
    for self.det in detectors:
        msgs.info("Working on detector {0}".format(self.det))
        sci_dict[self.det] = {}

        # Calibrate
        # TODO Is the right behavior to just use the first frame?
        self.caliBrate.set_config(frames[0], self.det, self.par['calibrations'])
        self.caliBrate.run_the_steps()

        # Extract
        # TODO: pass back the background frame, pass in background
        # files as an argument. extract one takes a file list as an
        # argument and instantiates science within
        sci_dict[self.det]['sciimg'], sci_dict[self.det]['sciivar'], \
            sci_dict[self.det]['skymodel'], sci_dict[self.det]['objmodel'], \
            sci_dict[self.det]['ivarmodel'], sci_dict[self.det]['outmask'], \
            sci_dict[self.det]['specobjs'], \
                = self.extract_one(frames, self.det, bg_frames,
                                   std_outfile=std_outfile)
        # JFH TODO write out the background frame?

    # Return
    return sci_dict
def get_sci_metadata(self, frame, det):
"""
Grab the meta data for a given science frame and specific detector
Args:
frame (int): Frame index
det (int): Detector index
Returns:
5 objects are returned::
- str: Object type; science or standard
- str: Setup string from master_key()
- astropy.time.Time: Time of observation
- str: Basename of the frame
- str: Binning of the detector
"""
# Set binning, obstime, basename, and objtype
binning = self.fitstbl['binning'][frame]
obstime = self.fitstbl.construct_obstime(frame)
basename = self.fitstbl.construct_basename(frame, obstime=obstime)
objtype = self.fitstbl['frametype'][frame]
if 'science' in objtype:
objtype_out = 'science'
elif 'standard' in objtype:
objtype_out = 'standard'
else:
msgs.error('Unrecognized objtype')
setup = self.fitstbl.master_key(frame, det=det)
return objtype_out, setup, obstime, basename, binning
def get_std_trace(self, std_redux, det, std_outfile):
    """
    Returns the trace of the standard if it is applicable to the
    current reduction

    Args:
        std_redux (bool): If False, proceed
        det (int): Detector index
        std_outfile (str): Filename for the standard star spec1d file

    Returns:
        ndarray or None: Trace of the standard star on input detector;
        None when this reduction is itself a standard, no standard file
        was given, or the standard was not observed on this detector.
    """
    if std_redux is False and std_outfile is not None:
        sobjs = specobjs.SpecObjs.from_fitsfile(std_outfile)
        # Does the detector match?
        # TODO Instrument specific logic here could be implemented with the parset. For example LRIS-B or LRIS-R we
        # we would use the standard from another detector
        this_det = sobjs.DET == det
        if np.any(this_det):
            sobjs_det = sobjs[this_det]
            sobjs_std = sobjs_det.get_std()
            std_trace = sobjs_std.TRACE_SPAT
            # flatten the array if this multislit
            if 'MultiSlit' in self.spectrograph.pypeline:
                std_trace = std_trace.flatten()
            elif 'Echelle' in self.spectrograph.pypeline:
                # Echelle: orders along the first axis after transpose.
                std_trace = std_trace.T
            else:
                msgs.error('Unrecognized pypeline')
        else:
            std_trace = None
    else:
        std_trace = None
    return std_trace
def extract_one(self, frames, det, bg_frames, std_outfile=None):
    """
    Extract a single exposure/detector pair

    sci_ID and det need to have been set internally prior to calling
    this method

    Args:
        frames (list):
            List of frames to extract; stacked if more than one is provided
        det (int):
            Detector index (1-indexed)
        bg_frames (list):
            List of frames to use as the background
            Can be empty
        std_outfile (str, optional):
            Filename of a previously reduced standard star spec1d file

    Returns:
        seven objects are returned::
            - ndarray: Science image
            - ndarray: Science inverse variance image
            - ndarray: Model of the sky
            - ndarray: Model of the object
            - ndarray: Model of inverse variance
            - ndarray: Mask
            - :obj:`pypeit.specobjs.SpecObjs`: spectra
    """
    # Grab some meta-data needed for the reduction from the fitstbl
    self.objtype, self.setup, self.obstime, self.basename, self.binning = self.get_sci_metadata(frames[0], det)
    # Is this a standard star?
    self.std_redux = 'standard' in self.objtype
    # Get the standard trace if need be
    std_trace = self.get_std_trace(self.std_redux, det, std_outfile)

    # Build Science image
    sci_files = self.fitstbl.frame_paths(frames)
    self.sciImg = scienceimage.build_from_file_list(
        self.spectrograph, det, self.par['scienceframe']['process'],
        self.caliBrate.msbpm, sci_files, self.caliBrate.msbias,
        self.caliBrate.mspixelflat, illum_flat=self.caliBrate.msillumflat)

    # Background Image? Subtracted in-place when background frames exist.
    if len(bg_frames) > 0:
        bg_file_list = self.fitstbl.frame_paths(bg_frames)
        self.sciImg = self.sciImg - scienceimage.build_from_file_list(
            self.spectrograph, det, self.par['scienceframe']['process'],
            self.caliBrate.msbpm, bg_file_list, self.caliBrate.msbias,
            self.caliBrate.mspixelflat, illum_flat=self.caliBrate.msillumflat)

    # Update mask for slitmask
    slitmask = pixels.tslits2mask(self.caliBrate.tslits_dict)
    self.sciImg.update_mask_slitmask(slitmask)

    # For QA on crash
    msgs.sciexp = self.sciImg

    # Instantiate Reduce object
    self.maskslits = self.caliBrate.tslits_dict['maskslits'].copy()
    # Required for pypeline specific object
    # TODO -- caliBrate should be replaced by the ~3 primary Objects needed
    # once we have the data models in place.
    self.redux = reduce.instantiate_me(self.sciImg, self.spectrograph,
                                       self.par, self.caliBrate,
                                       maskslits=self.maskslits,
                                       ir_redux=self.ir_redux,
                                       std_redux=self.std_redux,
                                       objtype=self.objtype,
                                       setup=self.setup,
                                       show=self.show,
                                       det=det, binning=self.binning)
    # Show?
    if self.show:
        self.redux.show('image', image=self.sciImg.image, chname='processed',
                        slits=True, clear=True)

    # Prep for manual extraction (if requested)
    manual_extract_dict = self.fitstbl.get_manual_extract(frames, det)

    # Run the full sky-subtraction / object-finding / extraction sequence.
    self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs = self.redux.run(
        std_trace=std_trace, manual_extract_dict=manual_extract_dict, show_peaks=self.show,
        basename=self.basename, ra=self.fitstbl["ra"][frames[0]], dec=self.fitstbl["dec"][frames[0]],
        obstime=self.obstime)

    # Return
    return self.sciImg.image, self.sciImg.ivar, self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs
# TODO: Why not use self.frame?
def save_exposure(self, frame, sci_dict, basename):
    """
    Save the outputs from extraction for a given exposure

    Args:
        frame (:obj:`int`):
            0-indexed row in the metadata table with the frame that
            has been reduced.
        sci_dict (:obj:`dict`):
            Dictionary containing the primary outputs of extraction
        basename (:obj:`str`):
            The root name for the output file.

    Returns:
        None or SpecObjs: All of the objects saved to disk
    """
    # TODO: Need some checks here that the exposure has been reduced

    # Determine the headers
    head1d = self.fitstbl[frame]
    # Need raw file header information
    rawfile = self.fitstbl.frame_paths(frame)
    head2d = fits.getheader(rawfile, ext=self.spectrograph.primary_hdrext)
    # NOTE(review): refframe is computed but never used below --
    # confirm whether save.save_all should receive it.
    refframe = 'pixel' if self.caliBrate.par['wavelengths']['reference'] == 'pixel' else \
        self.caliBrate.par['wavelengths']['frame']

    # Determine the paths/filenames
    save.save_all(sci_dict, self.caliBrate.master_key_dict, self.caliBrate.master_dir,
                  self.spectrograph, head1d, head2d, self.science_path, basename,
                  update_det=self.par['rdx']['detnum'], binning=self.fitstbl['binning'][frame])
def msgs_reset(self):
    """
    Reset the msgs object
    """
    # Reset the global logger to this run's log file and verbosity, and
    # record which pypeit file is being reduced for error reporting.
    msgs.reset(log=self.logname, verbosity=self.verbosity)
    msgs.pypeit_file = self.pypeit_file
def print_end_time(self):
    """
    Print the elapsed time
    """
    # Capture the end time and report it in the largest sensible units.
    codetime = time.time() - self.tstart
    if codetime < 60.0:
        msgs.info('Execution time: {0:.2f}s'.format(codetime))
        return
    if codetime < 3600.0:
        mns = int(codetime/60.0)
        scs = codetime - 60.0*mns
        msgs.info('Execution time: {0:d}m {1:.2f}s'.format(mns, scs))
        return
    hrs = int(codetime/3600.0)
    mns = int(60.0*(codetime/3600.0 - hrs))
    scs = codetime - 60.0*mns - 3600.0*hrs
    msgs.info('Execution time: {0:d}h {1:d}m {2:.2f}s'.format(hrs, mns, scs))
# TODO: Move this to fitstbl?
def show_science(self):
    """
    Simple print of science frames
    """
    # Select the science rows and print a summary of the key columns.
    indx = self.fitstbl.find_frames('science')
    print(self.fitstbl[['target','ra','dec','exptime','dispname']][indx])
def __repr__(self):
# Generate sets string
return '<{:s}: pypeit_file={}>'.format(self.__class__.__name__, self.pypeit_file)
| 2.203125 | 2 |
swig/x64dbgpy/__events.py | swigger/x64dbgpy | 1 | 12771955 | # import os
# from os import path
# import runpy
# import glob
from . utils import Singleton
# Names of the debugger events that scripts may subscribe to via
# Event.listen(); each matches an attribute slot on the Event singleton.
EVENTS = [
    # 'init_debug',
    'stop_debug',
    'breakpoint',
    'create_process',
    'exit_process',
    'create_thread',
    'exit_thread',
    'system_breakpoint',
    'load_dll',
    'unload_dll',
    'trace_execute'
]
class EventNames(object):
    """Constants naming the supported debugger events and step actions."""
    stop_debug = 'stop_debug'
    breakpoint = 'breakpoint'
    create_process = 'create_process'
    exit_process = 'exit_process'
    exit_thread = 'exit_thread'
    create_thread = 'create_thread'
    system_breakpoint = 'system_breakpoint'
    load_dll = 'load_dll'
    unload_dll = 'unload_dll'
    trace_execute = 'trace_execute'
    # Numeric IDs for breakpoint step actions (not listed in EVENTS).
    bp_run = 10001
    bp_stepin = 10002
    bp_stepout = 10003
    bp_stepover = 10004
class Event(object):
    """Singleton holding one callback slot per debugger event.

    Each attribute is either None (no listener) or a callable that is
    invoked with keyword arguments when the corresponding event fires.
    The C struct layouts delivered with each event are reproduced in the
    comments below.
    """
    # NOTE(review): __metaclass__ is the Python 2 spelling; under
    # Python 3 this line has no effect -- confirm the target runtime.
    __metaclass__ = Singleton

    def __init__(self):
        # Keys: type, addr, enabled, singleshoot, active, name, mod, slot
        # typedef struct
        # {
        #     BPXTYPE type;
        #     duint addr;
        #     bool enabled;
        #     bool singleshoot;
        #     bool active;
        #     char name[MAX_BREAKPOINT_SIZE];
        #     char mod[MAX_MODULE_SIZE];
        #     unsigned short slot;
        # } BRIDGEBP;
        #
        # typedef enum
        # {
        #     bp_none = 0,
        #     bp_normal = 1,
        #     bp_hardware = 2,
        #     bp_memory = 4
        # } BPXTYPE;
        self.breakpoint = None

        # Keys: None
        self.stop_debug = None

        # Keys: CreateProcessInfo, modInfo, DebugFileName, fdProcessInfo
        # typedef struct
        # {
        #     CREATE_PROCESS_DEBUG_INFO* CreateProcessInfo;
        #     IMAGEHLP_MODULE64* modInfo;
        #     const char* DebugFileName;
        #     PROCESS_INFORMATION* fdProcessInfo;
        # } PLUG_CB_CREATEPROCESS;
        self.create_process = None

        # Keys: dwExitCode
        # typedef struct _EXIT_PROCESS_DEBUG_INFO {
        #     DWORD dwExitCode;
        # } EXIT_PROCESS_DEBUG_INFO, *LPEXIT_PROCESS_DEBUG_INFO;
        self.exit_process = None

        # Keys: CreateThread, dwThreadId
        # typedef struct
        # {
        #     CREATE_THREAD_DEBUG_INFO* CreateThread;
        #     DWORD dwThreadId;
        # } PLUG_CB_CREATETHREAD;
        self.create_thread = None

        # Keys: dwThreadId, dwExitCode
        # typedef struct
        # {
        #     EXIT_THREAD_DEBUG_INFO* ExitThread;
        #     DWORD dwThreadId;
        # } PLUG_CB_EXITTHREAD;
        self.exit_thread = None

        # Keys: None
        self.system_breakpoint = None

        # Keys: LoadDll, modInfo, modname
        # typedef struct
        # {
        #     LOAD_DLL_DEBUG_INFO* LoadDll;
        #     IMAGEHLP_MODULE64* modInfo;
        #     const char* modname;
        # } PLUG_CB_LOADDLL;
        self.load_dll = None

        # Keys: lpBaseOfDll
        # typedef struct
        # {
        #     UNLOAD_DLL_DEBUG_INFO* UnloadDll;
        # } PLUG_CB_UNLOADDLL;
        self.unload_dll = None

        # Keys: trace
        # typedef struct
        # {
        #     duint cip;
        #     bool stop;
        # } PLUG_CB_TRACEEXECUTE;
        self.trace_execute = None

    def listen(self, event_name, callback):
        """
        Listen to event with a callback,
        Callback should always get key word arguments(kwargs).
        For example:
        def callback(**kwargs):
            print kwargs
        """
        # Event names are matched case-insensitively against EVENTS.
        event_name_lower = event_name.lower()
        if event_name_lower not in EVENTS:
            raise Exception("%s Is not a valid event." % event_name_lower)
        setattr(self, event_name_lower, callback)

    # @staticmethod
    # def init_debug():
    #     old_path = os.getcwdu()
    #     os.chdir(path.join(path.dirname(__file__), 'autorun'))
    #     for file_path in glob.glob("*.py"):
    #         print "[PYTHON] Executing autorun file: '%s'." % file_path
    #         runpy.run_path(
    #             path_name=file_path,
    #             run_name='__main__',
    #         )
    #     os.chdir(old_path)
| 1.921875 | 2 |
CodeCrakers/drones.py | suhasksv/py-ground | 3 | 12771956 | <filename>CodeCrakers/drones.py
# Find the drone order ID that appears an odd number of times:
# XOR-ing the whole list cancels every value occurring an even
# number of times, leaving only the unpaired ID.
drone_chk = [112, 334, 4444, 4444, 445, 112, 27466, 445, 27466]
x = 0
for order_id in drone_chk:
    x = x ^ order_id
print("The missing drone order ID is:", x)
| 2.828125 | 3 |
qt-creator-opensource-src-4.6.1/tests/system/suite_editors/tst_delete_externally/test.py | kevinlq/Qt-Creator-Opensource-Study | 5 | 12771957 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
# Pull in the shared Squish/Qt Creator test helper functions.
source("../../shared/qtcreator.py")
def main():
    """Open each test file, delete it on disk, and verify Qt Creator's
    "file has been removed" handling (restore via Save, close via Close)."""
    files = checkAndCopyFiles(testData.dataset("files.tsv"), "filename", tempDir())
    if not files:
        return
    startApplication("qtcreator" + SettingsPath)
    if not startedWithoutPluginError():
        return
    for currentFile in files:
        test.log("Opening file %s" % currentFile)
        invokeMenuItem("File", "Open File or Project...")
        selectFromFileDialog(currentFile)
        editor = getEditorForFileSuffix(currentFile)
        # Use identity comparison with None ('== None' was unidiomatic).
        if editor is None:
            test.fatal("Could not get the editor for '%s'" % currentFile,
                       "Skipping this file for now.")
            continue
        contentBefore = readFile(currentFile)
        os.remove(currentFile)
        if not currentFile.endswith(".bin"):
            popupText = ("The file %s has been removed from disk. Do you want to "
                         "save it under a different name, or close the editor?")
            test.compare(waitForObject(":File has been removed_QMessageBox").text,
                         popupText % currentFile)
            # 'Save' should restore the file on disk with its old content.
            clickButton(waitForObject(":File has been removed.Save_QPushButton"))
            waitFor("os.path.exists(currentFile)", 5000)
            # avoids a lock-up on some Linux machines, purely empiric, might have different cause
            waitFor("checkIfObjectExists(':File has been removed_QMessageBox', False, 0)", 5000)
            test.compare(readFile(currentFile), contentBefore,
                         "Verifying that file '%s' was restored correctly" % currentFile)
            # Test for QTCREATORBUG-8130
            os.remove(currentFile)
            test.compare(waitForObject(":File has been removed_QMessageBox").text,
                         popupText % currentFile)
            clickButton(waitForObject(":File has been removed.Close_QPushButton"))
        test.verify(checkIfObjectExists(objectMap.realName(editor), False),
                    "Was the editor closed after deleting the file?")
    invokeMenuItem("File", "Exit")
| 1.445313 | 1 |
torchx/utils/Norm.py | antoniojkim/TorchX | 0 | 12771958 | # -*- coding: utf-8 -*-
import torch
def pixel_norm(x: torch.Tensor, epsilon: float = 1e-8) -> torch.Tensor:
    """Apply pixel-wise feature-vector normalization.

    Each position's feature vector along dimension 1 is scaled to roughly
    unit length: ``x / sqrt(mean(x**2, dim=1) + epsilon)``.

    Note:
        Implemented as described in `this paper <https://arxiv.org/pdf/1710.10196.pdf>`_.
        `Reference <https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120-L122>`_.

    Args:
        x: Input tensor whose channel/feature dimension is dim 1.
        epsilon: Small constant added inside the square root for numerical
            stability (avoids division by zero for all-zero feature vectors).

    Returns:
        A tensor with the same shape as ``x``.
    """  # noqa: E501
    # `dim=` is the canonical torch keyword; `axis=` is only a NumPy-compat alias.
    return x * (x.pow(2).mean(dim=1, keepdim=True) + epsilon).rsqrt()
| 2.6875 | 3 |
web/talks/tests/tests_models.py | vtalks/vtalks.net | 1 | 12771959 | from django.test import TestCase
from channels.models import Channel
from talks.models import Talk
# Create your tests here.
class TalkModelTests(TestCase):
    """Unit tests for the Talk model: repr, YouTube URL, thumbnails, slugs."""

    def setUp(self):
        # One channel with three talks; two talks share a title so the slug
        # de-duplication logic can be exercised.
        channel_1 = Channel.objects.create(code='1', title='channel title 1')
        Talk.objects.create(code='1', title='talk title 1', channel=channel_1)
        Talk.objects.create(code='11', title='talk title same title', channel=channel_1)
        Talk.objects.create(code='12', title='talk title same title', channel=channel_1)

    def test_instance_get_string_repr(self):
        """str() of a talk is its title."""
        # assertEqual throughout: assertEquals is a deprecated unittest alias.
        talk_1 = Talk.objects.get(code='1')
        self.assertEqual(str(talk_1), talk_1.title)

    def test_instance_get_youtube_valid_url(self):
        """youtube_url is built from the talk's code."""
        talk_1 = Talk.objects.get(code='1')
        self.assertEqual(talk_1.youtube_url,
                         'https://www.youtube.com/watch?v=1')

    def test_instance_thumbnails(self):
        """Each thumbnail property points at the matching i.ytimg.com asset."""
        talk_1 = Talk.objects.get(code='1')
        self.assertEqual(talk_1.default_thumb,
                         'https://i.ytimg.com/vi/1/default.jpg')
        self.assertEqual(talk_1.medium_thumb,
                         'https://i.ytimg.com/vi/1/mqdefault.jpg')
        self.assertEqual(talk_1.high_thumb,
                         'https://i.ytimg.com/vi/1/hqdefault.jpg')
        self.assertEqual(talk_1.standard_thumb,
                         'https://i.ytimg.com/vi/1/sddefault.jpg')
        self.assertEqual(talk_1.maxres_thumb,
                         'https://i.ytimg.com/vi/1/maxresdefault.jpg')

    def test_create_talk_slug(self):
        """Slug is derived from the title on creation."""
        talk_1 = Talk.objects.get(code='1')
        self.assertEqual(talk_1.slug, 'talk-title-1')

    def test_create_duplicate_title_slug(self):
        """Duplicate titles get the talk code appended to keep slugs unique."""
        talk_12 = Talk.objects.get(code='12')
        self.assertEqual(talk_12.slug, 'talk-title-same-title-12')

    def test_save_talk_slug(self):
        """Slug is not regenerated when the title changes on save."""
        # code is a string field; use '1' for consistency with the other tests.
        talk_1 = Talk.objects.get(code='1')
        talk_1.title = "another title"
        talk_1.save()
        self.assertEqual(talk_1.slug, 'talk-title-1')
| 2.4375 | 2 |
scripts/update_package_mapping.py | JonathanGailliez/azure-sdk-for-python | 1 | 12771960 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import logging
import os.path
from pathlib import Path
import sys
import requests
from cookiecutter.main import cookiecutter
from swaggertosdk.SwaggerToSdkNewCLI import generate_code
_LOGGER = logging.getLogger(__name__)
def create_package_service_mapping(service_info, autorest_options):
    """Build one package_service_mapping.json entry for a package.

    Args:
        service_info (dict): Must contain "is_arm" (bool) and "pretty_name" (str).
        autorest_options (dict): Must contain "package-name" and "namespace".

    Returns:
        dict: Mapping of package name to its service metadata entry.
    """
    category = "Management" if service_info["is_arm"] else "Client"
    entry = {
        "service_name": service_info["pretty_name"],
        "category": category,
        "namespaces": [autorest_options["namespace"]],
    }
    return {autorest_options["package-name"]: entry}
def main(package_name):
    """Add or refresh the given package's entry in package_service_mapping.json.

    Args:
        package_name (str): Package name, e.g. "azure-mgmt-compute". A name
            starting with "azure-mgmt" is treated as a management (ARM) package.
    """
    service_info = {
        "is_arm": package_name.startswith("azure-mgmt"),
        "pretty_name": package_name  # FIXME
    }
    autorest_options = {
        "package-name": package_name,
        "namespace": package_name.replace("-", ".")
    }
    package_service_mapping = Path("package_service_mapping.json")
    # Bug fix: a Path instance is always truthy, so the original
    # `if package_service_mapping:` was unconditionally True and the
    # subsequent open() would raise FileNotFoundError when the file was
    # absent. Check for the file on disk instead.
    if package_service_mapping.exists():
        _LOGGER.info("Updating package_service_mapping.json")
        entry = create_package_service_mapping(service_info, autorest_options)
        with package_service_mapping.open() as fd:
            data_conf = json.load(fd)
        data_conf.update(entry)
        with package_service_mapping.open("w") as fd:
            json.dump(data_conf, fd, indent=2, sort_keys=True)
    _LOGGER.info("Done! Enjoy your Python SDK!!")
if __name__ == "__main__":
    # CLI entry point: expects the package name as the sole positional argument,
    # e.g. `python update_package_mapping.py azure-mgmt-compute`.
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)
    main(sys.argv[1])
day10/Day_10_Code_Challenge.py | dikshaa1702/ml | 1 | 12771961 |
"""
Code Challenge 1
Certificate Generator
Develop a Python code that can generate certificates in image format.
It must take names and other required information from the user and generates
certificate of participation in a Python Bootcamp conducted by Forsk.
Certificate should have Forsk Seal, Forsk Signature, Different Fonts
Code Challenge 2
I-Card Generation System
Write a Python program for a system that generates I-cards for all students of the
Forsk Summer Developer Program and stores them in image format.
It must take names and other required information from the user.
Code Challenge 3
Watermarking Application
Have some pictures you want copyright protected? Add your own logo or text lightly
across the background so that no one can simply steal your graphics off your site.
Make a program that will add this watermark to the picture.
Code Challenge 4
GIF Creator
A program that puts together multiple images (PNGs, JPGs, TIFFs) to make a smooth
GIF that can be exported. Make the program convert small video files to GIFs as
well.
Code Challenge 5
Fortune Teller (Horoscope)
A program that checks your horoscope on various astrology sites and puts them
together for you each day. The code should share the Horoscope on Tweeter account
of the user.
""" | 3.8125 | 4 |
datasheets/tab.py | etcher-be/datasheets | 625 | 12771962 | import types
from collections import OrderedDict
import apiclient
import pandas as pd
from datasheets import exceptions, helpers
class Tab(object):
    def __init__(self, tabname, workbook, drive_svc, sheets_svc):
        """Create a datasheets.Tab instance of an existing Google Sheets tab.

        This class is not intended to be directly instantiated; it is created by
        datasheets.Workbook.fetch_tab().

        Args:
            tabname (str): The name of the tab
            workbook (datasheets.Workbook): The workbook instance that instantiated this tab
            drive_svc (googleapiclient.discovery.Resource): An instance of Google Drive
            sheets_svc (googleapiclient.discovery.Resource): An instance of Google Sheets
        """
        self.tabname = tabname
        self._workbook = workbook
        self.drive_svc = drive_svc
        self.sheets_svc = sheets_svc

        # Get basic properties of the tab. We do this here partly
        # to force failures early if tab can't be found
        try:
            self._update_tab_properties()
        except apiclient.errors.HttpError as e:
            # The Sheets API reports a missing tab as a range-parse failure.
            if 'Unable to parse range'.encode() in e.content:
                raise exceptions.TabNotFound('The given tab could not be found. Error generated: {}'.format(e))
            else:
                raise
        self.url = 'https://docs.google.com/spreadsheets/d/{}#gid={}'.format(self.workbook.file_id, self.tab_id)

    def __getattribute__(self, attr):
        """Get an attribute (variable or method) of this instance of this class

        For client OAuth, before each user-facing method call this method will verify that the
        access token is not expired and refresh it if it is.

        We only refresh on user-facing method calls since otherwise we'd be refreshing multiple
        times per user action (once for the user call, possibly multiple times for the private
        method calls invoked by it).
        """
        requested_attr = super(Tab, self).__getattribute__(attr)
        # Only bound methods with public names trigger a token check.
        if isinstance(requested_attr, types.MethodType) \
                and not attr.startswith('_'):
            self.workbook.client._refresh_token_if_needed()
        return requested_attr

    def __repr__(self):
        # Bug fix: the format string previously contained a literal placeholder
        # where '{filename}' belonged, leaving the filename= kwarg unused.
        msg = "<{module}.{name}(filename='{filename}', tabname='{tabname}')>"
        return msg.format(module=self.__class__.__module__,
                          name=self.__class__.__name__,
                          filename=self.workbook.filename,
                          tabname=self.tabname)

    @staticmethod
    def _process_rows(raw_data):
        """Prepare a tab's raw data so that a pandas.DataFrame can be produced from it

        Args:
            raw_data (dict): The raw data from a tab

        Returns:
            list: A list of lists representing the raw_data, with one list per row in the tab
        """
        raw_rows = raw_data['sheets'][0]['data'][0].get('rowData', {})
        rows = []
        for row_num, row in enumerate(raw_rows):
            row_values = []
            for col_num, cell in enumerate(row.get('values', {})):
                # If the cell is empty, use None
                value = cell.get('effectiveValue', {None: None})

                # If a cell has an error in it (e.g. someone divides by zero, adds a number to
                # text, etc.), then we raise an exception.
                if 'errorValue' in value:
                    cell_label = helpers.convert_cell_index_to_label(row_num+1, col_num+1)
                    error_type = value['errorValue'].get('type', 'unknown type')
                    error_message = value['errorValue'].get('message', 'unknown error message')
                    msg = 'Error of type "{}" within cell {} prevents fetching data. Message: "{}"'
                    raise exceptions.FetchDataError(msg.format(error_type, cell_label, error_message))

                # value is a dict with only 1 key so this next(iter()) is safe
                base_fmt, cell_value = next(iter(value.items()))

                # An explicit numberFormat (date, currency, ...) overrides the base type.
                num_fmt = cell.get('effectiveFormat', {}).get('numberFormat')
                if num_fmt:
                    cell_format = num_fmt['type']
                else:
                    cell_format = base_fmt

                formatting_fn = helpers._TYPE_CONVERSIONS[cell_format]
                if cell_value:
                    try:
                        cell_value = formatting_fn(cell_value)
                    except ValueError:
                        # Value doesn't match its declared format; leave it as-is.
                        pass
                    except TypeError:
                        raise TypeError(
                            "Mismatch exists in expected and actual data types for cell with "
                            "value '{value}'. Cell format is '{cell_format}' but cell value type "
                            "is '{value_type}'. To correct this, in Google Sheets set the "
                            "appropriate cell format or set it to Automatic".format(
                                value=cell_value,
                                cell_format=cell_format,
                                value_type=type(cell_value))
                        )
                row_values.append(cell_value)
            rows.append(row_values)
        return rows

    @property
    def ncols(self):
        """ Property for the number (int) of columns in the tab """
        return self.properties['gridProperties']['columnCount']

    @property
    def nrows(self):
        """ Property for the number (int) of rows in the tab """
        return self.properties['gridProperties']['rowCount']

    @property
    def tab_id(self):
        """ Property that gives the ID for the tab """
        return self.properties['sheetId']

    @property
    def workbook(self):
        """ Property for the workbook instance that this tab belongs to """
        return self._workbook

    def _add_rows_or_columns(self, kind, n):
        """Append n rows or columns ('ROWS' / 'COLUMNS') to the end of the tab."""
        request_body = {'appendDimension': {
            'sheetId': self.tab_id,
            'dimension': kind,
            'length': n
            }
        }
        body = {'requests': [request_body]}
        self.workbook.batch_update(body)
        self._update_tab_properties()

    def _update_tab_properties(self):
        """Refresh self.properties from the Sheets API (dimensions, sheetId, ...)."""
        raw_properties = self.sheets_svc.get(spreadsheetId=self.workbook.file_id,
                                             ranges=self.tabname + '!A1',
                                             fields='sheets/properties').execute()
        self.properties = raw_properties['sheets'][0]['properties']

    def add_rows(self, n):
        """Add n rows to the given tab

        Args:
            n (int): The number of rows to add

        Returns:
            None
        """
        self._add_rows_or_columns(kind='ROWS', n=n)

    def add_columns(self, n):
        """Add n columns to the given tab

        Args:
            n (int): The number of columns to add

        Returns:
            None
        """
        self._add_rows_or_columns(kind='COLUMNS', n=n)

    def align_cells(self, horizontal='LEFT', vertical='MIDDLE'):
        """Align all cells in the tab

        Args:
            horizontal (str): The horizontal alignment for cells. May be one of 'LEFT',
                'CENTER', or 'RIGHT'
            vertical (str): The vertical alignment for cells. May be one of 'TOP',
                'MIDDLE', 'BOTTOM'

        Returns:
            None
        """
        request_body = {'repeatCell': {
            'range': {
                'sheetId': self.tab_id,
                'startRowIndex': 0,
                'endRowIndex': self.nrows
            },
            'cell': {
                'userEnteredFormat': {
                    'horizontalAlignment': horizontal,
                    'verticalAlignment': vertical,
                }
            },
            'fields': 'userEnteredFormat(horizontalAlignment,verticalAlignment)'
            }
        }
        body = {'requests': [request_body]}
        self.workbook.batch_update(body)

    def alter_dimensions(self, nrows=None, ncols=None):
        """Alter the dimensions of the current tab.

        If either dimension is left to None, that dimension will not be altered. Note that it is
        possible to set nrows or ncols to smaller than the current tab dimensions, in which case
        that data will be eliminated.

        Args:
            nrows (int): The number of rows for the tab to have
            ncols (int): The number of columns for the tab to have

        Returns:
            None
        """
        request_body = {'updateSheetProperties': {
            'properties': {
                'sheetId': self.tab_id,
                'gridProperties': {
                    'columnCount': ncols or self.ncols,
                    'rowCount': nrows or self.nrows
                }
            },
            'fields': 'gridProperties(columnCount, rowCount)'
            }
        }
        body = {'requests': [request_body]}
        self.workbook.batch_update(body)
        self._update_tab_properties()

    def append_data(self, data, index=True, autoformat=True):
        """Append data to the existing data in this tab.

        If the new data exceeds the tab's current dimensions the tab will be resized to
        accommodate it. Data headers will not be included among the appended data as they are
        assumed to already be among the existing tab data.

        If the dimensions of `data` are larger than the tab's current dimensions,
        the tab will automatically be resized to fit it.

        Args:
            data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a
                pandas.DataFrame, a dict of lists, or a list of lists
            index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well
            autoformat (bool): Whether to apply the default stylings afterwards

        Returns:
            None
        """
        # Convert everything to lists of lists, which Google Sheets requires
        headers, values = helpers._make_list_of_lists(data, index)
        values = helpers._convert_nan_and_datelike_values(values)
        body = {'values': values}
        self.sheets_svc.values().append(spreadsheetId=self.workbook.file_id, range=self.tabname,
                                        valueInputOption='USER_ENTERED', body=body).execute()
        if autoformat:
            self.autoformat(len(headers))
        self._update_tab_properties()

    def autoformat(self, n_header_rows):
        """Apply default stylings to the tab

        This will apply the following stylings to the tab:

        - Header rows will be formatted to a dark gray background and off-white text
        - Font for all cells will be set to size 10 Proxima Nova
        - Cells will be horizontally left-aligned and vertically middle-aligned
        - Columns will be resized to display their largest entry
        - Empty columns and rows will be trimmed from the tab

        Args:
            n_header_rows (int): The number of header rows (i.e. row of labels / metadata)

        Returns:
            None
        """
        self.format_headers(nrows=n_header_rows)
        self.format_font()
        self.align_cells()
        self.autosize_columns()
        # Trim the tab down to the populated area.
        populated_cells = self.sheets_svc.values().get(spreadsheetId=self.workbook.file_id,
                                                       range=self.tabname).execute()
        nrows = len(populated_cells['values'])
        ncols = max(map(len, populated_cells['values']))
        self.alter_dimensions(nrows=nrows, ncols=ncols)
        self._update_tab_properties()

    def autosize_columns(self):
        """Resize the widths of all columns in the tab to fit their data

        Returns:
            None
        """
        request_body = {'autoResizeDimensions': {
            'dimensions': {
                'sheetId': self.tab_id,
                'dimension': 'COLUMNS',
                'startIndex': 0,
                'endIndex': self.ncols
            }
            }
        }
        body = {'requests': [request_body]}
        self.workbook.batch_update(body)

    def clear_data(self):
        """Clear all data from the tab while leaving formatting intact

        Returns:
            None
        """
        self.sheets_svc.values().clear(spreadsheetId=self.workbook.file_id,
                                       range=self.tabname,
                                       body={}).execute()

    def format_font(self, font='Proxima Nova', size=10):
        """Set the font and size for all cells in the tab

        Args:
            font (str): The name of the font to use
            size (int): The size to set the font to

        Returns:
            None
        """
        request_body = {'repeatCell': {
            'range': {'sheetId': self.tab_id},
            'cell': {
                'userEnteredFormat': {
                    'textFormat': {
                        'fontSize': size,
                        'fontFamily': font
                    }
                }
            },
            'fields': 'userEnteredFormat(textFormat(fontSize,fontFamily))'
            }
        }
        body = {'requests': [request_body]}
        self.workbook.batch_update(body)

    def format_headers(self, nrows):
        """Format the first n rows of a tab.

        The following stylings will be applied to these rows:

        - Background will be set to dark gray with off-white text
        - Font will be set to size 10 Proxima Nova
        - Text will be horizontally left-aligned and vertically middle-aligned
        - Rows will be made "frozen" so that when the user scrolls these rows stay visible

        Args:
            nrows (int): The number of rows of headers in the tab

        Returns:
            None
        """
        body = {
            'requests': [
                {
                    'repeatCell': {
                        'range': {
                            'sheetId': self.tab_id,
                            'startRowIndex': 0,
                            'endRowIndex': nrows
                        },
                        'cell': {
                            'userEnteredFormat': {
                                'backgroundColor': {
                                    'red': 0.26274511,
                                    'green': 0.26274511,
                                    'blue': 0.26274511
                                },
                                'horizontalAlignment': 'LEFT',
                                'textFormat': {
                                    'foregroundColor': {
                                        'red': 0.95294118,
                                        'green': 0.95294118,
                                        'blue': 0.95294118
                                    },
                                    'fontSize': 10,
                                    'fontFamily': 'Proxima Nova',
                                    'bold': False
                                }
                            }
                        },
                        'fields': 'userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)'
                    }
                },
                {
                    'updateSheetProperties': {
                        'properties': {
                            'sheetId': self.tab_id,
                            'gridProperties': {
                                'frozenRowCount': nrows
                            }
                        },
                        'fields': 'gridProperties(frozenRowCount)'
                    }
                }
            ]
        }
        self.workbook.batch_update(body)

    def fetch_data(self, headers=True, fmt='df'):
        """Retrieve the data within this tab.

        Efforts are taken to ensure that returned rows are always the same length. If
        headers=True, this length will be equal to the length of the headers. If headers=False,
        this length will be equal to the longest row.

        In either case, shorter rows will be padded with Nones and longer rows will be
        truncated (i.e. if there are 3 headers then all rows will have 3 entries regardless
        of the amount of populated cells they have).

        Args:
            headers (bool): If True, the first row will be used as the column names for the
                pandas.DataFrame. Otherwise, a 0-indexed range will be used instead
            fmt (str): The format in which to return the data. Accepted values: 'df', 'dict', 'list'

        Returns:
            When fmt='df' --> pandas.DataFrame

            When fmt='dict' --> list of dicts, e.g.::

                [{header1: row1cell1, header2: row1cell2},
                 {header1: row2cell1, header2: row2cell2},
                 ...]

            When fmt='list' --> tuple of header names, list of lists with row data, e.g.::

                ([header1, header2, ...],
                 [[row1cell1, row1cell2, ...], [row2cell1, row2cell2, ...], ...])
        """
        if fmt not in ('df', 'dict', 'list'):
            raise ValueError("Unexpected value '{}' for parameter `fmt`. "
                             "Accepted values are 'df', 'dict', and 'list'".format(fmt))
        fields = 'sheets/data/rowData/values(effectiveValue,effectiveFormat/numberFormat/type)'
        raw_data = self.sheets_svc.get(spreadsheetId=self.workbook.file_id, ranges=self.tabname,
                                       includeGridData=True, fields=fields).execute()
        processed_rows = self._process_rows(raw_data)

        # filter out empty rows
        max_idx = helpers._find_max_nonempty_row(processed_rows)
        if max_idx is None:
            # No data at all; return the emptiest valid value for each format.
            if fmt == 'df':
                return pd.DataFrame([])
            elif fmt == 'dict':
                return []
            else:
                return ([], [])
        processed_rows = processed_rows[:max_idx+1]

        # remove trailing Nones on rows
        processed_rows = list(map(helpers._remove_trailing_nones, processed_rows))
        if headers:
            header_names = processed_rows.pop(0)
            max_width = len(header_names)
        else:
            # Iterate through rows to find widest one
            max_width = max(map(len, processed_rows))
            header_names = list(range(max_width))

        # resize the rows to match the number of column headers
        processed_rows = [helpers._resize_row(row, max_width) for row in processed_rows]

        if fmt == 'df':
            df = pd.DataFrame(data=processed_rows, columns=header_names)
            return df
        elif fmt == 'dict':
            # OrderedDict preserves the sheet's column order in each row dict.
            return [OrderedDict(zip(header_names, row)) for row in processed_rows]
        else:
            return header_names, processed_rows

    def insert_data(self, data, index=True, autoformat=True):
        """Overwrite all data in this tab with the provided data.

        All existing data in the tab will be removed, even if it might not have been overwritten
        (for example, if there is 4x2 data already in the tab and only 2x2 data is being inserted).

        If the dimensions of `data` are larger than the tab's current dimensions,
        the tab will automatically be resized to fit it.

        Args:
            data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a
                pandas.DataFrame, a dict of lists, or a list of lists
            index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well
            autoformat (bool): Whether to apply the default stylings afterwards

        Returns:
            None
        """
        # Convert everything to lists of lists, which Google Sheets requires
        headers, values = helpers._make_list_of_lists(data, index)
        values = headers + values  # Include headers for inserts but not for appends
        self.clear_data()
        values = helpers._convert_nan_and_datelike_values(values)
        body = {'values': values}
        self.sheets_svc.values().update(spreadsheetId=self.workbook.file_id, range=self.tabname,
                                        valueInputOption='USER_ENTERED', body=body).execute()
        if autoformat:
            self.autoformat(len(headers))
        self._update_tab_properties()
| 3.015625 | 3 |
selenium_driver_updater/browsers/_operaBrowser.py | Svinokur/selenium_driver_updater | 8 | 12771963 | #pylint: disable=logging-fstring-interpolation
#Standart library imports
import subprocess
import os
import re
import platform
from typing import Tuple,Any
from pathlib import Path
import shutil
# Third party imports
from bs4 import BeautifulSoup
import wget
# Selenium imports
from selenium import webdriver
from selenium.common.exceptions import SessionNotCreatedException
from selenium.common.exceptions import WebDriverException
# Local imports
from selenium_driver_updater._setting import setting
from selenium_driver_updater.util.requests_getter import RequestsGetter
from selenium_driver_updater.util.extractor import Extractor
from selenium_driver_updater.util.logger import logger
class OperaBrowser():
    """Class for working with Opera browser"""

    def __init__(self, **kwargs):
        # Global settings dict used for URLs, version patterns, OS bitness, etc.
        self.setting : Any = setting
        # Whether main() should check/update the browser at all.
        self.check_browser_is_up_to_date = bool(kwargs.get('check_browser_is_up_to_date'))
        # Path to the local operadriver binary (used to query the browser version).
        self.operadriver_path = str(kwargs.get('path'))
        self.requests_getter = RequestsGetter
        self.extractor = Extractor
        # Filled in by _get_latest_version_opera_browser(): e.g. 'mac/' or 'win/',
        # and the release-listing URL for the chosen version.
        self.system_name = ''
        self.url_release = ''

    def main(self) -> None:
        """Main function, checks for the latest version, downloads or updates opera browser"""
        if self.check_browser_is_up_to_date:
            self._check_if_opera_browser_is_up_to_date()

    def _check_if_opera_browser_is_up_to_date(self) -> None:
        """Сhecks for the latest version of opera browser

        Raises:
            Except: If unexpected error raised.
        """
        try:
            # Updating is only implemented for macOS and Windows below.
            if platform.system() not in ['Darwin', 'Windows']:
                message = 'Opera browser checking/updating is currently disabled for your OS. Please wait for the new releases.'
                logger.error(message)
                return
            is_browser_up_to_date, current_version, latest_version = self._compare_current_version_and_latest_version_opera_browser()
            if not is_browser_up_to_date:
                self._get_latest_opera_browser_for_current_os()
                # Re-check after the update attempt to confirm it worked.
                is_browser_up_to_date, current_version, latest_version = self._compare_current_version_and_latest_version_opera_browser()
                if not is_browser_up_to_date:
                    message = f'Problem with updating opera browser current_version: {current_version} latest_version: {latest_version}'
                    logger.info(message)
        except (ValueError, FileNotFoundError):
            # Best-effort: version lookup/update failures are swallowed here.
            pass

    def _get_current_version_opera_browser_selenium(self) -> str:
        """Gets current opera browser version


        Returns:
            str

            browser_version (str)       : Current opera browser version.

        Raises:
            SessionNotCreatedException: Occurs when current operadriver could not start.

            WebDriverException: Occurs when current operadriver could not start or critical error occured.

        """

        browser_version : str = ''

        try:
            # Fast path: ask the installed binary directly via the terminal.
            browser_version = self._get_current_version_opera_browser_selenium_via_terminal()
            if not browser_version:
                message = 'Trying to get current version of opera browser via operadriver'
                logger.info(message)

            # Fallback: launch the browser through operadriver and parse the user agent.
            if Path(self.operadriver_path).exists() and not browser_version:
                with webdriver.Opera(executable_path = self.operadriver_path) as driver:
                    browser_version = driver.execute_script("return navigator.userAgent")

                find_string = re.findall('OPR/' + self.setting["Program"]["wedriverVersionPattern"], browser_version)
                browser_version = find_string[0] if len(find_string) > 0 else ''

            logger.info(f'Current version of opera browser: {browser_version}')

        except (WebDriverException, SessionNotCreatedException, OSError):
            pass #[Errno 86] Bad CPU type in executable:

        return browser_version

    def _get_latest_version_opera_browser(self) -> str:
        """Gets latest opera browser version


        Returns:
            str

            latest_version (str)    : Latest version of opera browser.

        Raises:
            Except: If unexpected error raised.

        """

        latest_version : str = ''
        version : str = ''

        url = self.setting["OperaBrowser"]["LinkAllLatestRelease"]
        json_data = self.requests_getter.get_result_by_request(url=url)

        soup = BeautifulSoup(json_data, 'html.parser')

        system_name = platform.system()
        system_name = system_name.replace('Darwin', 'mac')
        system_name = system_name.replace('Windows', 'win')

        self.system_name = system_name.lower() + '/' #mac -> mac/ or Linux -> linux/

        elements = soup.findAll('a')
        # Walk the release links from newest (last) to oldest, keeping the first
        # one whose listing actually contains a build for this OS.
        for i,_ in enumerate(elements, 1):
            version = elements[-i].attrs.get('href')
            self.url_release = url + version

            json_data = self.requests_getter.get_result_by_request(url=self.url_release)
            if not self.system_name in json_data:
                continue
            else:
                break

        latest_version = version.replace('/', '')

        logger.info(f'Latest version of opera browser: {latest_version}')

        return latest_version

    def _get_latest_opera_browser_for_current_os(self) -> None:
        """Trying to update opera browser to its latest version"""

        if platform.system() not in ['Darwin', 'Windows']:
            message = 'Opera browser checking/updating is currently disabled for your OS. Please wait for the new releases.'
            logger.error(message)
            return

        latest_version = self._get_latest_version_opera_browser()

        # Build the full download URL for this OS/architecture.
        url_full_release = self.url_release + self.system_name
        if platform.system() == 'Darwin':
            if 'arm' in str(os.uname().machine) and platform.system() == 'Darwin':
                url_full_release = url_full_release + f'Opera_{latest_version}_Autoupdate_arm64.tar.xz'
            else:
                url_full_release = url_full_release + f'Opera_{latest_version}_Autoupdate.tar.xz'

        elif platform.system() == 'Windows':
            if self.setting['Program']['OSBitness'] == '64':
                url_full_release = url_full_release + f'Opera_{latest_version}_Setup_x64.exe'
            else:
                url_full_release = url_full_release + f'Opera_{latest_version}_Setup.exe'

        logger.info(f'Started download operabrowser by url: {url_full_release}')

        # Download into a 'selenium-driver-updater' folder beside the operadriver binary.
        # NOTE(review): str.replace on the basename could also alter an earlier
        # occurrence of the same substring in the path — confirm paths are unique.
        path = self.operadriver_path.replace(self.operadriver_path.split(os.path.sep)[-1], '') + 'selenium-driver-updater' + os.path.sep
        archive_name = url_full_release.split('/')[-1]
        if not Path(path).exists():
            Path(path).mkdir()

        if Path(path + archive_name).exists():
            Path(path + archive_name).unlink()

        logger.info(f'Started to download opera browser by url: {url_full_release}')
        archive_path = wget.download(url=url_full_release, out=path + archive_name)

        logger.info(f'Opera browser was downloaded to path: {archive_path}')

        if platform.system() == 'Darwin':

            logger.info('Trying to kill all opera processes')

            # NOTE(review): killall is invoked twice in a row — looks duplicated;
            # confirm whether the repeat is intentional (e.g. respawned helper).
            subprocess.Popen('killall Opera', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            subprocess.Popen('killall Opera', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

            logger.info('Successfully killed all opera processes')

            self.extractor.extract_all_tar_xz_archive(archive_path=archive_path, delete_archive=True, out_path=path)

            # Replace the installed app bundle with the freshly extracted one.
            opera_browser_path = path + 'Opera.app'
            opera_browser_path_application = '/Applications/Opera.app'

            if Path(opera_browser_path_application).exists():
                shutil.rmtree(opera_browser_path_application)

            shutil.move(opera_browser_path, opera_browser_path_application)
            logger.info(f'Successfully moved opera browser from: {opera_browser_path} to: {opera_browser_path_application}')

            if Path(archive_path).exists():
                Path(archive_path).unlink()

        elif platform.system() == 'Windows':

            logger.info('Trying to kill all opera.exe processes')

            subprocess.Popen('taskkill /F /IM "opera.exe" /T', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

            logger.info('Successfully killed all opera.exe processes')

            logger.info('Trying to install new opera browser')

            # Silent install of the downloaded setup executable.
            os.system(f'{archive_path} /install /silent /launchopera=no /desktopshortcut=no /pintotaskbar=no /setdefaultbrowser=0')

            logger.info('Successfully updated current opera browser')

    def _compare_current_version_and_latest_version_opera_browser(self) -> Tuple[bool, str, str]:
        """Compares current version of opera browser to latest version

        Returns:
            Tuple of bool, str and str

            is_browser_up_to_date (bool)    : It true the browser is up to date. Defaults to False.

            current_version (str)           : Current version of the browser.

            latest_version (str)            : Latest version of the browser.

        Raises:
            Except: If unexpected error raised.

        """
        is_browser_up_to_date : bool = False
        current_version : str = ''
        latest_version : str = ''

        current_version = self._get_current_version_opera_browser_selenium()

        # If the installed version can't be determined, skip the comparison.
        if not current_version:
            return is_browser_up_to_date, current_version, latest_version

        latest_version = self._get_latest_version_opera_browser()

        if current_version == latest_version:
            is_browser_up_to_date = True
            message = f"Your existing opera browser is up to date. current_version: {current_version} latest_version: {latest_version}"
            logger.info(message)

        return is_browser_up_to_date, current_version, latest_version

    def _get_current_version_opera_browser_selenium_via_terminal(self) -> str:
        """Gets current opera browser version via command in terminal


        Returns:
            str

            browser_version (str)       : Current opera browser version.

        Raises:

            Except: If unexpected error raised.

        """

        browser_version : str = ''
        browser_version_terminal : str = ''

        operabrowser_path = self.setting["OperaBrowser"]["Path"]
        if operabrowser_path:

            logger.info('Trying to get current version of opera browser via terminal')

            if platform.system() == 'Windows':

                with subprocess.Popen(operabrowser_path, stdout=subprocess.PIPE) as process:
                    browser_version_terminal = process.communicate()[0].decode('UTF-8')

                find_string_terminal = re.findall("Opera.*", browser_version_terminal)

                browser_version_terminal = find_string_terminal[0] if len(find_string_terminal) > 0 else ''

            elif platform.system() == 'Darwin':

                with subprocess.Popen([operabrowser_path, '--version'], stdout=subprocess.PIPE) as process:
                    browser_version_terminal = process.communicate()[0].decode('UTF-8')

            # Extract the dotted version number from the command output.
            find_string = re.findall(self.setting["Program"]["wedriverVersionPattern"], browser_version_terminal)

            browser_version = find_string[0] if len(find_string) > 0 else ''

        return browser_version
| 2.171875 | 2 |
src/utils/config.py | slebras/index_runner | 0 | 12771964 | import urllib.request
import yaml
import os
import json
import time
import logging
import requests
logger = logging.getLogger('IR')
_FETCH_CONFIG_RETRIES = 5
# Defines a process-wide instance of the config.
# Alternative is passing the configuration through the entire stack, which is feasible,
# but we consider that YAGNI for now.
_CONFIG_SINGLETON = None
def get_sample_service_url(sw_url, ss_release):
    """Resolve the SampleService URL via the KBase ServiceWizard.

    Makes a JSON-RPC 1.1 call to ServiceWizard.get_service_status and returns
    the URL of the running SampleService instance for the requested release.

    Args:
        sw_url (str): URL of the ServiceWizard JSON-RPC endpoint.
        ss_release (str): SampleService release/version tag to look up
            (e.g. "dev").

    Returns:
        str: URL of the SampleService.

    Raises:
        RuntimeError: If the HTTP call fails or the wizard reports an error.
    """
    payload = {
        "method": "ServiceWizard.get_service_status",
        "id": '',
        "params": [{"module_name": "SampleService", "version": ss_release}],
        "version": "1.1"
    }
    headers = {'Content-Type': 'application/json'}
    sw_resp = requests.post(url=sw_url, headers=headers, data=json.dumps(payload))
    if not sw_resp.ok:
        raise RuntimeError(f"ServiceWizard error, with code {sw_resp.status_code}. \n{sw_resp.text}")
    wiz_resp = sw_resp.json()
    # JSON-RPC errors come back as HTTP 200 with an 'error' field in the body.
    if wiz_resp.get('error'):
        raise RuntimeError(f"ServiceWizard {sw_url} with params"
                           f" {json.dumps(payload)} Error - " + str(wiz_resp['error']))
    return wiz_resp['result'][0]['url']
def config(force_reload=False):
    """wrapper for get config that reloads config every 'config_timeout' seconds

    Lazily constructs the process-wide Config singleton on first call, then
    delegates to Config.reload(), which is a no-op until the configured
    timeout expires unless force_reload is True.

    Args:
        force_reload (bool): If True, reload the configuration immediately
            regardless of the timeout.

    Returns:
        Config: The shared configuration instance.
    """
    global _CONFIG_SINGLETON
    if not _CONFIG_SINGLETON:
        # could do this on module load, but let's delay until actually requested
        _CONFIG_SINGLETON = Config()
    _CONFIG_SINGLETON.reload(force_reload=force_reload)
    return _CONFIG_SINGLETON
class Config:
    """
    A class containing the configuration for the search indexer. Supports dict like
    configuration item access, e.g. config['ws-token'].
    Not thread-safe.
    """
    def __init__(self):
        """Initialize configuration data from the environment."""
        # Mapping of config key -> value; populated by reload().
        self._cfg = {}
        self.reload()
    def reload(self, force_reload=False):
        """
        Reload the configuration data from the environment.
        Only reloads if the configuration has expired or force_reload is true.
        """
        if self._cfg:
            # Skip the (network-touching, therefore expensive) refresh while
            # the cached config is still inside its timeout window.
            expired = (time.time() - self._cfg['last_config_reload']) > self._cfg['config_timeout']
            if not expired and not force_reload:
                # can remove force_reload once all reload logic is handled here
                return
        # These secrets have no sane default and must always be provided.
        reqs = ['WORKSPACE_TOKEN', 'RE_API_TOKEN']
        for req in reqs:
            if not os.environ.get(req):
                raise RuntimeError(f'{req} env var is not set.')
        es_host = os.environ.get("ELASTICSEARCH_HOST", 'elasticsearch')
        es_port = os.environ.get("ELASTICSEARCH_PORT", 9200)
        # Service endpoints default to paths under the KBase CI deployment.
        kbase_endpoint = os.environ.get(
            'KBASE_ENDPOINT', 'https://ci.kbase.us/services').strip('/')
        workspace_url = os.environ.get('WS_URL', kbase_endpoint + '/ws')
        catalog_url = os.environ.get('CATALOG_URL', kbase_endpoint + '/catalog')
        re_api_url = os.environ.get('RE_URL', kbase_endpoint + '/relation_engine_api').strip('/')
        service_wizard_url = os.environ.get('SW_URL', kbase_endpoint + '/service_wizard').strip('/')
        sample_service_release = os.environ.get('SAMPLE_SERVICE_RELEASE', 'dev')
        # Resolved dynamically because the SampleService is wizard-managed.
        sample_service_url = get_sample_service_url(service_wizard_url, sample_service_release)
        config_url = os.environ.get('GLOBAL_CONFIG_URL')
        github_release_url = os.environ.get(
            'GITHUB_RELEASE_URL',
            'https://api.github.com/repos/kbase/index_runner_spec/releases/latest'
        )
        # Load the global configuration release (non-environment specific, public config)
        if config_url and not config_url.startswith('http'):
            raise RuntimeError(f"Invalid global config url: {config_url}")
        if not github_release_url.startswith('http'):
            raise RuntimeError(f"Invalid global github release url: {github_release_url}")
        gh_token = os.environ.get('GITHUB_TOKEN')
        global_config = _fetch_global_config(config_url, github_release_url, gh_token)
        skip_indices = _get_comma_delimited_env('SKIP_INDICES')
        allow_indices = _get_comma_delimited_env('ALLOW_INDICES')
        # Use a tempfile to indicate that the service is done booting up
        proc_ready_path = '/tmp/IR_READY'  # nosec
        # Set the indexer log messages index name from a configured index name or alias
        msg_log_index_name = os.environ.get('MSG_LOG_INDEX_NAME', 'indexer_messages')
        if msg_log_index_name in global_config['latest_versions']:
            msg_log_index_name = global_config['latest_versions'][msg_log_index_name]
        self._cfg = {
            'skip_releng': os.environ.get('SKIP_RELENG'),
            'skip_features': os.environ.get('SKIP_FEATURES'),
            'skip_indices': skip_indices,
            'allow_indices': allow_indices,
            'global': global_config,
            'github_release_url': github_release_url,
            'github_token': gh_token,
            'global_config_url': config_url,
            'ws_token': os.environ['WORKSPACE_TOKEN'],
            'mount_dir': os.environ.get('MOUNT_DIR', os.getcwd()),
            'kbase_endpoint': kbase_endpoint,
            'catalog_url': catalog_url,
            'workspace_url': workspace_url,
            're_api_url': re_api_url,
            're_api_token': os.environ['RE_API_TOKEN'],
            'sample_service_url': sample_service_url,
            'elasticsearch_host': es_host,
            'elasticsearch_port': es_port,
            'elasticsearch_url': f"http://{es_host}:{es_port}",
            'kafka_server': os.environ.get('KAFKA_SERVER', 'kafka'),
            'kafka_clientgroup': os.environ.get('KAFKA_CLIENTGROUP', 'search_indexer'),
            'error_index_name': os.environ.get('ERROR_INDEX_NAME', 'indexing_errors'),
            'msg_log_index_name': msg_log_index_name,
            'elasticsearch_index_prefix': os.environ.get('ELASTICSEARCH_INDEX_PREFIX', 'search2'),
            'topics': {
                'workspace_events': os.environ.get('KAFKA_WORKSPACE_TOPIC', 'workspaceevents'),
                'admin_events': os.environ.get('KAFKA_ADMIN_TOPIC', 'indexeradminevents')
            },
            'config_timeout': 600,  # 10 minutes in seconds.
            'last_config_reload': time.time(),
            'proc_ready_path': proc_ready_path,  # File indicating the daemon is booted and ready
            'generic_shard_count': os.environ.get('GENERIC_SHARD_COUNT', 2),
            'generic_replica_count': os.environ.get('GENERIC_REPLICA_COUNT', 1),
        }
    def __getitem__(self, key):
        """Dict-style access to configuration values; raises KeyError if absent."""
        return self._cfg[key]
def _fetch_global_config(config_url, github_release_url, gh_token):
    """
    Fetch the index_runner_spec configuration file from the Github release
    using either the direct URL to the file or by querying the repo's release
    info using the GITHUB API.

    config_url - optional direct URL of the YAML config; takes precedence.
    github_release_url - Github API URL of the release holding config.yaml.
    gh_token - optional Github token (raises the API rate limit).
    Raises RuntimeError if the release info or config.yaml cannot be fetched.
    """
    if config_url:
        print('Fetching config from the direct url')
        # Fetch the config directly from config_url
        with urllib.request.urlopen(config_url) as res:  # nosec
            return yaml.safe_load(res)  # type: ignore
    else:
        print('Fetching config from the release info')
        # Fetch the config url from the release info
        if gh_token:
            # Authenticated API requests get a much higher rate limit.
            headers = {'Authorization': f'token {gh_token}'}
        else:
            headers = {}
        tries = 0
        # Sometimes Github returns usage errors and a retry will solve it
        # NOTE(review): retries are back-to-back with no backoff delay.
        while True:
            release_info = requests.get(github_release_url, headers=headers).json()
            if release_info.get('assets'):
                break
            if tries == _FETCH_CONFIG_RETRIES:
                raise RuntimeError(f"Cannot fetch config from {github_release_url}: {release_info}")
            tries += 1
        # Locate the config.yaml asset among the release's attachments.
        for asset in release_info['assets']:
            if asset['name'] == 'config.yaml':
                download_url = asset['browser_download_url']
                with urllib.request.urlopen(download_url) as res:  # nosec
                    return yaml.safe_load(res)
        raise RuntimeError("Unable to load the config.yaml file from index_runner_spec")
def _get_comma_delimited_env(key):
    """
    Fetch a comma-delimited list of strings from an environment variable as a set.
    Entries are whitespace-stripped and empty entries are dropped; an unset
    variable yields an empty set.
    """
    raw = os.environ.get(key, '')
    return {entry.strip() for entry in raw.split(',') if entry.strip()}
| 2.28125 | 2 |
tests/__init__.py | theincognito-inc/manga-dl | 1 | 12771965 | <filename>tests/__init__.py
from .archive import TestArchive
from .base import TestBaseClass
from .http import TestHttpClasses
from .init_provider import TestInitProvider
from .matrix import TestMatrix
from .web_driver import TestWebDriver
from .crypt import TestCrypt
from .std import TestStd
| 1.171875 | 1 |
elections_admin/urls.py | newsdev/nyt-elections-admin | 3 | 12771966 | <reponame>newsdev/nyt-elections-admin<filename>elections_admin/urls.py
from django.conf.urls import patterns, include, url
from django.contrib import admin
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# this module targets an older Django. Replace with a plain list of url()
# entries when upgrading.
urlpatterns = patterns('',
    (r'^elections/2016/admin/', include(admin.site.urls)),
)
examples/streamline1.py | yang69can/pyngl | 125 | 12771967 | #
# File:
# streamline1.py
#
# Synopsis:
# Draws streamlines on a map over water only.
#
# Category:
# Streamlines on a map.
#
# Author:
# <NAME>
#
# Date of original publication:
# December, 2004
#
# Description:
# This example draws streamlines over water on a map using a
# Cylindrical Equidistant map projection. The "add_cyclic"
# function is illustrated graphically.
#
# Effects illustrated:
# o Streamlines over maps.
# o Adding cyclic points.
# o Specifying colors by name.
# o Polylines.
# o Masking land areas.
#
# Output:
# This example produces two visualizations:
# 1.) Streamlines on a Cylindrical Equidistant map over water
# only highlighting missing cyclic points.
# 2.) Same as 1.) with the cyclic points added.
#
# Notes:
#
#
# Import Nio for reading netCDF files.
#
from __future__ import print_function
import Nio
#
# Import Ngl support functions.
#
import Ngl
import os
#
# Open the netCDF file.
#
# Open the netCDF file of POP ocean-model output shipped with PyNGL.
file = Nio.open_file(os.path.join(Ngl.pynglpath("data"),"cdf","pop.nc"))
#
#  Open a workstation.
#
wks_type = "png"
wks = Ngl.open_wks(wks_type,"streamline1")
#
#  Get the u/v and lat/lon variables (2D curvilinear ocean grid).
#
urot = file.variables["urot"]
vrot = file.variables["vrot"]
lat2d = file.variables["lat2d"]
lon2d = file.variables["lon2d"]
#
#  Set up resource list.
#
resources = Ngl.Resources()
#
#  Don't advance frame, because we want to draw a couple of lines on
#  plot later.
#
resources.nglFrame = False
#
#  Coordinate arrays for data; every 4th point is used to thin the field.
#
resources.vfXArray = lon2d[::4,::4]
resources.vfYArray = lat2d[::4,::4]
resources.mpProjection = "CylindricalEquidistant"
# Fill land tan and all water sky blue so streamlines stand out over ocean.
resources.mpFillOn = True
resources.mpLandFillColor = "Tan1"
resources.mpOceanFillColor = "SkyBlue"
resources.mpInlandWaterFillColor = "SkyBlue"
resources.mpGridAndLimbOn = False
resources.tiMainString = "Streamline plot without cyclic point added"
plot = Ngl.streamline_map(wks,urot[::4,::4],vrot[::4,::4],resources)
#
# Add a couple of lines showing the area where there's a gap in the
# data because of lack of a cyclic point. (It should be obvious even
# without the lines.)
#
line_res = Ngl.Resources()  # line resources
line_res.gsLineColor = "Red"  # line color
line_res.gsLineThicknessF = 1.5  # line thickness scale
line_res.gsLineDashPattern = 2  # dashed lines
Ngl.polyline(wks,plot,lon2d[::4,0],lat2d[::4,0],line_res)
Ngl.polyline(wks,plot,lon2d[::4,-1],lat2d[::4,-1],line_res)
#
# Add a text string explaining the lines.
#
text_res = Ngl.Resources()  # text resources
text_res.txFontHeightF = 0.03  # font height
text_res.txFontColor = "Red"
Ngl.text_ndc(wks,"dashed red line shows area with no data",0.5,0.17,text_res)
Ngl.frame(wks)  # Now advance frame.
#
# Add cyclic points. Since lat2d/lon2d are 2D arrays, make them
# cyclic the same way you do the 2D data array.
#
u = Ngl.add_cyclic(urot[::4,::4])
v = Ngl.add_cyclic(vrot[::4,::4])
lon = Ngl.add_cyclic(lon2d[::4,::4])
lat = Ngl.add_cyclic(lat2d[::4,::4])
#
# Specify new coordinate arrays for data.
#
resources.vfXArray = lon
resources.vfYArray = lat
resources.tiMainString = "Streamline plot with cyclic point added"
plot = Ngl.streamline_map(wks,u,v,resources)
#
# Add a couple of lines showing the area where the missing data were.
# Make the lines solid so we can see them.
#
line_res.gsLineDashPattern = 0
Ngl.polyline(wks,plot,lon2d[::4,0],lat2d[::4,0],line_res)
Ngl.polyline(wks,plot,lon2d[::4,-1],lat2d[::4,-1],line_res)
#
# Add a text string explaining the lines.
#
Ngl.text_ndc(wks,"red line shows area that previously had no data",0.5,0.17,text_res)
Ngl.frame(wks)
Ngl.end()
| 3.15625 | 3 |
setup.py | hashimom/grpcping | 0 | 12771968 | from setuptools import find_packages, setup
setup(
    name='grpcping',
    version='0.1.0',
    packages=find_packages(),
    include_package_data=True,
    url='https://github.com/hashimom/grpcping',
    # Installs a `grpcping` console command that dispatches to grpcping.ping:main.
    entry_points={
        "console_scripts": [
            "grpcping = grpcping.ping:main",
        ]
    },
    license='MIT',
    # NOTE(review): author fields look like redacted placeholders and the
    # description is empty — fill these in before publishing to an index.
    author='<NAME>',
    author_email='<EMAIL>',
    description=''
)
| 1.21875 | 1 |
src/companion-software/PiLED/matrix/matrix.py | LongMetal-Robotics/ShowBot | 0 | 12771969 | <filename>src/companion-software/PiLED/matrix/matrix.py
from rgbmatrix import RGBMatrix, RGBMatrixOptions
from video import playVideo, loopVideo
from image import displayImage
import logging
import threading
import re
class Matrix:
    """Drives a 64-column RGB LED matrix, showing images or video streams.

    Video playback runs on a background thread (`currentThread`) so that
    display() returns immediately; `threadStop` is a cooperative stop flag
    polled by the playback helpers (playVideo/loopVideo).
    """
    # Handle to the active playback thread, if any.
    currentThread = None
    # Set to True to ask the playback thread to exit; reset once joined.
    threadStop = False
    def __init__(self):
        print("Matrix initialized")
        options = RGBMatrixOptions()
        options.cols = 64
        self.matrix = RGBMatrix(options = options)
    def stopThread(self):
        """Ask any running playback thread to stop and wait for it to finish."""
        if self.currentThread is not None:
            print("Stopping running thread...")
            self.threadStop = True
            self.currentThread.join()
            self.threadStop = False
            print("Stopped thread")
    def display(self, mediaType, file):
        """Show `file` as an image, a looping video, or a one-shot video.

        mediaType - "image", "video" (loops), "video-one" (plays once);
            anything else blanks the screen.
        Returns True on success, False if the media could not be displayed.
        """
        result = False
        if mediaType == "image":
            # Display image
            try:
                self.stopThread()
                print("Displaying image")
                displayImage(file, self.matrix)
                result = True
            except Exception as e:  # fixed: Python 2 `except X, e` syntax
                print("Could not display image: " + str(e))
                # Lazy %-formatting also avoids breaking if str(e) contains '%'.
                logging.error('Could not display image file %s. Exception: %s', file, e)
        elif mediaType == "video" or mediaType == "video-one":
            # Parse the video descriptor file, then play the frames on a
            # background thread (currentThread).
            try:
                print("Displaying video")
                # Close the descriptor file deterministically (was leaked before).
                with open("./" + file + "/video", "r") as video:
                    videoInfo = video.read()
                print("Video info:")
                print(videoInfo)
                basename = re.search("basename \\w*", videoInfo).group()[9:]  # Regex search, then get everything past `basename `
                fileext = re.search("fileext \\w*", videoInfo).group()[8:]
                frames = int(re.search("frames \\d*", videoInfo).group()[7:])
                fps = int(re.search("fps \\d*", videoInfo).group()[4:])
                print()
                print("basename: " + basename)
                print("fileext: " + fileext)
                print("frames: " + str(frames))
                print("fps: " + str(fps))
                self.stopThread()
                if mediaType == "video":
                    print("Video loops. Creating thread...")
                    self.currentThread = threading.Thread(target=loopVideo, args=(self.matrix, file, basename, fileext, frames, fps, self))
                else:
                    print("Video is one-off. Creating thread...")
                    self.currentThread = threading.Thread(target=playVideo, args=(self.matrix, file, basename, fileext, frames, fps, self))
                print("Thread created. Starting thread...")
                self.currentThread.start()
                print("Thread started")
                result = True
            except Exception as e:  # fixed: Python 2 `except X, e` syntax
                print("Could not display video: " + str(e))
                logging.error('Could not display the video. Exception: %s', e)
        else:
            # Blank the display
            print("Media type " + mediaType + " not recognized. Blanking screen...")
            self.stopThread()
            self.matrix.Fill(0, 0, 0)
            result = True
            print("Screen blanked")
        return result
| 2.703125 | 3 |
hanabi.py | drew-lustig/hanabi | 0 | 12771970 | <reponame>drew-lustig/hanabi<filename>hanabi.py
from random import shuffle
class CardDeck(object):
    """A Hanabi draw deck plus discarded/played bookkeeping.

    A *variation* names a tuple of (colors, {number: copies per color})
    describing the card distribution; only 'classic' is defined.
    """
    # variation name -> (list of color codes, {card number: copies per color})
    variations = {
        'classic': (
            ['r', 'b', 'g', 'y', 'w'],
            {1: 3, 2: 2, 3: 2, 4: 2, 5: 1}
        )
    }
    def __init__(self, variation='classic'):
        self._variation = variation
        self.colors, self.numbers = self.variations[variation]
        # Discard pile (list of numbers) and highest played card, per color.
        self.discarded = {color: [] for color in self.colors}
        self.played = {color: 0 for color in self.colors}
        self.deck = self.full_deck(variation=variation)
        shuffle(self.deck)
    @property
    def variation(self):
        """Name of the card-distribution variation in use."""
        return self._variation
    @variation.setter
    def variation(self, value):
        # Compare against the *current* variation's full deck (previously the
        # default 'classic' deck), so the "game started" check stays correct
        # for variations of any size.
        if len(self.deck) < len(self.full_deck(variation=self._variation)):
            raise AttributeError('Cannot change variation after game has started.')
        self.colors, self.numbers = self.variations[value]
        self._variation = value
        # Bug fix: this previously referenced the undefined name `variation`
        # (NameError); the new deck must be built from `value`.
        self.deck = self.full_deck(variation=value)
        # Rebuild per-color bookkeeping since the color set may have changed.
        self.discarded = {color: [] for color in self.colors}
        self.played = {color: 0 for color in self.colors}
        shuffle(self.deck)
    @classmethod
    def full_deck(cls, variation='classic'):
        """Return the unshuffled list of (color, number) cards for *variation*."""
        full_deck = []
        colors, numbers = cls.variations[variation]
        for color in colors:
            for number, quantity in numbers.items():
                full_deck.extend([(color, number)] * quantity)
        return full_deck
    @classmethod
    def single_possibilities(cls, variation='classic'):
        """Fresh possibility sets [colors, numbers] for one unknown card."""
        colors, numbers = cls.variations[variation]
        return [set(colors), set(numbers)]
    @classmethod
    def all_possibilities(cls, hand_size, variation='classic'):
        """Fresh possibility sets for every card in a hand of *hand_size*."""
        return [
            cls.single_possibilities(variation) for _ in range(hand_size)
        ]
    @classmethod
    def max_score(cls, variation='classic'):
        """Best attainable score: one full run of numbers per color."""
        colors, numbers = cls.variations[variation]
        return len(colors) * len(numbers)
class Player(object):
    """One seat at the table: a display name, a hand, and hint knowledge."""
    def __init__(self, name, hand):
        self.name = name
        self.hand = hand
        # Start with full uncertainty about every card in the hand.
        self.possibilities = CardDeck.all_possibilities(len(self.hand))
    def __repr__(self):
        # Players render as their plain name in game logs and prompts.
        return self.name
class Game(CardDeck):
    """Complete Hanabi game state: players, tokens, fuses, score, turn order."""
    def __init__(self, players=('Player 1', 'Player 2', 'Player 3'),
                 variation='classic'):
        """Deal a new game for the named *players* using card *variation*."""
        CardDeck.__init__(self, variation)
        self._tokens = 8
        self._fuses = 0
        self.score = 0
        # Best score still reachable; shrinks as key cards are lost.
        self.current_max = self.max_score(variation=variation)
        # History of (player, choice, value, hint_player) tuples.
        self.log = []
        # Standard Hanabi hand sizes: 5 cards for 2-3 players, 4 for more.
        if len(players) < 4:
            self.hand_size = 5
        else:
            self.hand_size = 4
        self.players = []
        for player in players:
            self.players.append(Player(player, self.deck[:self.hand_size]))
            self.deck = self.deck[self.hand_size:]
        self.current_player = self.players[0]
    def __repr__(self):
        hints = '\n'.join([
            f'{i}: {poss[0]}, {poss[1]}'
            for i, poss in enumerate(self.current_player.possibilities)
        ])
        hands = '\n'.join([
            f'{player.name} hand: {player.hand}'
            for player in self.players if player != self.current_player
        ])
        return (
            '-------------------------------------\n'
            f'Played Cards: {self.played}\n'
            f'Discarded Cards: {self.discarded}\n'
            f'Cards Left: {len(self.deck)}\n'
            f'Tokens: {self.tokens}\n'
            f'Fuses: {self.fuses}\n'
            f'Score: {self.score}/{self.current_max}\n'
            '-------------------------------------\n'
            f'Current Player: {self.current_player}\n'
            f'Hints:\n{hints}\n'
            '-------------------------------------\n'
            f'{hands}'
        )
    @property
    def tokens(self):
        """Hint tokens remaining (0-8); setters enforce the bounds."""
        return self._tokens
    @tokens.setter
    def tokens(self, value):
        if value >= 9:
            raise ValueError('Cannot have more than 8 tokens.')
        elif value < 0:
            raise ValueError('Out of tokens')
        else:
            self._tokens = value
    @property
    def fuses(self):
        """Misplays so far; the game ends at 3."""
        return self._fuses
    @fuses.setter
    def fuses(self, value):
        if value < 0:
            raise ValueError('Cannot have negative fuses.')
        else:
            self._fuses = value
    def update_score(self):
        """Recompute the score as the sum of highest played cards per color."""
        self.score = sum(self.played.values())
    def update_max(self, color, number):
        """Decrease the maximum possible score for this game"""
        quantity = self.numbers[number]
        max_number = max(self.numbers.keys())
        # Only when the LAST copy of (color, number) is gone are the cards
        # number..max_number of that color unreachable.
        if self.discarded[color].count(number) == quantity:
            deduction = number - max_number - 1
            self.current_max += deduction
    def remove_hint(self, player, index):
        """Update the player's hints to match new hand"""
        player.possibilities.pop(index)
        player.possibilities.append(self.single_possibilities(variation=self.variation))
    def get_next_player(self, current_player):
        """Return the player seated after *current_player*, wrapping around."""
        if current_player == self.players[-1]:
            return self.players[0]
        else:
            return self.players[self.players.index(current_player) + 1]
    def hint(self, player, hint):
        """Give *player* a color (str) or number (int) hint for one token."""
        self.tokens -= 1  # raises ValueError('Out of tokens') at 0 tokens
        # Colors sit at index 0 of each card tuple, numbers at index 1.
        index = 0 if isinstance(hint, str) else 1
        indices = [i for i, card in enumerate(player.hand) if card[index] == hint]
        set_hint = {hint}
        for i in range(len(player.possibilities)):
            if i in indices:
                player.possibilities[i][index] = set_hint
            else:
                player.possibilities[i][index] = player.possibilities[i][index] - set_hint
    def _replace_card(self, player, card_index):
        """Refresh hint state for a spent card, drawing a replacement if any.

        Bug fix: drawing from an empty deck previously raised IndexError;
        now the hand (and its hint state) simply shrinks.
        """
        if self.deck:
            player.hand.append(self.deck.pop())
            self.remove_hint(player, card_index)
        else:
            player.possibilities.pop(card_index)
    def play(self, player, card_index):
        """Attempt to place the next card in a set"""
        color, number = player.hand.pop(card_index)
        if number - self.played[color] == 1:
            self.played[color] += 1
            # Completing a color's 5 grants a bonus token (capped at 8).
            if number == 5 and self.tokens < 8:
                self.tokens += 1
            self.update_score()
        else:
            # Misplay: the card is lost and a fuse burns.
            self.discarded[color].append(number)
            self.fuses += 1
            self.update_max(color, number)
        self._replace_card(player, card_index)
    def discard(self, player, card_index):
        """Discard a card for a token and draw a replacement if available."""
        self.tokens += 1
        color, number = player.hand.pop(card_index)
        self.discarded[color].append(number)
        # Bug fix: discarding the last copy of a card also lowers the best
        # attainable score, exactly like losing it to a misplay.
        self.update_max(color, number)
        self._replace_card(player, card_index)
    def turn(self, player, choice, value, hint_player=None):
        """Execute one turn: choice is 'hint', 'play' or 'discard'.

        value is the hint (color/number) or the card index to play/discard.
        Returns the next Player while the game continues, or the final
        integer score once an end condition is reached.
        """
        return_val = None
        if player != self.current_player:
            msg = (f'{player.name} cannot play out of turn. '
                   f"It is {self.current_player.name}'s turn")
            raise ValueError(msg)
        if choice == 'hint':
            if player == hint_player:
                raise ValueError('Cannot give yourself a hint! :)')
            else:
                self.hint(hint_player, value)
        elif choice == 'play':
            self.play(player, value)
            # Perfect score or third fuse ends the game immediately.
            if (self.score == self.current_max) or (self.fuses == 3):
                return_val = self.score
        elif choice == 'discard':
            self.discard(player, value)
            if self.deck == []:
                return_val = self.score
        else:
            raise ValueError('Must give a hint, play a card, or discard.')
        self.log.append((player, choice, value, hint_player))
        # Bug fix: compare against None — a finished game with score 0 used
        # to fall through here ('if not return_val') and never terminate.
        if return_val is None:
            self.current_player = self.get_next_player(player)
            return self.current_player
        return return_val
| 3.296875 | 3 |
hermes_fix/utils/date_helper.py | yabov/hermes_fix | 2 | 12771971 | <filename>hermes_fix/utils/date_helper.py<gh_stars>1-10
from datetime import datetime, timedelta
from dateutil import tz, parser
def inside_time_range(current_time, start_time, end_time):
    """Return True when *current_time* lies in [start_time, end_time], inclusive."""
    return start_time <= current_time <= end_time
def get_session_times(dt, time_zone, start_time_str, end_time_str):
    """Compute the FIX session window surrounding the reference time *dt*.

    dt - reference datetime; also the parse default for fields missing from
        the time strings.
    time_zone - currently unused; presumably reserved for tz handling — TODO confirm.
    start_time_str / end_time_str - session boundaries parsed by dateutil,
        e.g. a time of day (daily session) or a weekday+time (weekly session).
    Returns (start_time, end_time, increment) where increment is the session
    period: one day, or seven days for a weekly session.
    """
    start_time = parser.parse(start_time_str, default=dt)
    end_time = parser.parse(end_time_str, default=dt)
    increment = timedelta(days=1)
    # Detect a weekly schedule: parse again with a default shifted by one
    # day — if the result still lands on the same weekday, the spec pins a
    # specific day of week, so the period is a week.
    dt_offset = dt + timedelta(days=1)
    if parser.parse(start_time_str, default=dt).weekday() == parser.parse(start_time_str, default=dt_offset).weekday():
        increment = timedelta(days=7)
    # If logout is before logon, the session wraps; move the end forward
    # by one period.
    if end_time < start_time:
        end_time += increment
    # If both times are in the future and the de-incremented end_time is
    # still greater than current, shift the whole window back one period.
    if start_time > dt and end_time > dt and end_time - increment > dt:
        start_time -= increment
        end_time -= increment
    return start_time, end_time, increment
| 2.796875 | 3 |
challenges/texture_structure_nerf/worker.py | ritmps/kubric | 0 | 12771972 | <filename>challenges/texture_structure_nerf/worker.py
# Copyright 2021 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Worker script for NeRF texture-structure dataset generation.
The output is a JaxNeRF-compatible scene containing randomly placed,
procedurally textured objects annotated with frequency information.
"""
import logging
import kubric as kb
from kubric.renderer.blender import Blender
import numpy as np
import bpy
# Scene-wide constants.
BACKGROUND_COLOR = kb.Color(1.0, 1.0, 1.0)
LIGHT_DIRECTION = (-1, -0.5, 3)
# Random cubes are scaled uniformly within this range.
MIN_OBJECT_SCALE = 0.1
MAX_OBJECT_SCALE = 0.2
# --- CLI arguments
parser = kb.ArgumentParser()
parser.add_argument("--num_objects", type=int, default=30)
parser.add_argument("--num_frequency_bands", type=int, default=6)
parser.add_argument("--min_log_frequency", type=float, default=-1.0)
parser.add_argument("--max_log_frequency", type=float, default=2.0)
parser.add_argument("--num_train_frames", type=int, default=60)
parser.add_argument("--num_validation_frames", type=int, default=60)
parser.add_argument("--num_test_frames", type=int, default=60)
FLAGS = parser.parse_args()
# --- Common setups & resources
scene, rng, output_dir, scratch_dir = kb.setup(FLAGS)
renderer = Blender(scene, scratch_dir, use_denoising=True, adaptive_sampling=False)
# --- Add floor and light
logging.info("Setting up the scene.")
scene.background = BACKGROUND_COLOR
scene += kb.Cube(name="floor", scale=(100, 100, 0.1), position=(0, 0, -0.1))
scene += kb.DirectionalLight(name="sun", position=LIGHT_DIRECTION, look_at=(0, 0, 0), intensity=1.5)
bpy_scene = bpy.context.scene
# --- Add random objects to scene
logging.info("Adding objects to the scene.")
for j in range(FLAGS.num_objects):
  position = (
      (rng.uniform() - 0.5) * 2,
      (rng.uniform() - 0.5) * 2,
      rng.uniform()
  )
  # --- Create randomly scaled and rotated cube
  scale = rng.uniform() * (MAX_OBJECT_SCALE - MIN_OBJECT_SCALE) + MIN_OBJECT_SCALE
  instance = kb.Cube(name=f"inst_{j}", scale=scale, position=position, quaternion=kb.random_rotation(rng=rng))
  instance.material = kb.PrincipledBSDFMaterial(name="material")
  instance.material.metallic = 0.0
  instance.material.roughness = 1.0
  # --- Sample log-uniform texture frequency
  fpower = rng.uniform()
  frequency = 10**(fpower * (FLAGS.max_log_frequency - FLAGS.min_log_frequency) + FLAGS.min_log_frequency)
  # The frequency-band label is stored as the (1-based) segmentation id.
  instance.segmentation_id = 1 + int(fpower * FLAGS.num_frequency_bands)
  scene += instance
  # --- Generate random procedural texture with Blender nodes:
  # geometry position -> rotation -> scaling -> noise -> color ramp -> base color
  mat = bpy_scene.objects[f"inst_{j}"].active_material
  tree = mat.node_tree
  mat_node = tree.nodes["Principled BSDF"]
  ramp_node = tree.nodes.new(type="ShaderNodeValToRGB")
  tex_node = tree.nodes.new(type="ShaderNodeTexNoise")
  scaling_node = tree.nodes.new(type="ShaderNodeMapping")
  rotation_node = tree.nodes.new(type="ShaderNodeMapping")
  vector_node = tree.nodes.new(type="ShaderNodeNewGeometry")
  tree.links.new(vector_node.outputs["Position"], rotation_node.inputs["Vector"])
  tree.links.new(rotation_node.outputs["Vector"], scaling_node.inputs["Vector"])
  tree.links.new(scaling_node.outputs["Vector"], tex_node.inputs["Vector"])
  tree.links.new(tex_node.outputs["Fac"], ramp_node.inputs["Fac"])
  tree.links.new(ramp_node.outputs["Color"], mat_node.inputs["Base Color"])
  rotation_node.inputs["Rotation"].default_value = (
      rng.uniform() * np.pi,
      rng.uniform() * np.pi,
      rng.uniform() * np.pi,
  )
  # The mapping scale sets the sampled texture's spatial frequency.
  scaling_node.inputs["Scale"].default_value = (
      frequency,
      frequency,
      frequency,
  )
  tex_node.inputs["Roughness"].default_value = 0.0
  tex_node.inputs["Detail"].default_value = 0.0
  # Three interior color stops plus the two endpoints, all random hues.
  for x in np.linspace(0.0, 1.0, 5)[1:-1]:
    ramp_node.color_ramp.elements.new(x)
  for element in ramp_node.color_ramp.elements:
    element.color = kb.random_hue_color(rng=rng)
# --- Create the camera
camera = kb.PerspectiveCamera(name="camera", look_at=(0, 0, 1))
scene += camera
def update_camera():
  """Move the camera to a random point on the upper radius-4 hemisphere, aimed at the origin."""
  position = rng.normal(size=(3, ))
  position *= 4 / np.linalg.norm(position)
  position[2] = np.abs(position[2])
  camera.position = position
  camera.look_at((0, 0, 0))
  return camera.matrix_world
def output_split(split_name, n_frames):
  """Render *n_frames* random views and write a JaxNeRF-style transforms_<split>.json."""
  logging.info("Rendering the %s split.", split_name)
  frames = []
  # --- Render a set of frames from random camera poses
  for i in range(n_frames):
    matrix = update_camera()
    frame = renderer.render_still()
    frame["segmentation"] = kb.adjust_segmentation_idxs(frame["segmentation"], scene.assets, [])
    kb.write_png(filename=output_dir / split_name / f"{i}.png", data=frame["rgba"])
    kb.write_palette_png(filename=output_dir / split_name / f"{i}_segmentation.png", data=frame["segmentation"])
    frame_data = {
        "transform_matrix": matrix.tolist(),
        "file_path": f"{split_name}/{i}",
    }
    frames.append(frame_data)
  # --- Write the JSON descriptor for this split
  kb.write_json(filename=output_dir / f"transforms_{split_name}.json", data={
      "camera_angle_x": camera.field_of_view,
      "frames": frames,
  })
# --- Write train, validation, and test splits for the nerf data
output_split("train", FLAGS.num_train_frames)
output_split("val", FLAGS.num_validation_frames)
output_split("test", FLAGS.num_test_frames)
| 2.15625 | 2 |
process/templatetags/json_filters.py | pycess/pycess | 1 | 12771973 | from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
from django.template import Library
import json
register = Library()
@register.filter
def jsonify(value):
    """Template filter: serialize *value* to a JSON string.

    DjangoJSONEncoder extends the stdlib encoder with datetime, Decimal and
    UUID support. The parameter was renamed from `object` (which shadowed
    the builtin); template filter arguments are passed positionally, so
    callers are unaffected.
    NOTE(review): the output is NOT marked safe — escape it, or apply |safe
    deliberately when embedding in templates.
    """
    return json.dumps(value, cls=DjangoJSONEncoder)
| 1.796875 | 2 |
GEOS_Util/coupled_diagnostics/g5lib/confocn.py | GEOS-ESM/GMAO_Shared | 1 | 12771974 | <reponame>GEOS-ESM/GMAO_Shared
'''
@EXPDSC
### Put detailed experiment description here ###
'''
import os
from datetime import date
# '@...' tokens are template placeholders substituted when the experiment
# configuration is generated.
expid='@EXPID'
cmpexp='@EXPID'
data_path='@EXPDIR'
plot_path=data_path+'/plots_ocn'
# Static ocean-grid ancillary files shared across experiments.
basin_mask='@COUPLEDIR/basin_mask/@OGCM_IMx@OGCM_JM/basin_mask.nc'
grid_spec='@COUPLEDIR/a@AGCM_IMx@AGCM_JM_o@OGCM_IMx@OGCM_JM/INPUT/grid_spec.nc'
# Analysis period; skip_yrs trims spin-up years from the start of the window.
start_year=1980
end_year=1981
skip_yrs=0
dates=(date(start_year+skip_yrs,1,15),date(end_year,12,15))
'''
You can also specify a custom format string for data file locations for selected collections.
In the example below, a custom format is specified for geosgcm_ocn2d monthly and daily collections,
and for MOM output collection. For all other collections, a default format string will be used.
fmt={
'geosgcm_ocn2d':{
'MONTHLY': '{data_path}/{collection}/{expid}.{collection}.monthly.{date:%Y%m}.nc4',
'DAILY' : '{data_path}/{collection}/{expid}.{collection}.monthly.{date:%Y%m%d}_1500z.nc4'
},
'MOM_Output': '{data_path}/{collection}/ocean_month.e{date:%Y%m}01_00z.nc'
}
'''
| 1.992188 | 2 |
torchsr/models/carn.py | mgm52/torchSR | 0 | 12771975 | # Implementation from https://github.com/nmhkahn/CARN-pytorch
# Fast, Accurate, and Lightweight Super-Resolution with Cascading Residual Network
# https://arxiv.org/abs/1803.08664
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url
# Public API: the 'carn'/'carn_m' factory helpers (expected to be defined
# later in this module — not visible in this chunk).
__all__ = [ 'carn', 'carn_m' ]
# Pretrained checkpoint download locations, keyed by model variant.
urls = {
    'carn': 'https://github.com/Coloquinte/torchSR/releases/download/v1.0.3/carn.pt',
    'carn_m': 'https://github.com/Coloquinte/torchSR/releases/download/v1.0.3/carn_m.pt',
}
class MeanShift(nn.Module):
    """Fixed 1x1 conv that subtracts (sub=True) or adds back a constant RGB mean."""
    def __init__(self, mean_rgb, sub):
        super(MeanShift, self).__init__()
        sign = -1 if sub else 1
        self.shifter = nn.Conv2d(3, 3, 1, 1, 0)
        # Identity kernel, so the conv reduces to a per-channel bias add.
        self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.shifter.bias.data = torch.Tensor(
            [mean_rgb[0] * sign, mean_rgb[1] * sign, mean_rgb[2] * sign])
        # The shift is a constant transform; keep it out of the optimizer.
        for p in self.shifter.parameters():
            p.requires_grad = False
    def forward(self, x):
        return self.shifter(x)
class BasicBlock(nn.Module):
    """A single Conv2d followed by an in-place ReLU.

    The layers live in `self.body` (an nn.Sequential) so that pretrained
    state dicts keep their parameter names.
    """
    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1):
        super(BasicBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, pad),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        return self.body(x)
class ResidualBlock(nn.Module):
    """Two 3x3 convs with a skip connection: relu(conv(relu(conv(x))) + x).

    Requires in_channels == out_channels for the residual add to broadcast.
    """
    def __init__(self,
                 in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        )
    def forward(self, x):
        return F.relu(self.body(x) + x)
class EResidualBlock(nn.Module):
    """Efficient residual block: two grouped 3x3 convs plus a 1x1 mixer,
    with a skip connection. Grouped convolutions cut the parameter count;
    the trailing 1x1 conv re-mixes information across groups.
    """
    def __init__(self,
                 in_channels, out_channels,
                 group=1):
        super(EResidualBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0),
        )
    def forward(self, x):
        return F.relu(self.body(x) + x)
class UpsampleBlock(nn.Module):
    """Dispatches to a per-scale _UpsampleBlock tail.

    With multi_scale=True, dedicated x2/x3/x4 tails are built (attribute
    names up2/up3/up4 are fixed so pretrained state dicts keep loading);
    otherwise a single tail for the given scale is used.
    """
    def __init__(self,
                 n_channels, scale, multi_scale,
                 group=1):
        super(UpsampleBlock, self).__init__()
        if multi_scale:
            self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
            self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
            self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
        else:
            self.up = _UpsampleBlock(n_channels, scale=scale, group=group)
        self.multi_scale = multi_scale
    def forward(self, x, scale):
        if not self.multi_scale:
            # Single-scale network: the runtime scale argument is ignored here.
            return self.up(x)
        if scale == 2:
            return self.up2(x)
        if scale == 3:
            return self.up3(x)
        if scale == 4:
            return self.up4(x)
class _UpsampleBlock(nn.Module):
    """Sub-pixel upsampling tail for one fixed scale (2, 3, 4 or 8)."""
    def __init__(self,
                 n_channels, scale,
                 group=1):
        super(_UpsampleBlock, self).__init__()
        layers = []
        if scale in (2, 4, 8):
            # log2(scale) stages, each one doubling the spatial resolution.
            for _ in range(int(math.log(scale, 2))):
                layers.append(nn.Conv2d(n_channels, 4*n_channels, 3, 1, 1, groups=group))
                layers.append(nn.ReLU(inplace=True))
                layers.append(nn.PixelShuffle(2))
        elif scale == 3:
            layers.append(nn.Conv2d(n_channels, 9*n_channels, 3, 1, 1, groups=group))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.PixelShuffle(3))
        self.body = nn.Sequential(*layers)
    def forward(self, x):
        return self.body(x)
class CARNBlock(nn.Module):
    """Cascading block: three residual units with dense 1x1 squeeze fusions.

    NOTE(review): in_channels/out_channels/group are accepted but the widths
    are hard-coded to 64, matching how CARN instantiates this block —
    confirm before reusing with other widths.
    """
    def __init__(self,
                 in_channels, out_channels,
                 group=1):
        super(CARNBlock, self).__init__()
        self.b1 = ResidualBlock(64, 64)
        self.b2 = ResidualBlock(64, 64)
        self.b3 = ResidualBlock(64, 64)
        self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
        self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
        self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
    def forward(self, x):
        # Each stage concatenates everything produced so far, then squeezes
        # back to 64 channels with a 1x1 conv.
        cascade = x
        out = x
        for unit, squeeze in ((self.b1, self.c1), (self.b2, self.c2), (self.b3, self.c3)):
            cascade = torch.cat([cascade, unit(out)], dim=1)
            out = squeeze(cascade)
        return out
class CARN(nn.Module):
    """CARN super-resolution network (cascading residual network).

    Three cascading blocks fused by 1x1 convolutions, followed by a
    sub-pixel upsampler. Constructor signature and forward(x, scale) are
    unchanged from the original.
    """
    def __init__(self, scale, pretrained=False, map_location=None):
        super(CARN, self).__init__()
        multi_scale = True
        group = 1
        self.scale = scale
        # Subtract a fixed RGB mean on the way in and add it back on the way
        # out (constants from the reference implementation).
        self.sub_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=True)
        self.add_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=False)
        self.entry = nn.Conv2d(3, 64, 3, 1, 1)
        self.b1 = CARNBlock(64, 64)
        self.b2 = CARNBlock(64, 64)
        self.b3 = CARNBlock(64, 64)
        self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
        self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
        self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
        self.upsample = UpsampleBlock(64, scale=scale,
                                      multi_scale=multi_scale,
                                      group=group)
        self.exit = nn.Conv2d(64, 3, 3, 1, 1)
        if pretrained:
            self.load_pretrained(map_location=map_location)
    def forward(self, x, scale=None):
        """Super-resolve *x*; *scale* must agree with the configured scale if both are set."""
        if self.scale is not None:
            if scale is not None and scale != self.scale:
                raise ValueError(f"Network scale is {self.scale}, not {scale}")
            scale = self.scale
        else:
            if scale is None:
                raise ValueError(f"Network scale was not set")
        x = self.sub_mean(x)
        x = self.entry(x)
        # Model-level cascade: same dense concatenate-and-squeeze pattern as
        # inside CARNBlock, applied across the three blocks.
        c0 = o0 = x
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)
        o1 = self.c1(c1)
        b2 = self.b2(o1)
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.c2(c2)
        b3 = self.b3(o2)
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.c3(c3)
        out = self.upsample(o3, scale=scale)
        out = self.exit(out)
        out = self.add_mean(out)
        return out
    def load_pretrained(self, map_location=None):
        """Download and load the released CARN weights.

        Bug fix: an explicit *map_location* is now honored; previously the
        argument was unconditionally overwritten by the CUDA/CPU
        auto-detection below.
        """
        if map_location is None:
            map_location = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        state_dict = load_state_dict_from_url(urls["carn"], map_location=map_location, progress=True)
        self.load_state_dict(state_dict)
class CARNMBlock(nn.Module):
    """Cascading block of CARN-M (the lightweight variant).

    Only ONE efficient residual block (``b1``) is defined and it is applied
    at every stage in forward(): the repeated ``self.b1`` calls are
    intentional recursive parameter sharing, not a copy/paste typo -- that
    is how this variant cuts its parameter count (only b1 exists to call).
    ``in_channels``/``out_channels`` are accepted but currently unused;
    the channel count is hard-coded to 64.
    """
    def __init__(self,
                 in_channels, out_channels,
                 group=1):
        super(CARNMBlock, self).__init__()
        # Single shared residual block; `group` controls its grouped convs.
        self.b1 = EResidualBlock(64, 64, group=group)
        # 1x1 fusion convs for the growing concatenation (128/192/256 -> 64).
        self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
        self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
        self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
    def forward(self, x):
        # Cascading connections; the SHARED block b1 is reused at every stage.
        c0 = o0 = x
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)
        o1 = self.c1(c1)
        b2 = self.b1(o1)
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.c2(c2)
        b3 = self.b1(o2)
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.c3(c3)
        return o3
class CARNM(nn.Module):
    """CARN-M: the mobile/lightweight CARN variant (grouped convolutions and
    shared residual blocks inside each CARNMBlock).
    """
    def __init__(self, scale, pretrained=False, map_location=None):
        super(CARNM, self).__init__()
        multi_scale = True
        group = 4
        self.scale = scale
        # Channel means used for input normalisation / output denormalisation.
        self.sub_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=True)
        self.add_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=False)
        self.entry = nn.Conv2d(3, 64, 3, 1, 1)
        self.b1 = CARNMBlock(64, 64, group=group)
        self.b2 = CARNMBlock(64, 64, group=group)
        self.b3 = CARNMBlock(64, 64, group=group)
        # Global cascading: 1x1 convs fuse the growing concatenation.
        self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
        self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
        self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
        self.upsample = UpsampleBlock(64, scale=scale,
                                      multi_scale=multi_scale,
                                      group=group)
        self.exit = nn.Conv2d(64, 3, 3, 1, 1)
        if pretrained:
            self.load_pretrained(map_location=map_location)
    def forward(self, x, scale=None):
        """Super-resolve *x*. *scale* must agree with the configured scale
        when both are given; required when the network scale is unset."""
        if self.scale is not None:
            if scale is not None and scale != self.scale:
                raise ValueError(f"Network scale is {self.scale}, not {scale}")
            scale = self.scale
        else:
            if scale is None:
                raise ValueError("Network scale was not set")
            # BUGFIX: the original re-assigned `scale = self.scale` here,
            # clobbering the validated caller-supplied scale with None.
        x = self.sub_mean(x)
        x = self.entry(x)
        # Global cascading connections mirroring the per-block structure.
        c0 = o0 = x
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)
        o1 = self.c1(c1)
        b2 = self.b2(o1)
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.c2(c2)
        b3 = self.b3(o2)
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.c3(c3)
        out = self.upsample(o3, scale=scale)
        out = self.exit(out)
        out = self.add_mean(out)
        return out
    def load_pretrained(self, map_location=None):
        """Load the pretrained 'carn_m' weights.

        On CPU-only machines the map_location is forced to 'cpu' so the
        download can be deserialised; otherwise the caller's value is used.
        """
        if not torch.cuda.is_available():
            map_location = torch.device('cpu')
        state_dict = load_state_dict_from_url(urls["carn_m"], map_location=map_location, progress=True)
        self.load_state_dict(state_dict)
def carn(scale, pretrained=False, map_location=None):
    """Factory for CARN; *map_location* is forwarded for pretrained loading
    (the original silently dropped it)."""
    return CARN(scale, pretrained, map_location)
def carn_m(scale, pretrained=False, map_location=None):
    """Factory for CARN-M; *map_location* is forwarded for pretrained loading
    (the original silently dropped it)."""
    return CARNM(scale, pretrained, map_location)
| 2.40625 | 2 |
__init__.py | austinmilt/video-game-view | 3 | 12771976 | """
Video Game HUD for Youtube videos.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from detection import *
| 0.855469 | 1 |
src/CPMel/api/apiwrap.py | 2921251087/CPMel | 1 | 12771977 | #!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import maya.cmds as cmds
from .. import core as cmcore
from . import OpenMaya
class MeshVertex(object):
    u"""
    Maya mesh-API wrapper with undo/redo support.
    Snapshots the mesh's vertex positions and UVs on construction, snapshots
    the final state in end(), and exposes redoIt()/undoIt() so the edit can
    be replayed or reverted; both are registered with the command system when
    used as a context manager.
    """
    def __init__(self, obj_name):
        # Resolve the DAG path / component object for the named mesh.
        sel = OpenMaya.MSelectionList()
        sel.add(obj_name)
        path = OpenMaya.MDagPath()
        obj = OpenMaya.MObject()
        sel.getDagPath(path, obj)
        self.it = OpenMaya.MItMeshVertex(path, obj)
        self.fn = OpenMaya.MFnMesh(path)
        # Capture the initial vertex positions and UVs (the undo state).
        self.init_points = OpenMaya.MPointArray()
        self.fn.getPoints(self.init_points)
        self.init_us = OpenMaya.MFloatArray()
        self.init_vs = OpenMaya.MFloatArray()
        self.fn.getUVs(self.init_us, self.init_vs)
    def end(self):
        u"""
        Finalize: capture the mesh's current points and UVs (the redo state).
        """
        self.end_points = OpenMaya.MPointArray()
        self.fn.getPoints(self.end_points)
        self.end_us = OpenMaya.MFloatArray()
        self.end_vs = OpenMaya.MFloatArray()
        self.fn.getUVs(self.end_us, self.end_vs)
    def redoIt(self):
        u"""
        Re-apply the captured end state (points and UVs).
        """
        self.fn.setPoints(self.end_points)
        self.fn.setUVs(self.end_us, self.end_vs)
    def undoIt(self):
        u"""
        Restore the captured initial state (points and UVs).
        """
        self.fn.setPoints(self.init_points)
        self.fn.setUVs(self.init_us, self.init_vs)
    def __enter__(self):
        # The context manager yields the vertex iterator, not the wrapper.
        return self.it
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end()
        # Register the redo/undo pair with the command system on exit.
        cmcore.defAddCommandList(self.redoIt, self.undoIt)
| 2.296875 | 2 |
Algorithms/Medium/873. Length of Longest Fibonacci Subsequence/answer.py | KenWoo/Algorithm | 0 | 12771978 | <filename>Algorithms/Medium/873. Length of Longest Fibonacci Subsequence/answer.py
from typing import List
import collections
class Solution:
    def lenLongestFibSubseq(self, A: List[int]) -> int:
        """Length of the longest Fibonacci-like subsequence of A, or 0 if
        no such subsequence of length >= 3 exists."""
        position = {value: idx for idx, value in enumerate(A)}
        # chain[(i, j)] = length of the fib-like chain ending with A[i], A[j].
        chain = collections.defaultdict(lambda: 2)
        best = 0
        for k in range(len(A)):
            for j in range(k):
                i = position.get(A[k] - A[j])
                if i is not None and i < j:
                    length = chain[i, j] + 1
                    chain[j, k] = length
                    if length > best:
                        best = length
        return best if best >= 3 else 0
if __name__ == "__main__":
s = Solution()
result = s.lenLongestFibSubseq([1, 2, 3, 4, 5, 6, 7, 8])
print(result)
| 3.90625 | 4 |
templates/control.py | Coricos/Challenger | 15 | 12771979 | # Author: <NAME>
# Date: 05 April 2020
# Project: Challenger
import os, json, sys, subprocess
# Virtualenv artefacts wiped / skipped when (re)creating the project env.
rmf = ['bin', 'etc', 'include', 'lib', 'lib64', 'pyvenv.cfg', 'share']
# Package names masked out of generated requirements (local / unresolvable).
msk = ['challenger', 'package', 'elementtree', 'ffprobe']
def packages_from_project(path):
    """Return the package names pipreqs detects under *path*.

    Returns an empty list when pipreqs is missing, exits non-zero, or its
    output cannot be decoded, so callers can treat tool failure and "no
    packages" alike.
    """
    cmd = 'pipreqs --force --no-pin --print --savepath /dev/null'
    try:
        pck = subprocess.check_output(cmd.split(' ') + [path])
        return pck.decode('utf-8')[:-1].split('\n')
    # Narrowed from a bare `except:` -- only tool/IO failures are expected.
    except (subprocess.SubprocessError, OSError, UnicodeDecodeError):
        return []
def update_requirements(path, mask):
    """Regenerate ``{path}/requirements.txt`` via pipreqs, dropping packages
    whose name appears in *mask*.

    Best-effort like the original: any pipreqs/IO failure is ignored and the
    file is left untouched. The file is only (re)written when at least one
    package survives the mask.
    """
    cmd = 'pipreqs --force --print --savepath /dev/null'
    try:
        out = subprocess.check_output(cmd.split(' ') + [path])
        pck = out.decode('utf-8')[:-1].split('\n')
        pck = [e for e in pck if e.split('==')[0] not in mask and len(e) > 0]
        if pck:
            # `with` closes the handle (the original relied on GC).
            with open('{}/requirements.txt'.format(path), 'w') as f:
                f.write('\n'.join(pck) + '\n')
    # Narrowed from a bare `except: pass`.
    except (subprocess.SubprocessError, OSError, UnicodeDecodeError):
        pass
def compile_list_packages(packages):
    """Flatten a list of package lists into one list, keeping first-seen
    order and dropping duplicates.

    Uses a set for membership so the pass is O(n) instead of the original
    O(n^2) ``item not in list`` scan (entries are package-name strings).
    """
    seen = set()
    ordered = []
    for group in packages:
        for pkg in group:
            if pkg not in seen:
                seen.add(pkg)
                ordered.append(pkg)
    return ordered
def c_server(file='config-instance.json'):
    """Return the EC2 instance configuration, merging *file* (JSON) over the
    built-in defaults when it exists."""
    cfg = {'instance_type': 't3.micro'}
    if os.path.exists(file):
        # `with` closes the handle (the original leaked it).
        with open(file) as f:
            cfg.update(json.load(f))
    return cfg
def get_tags(file='.elasticbeanstalk/config.yml'):
    """Build an ``application=...,service=...`` tag string from an Elastic
    Beanstalk config file; returns '' when the file does not exist."""
    import yaml
    if os.path.exists(file):
        with open(file) as f:
            cfg = yaml.safe_load(f)
        app = cfg.get('global').get('application_name').lower()
        # Chained .get() on a missing section yields None -> AttributeError;
        # narrowed from a bare `except:`.
        # NOTE(review): the fallback key 'tbranch-defaults' looks like a typo
        # for 'branch-defaults' -- confirm against real EB config files.
        try:
            nme = cfg.get('branch-defaults').get('master').get('environment').lower()
        except AttributeError:
            nme = cfg.get('tbranch-defaults').get('default').get('environment').lower()
        cfg = dict(zip(['application', 'service'], [app, nme]))
        return ','.join(['{}={}'.format(k, v) for k, v in cfg.items()])
    else:
        return ''
def env_vars(file='config-environment.json'):
    """Return environment variables read from *file* (JSON, if it exists),
    always stamped with BIRTH_DATE set to today's date."""
    import datetime
    cfg = dict()
    if os.path.exists(file):
        # `with` closes the handle (the original leaked it).
        with open(file) as f:
            cfg.update(json.load(f))
    cfg.update({'BIRTH_DATE': str(datetime.datetime.now().date())})
    return cfg
def get_conf(root='.', file='config-environment.json'):
    """Merge *file* from every immediate subdirectory of *root* into one
    dict (later directories override earlier keys)."""
    cfg = dict()
    for path in os.listdir(root):
        if os.path.isdir('/'.join([root, path])):
            fle = '/'.join([root, path, file])
            if os.path.exists(fle):
                # `with` closes the handle (the original leaked it).
                with open(fle) as f:
                    cfg.update(json.load(f))
    return cfg
if __name__ == '__main__':
    # CLI dispatcher: sys.argv[1] selects one project-management action.
    # Each branch shells out with os.system; failures are not checked.
    # config-project: wipe the virtualenv artefacts and recreate the venv.
    if sys.argv[1] == 'config-project':
        os.system('rm -rf {}'.format(' '.join(rmf)))
        os.system('python3 -m venv .')
    # create-project: bootstrap tooling + jupyter, register a kernel, then
    # generate and install requirements from all top-level subprojects.
    if sys.argv[1] == 'create-project':
        os.system('pip install setuptools wheel pip --upgrade')
        os.system('pip install pipreqs --upgrade')
        frc = ['numpy', 'cmake', 'jupyter', 'notebook', 'ipython', 'ipykernel']
        with open('requirements.txt', 'w') as f: f.write('\n'.join(frc)+'\n')
        os.system('pip install -r requirements.txt')
        os.system('pip install jupyter_contrib_nbextensions')
        os.system('jupyter contrib nbextension install --user')
        os.system('jupyter nbextension enable codefolding/main')
        # Kernel name = current directory name, lowercased.
        src = os.getcwd().split('/')[-1]
        os.system('python -m ipykernel install --user --name={}'.format(src.lower()))
        lst = [d for d in os.listdir() if os.path.isdir(d) and not d.startswith('.') and d not in rmf]
        lst = [packages_from_project(d) for d in lst]
        lst = compile_list_packages(lst)
        # Drop masked packages and the ones already force-installed above.
        lst = [p for p in lst if p not in msk + frc]
        with open('requirements.txt', 'w') as f: f.write('\n'.join(lst)+'\n')
        os.system('pip install -r requirements.txt')
        os.remove('requirements.txt')
    # update-project: refresh requirements.txt in every subproject.
    if sys.argv[1] == 'update-project':
        lst = [d for d in os.listdir() if os.path.isdir(d) and not d.startswith('.') and d not in rmf]
        for drc in lst: update_requirements(drc, msk)
    # config-python: patch bin/activate so the venv exports/unsets the
    # merged project environment variables.
    if sys.argv[1] == 'config-python':
        vars_env = env_vars()
        cfg_size = len(vars_env.keys())
        # Keep a pristine copy of the original activate script.
        if not os.path.exists('bin/activate-origin'):
            os.system('cp bin/activate bin/activate-origin')
        os.system('cp bin/activate-origin bin/activate')
        # NOTE(review): this rebinding shadows the env_vars() function for
        # the rest of this branch -- intentional here, but fragile.
        env_vars = get_conf()
        add_vars = ['export {}={}'.format(key, env_vars.get(key)) for key in sorted(env_vars.keys())]
        add_vars = '\n' + '\n'.join(add_vars)
        del_vars = ['unset {}'.format(key) for key in sorted(env_vars.keys())]
        del_vars = '\n    ' + '\n    '.join(del_vars)
        # Splice unsets into deactivate() (around line 37 of the stock
        # activate script) and append the exports at the end.
        old_file = open('bin/activate').readlines()
        new_file = ''.join(old_file[:37]) + del_vars + ''.join(old_file[37:]) + add_vars
        open('bin/activate', 'w').write(new_file)
    # create-service: `eb create` a new environment with instance config,
    # environment variables and tags.
    if sys.argv[1] == 'create-service':
        src_tags = get_tags()
        vars_env = env_vars()
        cfg_size = len(vars_env.keys())
        # NOTE(review): rebinding shadows the c_server() function below.
        c_server = c_server()
        template = 'eb create {} {} --envvars {} --tags {}'
        instance = ' '.join(["--{} '{}'".format(k, str(v)) for k,v in c_server.items()])
        env_vars = ','.join(["{}='{}'".format(k, str(v)) for k,v in vars_env.items()])
        print('\n# Launch {} Creation'.format(sys.argv[2]))
        print('# On {} with {} Associated Variables\n'.format(c_server.get('instance_type'), cfg_size))
        os.system(template.format(sys.argv[2], instance, env_vars, src_tags))
    # config-service: push the current environment variables to EB.
    if sys.argv[1] == 'config-service':
        vars_env = env_vars()
        cfg_size = len(vars_env.keys())
        template = 'eb setenv {}'
        env_vars = ','.join(['='.join([k, str(v)]) for k,v in vars_env.items()])
        print('\n# Update Environment Variables')
        print('# {} Associated Variables\n'.format(cfg_size))
        os.system(template.format(env_vars))
    # config-docker: dump the environment variables as a docker env-file.
    if sys.argv[1] == 'config-docker':
        vars_env = env_vars()
        cfg_size = len(vars_env.keys())
        env_vars = '\n'.join(['='.join([k, str(v)]) for k,v in vars_env.items()])
        env_file = open('config-docker.env', 'w')
        env_file.write(env_vars + '\n')
    # config-lambda: build lambda packages on a remote EC2 host and fetch
    # them back. argv: [2]=host ip (dots allowed), [3]=AZ, [4]=key file.
    if sys.argv[1] == 'config-lambda':
        sip = sys.argv[2].replace('.', '-')
        try: avz = sys.argv[3]
        except: avz = 'us-east-2'
        try: key = sys.argv[4]
        except: key = '../aws.pem'
        os.system("scp -i {} packages.sh requirements.txt ec2-user@ec2-{}.{}.compute.amazonaws.com:~".format(key, sip, avz))
        os.system("ssh -i {} ec2-user@ec2-{}.{}.compute.amazonaws.com 'bash -s' < packages.sh".format(key, sip, avz))
        os.system("scp -i {} ec2-user@ec2-{}.{}.compute.amazonaws.com:~/app/packages.zip .".format(key, sip, avz))
        os.system("unzip packages.zip -d packages")
        os.remove("packages.zip")
    # create-lambda: zip sources + fetched packages into function.zip.
    if sys.argv[1] == 'create-lambda':
        os.system("mkdir tmp; cp *.py *.json tmp; cp -r packages/* tmp")
        os.system("cd tmp; zip -r ../function.zip *; cd ..; rm -rf tmp")
| 2.1875 | 2 |
lib/analyzers.py | sully90/spider | 1 | 12771980 | import collections
import math
import numpy as np
import mlpy
class TermFrequencyAnalyzer(object):
    """Scores string similarity as the summed tf-idf weight of the tokens
    shared by all arguments.

    Each *document* is an iterable of tokens. Note get_similarity() indexes
    ``self.idf`` directly, so tokens unseen at construction raise KeyError.
    """

    def __init__(self, *documents):
        self.idf = self.compute_idf(*documents)

    def compute_idf(self, *documents):
        """Return {token: log(N / document_frequency)} over *documents*."""
        # document frequency: number of documents containing each token
        df = collections.defaultdict(int)
        for tokens in documents:
            for token in set(tokens):
                df[token] += 1
        # idf
        idf = dict()
        # Fixed: dict.iteritems() is Python-2 only and crashes on Python 3;
        # .items() works on both.
        for token, count in df.items():
            idf[token] = math.log(float(len(documents)) / float(count))
        return idf

    def get_similarity(self, *strings):
        """Sum tf * idf over tokens common to every argument; 0.0 for < 2
        arguments."""
        if len(strings) <= 1:
            return 0.0
        counts = [collections.defaultdict(int) for _ in strings]
        for index, tokens in enumerate(strings):
            for token in tokens:
                counts[index][token] += 1
        score = 0.0
        # intersection of the token sets
        for token in set.intersection(*[set(tokens) for tokens in strings]):
            # total term frequency of the token across all arguments
            tf = float(sum([count[token] for count in counts]))
            score += tf * self.idf[token]
        return score
class LongestAnalyzer(object):
    """Scores string similarity as the length of the longest common
    subsequence, delegated to mlpy's C implementation."""

    def __init__(self, *documents):
        # Interface parity with other analyzers; no corpus state is needed.
        pass

    def get_similarity(self, a, b):
        """Return the LCS length of *a* and *b* (mlpy.lcs_std)."""
        #return self.lcs(a, b)
        # View the strings as uint32 codepoint arrays, as mlpy expects
        # numeric sequences.
        a = np.array(list(a), dtype='U1').view(np.uint32)
        b = np.array(list(b), dtype='U1').view(np.uint32)
        length, path = mlpy.lcs_std(a, b)
        return length

    def lcs(self, a, b):
        """Pure-Python LCS length (two-row DP); currently unused fallback.

        NOTE(review): silently truncates both inputs to 200 characters --
        results differ from get_similarity() for longer strings.
        """
        a = a[:200]
        b = b[:200]
        # Ensure `a` is the longer string so the DP rows stay short.
        if (len(a) < len(b)):
            a, b = b, a
        M = len(a)
        N = len(b)
        # Two rolling rows instead of the full (M+1) x (N+1) table.
        arr = np.zeros((2, N + 1))
        for i in range(1, M + 1):
            curIdx = i % 2
            prevIdx = 1 - curIdx
            ai = a[i - 1]
            for j in range(1, N + 1):
                bj = b[j - 1]
                if (ai == bj):
                    arr[curIdx][j] = 1 + arr[prevIdx][j - 1]
                else:
                    arr[curIdx][j] = max(arr[curIdx][j - 1], arr[prevIdx][j])
        return arr[M % 2][N]
| 2.734375 | 3 |
src/client/ui/widget/dynamic_textbox_example.py | Tubular-Terriers/code-jam | 1 | 12771981 | <gh_stars>1-10
#!/usr/bin/env python
import asyncio
import curses
import curses.textpad
import pygame
from pynput import keyboard
def static_vars(**kwargs):
    """Decorator factory keeping per-decorator state in the closed-over
    *kwargs* dict.
    The decorated async callback is run to completion with asyncio.run();
    its return value is folded into kwargs['_str'] (-1 deletes the last
    character, any other value is appended) and the buffer is redrawn via
    the module-level print_scr()/window.
    NOTE(review): only the '_str' key is ever used; other kwargs are
    ignored. Relies on globals `window` and `print_scr` existing before
    the first key event -- confirm import/initialisation order.
    """
    def async_wrapper(callback):
        def inner(key):
            temp = asyncio.run(callback(key))
            # key combinations traitement
            if temp == -1:
                kwargs["_str"] = kwargs["_str"][:-1]
            else:
                kwargs["_str"] += temp
            print_scr(window, kwargs["_str"])
        return inner
    return async_wrapper
@static_vars(_str="")
async def on_release(key):
# await asyncio.sleep(1) # async support
if key == keyboard.Key.esc:
# Stop listener
global crashed
crashed = True
return False
if key == keyboard.Key.backspace:
return -1
try:
return key.char
except AttributeError:
return ""
def print_scr(stdscr, text):
    """Draw *text* inside a box titled "Text Box" on the curses window.

    Always returns True after refreshing the window.
    """
    stdscr.clear()
    # Mirrors the original's debug echo of the raw text to stdout.
    print(text)
    title = "Text Box"
    width = max(len(title), len(text))

    def centered(content, pad):
        # Pad *content* to *width*, extra padding going to the right.
        left = (width - len(content)) // 2
        right = width - len(content) - left
        return pad * left + content + pad * right

    stdscr.addstr(0, 0, "┌─" + centered(title, "─") + "─┐")
    stdscr.addstr(1, 0, "│ " + centered(text, " ") + " │")
    stdscr.addstr(2, 0, "└─" + "─" * width + "─┘")
    stdscr.refresh()
    return True
# Put the terminal into raw-ish input mode (no echo, no line buffering).
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
window = curses.newwin(20, 20, 0, 0)
# non-blocking listener
listener = keyboard.Listener(on_release=on_release)
listener.start()
# for non-blocking code, used below loop
pygame.init()
clock = pygame.time.Clock()
crashed = False
counter = 1
# Idle loop keeping the process alive until Esc sets `crashed` (on_release).
while not crashed:
    counter += 1
    clock.tick(1) # will be 10 in the next run
# classic conf
# Restore the terminal to its normal state before exiting.
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
| 2.8125 | 3 |
activflow/quotient/forms.py | mcgauranc/mosaiq | 0 | 12771982 | """Custom Forms"""
from django import forms
from django.db.models import (
CharField)
class CustomForm(forms.ModelForm):
    """Sample custom form with a read-only sample-id field."""
    # Fixed: the original declared a django.db.models.CharField here (with a
    # positional verbose_name and editable=False), which a ModelForm simply
    # ignores -- declared form fields must come from django.forms.
    # `disabled=True` is the forms-layer equivalent of a non-editable field.
    sample_id = forms.CharField(label="Sample Id:", max_length=200, disabled=True)
| 2.421875 | 2 |
presidio-analyzer/analyzer/field_types/us/phone.py | kant/presidio | 0 | 12771983 | from field_types import field_type, field_regex_pattern
class Phone(field_type.FieldType):
    """US phone-number field type with regex patterns of decreasing
    confidence (strong / medium / weak), sorted strongest-first."""
    name = "PHONE_NUMBER"
    context = ["phone", "number", "telephone", "cell", "mobile", "call"]
    patterns = []
    # Strong pattern: e.g., (425) 882 8080, 425 882-8080, 425.882.8080
    pattern = field_regex_pattern.RegexFieldPattern()
    # Fixed: the second alternative was missing its backslash ('d{3}' matched
    # the literal text 'ddd' instead of three digits).
    pattern.regex = r'(\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]\d{3}[-\.\s]\d{4})'  # noqa: E501
    pattern.name = 'Phone (strong)'
    pattern.strength = 0.7
    patterns.append(pattern)
    # Medium pattern: e.g., 425 8828080
    pattern = field_regex_pattern.RegexFieldPattern()
    pattern.regex = r'\b(\d{3}[-\.\s]\d{3}[-\.\s]??\d{4})\b'
    pattern.name = 'Phone (medium)'
    pattern.strength = 0.5
    patterns.append(pattern)
    # Weak pattern: e.g., 4258828080 (any bare 10-digit run)
    pattern = field_regex_pattern.RegexFieldPattern()
    pattern.regex = r'(\b\d{10}\b)'
    pattern.name = 'Phone (weak)'
    pattern.strength = 0.05
    patterns.append(pattern)
    # Strongest patterns are tried first.
    patterns.sort(key=lambda p: p.strength, reverse=True)
| 2.734375 | 3 |
tests/test_optim_loss.py | naokishibuya/simple_transformer | 5 | 12771984 | <reponame>naokishibuya/simple_transformer<gh_stars>1-10
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from simple_transformer.optim import TranslationLoss
from simple_transformer.data import PAD_IDX, EOS_IDX
def loss_calc(logits: Tensor, labels: Tensor, label_smoothing: float=0.0) -> float:
    """Reference implementation: label-smoothed NLL ignoring PAD positions."""
    vocab_size = logits.shape[-1]
    flat_logits = logits.reshape(-1, vocab_size)
    flat_labels = labels.reshape(-1)
    log_probs = F.log_softmax(flat_logits, dim=-1)
    # Negative log-likelihood with PAD positions excluded.
    nll = F.nll_loss(log_probs, flat_labels, ignore_index=PAD_IDX)
    # Mean of the negated summed log-softmax, zeroing PAD positions.
    smooth = -log_probs.sum(1)
    smooth[flat_labels == PAD_IDX] = 0
    smooth = smooth.mean()
    # With label_smoothing == 0 this reduces to CrossEntropy ignoring PAD_IDX.
    return (1 - label_smoothing) * nll + (label_smoothing / vocab_size) * smooth
def test_loss_no_PAD() -> None:
    """TranslationLoss without padding matches the reference loss_calc."""
    batch_size, seq_length, vocab_size = 3, 5, 10
    logits = torch.rand(batch_size, seq_length, vocab_size)
    labels = torch.Tensor([
        [4, 9, 7, 5, EOS_IDX],
        [5, 6, 8, 7, EOS_IDX],
        [6, 7, 4, 6, EOS_IDX],
    ]).long()
    criterion = TranslationLoss()
    assert np.allclose(criterion(logits, labels), loss_calc(logits, labels))
def test_loss_no_PAD_with_label_smoothing() -> None:
    """Label smoothing without padding matches the reference loss_calc."""
    batch_size, seq_length, vocab_size = 3, 5, 10
    smoothing = 0.1
    logits = torch.rand(batch_size, seq_length, vocab_size)
    labels = torch.Tensor([
        [4, 9, 7, 5, EOS_IDX],
        [5, 6, 8, 7, EOS_IDX],
        [6, 7, 4, 6, EOS_IDX],
    ]).long()
    criterion = TranslationLoss(smoothing)
    assert np.allclose(criterion(logits, labels), loss_calc(logits, labels, smoothing))
def test_loss_with_PAD() -> None:
    """PAD positions are excluded identically by TranslationLoss and loss_calc."""
    batch_size, seq_length, vocab_size = 4, 5, 10
    logits = torch.rand(batch_size, seq_length, vocab_size)
    labels = torch.Tensor([
        [4, 9, 5, EOS_IDX, PAD_IDX],
        [4, 9, 7, EOS_IDX, PAD_IDX],
        [5, 6, 8, 7, EOS_IDX],
        [6, 7, EOS_IDX, PAD_IDX, PAD_IDX],
    ]).long()
    criterion = TranslationLoss()
    assert np.allclose(criterion(logits, labels), loss_calc(logits, labels))
def test_loss_with_PAD_with_label_smoothing() -> None:
    """PAD exclusion + label smoothing matches the reference loss_calc."""
    batch_size, seq_length, vocab_size = 4, 5, 10
    smoothing = 0.1
    logits = torch.rand(batch_size, seq_length, vocab_size)
    labels = torch.Tensor([
        [4, 9, 5, EOS_IDX, PAD_IDX],
        [4, 9, 7, EOS_IDX, PAD_IDX],
        [5, 6, 8, 7, EOS_IDX],
        [6, 7, EOS_IDX, PAD_IDX, PAD_IDX],
    ]).long()
    criterion = TranslationLoss(smoothing)
    assert np.allclose(criterion(logits, labels), loss_calc(logits, labels, smoothing))
scripts/metaphlan2species.py | CGATOxford/proj029 | 3 | 12771985 | <filename>scripts/metaphlan2species.py
'''
metaphlan2species.py - template for CGAT scripts
====================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
create list of species from output of metaphlan2table.py --relab
Usage
-----
Example::
python metaphlan2species.py
Type::
python metaphlan2species.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import re
import optparse
import CGAT.Experiment as E
def main(argv=None):
    """script main.

    Reads metaphlan2table --relab output on stdin and writes the name of
    every species-level row to stdout. Parses command line options in
    sys.argv, unless *argv* is given.
    """
    if argv is None:  # fixed: identity check, not '== None'
        argv = sys.argv
    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])
    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)
    # skip the header line of the metaphlan2table output
    header = options.stdin.readline()
    for line in options.stdin.readlines():
        data = line[:-1].split("\t")
        # columns: taxonomic level, name, relative abundance (unused)
        taxa, species = data[0], data[1]
        if taxa == "species":
            options.stdout.write("%s\n" % species)
    # write footer and output benchmark information.
    E.Stop()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| 2.6875 | 3 |
TensorFlow_Federated/models/convolutional_NN.py | BaptisteTomasin/Federated-Learning-Frameworks | 0 | 12771986 | <reponame>BaptisteTomasin/Federated-Learning-Frameworks<gh_stars>0
import tensorflow as tf
import numpy as np
class Convolutional_NN(object):
    """Small Keras CNN classifier with sparse-categorical loss and metrics."""

    def __init__(self):
        pass

    def lr_network(self, input_shape, label_shape):
        """
        Create loss function and the list of metrics
        Arguments:
            input_shape: [list / tuple] input shape
            label_shape: [list / tuple] output shape
        """
        self.label_shape = label_shape
        self.input_shape = input_shape
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
        self.metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]

    def build_model(self):
        '''
        Return a CNN model (Conv -> MaxPool -> Flatten -> Dense -> softmax)
        '''
        layers = [tf.keras.Input(shape=self.input_shape)]
        if len(self.input_shape) == 1:
            # Flat input: fold the 784-vector back into a square grayscale image.
            side = int(np.sqrt(self.input_shape[-1]))
            layers.append(tf.keras.layers.Reshape((side, side, 1), input_shape=(784,)))
        layers.extend([
            tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
            tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(128),
            tf.keras.layers.Dense(self.label_shape[-1], activation="softmax"),
        ])
        return tf.keras.models.Sequential(layers)
upsilont/features/__init__.py | dwkim78/UPSILoN-T | 3 | 12771987 | from upsilont.features.variability_features import VariabilityFeatures
from upsilont.features.variability_features import get_train_feature_name
from upsilont.features.variability_features import get_all_feature_name
| 1.171875 | 1 |
presto/migrations/0035_auto_20180327_0753.py | pwgbots/presto | 0 | 12771988 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-27 05:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django (makemigrations); do not hand-edit the
    operations. Adds the AppraisalOption/Item/ItemAppraisal models, course
    suffix and estafette visibility fields, and editor m2m/last-editor
    relations on the template models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('presto', '0034_auto_20180324_1319'),
    ]

    operations = [
        migrations.CreateModel(
            name='AppraisalOption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=128)),
                ('value', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('instruction', models.TextField(blank=True, default='')),
                ('word_count', models.IntegerField(default=0)),
                ('appraisal', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ItemAppraisal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('style', models.CharField(max_length=64)),
                ('options', models.ManyToManyField(to='presto.AppraisalOption')),
            ],
        ),
        migrations.AddField(
            model_name='course',
            name='suffix',
            field=models.CharField(default='', max_length=10),
        ),
        migrations.AddField(
            model_name='courseestafette',
            name='is_hidden',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='estafettetemplate',
            name='editors',
            field=models.ManyToManyField(related_name='est_editors', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='questionnairetemplate',
            name='editors',
            field=models.ManyToManyField(related_name='evt_editors', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='estafettetemplate',
            name='last_editor',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='est_last_editor', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='questionnairetemplate',
            name='last_editor',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='evt_last_editor', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='estafetteleg',
            name='review_items',
            field=models.ManyToManyField(to='presto.Item'),
        ),
    ]
| 1.71875 | 2 |
dentalapp-backend/dentalapp/userauth/api.py | PavelescuVictor/DentalApplicationReact | 1 | 12771989 | <reponame>PavelescuVictor/DentalApplicationReact<gh_stars>1-10
from django.contrib.auth.signals import user_logged_in, user_logged_out
from rest_framework import generics, permissions, status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny
from knox.models import AuthToken
from knox.auth import TokenAuthentication
from knox.views import LoginView, LogoutView, LogoutAllView
from .serializers import UserSerializer, UserProfileSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer
from .models import User, UserProfile
class RegisterApi(generics.GenericAPIView):
    """
    @Description:
        An Endpoint for registering a new user account.
    """
    serializer_class = RegisterSerializer
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        registration = self.get_serializer(data=request.data)
        registration.is_valid(raise_exception=True)
        new_user = registration.save()
        payload = {
            "user": UserSerializer(new_user, context=self.get_serializer_context()).data,
            # AuthToken.objects.create returns (instance, token); expose the token.
            "token": AuthToken.objects.create(new_user)[1],
        }
        return Response(payload)
class LoginApi(generics.GenericAPIView, LoginView):
    """
    @Description:
        An Endpoint for logging in.
    """
    serializer_class = LoginSerializer
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        credentials = self.get_serializer(data=request.data)
        credentials.is_valid(raise_exception=True)
        account = credentials.validated_data
        instance, token = AuthToken.objects.create(account)
        payload = {
            "user": UserSerializer(account, context=self.get_serializer_context()).data,
            "token": token,
            "tokenExpiry": instance.expiry,
        }
        return Response(payload)
class LogoutApi(APIView):
    """
    @Description:
        An Endpoint for logging out of the current session only.
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def post(self, request, format=None):
        # Delete only the token that authenticated this request.
        request._auth.delete()
        user_logged_out.send(
            sender=request.user.__class__, request=request, user=request.user)
        return Response(None, status=status.HTTP_204_NO_CONTENT)
class LogoutAllApi(APIView):
    '''
    @Description:
        An Endpoint for logging out from all sessions.
        Logs the user out of every session, i.e. deletes all auth tokens
        belonging to the user.
    '''
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def post(self, request, format=None):
        # Wipe every knox token for this account.
        request.user.auth_token_set.all().delete()
        user_logged_out.send(
            sender=request.user.__class__, request=request, user=request.user)
        return Response(None, status=status.HTTP_204_NO_CONTENT)
class RenewTokenApi(generics.GenericAPIView):
    """Issue a fresh auth token for a user.

    NOTE(review): SECURITY -- this creates a token for whatever ``id`` the
    client sends in the request body, with no check that it matches
    ``request.user``. Any authenticated user can mint tokens for any other
    account. Confirm whether this is intentional; it almost certainly
    should operate on ``request.user`` instead.
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def post(self, request, *args, **kwargs):
        # Client-supplied user id -- see the security note above.
        user = User.objects.get(id=request.data['id'])
        tokenInstance, token = AuthToken.objects.create(user)
        return Response({
            "user": UserSerializer(user, context=self.get_serializer_context()).data,
            "token": token,
            "tokenExpiry": tokenInstance.expiry,
        })
class ChangePasswordApi(generics.UpdateAPIView):
    """
    @Description:
        An Endpoint for changing password.
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    serializer_class = ChangePasswordSerializer
    model = User
    def get_object(self, queryset=None):
        # The password being changed is always the requester's own.
        obj = self.request.user
        return obj
    def update(self, request, *args, **kwargs):
        self.object = self.get_object()
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            # Check old password
            if not self.object.check_password(serializer.data.get("oldPassword")):
                return Response({"oldPassword": ["Wrong Password."]}, status=status.HTTP_400_BAD_REQUEST)
            # Reject mismatched confirmation before touching the account.
            if serializer.data.get("newPassword") != serializer.data.get("confirmNewPassword"):
                return Response({"message": ["New password and confirm new password are not the same."]}, status=status.HTTP_400_BAD_REQUEST)
            # Setting the new password. set_password() also hashes the password
            self.object.set_password(serializer.data.get("newPassword"))
            self.object.save()
            # NOTE(review): 'succesfully' typo below is a client-visible
            # string; fixing it needs client-side coordination.
            response = {
                'status': 'success',
                'code': status.HTTP_200_OK,
                'message': 'Password updated succesfully',
                'data': []
            }
            return Response(response)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserListApi(APIView):
    """List every user account."""
    serializer_class = UserSerializer
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        queryset = User.objects.all()
        return Response(UserSerializer(queryset, many=True).data)
class UserProfileListApi(APIView):
    """List every user profile."""
    serializer_class = UserProfileSerializer
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        queryset = UserProfile.objects.all()
        return Response(UserProfileSerializer(queryset, many=True).data)
| 2.03125 | 2 |
visualSHARK/models.py | benjaminLedel/visualSHARK_topicShark | 0 | 12771990 | <reponame>benjaminLedel/visualSHARK_topicShark<filename>visualSHARK/models.py
##!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import json
import uuid
from django.conf import settings
from django.db.models.signals import post_save
from django.db import models, transaction
from django.dispatch import receiver
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from mongoengine import connect, Document, StringField, DictField, FileField, BooleanField
from pycoshark.mongomodels import Project, VCSSystem, Commit, Tag, File, CodeEntityState, FileAction, People, IssueSystem, IssueComment, Issue, Message, MailingList, Event, MynbouData, TravisBuild
from visualSHARK.util.rmq import send_to_queue, send_to_user
class TopicModel(Document):
    """Mongoengine document holding a trained topic model and its serialized
    artefacts for one project (the lda_* fields' names match gensim LdaModel
    save artefacts -- presumably a gensim model; confirm against the
    training code)."""
    meta = {
        'indexes': [
            '#project_id'
        ],
        'shard_key': ('project_id', ),
    }
    # Whether this is the project's default topic model.
    default = BooleanField()
    name = StringField()
    project_id = StringField()
    # Configuration/parameters used to build this model.
    config = DictField()
    # GridFS blobs for the model's serialized parts.
    view = FileField()
    dic = FileField()
    corpus = FileField()
    corpus_index = FileField()
    lda = FileField()
    lda_id2word = FileField()
    lda_state = FileField()
    lda_expElogbeta = FileField()
# this is just because we do not have access to reindex the database and mongodb does not check for indexes but just creates them and if they are there does nothing
# nevertheless this requires the right to index stuff which we not have
def remove_index(cls):
    """Return a deep copy of ``cls._meta`` with index definitions stripped.

    We do not have the DB rights to (re)create indexes, and mongodb would
    try to build any declared ones; clearing them here prevents that while
    leaving the original class metadata untouched.

    Fixed: the original raised KeyError when 'indexes' was present but
    'index_specs' was not; pop() tolerates its absence.
    """
    tmp = copy.deepcopy(cls._meta)
    if 'indexes' in tmp.keys():
        del tmp['indexes']
        tmp.pop('index_specs', None)
        tmp['index_specs'] = []
    return tmp
if not settings.TESTING:
    # Build mongoengine connection kwargs from Django settings;
    # connect=False defers the actual socket connection until first use.
    con = {'host': settings.DATABASES['mongodb']['HOST'],
           'port': settings.DATABASES['mongodb']['PORT'],
           'db': settings.DATABASES['mongodb']['NAME'],
           'username': settings.DATABASES['mongodb']['USER'],
           'password': settings.DATABASES['mongodb']['PASSWORD'],
           'authentication_source': settings.DATABASES['mongodb']['AUTHENTICATION_DB'],
           'connect': False}
    connect(**con)
    # these are the mongodb models which we directly use in the visualSHARK
    # Strip index metadata from each (see remove_index) so mongoengine does
    # not attempt index creation we lack the rights for.
    Project._meta = remove_index(Project)
    VCSSystem._meta = remove_index(VCSSystem)
    Commit._meta = remove_index(Commit)
    Tag._meta = remove_index(Tag)
    File._meta = remove_index(File)
    FileAction._meta = remove_index(FileAction)
    People._meta = remove_index(People)
    CodeEntityState._meta = remove_index(CodeEntityState)
    IssueSystem._meta = remove_index(IssueSystem)
    Issue._meta = remove_index(Issue)
    Message._meta = remove_index(Message)
    MailingList._meta = remove_index(MailingList)
    Event._meta = remove_index(Event)
    TravisBuild._meta = remove_index(TravisBuild)
    MynbouData._meta = remove_index(MynbouData)
if settings.TESTING:
    # Tests run against an in-memory mock database instead.
    connect('test', host='mongomock://localhost')
class UserProfile(models.Model):
    """For now the user profile only holds the channel.
    This can be extended to hold more information in the future, e.g.,
    customizable dashboards.
    """
    # One profile per user; reachable as `user.profile`.
    user = models.OneToOneField(User, related_name='profile')
    # Random UUID identifying the user's message channel.
    channel = models.UUIDField(default=uuid.uuid4, editable=False)
    def __str__(self):
        return self.user.username
# this is just for the token authentication for the rest-api
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: give every newly created user an API token and a
    profile (skipped on updates -- only fires when `created` is True)."""
    if created:
        # create Token for the rest framework
        Token.objects.create(user=instance)
        # create user profile for channel
        UserProfile.objects.create(user=instance)
class ProjectStats(models.Model):
    """Contains project stats per day.

    One row per project per day; stats_date is auto-set on save.
    """

    project_name = models.CharField(max_length=255)
    stats_date = models.DateField(auto_now=True)
    # Cumulative counts of the mined artifacts for this project.
    number_commits = models.IntegerField(default=0)
    number_issues = models.IntegerField(default=0)
    number_files = models.IntegerField(default=0)
    number_messages = models.IntegerField(default=0)
    number_people = models.IntegerField(default=0)

    def __str__(self):
        return self.project_name
class CommitGraph(models.Model):
    """Contains the raw data (pickle) and pre-computed graph nodes and their layout for the CommitGraph View."""

    # References a mongoengine VCSSystem by id (stored as plain text, not a FK).
    vcs_system_id = models.CharField(max_length=255)
    title = models.CharField(max_length=255)
    # Pre-computed layout / raw pickle, stored under COMPUTED_FILES.
    directed_graph = models.FileField(blank=True, null=True, upload_to=settings.COMPUTED_FILES)
    directed_pickle = models.FileField(blank=True, null=True, upload_to=settings.COMPUTED_FILES)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now=True)

    def __str__(self):
        return self.title
class CommitLabelField(models.Model):
    """Contains currently available commit labels from labelSHARK.

    This needs to be synced with the fetch_commit_label_approaches command.
    """

    approach = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    description = models.TextField()

    @property
    def label(self):
        # Human-readable "approach: name" identifier for UI lists.
        return '{}: {}'.format(self.approach, self.name)
class VSJobType(models.Model):
    """Possible types of jobs."""

    # Machine-readable identifier used in queue messages.
    ident = models.CharField(max_length=255)
    # Human-readable name shown to the user.
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.ident
class VSJob(models.Model):
    """A Job with configuration and results."""

    created_at = models.DateTimeField(auto_now_add=True, blank=False)
    executed_at = models.DateTimeField(blank=True, null=True)
    # NOTE(review): no on_delete arguments -- implies Django < 2.0 defaults
    # (CASCADE); confirm before upgrading Django.
    requested_by = models.ForeignKey(settings.AUTH_USER_MODEL)
    job_type = models.ForeignKey(VSJobType)
    data = models.TextField()  # for now jsonized data (username, etc.)
    result = models.TextField(blank=True, null=True)
    error_count = models.IntegerField(default=0)  # number of tries for execution (error count)

    def __str__(self):
        return self.job_type.ident

    def requeue(self):
        # Re-submit this job to the worker queue and notify the requesting
        # user over their websocket channel.
        send_to_queue(settings.QUEUE['job_queue'], {'job_type': self.job_type.ident, 'data': json.loads(self.data), 'job_id': self.pk})
        send_to_user(self.requested_by.profile.channel, {'msg': '{} job re-queued'.format(self.job_type.name), 'job_type': self.job_type.ident, 'created': True})

    @staticmethod
    @receiver(post_save, sender='visualSHARK.VSJob')
    def job_created(sender, instance, created, **kwargs):
        """Trigger the submission to the worker queue."""
        if created:
            # changes are saved but not committed to the database before the request finishes, so we hook on_commit with a callback
            def callme():
                send_to_queue(settings.QUEUE['job_queue'], {'job_type': instance.job_type.ident, 'data': json.loads(instance.data), 'job_id': instance.pk})
                send_to_user(instance.requested_by.profile.channel, {'msg': '{} job queued'.format(instance.job_type.name), 'job_type': instance.job_type.ident, 'created': True})
            transaction.on_commit(callme)

        # on save of the results we can also pass the result to the user
        if not created:
            send_to_user(instance.requested_by.profile.channel, {'msg': '{} job finished'.format(instance.job_type.name), 'job_type': instance.job_type.ident, 'created': False, 'success': instance.error_count == 0})
| 1.976563 | 2 |
src/ts_analysis/ts_correlation.py | daudprobst/master_thesis | 0 | 12771991 | <gh_stars>0
import pandas as pd
import numpy as np
from src.ts_analysis.timeseries import Timeseries, load_ts_from_csv
from src.utils.output_folders import DATA_HYPE_PHASE_TS_FOLDER
import scipy.stats as stats
def ts_pearson_correlation(A: "Timeseries", B: "Timeseries", lag: int = 0):
    """Pearson correlation between two equal-length time series at a lag.

    A positive ``lag`` pairs ``A[t]`` with ``B[t + lag]`` (B is shifted back);
    a negative ``lag`` shifts A instead. The overlapping portions of the two
    series are compared, so ``abs(lag)`` points are discarded.

    Args:
        A: First time series (must expose ``y`` and ``__len__``).
        B: Second time series, same length as ``A``.
        lag: Integer shift applied between the series (default 0).

    Returns:
        The ``scipy.stats.pearsonr`` result (correlation coefficient, p-value).

    Raises:
        ValueError: If the series differ in length, or the lag leaves fewer
            than two overlapping observations.
    """
    n = len(A)
    if n != len(B):
        raise ValueError(
            "The two time series must be of the same length to test for correlation."
        )
    # pearsonr needs at least two paired points after discarding the shift.
    if n - abs(lag) < 2:
        raise ValueError(
            "Not enough overlapping observations for the requested lag."
        )

    if lag >= 0:
        # Drop the last `lag` points of A and the first `lag` points of B.
        series_a = list(A.y)[: n - lag]
        series_b = list(B.y)[lag:]
    else:
        # Negative shift is equal to shifting the other series:
        # drop the first `-lag` points of A and the last `-lag` points of B.
        series_a = list(A.y)[-lag:]
        series_b = list(B.y)[: n + lag]

    return stats.pearsonr(series_a, series_b)
def significance_mark(p: float) -> str:
    """Map a p-value onto the conventional significance stars.

    Returns "***" for p < 0.001, "**" for p < 0.01, "*" for p < 0.05,
    and the empty string otherwise.
    """
    # Thresholds checked in ascending order; the first match wins.
    for threshold, mark in ((0.001, "***"), (0.01, "**"), (0.05, "*")):
        if p < threshold:
            return mark
    return ""
if __name__ == "__main__":
    # Load the normalized aggression and tweet-quantity series per firestorm.
    aggression_ts_list = load_ts_from_csv(
        DATA_HYPE_PHASE_TS_FOLDER + "aggression_ts.csv", normalize=True
    )
    quants_ts_list = load_ts_from_csv(
        DATA_HYPE_PHASE_TS_FOLDER + "quantities_ts.csv", normalize=True
    )

    zipped_ts = list(zip(aggression_ts_list, quants_ts_list))
    for (firestorm_name, aggression_ts), (firestorm_name2, quants_ts) in zipped_ts:
        # The two CSVs must list the same firestorms in the same order.
        if firestorm_name != firestorm_name2:
            # BUGFIX: the message was missing the f-prefix, so the names were
            # never interpolated.
            raise ValueError(
                f"Firestorm names not identical: {firestorm_name} and {firestorm_name2}"
            )
        print("==========")
        print(firestorm_name)

        # Pearson correlation for lags -3..3 between aggression and quantity.
        lagged_corrs = [
            ts_pearson_correlation(aggression_ts, quants_ts, lag) for lag in range(-3, 4)
        ]
        for i, (r, p) in enumerate(lagged_corrs, start=-3):
            # BUGFIX: the POS/NEG label must follow the sign of the
            # correlation coefficient r; p-values are always non-negative.
            print(
                f'{i}: p: {np.round(p,2)}, r: {np.round(r,4)} [{("POS" if r >= 0 else "NEG")}]{significance_mark(p)}'
            )
| 2.625 | 3 |
tox/__main__.py | doismellburning/tox | 1 | 12771992 | from tox._cmdline import main
main()
| 1.15625 | 1 |
digitalarchive/models.py | epikulski/digitalarchive | 2 | 12771993 | """
digitalarchive.models
The module provides documented models and an ORM for interacting with the DA API.
"""
from __future__ import annotations
# Standard Library
import dataclasses
import json
import logging
import copy
from datetime import datetime, date
from typing import List, Any, Optional, Union, Dict, ClassVar
from abc import ABC
# 3rd Party Libraries
import pydantic
# Application Modules
from pydantic import validator
import digitalarchive.matching as matching
import digitalarchive.api as api
import digitalarchive.exceptions as exceptions
class Resource(pydantic.BaseModel, ABC):
    """
    Abstract parent for all DigitalArchive objects.

    We add custom hash and eq fields so that hydrated and unhydrated records are equal.
    """

    # Unique identifier assigned by the DA API.
    id: str

    def __hash__(self):
        # Hash on the id only so a stub and its hydrated version collide
        # in sets and dict keys.
        return hash(self.id)

    def __eq__(self, other):
        # Records are equal iff they are the same model class and share an
        # id; hydration state is deliberately ignored.
        if not self.__class__ == other.__class__:
            return NotImplemented
        else:
            return self.id == other.id
class MatchingMixin:
    """Abstract parent for Resources that can be searched against."""

    @classmethod
    def match(cls, **kwargs) -> matching.ResourceMatcher:
        """Find a resource using passed keyword arguments.

        Note:
            If called without arguments, returns all records in the DA.
        """
        # Check that no invalid search terms were passed.
        for key in kwargs:
            if key not in cls.__fields__.keys():
                raise exceptions.InvalidSearchFieldError

        # Prepare the "term" search field.
        # If we've got both a name and a value, join them.
        if kwargs.get("name") and kwargs.get("value"):
            kwargs["term"] = " ".join([kwargs.pop("name"), kwargs.pop("value")])

        # Otherwise, treat the one that exists as the term.
        elif kwargs.get("name"):
            kwargs["term"] = kwargs.pop("name")
        elif kwargs.get("value"):
            kwargs["term"] = kwargs.pop("value")

        return matching.ResourceMatcher(cls, **kwargs)
class HydrateMixin:
    """Mixin for resources that can be individually accessed and hydrated."""

    def pull(self):
        """Update the resource using data from the DA API."""
        data = api.get(endpoint=self.endpoint, resource_id=self.id)
        self.__init__(**data)

    def hydrate(self):
        """
        Populate all unhydrated fields of a resource.

        Fields that were already set on the stub but are missing from the
        pulled record are preserved by merging before re-initialization.
        """
        # Preserve unhydrated fields.
        unhydrated_fields = copy.copy(self.__dict__)

        # Hydrate
        self.pull()
        hydrated_fields = vars(self)

        # Merge fields: keep a stub value whenever the API returned None.
        for key, value in unhydrated_fields.items():
            if (
                hydrated_fields.get(key) is None
                and unhydrated_fields.get(key) is not None
            ):
                hydrated_fields[key] = value

        # Re-initialize the object.
        self.__init__(**hydrated_fields)
class Subject(Resource, MatchingMixin, HydrateMixin):
    """
    A historical topic to which documents can be related.

    Attributes:
        id (str): The ID of the record.
        name (str): The name of the subject.
        value (str): An alias for :attr:`~digitalarchive.models.Subject.name`.
        uri (str): The URI for the Subject in the API.
    """

    name: str

    # Optional fields
    value: Optional[str] = None
    uri: Optional[str] = None

    # Private fields
    # API endpoint segment used by pull()/match().
    endpoint: ClassVar[str] = "subject"
class Language(Resource):
    """
    The original language of a resource.

    Attributes:
        id (str): An ISO 639-2/B language code.
        name (str): The ISO language name for the language.
    """

    # Stubs may carry only the ISO code in `id`, so the name is optional.
    name: Optional[str] = None
class Asset(Resource, ABC, HydrateMixin):
    """
    Abstract parent for Translations, Transcriptions, and MediaFiles.

    Note:
        We don't define raw, html, or pdf here because they are not present on
        the stub version of Assets.
    """

    # pylint: disable=too-many-instance-attributes
    filename: str
    content_type: str
    extension: str
    asset_id: str
    source_created_at: str
    source_updated_at: str
    url: Optional[str] = None
    raw: Optional[str] = None
    pdf: Optional[str] = None
    html: Optional[str] = None

    def hydrate(self):
        """Download the asset content and populate the raw/html/pdf fields.

        Raises:
            exceptions.APIServerError: If the content server does not
                return HTTP 200.
        """
        response = api.SESSION.get(
            f"https://digitalarchive.wilsoncenter.org/{self.url}"
        )

        if response.status_code == 200:
            # Preserve the raw content from the DA in any case.
            self.raw = response.content

            # Add helper attributes for the common filetypes.
            if self.extension == "html":
                self.html = response.text
                self.pdf = None
            elif self.extension == "pdf":
                self.pdf = response.content
                self.html = None
            else:
                logging.warning(
                    "[!] Unknown file format '%s' encountered!", self.extension
                )
        else:
            # BUGFIX: the message was previously passed printf-style
            # (template plus bare args), so the id and status code were
            # never interpolated into the exception text.
            raise exceptions.APIServerError(
                f"[!] Hydrating asset ID#: {self.id} failed with code: "
                f"{response.status_code}"
            )
class Transcript(Asset):
    """A transcript of a document in its original language.

    Attributes:
        id (str): The ID# of the Transcript.
        url (str): A URL for accessing the hydrated Transcript.
        html (str): The html of the Transcript.
        pdf (bytes): A bytes object of the Transcript pdf content.
        raw (str or bytes): The raw content received from the DA API for the Transcript.
        filename (str): The filename of the Transcript on the content server.
        content_type (str): The MIME type of the Transcript file.
        extension (str): The file extension of the Transcript.
        asset_id (str): The Transcript's unique ID on the content server.
        source_created_at (str): ISO 8601 timestamp of the first time the Transcript was published.
        source_updated_at (str): ISO 8601 timestamp of the last time the Transcript was modified.
    """

    url: str
    # Populated by Asset.hydrate() depending on the file extension.
    html: Optional[str] = None
    pdf: Optional[bytes] = None
    raw: Union[str, bytes, None] = None
class Translation(Asset):
    """
    A translation of a Document into another language.

    Attributes:
        id (str): The ID# of the Translation.
        language (:class:`digitalarchive.models.Language`) The language of the Translation.
        html (str): The HTML-formatted text of the Translation.
        pdf (bytes): A bytes object of the Translation pdf content.
        raw (str or bytes): The raw content received from the DA API for the Translation.
        filename (str): The filename of the Translation on the content server.
        content_type (str): The MIME type of the Translation file.
        extension (str): The file extension of the Translation.
        asset_id (str): The Translation's unique ID on the content server.
        source_created_at (str): ISO 8601 timestamp of the first time the Translation was published.
        source_updated_at (str): ISO 8601 timestamp of the last time the Translation was modified.
    """

    url: str
    # May arrive as a raw dict from the API before pydantic coercion.
    language: Union[Language, dict]
    # Populated by Asset.hydrate() depending on the file extension.
    html: Optional[str] = None
    pdf: Optional[bytes] = None
    raw: Optional[str] = None
class MediaFile(Asset):
    """
    An original scan of a Document.

    Attributes:
        id (str): The ID# of the MediaFile.
        pdf (bytes): A bytes object of the MediaFile content.
        raw (str or bytes): The raw content received from the DA API for the MediaFile.
        filename (str): The filename of the MediaFile on the content server.
        content_type (str): The MIME type of the MediaFile file.
        extension (str): The file extension of the MediaFile.
        asset_id (str): The MediaFile's unique ID on the content server.
        source_created_at (str): ISO 8601 timestamp of the first time the MediaFile was published.
        source_updated_at (str): ISO 8601 timestamp of the last time the MediaFile was modified.
    """

    path: str

    def __init__(self, **data):
        # The API exposes the file location as `path`; alias it to `url`
        # so that Asset.hydrate() can download the content uniformly.
        data["url"] = data.get("path")
        super().__init__(**data)
class Contributor(Resource, MatchingMixin, HydrateMixin):
    """
    An individual person or organization that contributed to the creation of the document.

    Contributors are typically the Document's author, but for meeting minutes and similar documents,
    a Contributor may simply be somebody who was in attendance at the meeting.

    Attributes:
        id (str): The ID# of the Contributor.
        name (str): The name of the contributor.
        uri (str): The URI of the contributor metadata on the DA API.
    """

    name: str
    # Alias of `name` as returned by some API responses.
    value: Optional[str] = None
    uri: Optional[str] = None
    # API endpoint segment used by pull()/match().
    endpoint: ClassVar[str] = "contributor"
class Donor(Resource):
    """
    An entity whose resources helped publish or translate a document.

    Attributes:
        id (str): The ID# of the Donor.
        name (str): The name of the Donor.
    """

    name: str
    # API endpoint segment used when the record is pulled.
    endpoint: ClassVar[str] = "donor"
class Coverage(Resource, MatchingMixin, HydrateMixin):
    """
    A geographical area referenced by a Document.

    Attributes:
        id (str): The ID# of the geographic Coverage.
        name (str): The name of geographic coverage area.
        value (str): An alias to :attr:`~digitalarchive.models.Coverage.name`.
        uri (str): URI to the Coverage's metadata on the DA API.
        parent (:class:`~digitalarchive.models.Coverage`): The parent coverage,
            if any
        children: (list of :class:`~digitalarchive.models.Coverage`):
            Subordinate geographical areas, if any.
    """

    name: str
    uri: str
    value: Optional[str] = None
    # Inconsistent endpoint: the API returns either a dict or an empty list
    # for `parent`; the validator below normalizes the empty-list case.
    parent: Union[Coverage, List, None] = None
    children: Optional[List[Coverage]] = None

    # API endpoint segment used by pull()/match().
    endpoint: ClassVar[str] = "coverage"

    @validator("parent")
    def _process_parent(cls, parent):
        # The DA API sends `[]` when there is no parent; map that to None.
        if isinstance(parent, list):
            return None
        return parent
Coverage.update_forward_refs()
class Collection(Resource, MatchingMixin, HydrateMixin):
    """
    A collection of Documents on a single topic

    Attributes:
        name (str): The title of the collection.
        slug (str): A url-friendly name of the collection.
        uri (str): The URI of the record on the DA API.
        parent(:class:`digitalarchive.models.Collection`): A `Collection` containing the `Collection`.
        model (str): A string name of the model used to differentiate `Collection` and `Document` searches in the DA API.
        value (str): A string identical to the `title` field.
        description (str): A 1-2 sentence description of the `Collection`'s content.
        short_description (str): A short description that appears in search views.
        main_src (str): Placeholder
        no_of_documents (str): The count of documents contained in the collection.
        is_inactive (str): Whether the collection is displayed in the collections list.
        source_created_at(:class:`datetime.datetime`): Timestamp of when the Document was first added to the DA.
        source_updated_at(:class:`datetime.datetime`): Timestamp of when the Document was last edited.
        first_published_at(:class:`datetime.datetime`): Timestamp of when the document was first made publically
            accessible.
    """

    # pylint: disable=too-many-instance-attributes

    # Required Fields
    name: str
    slug: str

    # Optional Fields
    uri: Optional[str] = None
    parent: Optional[Collection] = None
    model: Optional[str] = None
    value: Optional[str] = None
    description: Optional[str] = None
    short_description: Optional[str] = None
    main_src: Optional[str] = None
    thumb_src: Optional[str] = None
    no_of_documents: Optional[str] = None
    is_inactive: Optional[str] = None
    source_created_at: Optional[datetime] = None
    source_updated_at: Optional[datetime] = None
    first_published_at: Optional[datetime] = None

    # Internal Fields
    # API endpoint segment used by pull()/match().
    endpoint: ClassVar[str] = "collection"
Collection.update_forward_refs()
class Repository(Resource, MatchingMixin, HydrateMixin):
    """
    The archive or library possessing the original, physical Document.

    Attributes:
        id (str): The ID# of the Repository.
        name (str): The name of the repository
        uri (str): The URI for the Repository's metadata on the Digital Archive API.
        value (str): An alias to :attr:`~digitalarchive.models.Repository.name`
    """

    name: str
    uri: Optional[str] = None
    value: Optional[str] = None
    # API endpoint segment used by pull()/match().
    endpoint: ClassVar[str] = "repository"
class Publisher(Resource):
    """
    An organization involved in the publication of the document.

    Attributes:
        id (str): The ID# of the Publisher.
        name (str): The name of the Publisher.
    """

    name: str
    # Alias of `name` as returned by the API.
    value: str
    # API endpoint segment used when the record is pulled.
    endpoint: ClassVar[str] = "publisher"
class Type(Resource):
    """
    The type of a document (memo, report, etc).

    Attributes:
        id (str): The ID# of the Type.
        name (str): The name of the resource Type.
    """

    name: str
class Right(Resource):
    """
    A copyright notice attached to the Document.

    Attributes:
        id (str): The ID# of the Copyright type.
        name (str): The name of the Copyright type.
        rights (str): A description of the copyright requirements.
    """

    name: str
    rights: str
class Classification(Resource):
    """
    A classification marking applied to the original Document.

    Attributes:
        id (str): The ID# of the Classification type.
        name (str): A description of the Classification type.
    """

    name: str
class Document(Resource, MatchingMixin, HydrateMixin):
    """
    A Document corresponding to a single record page on digitalarchive.wilsoncenter.org.

    Note:
        Avoid constructing Documents directly--use the `match` function to create
        Documents by keyword search or by ID.

    **Attributes present on all Documents:**

    Attributes:
        id (str): The ID# of the record in the DA.
        title (str): The title of a document.
        description (str): A one-sentence description of the document's content.
        doc_date (str): The date of the document's creation in ``YYYYMMDD`` format.
        frontend_doc_date (str): How the date appears when presented on the DA website.
        slug (str): A url-friendly name for the document. Not currently used.
        source_created_at(:class:`datetime.datetime`): Timestamp of when the Document was first added to the DA.
        source_updated_at(:class:`datetime.datetime`): Timestamp of when the Document was last edited.
        first_published_at(:class:`datetime.datetime`): Timestamp of when the document was first made publically
            accessible.

    **Attributes present only on hydrated Documents**

    Attributes:
        source (str): The archive where the document was retrieved from.
        type (:class:`digitalarchive.models.Type`): The type of the document (meeting minutes, report, etc.)
        rights (:obj:`list` of :class:`digitalarchive.models.Right`): A list of entities holding the copyright of the
            Document.
        pdf_generated_at (str): The date that the combined source, translations, and transcriptions PDF was generated.
        date_range_start (:class:`datetime.date`): A rounded-down date used to standardize approximate dates for
            date-range matching.
        sort_string_by_coverage (str): An alphanumeric identifier used by the API to sort search results.
        main_src (str): The original Source that a Document was retrieved from.
        model (str): The model of a record, used to differentiate collections and keywords in searches.
        donors (:obj:`list` of :class:`digitalarchive.models.Donor`): A list of donors whose funding make the
            acquisition or translation of a document possible.
        subjects (:obj:`list` of :class:`digitalarchive.models.Subject`): A list of subjects that the document is
            tagged with.
        transcripts (:obj:`list` of :class:`digitalarchive.models.Transcript`): A list of transcripts of the
            document's contents.
        translations (:obj:`list` of :class:`digitalarchive.models.Translation`): A list of translations of the
            original document.
        media_files (:obj:`list` of :class:`digitalarchive.models.MediaFile`): A list of attached original scans of
            the document.
        languages(:obj:`list` of :class:`digitalarchive.models.Language`): A list of languages contained in the
            document.
        creators (:obj:`list` of :class:`digitalarchive.models.Contributor`): A list of persons who authored the
            document.
        original_coverages (:obj:`list` of :class:`digitalarchive.models.Coverage`): A list of geographic locations
            referenced in the document.
        collections (:obj:`list` of :class:`digitalarchive.models.Collection`): A list of Collections that contain
            this document.
        attachments (:obj:`list` of :class:`digitalarchive.models.Document`): A list of Documents that were attached
            to the Document.
        links (:obj:`list` of :class:`digitalarchive.models.Document`): A list of topically related documents.
        repositories (:obj:`list` of :class:`digitalarchive.models.Repository`): A list of archives/libraries
            containing this document.
        publishers (:obj:`list` of :class:`digitalarchive.models.Publisher`): A list of Publishers that released the
            document.
        classifications (:obj:`list` of :class:`digitalarchive.models.Classification`): A list of security
            classification markings present on the document.
    """

    # pylint: disable=too-many-instance-attributes

    # Required Fields
    uri: str
    title: str
    doc_date: str
    frontend_doc_date: str
    slug: str
    source_created_at: datetime
    source_updated_at: datetime
    first_published_at: datetime

    # Optional Fields
    description: Optional[str] = None
    source: Optional[str] = None
    type: Optional[List[Type]] = None
    rights: Optional[Right] = None
    pdf_generated_at: Optional[datetime] = None
    date_range_start: Optional[date] = None
    sort_string_by_coverage: Optional[str] = None
    main_src: Optional[
        Any
    ] = None  # TODO: Never seen one of these in the wild, so not sure how to handle.
    model: Optional[str] = None

    # Optional Lists:
    donors: Optional[List[Donor]] = None
    subjects: Optional[List[Subject]] = None
    transcripts: Optional[List[Transcript]] = None
    translations: Optional[List[Translation]] = None
    media_files: Optional[List[MediaFile]] = None
    languages: Optional[List[Language]] = None
    contributors: Optional[List[Contributor]] = None
    creators: Optional[List[Contributor]] = None
    original_coverages: Optional[List[Coverage]] = None
    collections: Optional[List[Collection]] = None
    attachments: Optional[List[Any]] = None  # TODO: Should be "document" -- fix.
    links: Optional[List[Any]] = None
    repositories: Optional[List[Repository]] = None
    publishers: Optional[List[Publisher]] = None
    classifications: Optional[List[Classification]] = None

    # Private properties
    endpoint: ClassVar[str] = "record"

    @validator("date_range_start", pre=True)
    def _parse_date_range_start(cls, doc_date) -> date:
        """Transform a DA-style date string (YYYYMMDD) to a Python date."""
        if isinstance(doc_date, date):
            return doc_date
        elif doc_date is None:
            return doc_date

        # Try to parse it as an ISO 8601 string first.
        try:
            return date.fromisoformat(doc_date)
        except ValueError:
            pass

        # Fall back to the DA's compact YYYYMMDD form.
        year = int(doc_date[:4])
        month = int(doc_date[4:6])
        day = int(doc_date[-2:])
        return date(year, month, day)

    @classmethod
    def match(cls, **kwargs) -> matching.ResourceMatcher:
        """
        Search for a Document by keyword, or fetch one by ID.

        Matching on the Document model runs a full-text search using keywords passed via the title and description
        keywords. Results can also be limited by dates or by related records, as described below.

        Note:
            Title and description keywords are not searched for individually. All
            non-date or child record searches are concatenated to single querystring.

        Note:
            Collection and other related record searches use `INNER JOIN` logic when
            passed multiple related resources.

        **Allowed search fields:**

        Args:
            title (:obj:`str`, optional): Title search keywords.
            description (:obj:`str`, optional): Title search keywords.
            start_date (:class:`datetime.date`, optional): Return only Documents with a `doc_date` after the passed
                `start_date`.
            end_date (:class:`datetime.date`, optional): Return only Documents with a `doc_date` before the passed
                `end_date`.
            collections (:obj:`list` of :class:`digitalarchive.models.Collection`, optional): Restrict results to
                Documents contained in all of the passed Collections.
            publishers (:obj:`list` of :class:`digitalarchive.models.Publisher`, optional): Restrict results to
                Documents published by all of the passed Publishers.
            repositories (:obj:`list` of :class:`digitalarchive.models.Repository`, optional) Restrict results to
                Documents contained in all of the passed Repositories.
            coverages (:obj:`list` of :class:`digitalarchive.models.Coverage`, optional) Restrict results to Documents
                relating to all of the passed geographical Coverages.
            subjects (:obj:`list` of :class:`digitalarchive.models.Subject`) Restrict results to Documents tagged with
                all of the passed subjects
            contributors (:obj:`list` of :class:`digitalarchive.models.Contributor`) Restrict results to Documents
                whose authors include all of the passed contributors.
            donors (list(:class:`digitalarchive.models.Donor`)) Restrict results to Documents who were obtained or
                translated with support from all of the passed donors.
            languages (:class:`digitalarchive.models.Language` or str) Restrict results to Documents by language of
                original document. If passing a string, you must pass an ISO 639-2/B language code.
            translation (:class:`digitalarchive.models.Translation`) Restrict results to Documents for which there
                is a translation available in the passed Language.
            theme (:class:`digitalarchive.models.Theme`) Restrict results to Documents belonging to the passed Theme.

        Returns:
            An instance of (:class:`digitalarchive.matching.ResourceMatcher`) containing any records responsive to the
            search.
        """
        # Limit search to only Documents (this excludes Collections from search result).
        kwargs["model"] = "Record"

        # Check that search keywords are valid.
        allowed_search_fields = [
            *cls.__fields__.keys(),
            "start_date",
            "end_date",
            "themes",
            "model",
        ]
        for key in kwargs:
            if key not in allowed_search_fields:
                logging.error(
                    f"[!] {key} is not a valid search term for {cls}. Valid terms: {allowed_search_fields}"
                )
                raise exceptions.InvalidSearchFieldError

        # Process date searches if they are present.
        if any(key in kwargs.keys() for key in ["start_date", "end_date"]):
            kwargs = Document._process_date_searches(kwargs)

        # Process language searches if they are present.
        if "languages" in kwargs.keys():
            kwargs = Document._process_language_search(kwargs)

        # Process any related model searches.
        if any(
            key in kwargs.keys()
            for key in [
                "collections",
                "publishers",
                "repositories",
                "original_coverages",
                "subjects",
                "contributors",
                "donors",
                "languages",
                "translations",
                "themes",
            ]
        ):
            kwargs = Document._process_related_model_searches(kwargs)

        # Prepare the 'q' fulltext search field.
        keywords = []
        for field in ["name", "title", "description", "slug", "q"]:
            if kwargs.get(field) is not None:
                keywords.append(kwargs.pop(field))
        kwargs["q"] = " ".join(keywords)

        # Reformat fields that accept lists. This makes the queries inner joins rather than union all.
        for field in ["donor", "subject", "contributor", "coverage", "collection"]:
            if field in kwargs.keys():
                kwargs[f"{field}[]"] = kwargs.pop(field)

        # Run the match.
        return matching.ResourceMatcher(cls, **kwargs)

    def hydrate(self, recurse: bool = False):
        """
        Downloads the complete version of the Document with metadata for any related objects.

        Args:
            recurse (bool): If true, also hydrate subordinate and related records.
        """
        # Preserve unhydrated fields.
        unhydrated_fields = copy.copy(self.__dict__)

        # Hydrate
        self.pull()
        hydrated_fields = vars(self)

        # Merge fields: keep a stub value whenever the API returned None.
        for key, value in unhydrated_fields.items():
            if (
                hydrated_fields.get(key) is None
                and unhydrated_fields.get(key) is not None
            ):
                hydrated_fields[key] = value

        # Re-initialize the object.
        self.__init__(**hydrated_fields)

        # Hydrate Assets.
        # BUGFIX: guard with `or []` -- each asset list is Optional and may
        # still be None after hydration, which previously raised TypeError.
        if recurse is True:
            for transcript in self.transcripts or []:
                transcript.hydrate()
            for translation in self.translations or []:
                translation.hydrate()
            for media_file in self.media_files or []:
                media_file.hydrate()
            for collection in self.collections or []:
                collection.hydrate()

    @staticmethod
    def _process_date_searches(query: dict) -> dict:
        """Run formatting and type checks against date search fields."""
        date_search_terms = ["start_date", "end_date"]

        # Handle open-ended date searches.
        if "start_date" in query.keys() and "end_date" not in query.keys():
            query["end_date"] = date.today()
        elif "end_date" in query.keys() and "start_date" not in query.keys():
            # Pull earliest record date from API.
            da_date_range = api.get_date_range()
            start_date = Document._parse_date_range_start(da_date_range["begin"])
            query["start_date"] = start_date

        # Transform datetime objects into formatted string and return
        for field in date_search_terms:
            search_date = query[field]
            if isinstance(search_date, date):
                query[
                    field
                ] = f"{search_date.year}{search_date.strftime('%m')}{search_date.strftime('%d')}"
            # If passed a string but its wrong length, raise.
            elif isinstance(search_date, str) and len(search_date) != 8:
                logging.error("[!] Invalid date string! Format is: YYYYMMDD")
                raise exceptions.MalformedDateSearch

        return query

    @staticmethod
    def _process_related_model_searches(query: dict) -> dict:
        """
        Process and format searches by related models.

        We have to re-name the fields from plural to singular to match the DA format.
        """
        multi_terms = {
            "collections": "collection",
            "publishers": "publisher",
            "repositories": "repository",
            "original_coverages": "coverage",
            "subjects": "subject",
            "contributors": "contributor",
            "donors": "donor",
            "languages": "language",
            "translations": "translation",
            "themes": "theme",
        }

        # Rename each term to singular
        for key, value in multi_terms.items():
            if key in query.keys():
                query[value] = query.pop(key)

        # Build list of terms we need to parse
        terms_to_parse = []
        for term in multi_terms.values():
            if term in query.keys():
                terms_to_parse.append(term)

        # transform each term list into a list of IDs
        for term in terms_to_parse:
            query[term] = [str(item.id) for item in query[term]]

        # Special handling for languages, translations, themes.
        # Unlike the above, they only accept singular values
        for term in ["language", "translation", "theme"]:
            if term in query.keys():
                if len(query[term]) > 1:
                    # BUGFIX: dropped the stray f-prefix; the message is a
                    # lazy printf-style logging template, not an f-string.
                    logging.error("[!] Cannot filter for more than one %s", term)
                    raise exceptions.InvalidSearchFieldError

                # Pull out the singleton.
                query[term] = query[term][0]

        # Return the reformatted query.
        return query

    @staticmethod
    def _process_language_search(query: dict) -> dict:
        """
        Process a language search

        Looks up the DA's language ID# for user provided ISO 639-2/B language codes and updates the query.

        Args:
            query (dict): A ResourceMatcher query.

        Returns:
            dict: A query dict with a ISO 639-2/B string replaced with appropriate Language object.
        """
        parsed_languages = []
        for language in query["languages"]:
            # Check if it is already a Language instance, bail on yes.
            if isinstance(language, Language):
                parsed_languages.append(language)
            # If str, treat it as an ISO 639-2/B code.
            elif isinstance(language, str) and len(language) == 3:
                parsed_languages.append(Language(id=language))
            else:
                raise exceptions.MalformedLanguageSearch

        # Replace kwarg with Language objects.
        query["languages"] = parsed_languages
        return query
class Theme(Resource, HydrateMixin):
    """
    A parent container for collections on a single geopolitical topic.

    Note:
        Themes never appear on any record model, but can be passed as a search param to Document.

    Attributes:
        id (str): The ID# of the Theme.
        slug (str): A url-friendly version of the theme title.
        title (str): The name of the Theme.
        description (str): A short description of the Theme contents.
        main_src: A URI for the Theme's banner image on the Digital Archive website.
        has_map (str): A boolean value for whether the Theme has an accompanying map on the Digital Archive website.
        has_timeline (str): A boolean value for whether the Theme has a Timeline on the Digital Archive website.
        featured_collections (list of :class:`~digitalarchive.models.Collection`): A list of related collections.
        dates_with_events (list): A list of date ranges that the Theme has timeline entries for.
    """

    # Required fields
    slug: str

    # Optional Fields
    title: Optional[str] = None
    value: Optional[str] = None
    description: Optional[str] = None
    main_src: Optional[str] = None
    uri: Optional[str] = None
    featured_resources: Optional[List[dict]] = None
    has_map: Optional[str] = None
    has_timeline: Optional[str] = None
    featured_collections: Optional[List[Collection]] = None
    dates_with_events: Optional[list] = None

    # Private fields.
    endpoint: ClassVar[str] = "theme"

    def pull(self):
        """
        Download the complete Theme record from the DA and re-initialize the dataclass.

        Note: The Theme pull method differs from the pull methods of other models as Themes
        use the `slug` attribute as a primary key, rather than the `id` attribute.
        """
        # Themes are keyed by slug on the API, hence resource_id=self.slug.
        data = api.get(endpoint=self.endpoint, resource_id=self.slug)
        # Rebuild this instance in place from the freshly fetched payload.
        self.__init__(**data)
| 2.5625 | 3 |
install/core/python/tank/util/metrics.py | JoanAzpeitia/lp_sg | 0 | 12771994 | <reponame>JoanAzpeitia/lp_sg
# Copyright (c) 2016 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""Classes and functions for logging Toolkit metrics.
Internal Use Only - We provide no guarantees that the classes and functions
here will be backwards compatible. These objects are also subject to change and
are not part of the public Sgtk API.
"""
###############################################################################
# imports
from collections import deque
from threading import Event, Thread, Lock
import urllib2
from copy import deepcopy
from . import constants
# use api json to cover py 2.5
from tank_vendor import shotgun_api3
json = shotgun_api3.shotgun.json
###############################################################################
# Metrics Queue, Dispatcher, and worker thread classes
class MetricsQueueSingleton(object):
    """A FIFO queue for logging metrics.

    This is a singleton class, so any instantiation will return the same
    object instance within the current process.
    """

    MAXIMUM_QUEUE_SIZE = 100
    """
    Maximum queue size (arbitrary value) until oldest queued item is removed.

    This is to prevent a memory leak in case the engine isn't started and
    nothing ever drains the queue.
    """

    # keeps track of the single instance of the class
    __instance = None

    # A set of log identifier strings used to check whether a metric has been
    # logged already (supports the ``log_once`` option).
    __logged_metrics = set()

    def __new__(cls, *args, **kwargs):
        """Ensures only one instance of the metrics queue exists."""

        # create the queue instance if it hasn't been created already
        if not cls.__instance:
            # Note: do NOT forward *args/**kwargs to object.__new__ --
            # it does not accept extra arguments.
            metrics_queue = super(MetricsQueueSingleton, cls).__new__(cls)
            metrics_queue._lock = Lock()

            # The underlying collections.deque instance. A bounded deque
            # silently discards the oldest item once the maximum is reached.
            metrics_queue._queue = deque(maxlen=cls.MAXIMUM_QUEUE_SIZE)

            # remember the instance so that no more are created
            cls.__instance = metrics_queue

        return cls.__instance

    def log(self, metric, log_once=False):
        """
        Add the metric to the queue for dispatching.

        If ``log_once`` is set to ``True``, this will only log the metric if it
        is the first attempt to log it.

        :param EventMetric metric: The metric to log.
        :param bool log_once: ``True`` if this metric should be ignored if it
            has already been logged. ``False`` otherwise. Defaults to ``False``.
        """
        # This assumes that supplied object's classes implement __repr__
        # to return consistent results when building objects with the same
        # internal data.
        metric_identifier = repr(metric)

        if log_once and metric_identifier in self.__logged_metrics:
            # the metric is already logged! nothing to do.
            return

        # ``with`` guarantees the lock is released on any exit path; the
        # previous bare ``except: pass`` silently swallowed every error
        # (hiding bugs), which is no longer needed since deque.append on a
        # bounded deque cannot fail under normal conditions.
        with self._lock:
            self._queue.append(metric)
            # remember that we've logged this one already
            self.__logged_metrics.add(metric_identifier)

    def get_metrics(self, count=None):
        """Return `count` pending metrics, oldest first.

        :param int count: The number of pending metrics to return.

        If `count` is not supplied, or greater than the number of pending
        metrics, returns all pending metrics.
        """
        with self._lock:
            num_pending = len(self._queue)

            # determine how many metrics to retrieve
            if not count or count > num_pending:
                count = num_pending

            # would be nice to be able to pop N from deque. oh well.
            metrics = [self._queue.popleft() for _ in range(count)]

        return metrics
class MetricsDispatcher(object):
    """Manages one or more worker threads that dispatch toolkit metrics.

    After constructing the object, call `start()` to spin up the worker
    threads that dispatch logged metrics; call `stop()` later to halt them.
    """

    def __init__(self, engine, num_workers=1):
        """Initialize the dispatcher object.

        :param engine: An engine instance for logging, and api access
        :param num_workers: The number of worker threads to start.
        """
        self._engine = engine
        self._num_workers = num_workers
        self._workers = []
        self._dispatching = False

    def start(self):
        """Starts up the workers for dispatching logged metrics.

        Calling this on an instance that is already dispatching is a no-op.
        """
        if self._dispatching:
            self._engine.log_debug(
                "Metrics dispatching already started. Doing nothing.")
            return

        # Metrics dispatch requires a valid authenticated user. This keeps
        # certain legacy and edge-case scenarios working, for example the
        # shotgun_cache_actions tank command which runs un-authenticated.
        from ..api import get_authenticated_user
        if not get_authenticated_user():
            return

        # Spin up the requested number of dispatch workers, all consuming
        # from the shared metrics queue.
        for _ in range(self._num_workers):
            worker = MetricsDispatchWorkerThread(self._engine)
            worker.start()
            self._engine.log_debug("Added worker thread: %s" % (worker,))
            self._workers.append(worker)

        self._dispatching = True

    def stop(self):
        """Instructs all worker threads to stop processing metrics."""
        for worker in self.workers:
            worker.halt()

        self._dispatching = False
        self._workers = []

    @property
    def dispatching(self):
        """True if started and dispatching metrics."""
        return self._dispatching

    @property
    def workers(self):
        """A list of worker threads dispatching metrics from the queue."""
        return self._workers
class MetricsDispatchWorkerThread(Thread):
    """
    Worker thread for dispatching metrics to sg logging endpoint.

    Once started this worker will dispatch logged metrics to the shotgun api
    endpoint, if available. The worker retrieves any pending metrics after the
    `DISPATCH_INTERVAL` and sends them all in a single request to sg.

    This worker will also fire the `log_metrics` hooks.
    """

    API_ENDPOINT = "api3/track_metrics/"

    DISPATCH_INTERVAL = 5
    """Worker will wait this long between metrics dispatch attempts."""

    DISPATCH_SHORT_INTERVAL = 0.1
    """
    Delay in seconds between the posting of consecutive batches within a
    dispatcher cycle.
    """

    DISPATCH_BATCH_SIZE = 10
    """
    Worker will dispatch this many metrics at a time, or all if <= 0.
    NOTE: that current SG server code reject batches larger than 10.
    """

    # List of Event names supported by our backend. Anything else is
    # renamed to "Unknown Event" before posting (see _dispatch_to_endpoint).
    SUPPORTED_EVENTS = [
        "Launched Action",
        "Launched Command",
        "Launched Software",
        "Loaded Published File",
        "Opened Workfile",
        "Published",
        "Saved Workfile",
    ]

    def __init__(self, engine):
        """
        Initialize the worker thread.

        :params engine: Engine instance
        """
        super(MetricsDispatchWorkerThread, self).__init__()

        self._engine = engine

        # Set lazily in run() after probing the server capabilities.
        self._endpoint_available = False

        # Make this thread a daemon. This means the process won't wait for this
        # thread to complete before exiting. In most cases, proper engine
        # shutdown should halt the worker correctly. In cases where an engine
        # is improperly shut down, this will prevent the process from hanging.
        self.daemon = True

        # makes possible to halt the thread
        self._halt_event = Event()

    def run(self):
        """Runs a loop to dispatch metrics that have been logged."""

        # First of all, check if metrics dispatch is supported
        # connect to shotgun and probe for server version
        sg_connection = self._engine.shotgun
        # The track_metrics endpoint exists from Shotgun server 7.4.0 on.
        self._endpoint_available = (
            hasattr(sg_connection, "server_caps") and
            sg_connection.server_caps.version and
            sg_connection.server_caps.version >= (7, 4, 0)
        )

        # Run until halted
        while not self._halt_event.isSet():

            # get the next available metric and dispatch it
            try:
                # For each dispatch cycle, we empty the queue to prevent
                # metric events from accumulating in the queue.
                # Because the server has a limit, we dispatch
                # 'DISPATCH_BATCH_SIZE' items at a time.
                while True:
                    metrics = MetricsQueueSingleton().get_metrics(
                        self.DISPATCH_BATCH_SIZE
                    )
                    if metrics:
                        self._dispatch(metrics)
                        # Brief pause so consecutive batches don't hammer
                        # the server back-to-back.
                        self._halt_event.wait(self.DISPATCH_SHORT_INTERVAL)
                    else:
                        break
            except Exception as e:
                # NOTE(review): deliberately swallowed so a dispatch failure
                # never kills the worker thread -- but failures are silent;
                # consider at least a debug log here.
                pass
            finally:
                # wait, checking for halt event before more processing
                self._halt_event.wait(self.DISPATCH_INTERVAL)

    def halt(self):
        """
        Ask the worker thread to halt as soon as possible.
        """
        self._halt_event.set()

    def _dispatch(self, metrics):
        """
        Dispatch the supplied metric to the sg api registration endpoint and fire
        the log_metrics hook.

        :param metrics: A list of :class:`EventMetric` instances.
        """

        # Send the metrics to the Shotgun endpoint when the probed server
        # version supports it; the hook below fires either way.
        if self._endpoint_available:
            self._dispatch_to_endpoint(metrics)

        # Execute the log_metrics core hook
        try:
            self._engine.tank.execute_core_hook_method(
                constants.TANK_LOG_METRICS_HOOK_NAME,
                "log_metrics",
                metrics=[m.data for m in metrics]
            )
        except Exception as e:
            # Catch errors to not kill our thread, log them for debug purpose.
            self._engine.log_debug("%s hook failed with %s" % (
                constants.TANK_LOG_METRICS_HOOK_NAME,
                e,
            ))

    def _dispatch_to_endpoint(self, metrics):
        """
        Dispatch the supplied metric to the sg api registration endpoint.

        :param metrics: A list of :class:`EventMetric` instances.
        """

        # Filter out metrics we don't want to send to the endpoint.
        filtered_metrics_data = []

        for metric in metrics:
            # Only send internal Toolkit events
            if metric.is_internal_event:
                data = metric.data
                if data["event_name"] not in self.SUPPORTED_EVENTS:
                    # Still log the event but change its name so it's easy to
                    # spot all unofficial events which are logged.
                    # Later we might want to simply discard them instead of logging
                    # them as "Unknown"
                    # Forge a new properties dict with the original data under the
                    # "Event Data" key
                    properties = data["event_properties"]
                    new_properties = {
                        "Event Name": data["event_name"],
                        "Event Data": properties,
                        EventMetric.KEY_APP: properties.get(EventMetric.KEY_APP),
                        EventMetric.KEY_APP_VERSION: properties.get(EventMetric.KEY_APP_VERSION),
                        EventMetric.KEY_ENGINE: properties.get(EventMetric.KEY_ENGINE),
                        EventMetric.KEY_ENGINE_VERSION: properties.get(EventMetric.KEY_ENGINE_VERSION),
                        EventMetric.KEY_HOST_APP: properties.get(EventMetric.KEY_HOST_APP),
                        EventMetric.KEY_HOST_APP_VERSION: properties.get(EventMetric.KEY_HOST_APP_VERSION),
                    }
                    data["event_properties"] = new_properties
                    data["event_name"] = "Unknown Event"

                filtered_metrics_data.append(data)

        # Bail out if there is nothing to do
        if not filtered_metrics_data:
            return

        # get this thread's sg connection via tk api
        sg_connection = self._engine.tank.shotgun

        # handle proxy setup by pulling the proxy details from the main
        # shotgun connection
        if sg_connection.config.proxy_handler:
            opener = urllib2.build_opener(sg_connection.config.proxy_handler)
            urllib2.install_opener(opener)

        # build the full endpoint url with the shotgun site url
        url = "%s/%s" % (sg_connection.base_url, self.API_ENDPOINT)

        # construct the payload with the auth args and metrics data
        payload = {
            "auth_args": {
                "session_token": sg_connection.get_session_token()
            },
            "metrics": filtered_metrics_data
        }
        payload_json = json.dumps(payload)

        header = {"Content-Type": "application/json"}
        try:
            request = urllib2.Request(url, payload_json, header)
            urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            # fire and forget, so if there's an error, ignore it.
            pass
###############################################################################
# ToolkitMetric classes and subclasses
class EventMetric(object):
    """
    Convenience class for creating a metric event to be logged on a Shotgun site.

    Use this helper class to create a suitable metric structure that you can
    then pass to the `tank.utils.metrics.EventMetric.log` method.

    The simplest usage is to provide an event group and event name to the
    constructor. The "Toolkit" group is reserved for internal use.

    Optionally, attach your own specific metrics through the `properties`
    parameter, a standard dictionary. The class also defines numerous
    standard property keys (the ``KEY_*`` constants); their use is highly
    recommended:

    ```
    metric = EventMetric.log(
        "Custom Event Group",
        "User Logged In",
        properties={
            EventMetric.KEY_ENGINE: "tk-maya",
            EventMetric.KEY_ENGINE_VERSION: "v0.2.2",
            EventMetric.KEY_HOST_APP: "Maya",
            EventMetric.KEY_HOST_APP_VERSION: "2017",
            EventMetric.KEY_APP: "tk-multi-publish2",
            EventMetric.KEY_APP_VERSION: "v0.2.3",
            "CustomBoolMetric": True,
            "RenderJobsSumitted": 173,
        }
    )
    ```
    """

    # Toolkit internal event group
    GROUP_TOOLKIT = "Toolkit"

    # Event property keys
    KEY_ACTION_TITLE = "Action Title"
    KEY_APP = "App"
    KEY_APP_VERSION = "App Version"
    KEY_COMMAND = "Command"
    KEY_ENGINE = "Engine"
    KEY_ENGINE_VERSION = "Engine Version"
    KEY_ENTITY_TYPE = "Entity Type"
    KEY_HOST_APP = "Host App"
    KEY_HOST_APP_VERSION = "Host App Version"
    KEY_PUBLISH_TYPE = "Publish Type"

    def __init__(self, group, name, properties=None):
        """
        Initialize a metric event with the given name for the given group.

        :param str group: A group or category this metric event falls into.
            Any value can be used to implement your own taxonomy.
            The "Toolkit" group name is reserved for internal use.
        :param str name: A short descriptive event name or performed action,
            e.g. 'Launched Command', 'Opened Workfile', etc..
        :param dict properties: An optional dictionary of extra properties to be
            attached to the metric event.
        """
        # Coerce to str so repr()/data are always string-based.
        self._group = str(group)
        self._name = str(name)
        # Ensure we always hold a valid dict, never None.
        self._properties = properties or {}

    def __repr__(self):
        """Official str representation of the metric: ``group:name``."""
        return self._group + ":" + self._name

    def __str__(self):
        """Readable str representation of the metric."""
        return "{0}: {1}".format(self.__class__, self.data)

    @property
    def data(self):
        """
        :returns: The underlying data this metric represents, as a dictionary.
        """
        # deepcopy so callers cannot mutate our internal properties dict.
        props = deepcopy(self._properties)
        return dict(
            event_group=self._group,
            event_name=self._name,
            event_properties=props,
        )

    @property
    def is_internal_event(self):
        """
        :returns: ``True`` if this event is an internal Toolkit event, ``False`` otherwise.
        """
        return self.GROUP_TOOLKIT == self._group

    @classmethod
    def log(cls, group, name, properties=None, log_once=False):
        """
        Queue a metric event with the given name for the given group on
        the :class:`MetricsQueueSingleton` dispatch queue.

        This only enqueues the metric; a dispatcher must pick it up for it
        to actually be posted.

        :param str group: A group or category this metric event falls into.
            Any values can be used to implement your own taxonomy,
            the "Toolkit" group name is reserved for internal use.
        :param str name: A short descriptive event name or performed action,
            e.g. 'Launched Command', 'Opened Workfile', etc..
        :param dict properties: An optional dictionary of extra properties to be
            attached to the metric event.
        :param bool log_once: ``True`` if this metric should be ignored if it has
            already been logged. Defaults to ``False``.
        """
        metric = cls(group, name, properties)
        MetricsQueueSingleton().log(metric, log_once=log_once)
###############################################################################
#
# metrics logging convenience functions (All deprecated)
#
def log_metric(metric, log_once=False):
    """Deprecated no-op kept for backwards compatibility.

    Please use :meth:`EventMetric.log` instead.
    """
    return None
def log_user_activity_metric(module, action, log_once=False):
    """Deprecated no-op kept for backwards compatibility.

    Please use :meth:`EventMetric.log` instead.
    """
    return None
def log_user_attribute_metric(attr_name, attr_value, log_once=False):
    """Deprecated no-op kept for backwards compatibility.

    Please use :meth:`EventMetric.log` instead.
    """
    return None
| 2.015625 | 2 |
h/auth/policy/bearer_token.py | pombredanne/h | 0 | 12771995 | from pyramid import interfaces
from zope import interface
from h.auth.policy._identity_base import IdentityBasedPolicy
from h.security import Identity
@interface.implementer(interfaces.IAuthenticationPolicy)
class TokenAuthenticationPolicy(IdentityBasedPolicy):
    """
    A bearer token authentication policy.

    Bearer tokens are validated against Token objects in the DB. The token
    comes from `request.auth_token` (see `h.auth.tokens.auth_token`) or, for
    websocket requests, from the `access_token` GET parameter.
    """

    def identity(self, request):
        """
        Get an Identity object for valid credentials.

        Validate the token from the request by matching it to a Token
        record in the DB, then resolve the owning user.

        :param request: Pyramid request to inspect
        :returns: An `Identity` object if the login is authenticated or None
        """
        token_str = self._get_token(request)
        if token_str is not None:
            token = request.find_service(name="auth_token").validate(token_str)
            if token is not None:
                user = request.find_service(name="user").fetch(token.userid)
                if user is not None:
                    return Identity(user=user)
        # Missing token, invalid token, or unknown user: not authenticated.
        return None

    def _get_token(self, request):
        """Extract the raw token string from the request, if any."""
        if self._is_ws_request(request):
            # Websocket requests carry the token as a GET parameter.
            ws_token = request.GET.get("access_token", None)
            if ws_token:
                return ws_token
        return getattr(request, "auth_token", None)

    @staticmethod
    def _is_ws_request(request):
        # The websocket endpoint is mounted at a fixed path.
        return request.path == "/ws"
| 2.515625 | 3 |
CodeWars/2016/MultiplesOf3and5-6k.py | JLJTECH/TutorialTesting | 0 | 12771996 | #Return the sum of all the multiples of 3 or 5 below the number passed in.
def solution(number):
    """Return the sum of all multiples of 3 or 5 below ``number``."""
    total = 0
    for candidate in range(number):
        # A value divisible by both 3 and 5 is still counted only once.
        if candidate % 3 == 0 or candidate % 5 == 0:
            total += candidate
    return total
#Alternate Solution
def solution(number):
    """Return the sum of all multiples of 3 or 5 below ``number``."""
    def _is_multiple(value):
        return value % 3 == 0 or value % 5 == 0

    return sum(filter(_is_multiple, range(number)))
analysis/plot_dat_mod.py | noahfranz13/BL-TESSsearch | 0 | 12771997 | import os, glob, sys
from turbo_seti.find_event.plot_dat import plot_dat
from turbo_seti import find_event as find
import numpy as np
def main():
    """Plot 2000 Hz-wide turboSETI dat windows covering the full hit range.

    Scans ``--dir`` for ``.dat`` files, determines the overall hit frequency
    range (or takes it from ``--minHit``/``--maxHit``), then renders one
    ``plot_dat`` window per 2000 Hz step into ``<dir>/bautista-analysis/``.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', default=os.getcwd())
    parser.add_argument('--minHit', type=float, default=None)
    parser.add_argument('--maxHit', type=float, default=None)
    args = parser.parse_args()

    path = args.dir

    # BUG FIX: use os.path.join instead of string concatenation. The old
    # ``path + "*.dat"`` produced patterns like "/home/user*.dat" whenever
    # --dir lacked a trailing separator (the default os.getcwd() never has
    # one), so no dat files were ever found.
    dat_files = glob.glob(os.path.join(path, "*.dat"))

    min_hit = 1e9
    max_hit = 0

    if args.minHit is None or args.maxHit is None:
        # Derive the overall frequency range from the hits themselves.
        for file in dat_files:
            tbl = find.read_dat(file)
            min_freq, max_freq = min(tbl["Freq"]), max(tbl["Freq"])
            if min_freq < min_hit:
                min_hit = min_freq
            if max_freq > max_hit:
                max_hit = max_freq
    else:
        # Range was set by hand on the command line.
        min_hit = args.minHit
        max_hit = args.maxHit

    print("Lowest frequency hit: ", min_hit)
    print("Highest frequency hit: ", max_hit)

    plot_range = 2000 * 1e-6  # a 2000 Hz width, expressed in MHz
    freq_range = np.arange(np.round(min_hit, 2), np.round(max_hit), plot_range)

    outDir = os.path.join(path, "bautista-analysis")
    if not os.path.exists(outDir):
        os.mkdir(outDir)

    for center in freq_range:
        # One plot per 2000 Hz step; each window extends 1000 Hz (0.001 MHz)
        # to either side of the center frequency.
        plot_dat(os.path.join(path, "dat-list.lst"),
                 os.path.join(path, "h5-list.lst"),
                 os.path.join(path, "events-list.csv"),
                 outdir=outDir + os.sep,
                 check_zero_drift=False,
                 alpha=0.65,
                 color="black",
                 window=(center - 0.001, center + 0.001))

if __name__ == '__main__':
    sys.exit(main())
| 2.328125 | 2 |
Bagpipe/Coordinator/sender.py | kasramvd/Rexy | 51 | 12771998 | """
=====
sender.py
=====
Import data from server.
============================
"""
from flask import Flask, jsonify
from flask import make_response
from config import sender_links
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
# ####### #
# Profile #
# ####### #


@app.route(sender_links['user'], methods=['GET'])
def get_users(task_id):
    """Return the single task matching ``task_id`` as JSON, or 404.

    NOTE(review): ``tasks`` is not defined anywhere in this module and
    ``abort`` is not imported from flask -- both will raise NameError at
    request time. Confirm where the task list is meant to come from and
    add ``abort`` to the flask import.
    """
    # Linear scan for the matching task id.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    return jsonify({'task': task[0]})
def get_products(task_ids):
    """Placeholder for product lookup by task ids (not yet implemented)."""
    pass
# ####### #
# Profile #
# ####### # | 2.40625 | 2 |
tests/test_marble.py | mcgibbon/marble | 3 | 12771999 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `marble` package."""
import unittest
import marble
import numpy as np
import sympl as sp
test_era5_filename = '/home/twine/data/era5/era5-interp-2016.nc'
def get_test_state(pc_value=0.):
    """Build a minimal model state dict for the conversion tests.

    Each principal-component field is a 1-D sympl DataArray whose length is
    the latent feature count registered for that quantity, filled with
    ``pc_value``.

    Args:
        pc_value (float): Value used to fill every component array.

    Returns:
        dict: State mapping field names to DataArrays, plus a 'time' entry.
    """
    n_features = marble.components.marble.name_feature_counts

    def _components(short_name, dim):
        # 1-D array of pc_value with one entry per latent feature.
        return sp.DataArray(
            np.ones([n_features[short_name]]) * pc_value,
            dims=(dim,), attrs={'units': ''})

    state = {
        'time': sp.timedelta(0),
        'liquid_water_static_energy_components':
            _components('sl', 'sl_latent'),
        'total_water_mixing_ratio_components':
            _components('rt', 'rt_latent'),
        'cloud_water_mixing_ratio_components':
            _components('rcld', 'rcld_latent'),
        'rain_water_mixing_ratio_components':
            _components('rrain', 'rrain_latent'),
        'cloud_fraction_components':
            _components('cld', 'cld_latent'),
        'liquid_water_static_energy_components_horizontal_advective_tendency':
            _components('sl', 'sl_latent'),
        # BUG FIX: this previously used n_features['sl'] (a copy-paste of
        # the line above) while carrying 'rt_latent' dims; the rt tendency
        # length must match the rt latent dimension.
        'total_water_mixing_ratio_components_horizontal_advective_tendency':
            _components('rt', 'rt_latent'),
        'vertical_wind_components':
            _components('w', 'w_latent'),
    }
    return state
class TestPrincipalComponentConversions(unittest.TestCase):
    """Round-trip and smoke tests for principal-component conversions."""

    def _assert_input_roundtrip(self, pc_value):
        """Convert a state latent -> height -> latent and require equality.

        Extracted helper: the zero and nonzero test cases previously
        duplicated this entire body.
        """
        state = get_test_state(pc_value=pc_value)
        converter = marble.InputPrincipalComponentsToHeight()
        inverse_converter = marble.InputHeightToPrincipalComponents()
        intermediate = converter(state)
        # The inverse converter requires 'time' in its input state.
        intermediate['time'] = state['time']
        result = inverse_converter(intermediate)
        for name in result.keys():
            self.assertIn(name, state)
            self.assertEqual(result[name].shape, state[name].shape, name)
            self.assertTrue(
                np.allclose(result[name].values, state[name].values), name)

    def test_convert_input_zero_latent_to_height_and_back(self):
        self._assert_input_roundtrip(0.)

    def test_convert_input_nonzero_latent_to_height_and_back(self):
        self._assert_input_roundtrip(0.6)

    def test_convert_diagnostic_zero_latent_to_height(self):
        """
        This only tests that the conversion runs without errors, it does not
        check anything about the output value.
        """
        state = get_test_state(pc_value=0.)
        converter = marble.DiagnosticPrincipalComponentsToHeight()
        converter(state)


if __name__ == '__main__':
    unittest.main()
| 2.125 | 2 |
python/coursera_python/MICHIGAN/WEB/week4/asss.py | SayanGhoshBDA/code-backup | 16 | 12772000 | <reponame>SayanGhoshBDA/code-backup
# socketTest.py
# Minimal raw-HTTP client ("Python for Everybody" socket exercise): open a
# TCP connection to the web server and speak HTTP/1.0 by hand.
import socket

mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# BUG FIX: connect() takes a (hostname, port) pair. The previous code passed
# the full URL as the host name, which can never resolve.
mysocket.connect(('data.pr4e.org', 80))

# BUG FIX: socket.send() requires bytes, and the request line must name the
# document path. The blank line terminates the HTTP header block.
request = 'GET /intro-short.txt HTTP/1.0\r\nHost: data.pr4e.org\r\n\r\n'
mysocket.send(request.encode())

while True:
    data = mysocket.recv(512)
    if len(data) < 1:
        break
    print(data.decode(errors='replace'))

mysocket.close()
| 2.796875 | 3 |
tests/clients/sendgrid/test_sendgrid.py | zaibon/js-sdk | 13 | 12772001 | <reponame>zaibon/js-sdk
import email
import imaplib
import os
import string
import gevent
import pytest
from jumpscale.loader import j
from tests.base_tests import BaseTests
@pytest.mark.integration
class Sendgrid(BaseTests):
    """Integration tests for the sendgrid client.

    Requires SEND_GRID_API_KEY_TOKEN, RECIPIENT_MAIL and RECIPIENT_PASS as
    environment variables; delivery is validated by reading the recipient's
    Gmail inbox over IMAP.
    """

    SMTP_SERVER = "imap.gmail.com"

    def setUp(self):
        self.sendgird_client_name = self.random_name()
        self.sendgrid_client = j.clients.sendgrid.get(name=self.sendgird_client_name)
        self.send_gird_api_key_token = os.getenv("SEND_GRID_API_KEY_TOKEN")
        self.recipient_mail = os.getenv("RECIPIENT_MAIL")
        # FIX: restore the environment lookup for the recipient password;
        # this line had been corrupted by credential redaction.
        self.recipient_pass = os.getenv("RECIPIENT_PASS")
        if self.send_gird_api_key_token and self.recipient_mail and self.recipient_pass:
            self.sendgrid_client.apikey = self.send_gird_api_key_token
        else:
            raise Exception(
                "Please add (SEND_GRID_API_KEY_TOKEN, RECIPIENT_MAIL, RECIPIENT_PASS) as environment variables "
            )
        self.sender_mail = j.data.fake.email()
        self.subject = j.data.fake.sentence()
        self.attachment_type = "application/txt"
        file_name = self.random_name()
        self.attachment_path = f"/tmp/{file_name}.txt"
        # Write a small text file to be used as the attachment fixture.
        j.sals.fs.write_file(path=self.attachment_path, data="i am testing")

    def test01_test_sendgrid_send_mail(self):
        """Test for sending an email without attachment.

        **Test Scenario**

        - Get sendgrid object.
        - Send the email.
        - Validate that the email send by accessing the receiver mail and check the inbox for the send email.
        - Delete the send email from the receiver mail inbox.
        """
        self.sendgrid_client.send(sender=self.sender_mail, subject=self.subject, recipients=[self.recipient_mail])
        self.assertTrue(self.await_validate_mail(validate_attachment=False))

    def test02_test_sendgrid_send_mail_with_attachment(self):
        """Test for sending an email with attachment.

        **Test Scenario**

        - Get sendgrid object.
        - Create attachment.
        - Add the attachment to sendgrid object.
        - Send the email.
        - Validate that the email send by accessing the receiver mail and check the inbox for the send email.
        - Delete the send email from the receiver mail inbox.
        """
        attach = self.sendgrid_client.build_attachment(filepath=self.attachment_path, typ=self.attachment_type)
        self.sendgrid_client.send(
            sender=self.sender_mail, subject=self.subject, recipients=[self.recipient_mail], attachments=[attach]
        )
        # NOTE(review): validate_attachment is False here although an
        # attachment was sent -- confirm whether attachment validation
        # should be enabled for this case.
        self.assertTrue(self.await_validate_mail(validate_attachment=False, attachment_type=self.attachment_type))

    def read_email_from_gmail(self, validate_attachment=True, attachment_type=None):
        """Scan the recipient inbox (newest first) for the sent mail.

        Returns True (and deletes the mail) if found; falsy otherwise.
        """
        try:
            mail = imaplib.IMAP4_SSL(self.SMTP_SERVER)
            mail.login(self.recipient_mail, self.recipient_pass)
            mail.select("inbox")

            _, data = mail.search(None, "ALL")
            mail_ids = data[0]

            id_list = mail_ids.split()
            first_email_id = int(id_list[0])
            latest_email_id = int(id_list[-1])

            # Walk from newest to oldest looking for our message.
            for i in range(latest_email_id, first_email_id, -1):
                if self.validate_mail(mail, i, validate_attachment, attachment_type):
                    # Delete the mail so reruns don't match stale messages.
                    mail.store(str(i).encode(), "+FLAGS", "\\Deleted")
                    mail.expunge()
                    return True
            return False
        except Exception as e:
            # Best-effort: IMAP hiccups are printed and treated as "not
            # found yet"; await_validate_mail will retry.
            print(str(e))

    def validate_mail(self, mail, mail_index, validate_attachment=True, attachment_type=None):
        """Check whether message ``mail_index`` is the one we sent."""
        _, data = mail.fetch(str(mail_index), "(RFC822)")
        for response_part in data:
            if isinstance(response_part, tuple):
                msg = email.message_from_bytes(response_part[1])
                email_subject = msg["subject"]
                email_from = msg["from"]
                if email_from == self.sender_mail:
                    self.assertEqual(email_subject, self.subject)
                    if validate_attachment:
                        attachment = msg.get_payload()[1]
                        self.assertEqual(attachment.get_content_type(), attachment_type)
                    return True
        return False

    def await_validate_mail(self, seconds=10, validate_attachment=True, attachment_type=None):
        """Poll the inbox once per second for up to ``seconds`` seconds."""
        for _ in range(seconds):
            if self.read_email_from_gmail(validate_attachment, attachment_type):
                return True
            gevent.sleep(1)
        return False

    def tearDown(self):
        # Remove the client instance and the temporary attachment file.
        j.clients.sendgrid.delete(self.sendgird_client_name)
        os.remove(self.attachment_path)
| 2.046875 | 2 |
Paper_topography_figures/Figure_5_C_E_Figure_S6_A_C_E_F.py | kavli-ntnu/mini2p_topography | 1 | 12772002 | ### Figure 5 C and E - Obenhaus et al.
# Figure S6 A, C, E and F - Obenhaus et al.
#
# NN distance analysis
# Pairwise distance analysis
#
import sys, os
import os.path
import numpy as np
import pandas as pd
import datajoint as dj
import cmasher as cmr
from tabulate import tabulate
import itertools
# Make plots pretty
import seaborn as sns
sns.set(style='white')
# Prevent bug in figure export as pdf:
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
##### IMPORTS ###########################################################################
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from general import print_mannwhitneyu, print_wilcoxon
from dj_plotter.helpers.plotting_helpers import make_linear_colormap
from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary
##### LOAD SCHEMA COMPONENTS #############################################################
from dj_schemas.dj_conn import *
##### EXPORT LOCATION ####################################################################
figure_dir = 'YOUR_EXPORT_DIRECTORY/'
def pairw_dist(animals,
               col_dict,
               param_hash_session='cf83e1357eefb8bd',
               param_hash_id_cell='standard',
               region='MEC',
               pairwise_dist_param='A',
               cutoff_n_starters=0,
               plot=True
               ):
    """Summarize pairwise-distance results across animals and sessions.

    Fetches PairwDist.PairwD rows for the filtered sessions, normalizes the
    columns named in ``col_dict`` against a shuffle reference column,
    optionally plots the summary figure, and prints Mann-Whitney U and
    one-sample Wilcoxon statistics plus mean ± SEM per column.

    Parameters
    ----------
    animals : iterable of str
        Animal names used to filter sessions.
    col_dict : dict
        Must contain 'cols_to_norm' (columns to normalize),
        'cols_to_norm_label' (x-axis labels), 'norm_to' (reference column)
        and 'cols' (grouping column, e.g. 'animal_name').
    param_hash_session, param_hash_id_cell, pairwise_dist_param : str
        Hashes / parameter ids selecting the processing parameter sets.
    region : str
        Brain region filter; must be 'MEC' or 'PAS'.
    cutoff_n_starters : int
        Minimum number of starter cells per dataset.
    plot : bool
        Whether to draw and export the summary figure.

    Returns
    -------
    tuple
        (normalized dataframe, number of animals, number of datasets).
    """
    # Print col_dict
    print(f'\nReceived the following column dictionary \n{col_dict}\n')
    # Brain region filter
    assert region in ['MEC','PAS'], f'Region "{region}" not understood. Choose "MEC" or "PAS"'

    # Sessions restricted to the requested animals and session filter hash.
    all_sessions = (Session.proj('animal_name') * FilteredSessions
                    & [f'animal_name = "{animal}"' for animal in animals]
                    & f'param_hash_session = "{param_hash_session}"'
                    )

    # Print pairw dist. parameter
    score_, score_cutoff_ = (PairwDistParams & f'pairwise_dist_param = "{pairwise_dist_param}"').fetch1('score','score_cutoff')
    print(f'Filtering pairwise distances by {score_} > {score_cutoff_}')

    # Join pairwise-distance results, restricted by cell-id hash, parameter,
    # region, and minimum starter-cell count.
    pairw = (Session.proj('animal_name') * PairwDist.Cells * PairwDist.PairwD
             & all_sessions.proj()
             & f'param_hash_id_cell = "{param_hash_id_cell}"'
             & f'pairwise_dist_param = "{pairwise_dist_param}"'
             & f'region = "{region}"'
             & f'n_startr > {cutoff_n_starters}')

    pairw_df = pd.DataFrame(pairw.fetch(as_dict=True))
    pairw_df.dropna(inplace=True)
    # One color per animal for the summary plot.
    colors = make_linear_colormap(pairw_df.animal_name, categorical=True, cmap='cmr.guppy')

    ### COLS TO NORMALIZE #################################################################################
    cols_to_norm = col_dict['cols_to_norm']              # e.g. ['mean_pairw_dist_shuffref', 'mean_pairw_dist']
    cols_to_norm_label = col_dict['cols_to_norm_label']  # e.g. ['Ref', 'Data']
    norm_to = col_dict['norm_to']                        # e.g. 'mean_pairw_dist_shuffall'
    cols = col_dict['cols']                              # e.g. 'animal_name'

    # Normalize the chosen columns to the shuffle-reference column.
    pairw_df_norm = norm_pairw_nn_df(pairw_df, cols_to_norm, cols, norm_to)
    pairw_df_norm.reset_index(drop=True, inplace=True)

    # Plot
    if plot:
        plot_pairw_nn_summary(pairw_df_norm,
                              cols_to_norm,
                              colors=colors,
                              xlabels=cols_to_norm_label,
                              save_path=figure_dir,
                              label='PairwD')

    # Print statistics
    print(f'Data over {len(pairw_df.session_name)} datasets (careful! Can be multiplane!) ({len(set(pairw_df.animal_name))} animals)')
    print(f'{set(pairw_df.animal_name)}')

    # Calculate p values MannWhithney and 1 sample Wilcoxon rank
    pairw_df_norm_ = pairw_df_norm[cols_to_norm]
    results = pd.DataFrame(columns = pairw_df_norm_.columns,
                           index = pairw_df_norm_.columns)
    for (label1, column1), (label2, column2) in itertools.combinations(pairw_df_norm_.items(), 2):
        # print_mannwhitneyu returns a (statistic, p-value) pair; store the
        # p-value symmetrically in both cells of the results matrix.
        _ ,results.loc[label1, label2] = _ ,results.loc[label2, label1] = print_mannwhitneyu(column1, column2, label_A=label1, label_B=label2)
    #print(tabulate(results, headers='keys', tablefmt='psql'))

    print('\nWilcoxon signed rank test (against 1.):')
    for col in cols_to_norm:
        try:
            print_wilcoxon(pairw_df_norm[col] - 1., label=col)
        except ValueError:
            # Wilcoxon raises when all differences are zero.
            print(f'Skipping column {col} (all zero?)')

    # Print some more stats
    # NOTE(review): SEM uses np.std (population std, ddof=0); confirm
    # whether sample std (ddof=1) is intended here.
    print('Mean and SEM for PairwDist results')
    for col in cols_to_norm:
        mean_col, sem_col = np.nanmean(pairw_df_norm[col]), np.std(pairw_df_norm[col]) / np.sqrt(len(pairw_df_norm[col]))
        print(f'{col:<30} | Mean ± SEM: {mean_col:.2f} ± {sem_col:.2f}')

    return pairw_df_norm, len(set(pairw_df.animal_name)), len(pairw_df.session_name)
def group_nn_dist(animals,
                  col_dict,
                  param_hash_session='cf83e1357eefb8bd',
                  param_hash_id_cell = 'standard',
                  region='MEC',
                  pairwise_dist_param='A',
                  cutoff_n_starters=0,
                  nn_group_number=5,
                  plot=True
                  ):
    '''
    Like pairw_dist() but for PairwDist.NN instead of PairwDist.PairwD, i.e. grouped NN results

    Parameters
    ----------
    animals : iterable of str : animal names used to filter sessions
    col_dict : dict : must contain keys 'cols_to_norm', 'cols_to_norm_label',
                      'norm_to' and 'cols' (grouping column for normalisation)
    nn_group_number : default 5 : Number of NN to consider (group size).
                      Careful: Zero indexed! 0 = first nearest neighbour

    Returns
    -------
    (nn_df_norm, number of unique animals, number of unique sessions)
    '''
    # Print col_dict
    print(f'\nReceived the following column dictionary \n{col_dict}\n')
    # Brain region filter
    assert region in ['MEC','PAS'], f'Region "{region}" not understood. Choose "MEC" or "PAS"'
    all_sessions = (Session.proj('animal_name') * FilteredSessions
                        & [f'animal_name = "{animal}"' for animal in animals]
                        & f'param_hash_session = "{param_hash_session}"'
                        )
    # Print pairw dist. parameter
    score_, score_cutoff_ = (PairwDistParams & f'pairwise_dist_param = "{pairwise_dist_param}"').fetch1('score','score_cutoff')
    print(f'Filtering pairwise distances by {score_} > {score_cutoff_}')
    nn = (Session.proj('animal_name') * PairwDist.Cells * PairwDist.NN
                & all_sessions.proj()
                & f'param_hash_id_cell = "{param_hash_id_cell}"'
                & f'pairwise_dist_param = "{pairwise_dist_param}"'
                & f'region = "{region}"'
                & f'n_startr > {cutoff_n_starters}')
    nn_df = pd.DataFrame(nn.fetch(as_dict=True))
    nn_df.dropna(inplace=True) # Important here because apparently some of the stuff can be None
    colors = make_linear_colormap(nn_df.animal_name, categorical=True, cmap='cmr.guppy')
    # Subselect a specific nn_number = number of NN in result (group size)
    # NOTE(review): assumes each of these columns holds an array-like indexable
    # by nn_group_number — confirm against the PairwDist.NN table definition.
    data_cols_pairwDist_NN = ['mean_nn','mean_nn_shuff_all',
                              'mean_nn_shuff_ref','mean_nn_csr'] # All data columns in table
    for col in data_cols_pairwDist_NN:
        nn_df[col] = [res[nn_group_number] for res in nn_df[col]]
    ### COLS TO NORMALIZE #################################################################################
    cols_to_norm = col_dict['cols_to_norm']
    cols_to_norm_label = col_dict['cols_to_norm_label']
    norm_to = col_dict['norm_to']
    cols = col_dict['cols']
    # Normalize
    nn_df_norm = norm_pairw_nn_df(nn_df, cols_to_norm, cols, norm_to)
    nn_df_norm.reset_index(drop=True, inplace=True)
    # Plot
    if plot:
        plot_pairw_nn_summary(nn_df_norm,
                              cols_to_norm,
                              colors=colors,
                              xlabels=cols_to_norm_label,
                              save_path=figure_dir,
                              label='NN')
    # Print statistics
    print(f'Data over {len(nn_df.session_name)} datasets (careful! Can be multiplane!) ({len(set(nn_df.animal_name))} animals)')
    print(f'{set(nn_df.animal_name)}')
    # Calculate p values MannWhithney and 1 sample Wilcoxon rank
    nn_df_norm_ = nn_df_norm[cols_to_norm]
    results = pd.DataFrame(columns = nn_df_norm_.columns,
                           index = nn_df_norm_.columns)
    # Pairwise Mann-Whitney U between every pair of normalised columns;
    # the p value is stored symmetrically in `results`.
    for (label1, column1), (label2, column2) in itertools.combinations(nn_df_norm_.items(), 2):
        _ ,results.loc[label1, label2] = _ ,results.loc[label2, label1] = print_mannwhitneyu(column1, column2, label_A=label1, label_B=label2)
    #print(tabulate(results, headers='keys', tablefmt='psql'))
    print('\nWilcoxon signed rank test (against 1.):')
    for col in cols_to_norm:
        #_, onesample_p_data = ttest_1samp(pairw_df_norm[col], 1.)
        try:
            print_wilcoxon(nn_df_norm[col] - 1., label=col)
        except ValueError:
            # scipy raises ValueError when all differences are zero
            print(f'Skipping column {col} (all zero?)')
    # Print some more stats
    print('Mean and SEM for NN results')
    for col in cols_to_norm:
        mean_col, sem_col = np.nanmean(nn_df_norm[col]), np.std(nn_df_norm[col]) / np.sqrt(len(nn_df_norm[col]))
        print(f'{col:<30} | Mean ± SEM: {mean_col:.2f} ± {sem_col:.2f}')
    return nn_df_norm, len(set(nn_df.animal_name)), len(set(nn_df.session_name))
if __name__ == "__main__":
    # Animal cohorts (IDs) used in the analyses below.
    grid_mice = [
        '82913','88592', '87244', '60480',
        '97046','89841'
        ]
    ov_mice = [
        '87187','88106','87245','90222',
        '94557','89622'
        ]
    all_animals = [
        '90222','90218','90647',
        '82913','88592','89622',
        '87244','89841','60480',
        '87245','87187','88106',
        '94557','97045','97046',
        ]
    animals = grid_mice
    pairwise_dist_param = "A"
    param_hash_id_cell = 'standard'
    region = 'MEC'
    # Cutoff number of cells
    cutoff_n_starters = 15.
    # For NN
    nn_group_number = 5
    ###### PAIRWISE DISTANCES ####################################################################################
    # print(f'Creating pairwise distance figure for {len(animals)} animal(s)')
    # print(animals)
    # print('\n')
    # # Create column dictionary
    # col_dict = {}
    # col_dict['cols_to_norm'] = ['mean_pairw_dist_shuffall', 'mean_pairw_dist_shuffref', 'mean_pairw_dist']
    # #mean_pairw_dist_shuffref, mean_pairw_dist_shuffall
    # col_dict['cols_to_norm_label'] = ['All', 'Ref', 'Data']
    # col_dict['norm_to'] = 'mean_pairw_dist_shuffall'
    # col_dict['cols'] = 'animal_name'
    # pairw_dist(animals,
    #            col_dict,
    #            param_hash_session='cf83e1357eefb8bd',
    #            param_hash_id_cell=param_hash_id_cell,
    #            region=region,
    #            pairwise_dist_param=pairwise_dist_param,
    #            cutoff_n_starters=cutoff_n_starters,
    #            )
    ####### NN DISTANCES ###########################################################################################
    print('\n########################################################################################################')
    print(f'\nCreating NN distance figure for {len(animals)} animal(s)')
    print(animals)
    print('\n')
    # Create column dictionary
    col_dict = {}
    col_dict['cols_to_norm'] = ['mean_nn_shuff_all', 'mean_nn_shuff_ref', 'mean_nn']
    col_dict['cols_to_norm_label'] = ['All', 'Ref', 'Data']
    col_dict['norm_to'] = 'mean_nn_shuff_all'
    col_dict['cols'] = 'animal_name'
    group_nn_dist(animals,
                  col_dict,
                  param_hash_session='cf83e1357eefb8bd',
                  param_hash_id_cell=param_hash_id_cell,
                  region=region,
                  pairwise_dist_param=pairwise_dist_param,
                  cutoff_n_starters=cutoff_n_starters,
                  nn_group_number=nn_group_number,
                  plot=True)
    # figure_dir is presumably a module-level constant defined earlier in the file.
    print(figure_dir)
    print('Success.')
Definitions/definitions_for_scraping_fitbit_data.py | ddritsa/PhD-Thesis-repository | 0 | 12772003 | <filename>Definitions/definitions_for_scraping_fitbit_data.py
#!/usr/bin/env python
# coding: utf-8
# To use this:
#
# Put in the parenthesis: User email, password, user ID)
#
# (Email+password that are used in Strava
#
# In[3]:
import pandas as pd
from xml.dom import minidom
import csv
from bs4 import BeautifulSoup
import requests
#import regular expressions
import re
import numpy as np
import time
import datetime
from datetime import datetime
from datetime import timedelta
# In[5]:
def import_gpx(file_dir):
#read each datastream from the gpx file
#file_dir = file_dir.decode('UTF-8')
mydoc2 = minidom.parseString(file_dir)
trkpt = mydoc2.getElementsByTagName('trkpt')
time = mydoc2.getElementsByTagName('time')
ele = mydoc2.getElementsByTagName('ele')
hr = mydoc2.getElementsByTagName('gpxtpx:hr')
cad = mydoc2.getElementsByTagName('ns3:cad')
#create empty lists to store the data streams
times = []
lats = []
longs = []
eles = []
hrs = []
distances = []
#extract data from each element of the created lists and append it to the corresponding list
for elem in trkpt:
lats.append(float(elem.attributes['lat'].value))
longs.append(float(elem.attributes['lon'].value))
for elem in time:
times.append(elem.firstChild.data)
for elem in hr:
hrs.append(int(elem.firstChild.data))
for elem in ele:
eles.append(float(elem.firstChild.data))
if len(hrs)>0:
data = {'Datetime': times[1:],
'Latitude':lats,
'Longitude':longs,
'Altitude':eles,
'Distance':0,
'Heart rate':hrs}
else:
data = {'Datetime': times[1:],
'Latitude':lats,
'Longitude':longs,
'Altitude' : eles,
'Distance':0,
'Heart rate':0
}
#make data frame from dictionary
data_df = pd.DataFrame(data=data)
data_df['Datetime'] = pd.to_datetime(data_df.loc[:,'Datetime'],utc=True)
data_df.set_index('Datetime', inplace=True)
data_df.index = data_df.index.tz_convert('Australia/Sydney')
data_df = data_df.sort_index()
return(data_df)
# In[4]:
VERSION = '0.1.0'


class StravaScraper(object):
    """Scrapes a logged-in user's Strava activities as GPX files.

    Logs in with email/password, walks the athlete's 2018/2019 calendar
    pages to collect activity ids, downloads each activity's GPX export,
    and merges everything into one time-indexed DataFrame.

    Fixes vs. the previous revision:
    - the session POST sent a broken ``<PASSWORD>`` placeholder instead of
      the stored password;
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by ``pd.concat``.
    """

    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent
    USER_AGENT = "strava_scraper/%s" % VERSION
    HEADERS = {'User-Agent': USER_AGENT}
    BASE_URL = "https://www.strava.com"
    URL_LOGIN = "%s/login" % BASE_URL
    URL_SESSION = "%s/session" % BASE_URL
    URL_DASHBOARD = "%s/dashboard" % BASE_URL
    is_authed = False

    def __init__(self, email, password, user_id):
        self.email = email
        self.password = password
        self.session = requests.Session()
        self.user_id = user_id
        self.gpx_files = []       # raw GPX payloads collected by login()
        self.df = pd.DataFrame()  # accumulated trackpoint data

    def get_page(self, url):
        """GET *url* with the scraper's User-Agent; raise on HTTP errors."""
        response = self.session.get(url, headers=StravaScraper.HEADERS)
        response.raise_for_status()
        return response

    def login(self):
        """Authenticate against Strava and download all activity GPX files."""
        response = self.get_page(StravaScraper.URL_LOGIN)
        soup = BeautifulSoup(response.content, 'html.parser')
        utf8 = soup.find_all('input',
                             {'name': 'utf8'})[0].get('value').encode('utf-8')
        token = soup.find_all('input',
                              {'name': 'authenticity_token'})[0].get('value')
        data = {
            'utf8': utf8,
            'authenticity_token': token,
            'plan': "",
            'email': self.email,
            # BUG FIX: was a broken placeholder; send the stored password.
            'password': self.password,
        }
        response = self.session.post(StravaScraper.URL_SESSION,
                                     data=data,
                                     headers=StravaScraper.HEADERS)
        response.raise_for_status()
        # Simulate that redirect here:
        response = self.get_page(StravaScraper.URL_DASHBOARD)
        response_soup = BeautifulSoup(response.content, 'html.parser')
        # Find athlete url.
        athlete_url = 0
        all_links = response_soup.find_all('a')
        for link in all_links:
            if 'My Profile' in link:
                athlete_url = self.BASE_URL + link.get('href') + '/training/log'
                print('athlete_url:', athlete_url)
        all_urls = [self.BASE_URL + '/athlete/calendar/2018', self.BASE_URL + '/athlete/calendar/2019']
        activities = []
        # Find activities from training log.
        # Here we can use "athlete_url" instead of "year_url";
        # this gives us limited results though (as not all the activities
        # are shown in the fetched page).
        for year_url in all_urls:
            training_log = self.session.get(year_url)
            training_log_soup = BeautifulSoup(training_log.content, 'html.parser')
            decoded_log = training_log.content.decode('UTF-8')
            # Scrape activity ids out of the embedded JSON blobs.
            find_ids = [m.start() for m in re.finditer('"id":', decoded_log)]
            for index_start in find_ids:
                teststr = decoded_log[index_start:index_start + 22]
                if "name" in teststr:
                    result_id = teststr[5:-7].strip(' ')
                    activities.append(result_id)
        # Make sure that we have the correct num of activities.
        unique_activities = set(activities)
        print('no of activities:', len(unique_activities))
        for activity_id in unique_activities:
            new_url = self.BASE_URL + '/activities/' + activity_id + '/export_gpx.json'
            test_act = self.session.get(new_url)
            test_act_soup = BeautifulSoup(test_act.content, 'html.parser')
            self.gpx_files.append(test_act.content)
        self.is_authed = True

    def export_gpx(self):
        """Parse every downloaded GPX payload into self.df, tagging sessions."""
        for item in range(0, len(self.gpx_files)):
            this_df = import_gpx(self.gpx_files[item].decode('utf-8'))
            if "gpxtpx" in self.gpx_files[item].decode('utf-8'):
                print(this_df.index[0])
            else:
                print(this_df.index[0])
                print('no heart rate')
            # Assign session id derived from user id + first timestamp.
            this_df = this_df.assign(Session=0)
            collection_date = (this_df.index[0].day, this_df.index[0].month, this_df.index[0].year, this_df.index[0].hour)
            session_id = str(self.user_id) + '_' + this_df.index[0].strftime("%d-%m-%Y %H:%M")
            if 'Session' in self.df.columns:
                if session_id in self.df['Session'].unique():
                    print('is already: ', session_id)
            this_df.loc[:, 'Session'] = session_id
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            self.df = pd.concat([self.df, this_df])
        self.df = self.df.sort_index()
        self.df = self.df.assign(UserID=0)
        self.df.loc[:, 'UserID'] = self.user_id
        return(self.df)

    def merge_sessions(self):
        """Drop duplicate timestamps and join sessions < 15 min apart."""
        if not self.df.empty:
            self.df = self.df[~self.df.index.duplicated(keep='first')]
            if 'Session' in self.df.columns:
                sessions = self.df['Session'].unique().copy()
                for i in range(0, len(sessions)):
                    item = sessions[i]
                    df_a = self.df[self.df['Session'] == item]
                    if i > 0:
                        pos = self.df.index.get_loc(df_a.index[0])
                        session_a = self.df.loc[self.df.index[pos], 'Session']
                        session_b = self.df.loc[self.df.index[pos - 1], 'Session']
                        df_b = self.df[self.df['Session'] == session_b]
                        # Check how many minutes have passed between sessions.
                        minutes_passed = (df_a.index[0] - df_b.index[-1]).total_seconds() / 60
                        if minutes_passed < 15:
                            self.df.loc[df_a.index, 'Session'] = session_b
        return(self.df)
def scrape_movement_data(email,password, userid):
    """Log into Strava with the given credentials, download every activity
    GPX file for *userid* and return them as one merged DataFrame."""
    scraper = StravaScraper(email, password, userid)
    scraper.login()
    # export_gpx() accumulates trackpoints into scraper.df; merge_sessions()
    # then de-duplicates timestamps and joins back-to-back sessions.
    scraper.export_gpx()
    return scraper.merge_sessions()
# In[ ]:
| 2.953125 | 3 |
sensitivity_general.py | kwabenaOwusu/CovidInterventionsABM | 0 | 12772004 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
simulations for sensitivity to results
Author: <NAME>
Date: May, 2020
"""
import csv
import time as mytime
############################################ENVIRONMENT###################################################################################
exec(open('./environment.py').read()) # execute the environment script

####################################################### STANDARD (MARKET)#############################################################################
numsim = 4 # number of replicate simulation runs (each replicate is tagged with a NoSim column)
start_time=mytime.time() # set time for starting
risk_life = 0.5 # risk level by moving outside
social_radius = 2 # social radius within which interaction is possible
eff_quarantined = 0.25 # efficiency of contact tracing symptomatic for treatments at hospitals
hospital_capacity = 0.5 # the capacity of the hospitals (in reference to the general population)
essentials_move = 8 # move out only for essentials

exec(open('./market_modules.py').read()) # execute the main script

# Append every replicate's rows from simulation_data.csv into one combined CSV.
outfname = 'sim_standard_market.csv'
with open(outfname,'w') as outfile:
    allsimdat=csv.writer(outfile)
    for rep in range(numsim):
        exec(open('./loop_modules.py').read())
        with open('./simulation_data.csv', 'r') as csvfile:
            onesimdat = csv.reader(csvfile, delimiter=',')
            header = next(onesimdat)
            header.append('NoSim')
            if rep==0:
                allsimdat.writerow(header)
            for row in onesimdat:
                row.append(str(rep))
                allsimdat.writerow(row)
        print('Done, simulation %i, with standard paramaters, ended at %.4f hours '%(rep+1,(mytime.time()-start_time)/3600. ))
        #os.rename('sim_movie.mp4', 'movie_standard_market_rep_%i.mp4' %(rep+1) )

####################################################### STANDARD (MARKET) WITH MASK #############################################################################
numsim = 4 # number of replicate simulation runs
start_time=mytime.time() # set time for starting
risk_life = 0.5 # risk level by moving outside
social_radius = 2 # social radius within which interaction is possible
eff_quarantined = 0.25 # efficiency of contact tracing symptomatic for treatments at hospitals
hospital_capacity = 0.5 # the capacity of the hospitals (in reference to the general population)
essentials_move = 8 # move out only for essentials
wearing_mask = 0.5 # prob of wearing mask

exec(open('./market_mask_modules.py').read()) # execute the main script

# NOTE(review): this is the same outfname as the no-mask section above, so the
# mask run overwrites 'sim_standard_market.csv' — probably intended to be a
# distinct file such as 'sim_standard_market_mask.csv'. Confirm before relying
# on the first section's output.
outfname = 'sim_standard_market.csv'
with open(outfname,'w') as outfile:
    allsimdat=csv.writer(outfile)
    for rep in range(numsim):
        exec(open('./loop_modules.py').read())
        with open('./simulation_data.csv', 'r') as csvfile:
            onesimdat = csv.reader(csvfile, delimiter=',')
            header = next(onesimdat)
            header.append('NoSim')
            if rep==0:
                allsimdat.writerow(header)
            for row in onesimdat:
                row.append(str(rep))
                allsimdat.writerow(row)
        print('Done, simulation %i, with standard paramaters, ended at %.4f hours '%(rep+1,(mytime.time()-start_time)/3600. ))
        #os.rename('sim_movie.mp4', 'movie_standard_market_rep_%i.mp4' %(rep+1) )
| 2.28125 | 2 |
tests/test_model_serializer_deserialize.py | aswinkp/swampdragon | 366 | 12772005 | from swampdragon.serializers.model_serializer import ModelSerializer
from swampdragon.testing.dragon_testcase import DragonTestCase
from .models import TextModel, SDModel
from datetime import datetime
from django.db import models
# to make sure none of the ModelSerializer variables are clobbering the data
MODEL_KEYWORDS = ('data', )
# TODO: support the rest of these field names
# MODEL_KEYWORDS = ('data', 'opts', 'initial', 'base_fields', 'm2m_fields', 'related_fields', 'errors')
class KeywordModel(SDModel):
    """Model whose field names collide with ModelSerializer attribute names."""
    data = models.TextField()
    # TODO: support the rest of these field names
    # opts = models.TextField()
    # initial = models.TextField()
    # base_fields = models.TextField()
    # m2m_fields = models.TextField()
    # related_fields = models.TextField()
    # errors = models.TextField()
class KeywordModelSerializer(ModelSerializer):
    """Serializer exposing the keyword-colliding fields of KeywordModel."""
    class Meta:
        model = KeywordModel
        publish_fields = MODEL_KEYWORDS
        update_fields = MODEL_KEYWORDS
class DateModel(SDModel):
    """Minimal model with a single datetime field, for deserialization tests."""
    date = models.DateTimeField()
class DateModelSerializer(ModelSerializer):
    """Serializer for DateModel exposing only the ``date`` field."""
    class Meta:
        model = DateModel
        # FIX: ('date') without a trailing comma is just the string 'date',
        # not a tuple. Use a real one-element tuple, consistent with the
        # tuple used by KeywordModelSerializer.
        publish_fields = ('date',)
        update_fields = ('date',)
class TextModelSerializer(ModelSerializer):
    """Serializer for TextModel exposing only the ``text`` field."""
    class Meta:
        model = TextModel
        # FIX: ('text') without a trailing comma is just the string 'text',
        # not a tuple. Use a real one-element tuple, consistent with the
        # tuple used by KeywordModelSerializer.
        publish_fields = ('text',)
        update_fields = ('text',)
class TestModelSerializer(DragonTestCase):
    """Deserialization behaviour of ModelSerializer subclasses."""

    def test_deserialize_model(self):
        # A dict of model data deserializes and saves to a model instance.
        data = {'text': 'foo'}
        serializer = TextModelSerializer(data)
        model_instance = serializer.save()
        self.assertEqual(model_instance.text, data['text'])

    def test_passing_invalid_data(self):
        # Non-dict input is rejected at construction time.
        foo = 'text'
        with self.assertRaises(Exception):
            TextModelSerializer(foo)

    def test_ignore_non_model_fields(self):
        # Keys that are not model fields are silently ignored.
        data = {'text': 'foo', 'random_field': 'val'}
        serializer = TextModelSerializer(data)
        model_instance = serializer.deserialize()
        self.assertEqual(model_instance.text, data['text'])

    def test_deserialize_field(self):
        # A stringified datetime round-trips through deserialization.
        date = datetime.now()
        data = {'date': str(date)}
        serializer = DateModelSerializer(data)
        object = serializer.save()
        self.assertEqual(object.date, date)

    def test_deserialize_keyword_field(self):
        # Field names colliding with serializer internals still deserialize.
        data = dict(zip(MODEL_KEYWORDS, MODEL_KEYWORDS))
        serializer = KeywordModelSerializer(data)
        object = serializer.save()
        for attr in MODEL_KEYWORDS:
            self.assertEqual(getattr(object, attr), attr)
| 2.546875 | 3 |
pyx12/test/test_xmlwriter.py | sanjaysiddhanti/pyx12 | 0 | 12772006 | <reponame>sanjaysiddhanti/pyx12<filename>pyx12/test/test_xmlwriter.py
import os.path
import sys
import os
import unittest
try:
from StringIO import StringIO
except:
from io import StringIO
import tempfile
from pyx12.xmlwriter import XMLWriter
class TestWriter(unittest.TestCase):
    """Tests for pyx12.xmlwriter.XMLWriter output framing.

    Each test pushes a single <x12err> root element, pops everything,
    and compares the serialized document against the expected XML
    declaration plus open/close tags.
    """

    def setUp(self):
        # Expected output for an empty <x12err> document.
        self.res = '<?xml version="1.0" encoding="utf-8"?>\n<x12err>\n</x12err>\n'

    def test_write1(self):
        # io.StringIO takes no ``encoding`` argument; keep the Python 2/3
        # shim but catch only the specific failure instead of a bare except.
        try:
            fd = StringIO(encoding='ascii')
        except TypeError:
            fd = StringIO()
        writer = XMLWriter(fd)
        writer.push("x12err")
        while len(writer) > 0:
            writer.pop()
        self.assertEqual(fd.getvalue(), self.res)
        fd.close()

    def test_write_temp(self):
        (fdesc, filename) = tempfile.mkstemp('.xml', 'pyx12_')
        # FIX: mkstemp returns an *open* OS-level descriptor that was
        # previously leaked; close it before reopening by name.
        os.close(fdesc)
        with open(filename, 'w') as fd:
            writer = XMLWriter(fd)
            writer.push("x12err")
            while len(writer) > 0:
                writer.pop()
        with open(filename, 'r') as fd:
            self.assertEqual(fd.read(), self.res)
        # Best-effort cleanup; ignore filesystem errors only.
        try:
            os.remove(filename)
        except OSError:
            pass
| 2.640625 | 3 |
housing_prices_kaggle/results/2020-04-13/runall.py | be2112/modeling_projects | 0 | 12772007 | <reponame>be2112/modeling_projects
from lib_bre import *
import pandas as pd
import matplotlib.pyplot as plt
# Set pandas options to be more user friendly for a wide dataset
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

# Ignore Matplotlib memory warnings
plt.rcParams.update({'figure.max_open_warning': 100})

# Load data into a pandas dataframe.
data_file = get_dataset_file_path('2020-04-13', 'train.csv')
df = pd.read_csv(data_file)

# Get familiar with the data
df.info()
df.describe()

# Which columns have NaNs?
cols_with_nan = df.columns[df.isna().any()].tolist()
for col in cols_with_nan:
    print(col, df[col].isna().sum())

# Drop categorical variables that have over 10% of the data missing
df.drop(axis=1, labels=['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True)

# Fill numeric NaNs with the median value
# NOTE(review): df.median() on a frame with object columns warns/raises on
# newer pandas — may need numeric_only=True. Confirm against the pandas
# version pinned for this project.
df.fillna(df.median(), inplace=True)

# Drop remaining rows with NaNs
df.dropna(inplace=True)

# Produce LOTS of histograms — one PNG per column, saved to the working dir.
df_columns = list(df.columns.values)
for column in df_columns:
    fig, ax = plt.subplots()
    ax.hist(df[column])
    ax.set_ylabel('Count', fontdict={'fontsize': 20})
    ax.set_xlabel(column, fontdict={'fontsize': 20})
    ax.set_title(column + ' Histogram', fontdict={'fontsize': 20})
    fig.savefig(column + '_Histogram', format='png')

# Check out correlations
corr_matrix = df.corr()
| 2.765625 | 3 |
docs/examples/docker/mrcnn/handler.py | mysuperai/superai-sdk | 1 | 12772008 | import base64
import json
import os
import zlib
from urllib.request import urlretrieve
import boto3
import mrcnn.model as modellib
import numpy as np
import pandas as pd
import skimage.io
from mrcnn import utils
from mrcnn.config import Config
from superai.meta_ai import BaseModel
# Shared S3 client used by the handler for model/data access.
s3 = boto3.client("s3")

# Local directory the model weights are read from (SageMaker test layout).
_MODEL_PATH = os.path.join("sagify_base/local_test/test_dir/", "model")
# _MODEL_PATH = "s3://canotic-ai/model/mask-rcnn-model.tar.gz"
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = [
"BG",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
class ModelService(BaseModel):
    """Serving handler wrapping a Mask R-CNN (COCO) instance-segmentation model.

    `initialize` builds/loads the model once; `predict` takes a request body
    containing image URLs, runs detection, and returns per-instance
    zlib+base64-compressed masks.
    """

    def __init__(self):
        super().__init__()
        self.model = None
        self.initialized = False

    def initialize(self, context):
        """Load the Mask R-CNN model from the serving context's model dir."""
        # Inference-time config: one image per batch.
        class InferenceConfig(Config):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            NAME = "inference"
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            # Number of classes (including background)
            NUM_CLASSES = 1 + 80  # COCO has 80 classes

        config = InferenceConfig()
        config.display()
        print("Initialised class...")
        self.initialized = True
        properties = context.system_properties
        # NOTE(review): this local shadows the module-level _MODEL_PATH —
        # presumably intentional (serving runtime supplies model_dir); confirm.
        _MODEL_PATH = properties.get("model_dir")
        if self.model is None:
            print("Model Content : ", os.listdir(_MODEL_PATH))
            # Local path to trained weights file
            COCO_MODEL_PATH = os.path.join(_MODEL_PATH, "mask_rcnn_coco.h5")
            # Download COCO trained weights from Releases if needed
            try:
                if not os.path.exists(COCO_MODEL_PATH):
                    utils.download_trained_weights(COCO_MODEL_PATH)
                # Create model object in inference mode.
                model = modellib.MaskRCNN(mode="inference", model_dir=os.path.join("logs"), config=config)
                # Load weights trained on MS-COCO
                model.load_weights(COCO_MODEL_PATH, by_name=True)
                self.model = model
            except RuntimeError:
                # NOTE(review): any RuntimeError while building/loading is
                # re-raised as MemoryError (treated as OOM) — confirm intent.
                raise MemoryError
        return self.model

    def predict_from_image(self, path, class_id=3):
        """Run detection on the image at *path*, keeping only instances of
        *class_id* (default 3, 'car' in the COCO list above)."""
        image = skimage.io.imread(path)
        # Run detection
        clf = self.model
        print("model retrieved.")
        results = clf.detect([image], verbose=0)
        print("detection on image done.")
        # Visualize results
        r = results[0]
        # get indices corresponding to unwanted classes
        indices_to_remove = np.where(r["class_ids"] != class_id)
        # remove corresponding entries from `r`
        new_masks = np.delete(r["masks"], indices_to_remove, axis=2)
        scores = np.delete(r["scores"], indices_to_remove, axis=0)
        # Background = pixels not covered by any kept instance mask.
        aggregate_mask = np.logical_not(new_masks.any(axis=2))
        class_ids = np.delete(r["class_ids"], indices_to_remove, axis=0)
        return {
            "new_masks": new_masks,
            "aggregate_mask": aggregate_mask,
            "scores": scores,
            "class_ids": class_ids,
        }

    def predict_intermediate(self, input):
        """Download each image URL in *input* and build per-image mask lists."""
        image_urls = input["image_url"]
        predictions = []
        for i, url in enumerate(image_urls):
            image_path = f"image_{i}.jpg"
            # download image
            urlretrieve(url, image_path)
            print("image retrieved")
            image_path = os.getcwd() + "/" + image_path
            prediction = self.predict_from_image(image_path)
            print("predict from image done.")
            new_masks = prediction["new_masks"]
            aggregate_mask = prediction["aggregate_mask"]
            # One result dict per detected instance (last mask axis).
            n_masks = new_masks.shape[-1]
            pred = []
            for inst in range(n_masks):
                pred.append(self._handle_mask(prediction, inst))
                print(f"processing mask number {inst} done")
            # num_workers = mp.cpu_count() // 4
            # with Pool(num_workers) as pool:
            #     result = [pool.apply_async(_handle_mask, (prediction, i),) for i in range(n_masks)]
            #     pred = [res.get(timeout=15) for res in result]
            print("everything done, uploading data.")
            # data_uri = save_and_upload(aggregate_mask)
            # pred.append({
            #     "category": "Background",
            #     "maskUrl": data_uri,
            #     "instance": 0
            # })
            predictions.append(pred)
        return predictions

    def predict(self, json_input):
        """
        Prediction given the request input
        :param json_input: [dict], request input
        :return: [dict], prediction
        """
        # transform json_input and assign the transformed value to model_input
        # NOTE(review): assumes the request is a one-element list whose first
        # item carries a utf-8 "body" payload — confirm against the serving
        # framework's request format.
        print("json input", json_input)
        json_input = json_input[0]["body"]
        json_input = json_input.decode("utf-8")
        print("Fixed json input", json_input)
        try:
            model_input = pd.read_json(json.loads(json_input))
        except ValueError:
            model_input = pd.read_json(json_input)
        predictions = self.predict_intermediate(model_input)
        print("Predictions: ", predictions)
        # TODO If we have more than 1 model, then create additional classes similar to ModelService
        # TODO where each of one will load one of your models
        # # transform predictions to a list and assign and return it
        # prediction_list = []
        # output_keys = set([key.split("_")[0] for key in predictions.keys()])
        # for index, row in predictions.iterrows():
        #     out_row = {key: {} for key in output_keys}
        #     for i, j in row.items():
        #         name, p_type = i.split("_")
        #         if p_type == "predictions":
        #             p_type = "prediction"
        #         if p_type == "probabilities":
        #             p_type = "probability"
        #         out_row[name][p_type] = j
        #     prediction_list.append(out_row)
        return predictions

    def train(self, input_data_path, model_save_path, hyperparams_path=None):
        # Training is not supported by this handler.
        pass

    @classmethod
    def load_weights(cls, weights_path):
        # Weights are loaded in initialize(); this hook is unused.
        pass

    @staticmethod
    def get_encoding_string(mask):
        """zlib-compress *mask*'s raw bytes and return them base64-encoded."""
        data = zlib.compress(mask)
        encoded_string = base64.b64encode(data).decode("utf-8")
        return encoded_string

    def _handle_mask(self, prediction, inst):
        """Build the serialized result dict for instance *inst* of *prediction*."""
        new_masks = prediction["new_masks"]
        scores = prediction["scores"]
        class_ids = prediction["class_ids"]
        print(f"processing mask number {inst}")
        mask = new_masks[..., inst]
        mask_data = self.get_encoding_string(mask)
        class_id = class_ids[inst]
        w, h = mask.shape[:2]
        print(f"processing mask number {inst} done")
        return {
            "category": class_names[class_id],
            "class_id": int(class_id),
            "maskData": mask_data,
            "instance": inst,
            "score": float(scores[inst]),
            "width": w,
            "height": h,
        }
| 2.484375 | 2 |
testes e exercícios/exercicios/script_040.py | LightSnow17/exercicios-Python | 0 | 12772009 | <reponame>LightSnow17/exercicios-Python
print('Notas: menor que 5, REPROVADO / entre 5 e 6.9, RECUPERAÇÃO / maior ou igual a 7, APROVADO')
nota1 = float(input('Primeira nota: '))
nota2 = float(input('Segunda nota: '))
media = (nota1 + nota2) / 2
# Check bands from highest to lowest; exactly one message is printed.
if media >= 7:
    print('Sua média é {} e você está APROVADO!!'.format(media))
elif media >= 5:
    print('Sua média é {} e você está de RECUPERAÇÃO!!'.format(media))
else:
    print('Sua média é {} e você está REPROVADO!!'.format(media))
| 4.09375 | 4 |
cogs/commands/mod/modactions.py | BinnyDevel/Bloo | 0 | 12772010 | <reponame>BinnyDevel/Bloo<gh_stars>0
from apscheduler.jobstores.base import ConflictingIdError
import discord
from discord.commands import Option, slash_command
from discord.commands.commands import message_command, user_command
from discord.errors import HTTPException
from discord.ext import commands
from discord.utils import escape_markdown, escape_mentions
import traceback
import humanize
import pytimeparse
from datetime import datetime, timedelta, timezone
from data.model.case import Case
from data.services.guild_service import guild_service
from data.services.user_service import user_service
from utils.autocompleters import liftwarn_autocomplete
from utils.config import cfg
from utils.logger import logger
from utils.context import BlooContext
from utils.mod.mod_logs import (prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, prepare_warn_log)
from utils.mod.modactions_helpers import (add_ban_case, add_kick_case, notify_user, notify_user_warn, submit_public_log)
from utils.mod.global_modactions import warn
from utils.permissions.checks import PermissionsFailure, always_whisper, mod_and_up, whisper
from utils.permissions.converters import (mods_and_above_external_resolver, mods_and_above_member_resolver, user_resolver)
from utils.permissions.slash_perms import slash_perms
from utils.views.modactions import WarnView
class ModActions(commands.Cog):
    def __init__(self, bot):
        # Reference to the running bot instance, used by the commands below.
        self.bot = bot
    @mod_and_up()
    @slash_command(guild_ids=[cfg.guild_id], description="Warn a user", permissions=slash_perms.mod_and_up())
    async def warn(self, ctx: BlooContext, user: Option(discord.Member, description="User to warn"), points: Option(int, description="Amount of points to warn for", min_value=1, max_value=600), reason: Option(str, description="Reason for warn")):
        """Warns a user (mod only)

        Example usage
        --------------
        /warn user:<user> points:<points> reason:<reason>

        Parameters
        ----------
        user : discord.Member
            "The member to warn"
        points : int
            "Number of points to warn for"
        reason : str
            "Reason for warning"

        """
        user = await mods_and_above_external_resolver(ctx, user)

        # Defensive re-check; the Option's min_value=1 should already
        # enforce this at the Discord client level.
        if points < 1:  # can't warn for negative/0 points
            raise commands.BadArgument(message="Points can't be lower than 1.")

        await warn(ctx, user, points, reason)
    @mod_and_up()
    @always_whisper()
    @user_command(guild_ids=[cfg.guild_id], name="Warn 50 points")
    async def warn_rc(self, ctx: BlooContext, member: discord.Member) -> None:
        """User context-menu command: opens an ephemeral picker of canned
        50-point warn reasons for the right-clicked member (mod only)."""
        member = await mods_and_above_external_resolver(ctx, member)
        view = WarnView(ctx, member)
        await ctx.respond(embed=discord.Embed(description=f"Choose a warn reason for {member.mention}.", color=discord.Color.blurple()), view=view, ephemeral=True)
@mod_and_up()
@always_whisper()
@message_command(guild_ids=[cfg.guild_id], name="Warn 50 points")
async def warn_msg(self, ctx: BlooContext, message: discord.Message) -> None:
member = await mods_and_above_external_resolver(ctx, message.author)
view = WarnView(ctx, message.author)
await ctx.respond(embed=discord.Embed(description=f"Choose a warn reason for {member.mention}.", color=discord.Color.blurple()), view=view, ephemeral=True)
    @mod_and_up()
    @slash_command(guild_ids=[cfg.guild_id], description="Kick a user", permissions=slash_perms.mod_and_up())
    async def kick(self, ctx: BlooContext, member: Option(discord.Member, description="User to kick"), *, reason: Option(str, description="Reason for kick")) -> None:
        """Kicks a user (mod only)

        Example usage
        --------------
        /kick member:<member> reason:<reason>

        Parameters
        ----------
        user : discord.Member
            "User to kick"
        reason : str
            "Reason for kick"

        """
        member = await mods_and_above_member_resolver(ctx, member)

        # Sanitize the reason so it cannot inject markdown or mentions
        # into the log/notification embeds.
        reason = escape_markdown(reason)
        reason = escape_mentions(reason)

        db_guild = guild_service.get_guild()

        # Record the case, DM the user, then perform the kick and log it.
        log = await add_kick_case(ctx, member, reason, db_guild)

        await notify_user(member, f"You were kicked from {ctx.guild.name}", log)

        await member.kick(reason=reason)
        await ctx.respond(embed=log, delete_after=10)
        await submit_public_log(ctx, db_guild, member, log)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Kick a user", permissions=slash_perms.mod_and_up())
async def roblox(self, ctx: BlooContext, member: Option(discord.Member, description="User to kick")) -> None:
    """Kicks a user and refers to the Roblox Jailbreak game server (mod only)

    Same flow as /kick but with a canned reason pointing at the Roblox
    Jailbreak Discord server.

    Example usage
    --------------
    /roblox member:<member>

    Parameters
    ----------
    member : discord.Member
        "User to kick"
    """
    # Raises unless the invoker outranks the target.
    member = await mods_and_above_member_resolver(ctx, member)
    # Canned reason; intentionally not escaped since it is a fixed string.
    reason = "This Discord server is for iOS jailbreaking, not Roblox. Please join https://discord.gg/jailbreak instead, thank you!"

    db_guild = guild_service.get_guild()

    log = await add_kick_case(ctx, member, reason, db_guild)

    # DM before kicking — afterwards the DM may fail.
    await notify_user(member, f"You were kicked from {ctx.guild.name}", log)

    await member.kick(reason=reason)

    await ctx.respond(embed=log, delete_after=10)
    await submit_public_log(ctx, db_guild, member, log)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Mute a user", permissions=slash_perms.mod_and_up())
async def mute(self, ctx: BlooContext, member: Option(discord.Member, description="User to mute"), duration: Option(str, description="Duration for mute") = "", reason: Option(str, description="Reason for mute") = "No reason.") -> None:
    """Mutes a user (mod only)

    Example usage
    --------------
    /mute member:<member> duration:<duration> reason:<reason>

    Parameters
    ----------
    member : discord.Member
        "Member to mute"
    duration : str
        "Duration of mute (i.e 1h, 10m, 1d)"
    reason : str, optional
        "Reason for mute, by default 'No reason.'"
    """
    # Defer the response: DB writes + DMs can exceed the 3s interaction window.
    await ctx.defer()
    member: discord.Member = await mods_and_above_member_resolver(ctx, member)

    reason = escape_markdown(reason)
    reason = escape_mentions(reason)

    now = datetime.now(tz=timezone.utc)
    # pytimeparse returns a number of seconds, or None for unparseable input.
    delta = pytimeparse.parse(duration)
    if delta is None:
        raise commands.BadArgument("Please input a valid duration!")

    if member.timed_out:
        raise commands.BadArgument("This user is already muted.")

    # NOTE: `time` shadows the stdlib module name within this function body.
    time = now + timedelta(seconds=delta)
    # Bot-enforced cap of 14 days on mute length.
    if time > now + timedelta(days=14):
        raise commands.BadArgument("Mutes can't be longer than 14 days!")

    db_guild = guild_service.get_guild()

    case = Case(
        _id=db_guild.case_id,
        _type="MUTE",
        date=now,
        mod_id=ctx.author.id,
        mod_tag=str(ctx.author),
        reason=reason,
    )
    case.until = time
    # Human-readable duration (e.g. "2 hours") for the log embed.
    case.punishment = humanize.naturaldelta(
        time - now, minimum_unit="seconds")

    try:
        # Apply the native Discord timeout, then schedule the tracked untimeout.
        await member.timeout(until=time, reason=reason)
        ctx.tasks.schedule_untimeout(member.id, time)
    except ConflictingIdError:
        # The task scheduler already has an untimeout queued for this member.
        raise commands.BadArgument("The database thinks this user is already muted.")

    # Persist the case and bump the guild-wide case counter.
    guild_service.inc_caseid()
    user_service.add_case(member.id, case)

    log = prepare_mute_log(ctx.author, member, case)
    await ctx.respond(embed=log, delete_after=10)

    # Re-shape the embed for the DM / public log variants.
    log.remove_author()
    log.set_thumbnail(url=member.display_avatar)

    dmed = await notify_user(member, f"You have been muted in {ctx.guild.name}", log)
    await submit_public_log(ctx, db_guild, member, log, dmed)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Unmute a user", permissions=slash_perms.mod_and_up())
async def unmute(self, ctx: BlooContext, member: Option(discord.Member, description="User to mute"), reason: Option(str, description="Reason for mute")) -> None:
    """Unmutes a user (mod only)

    Example usage
    --------------
    /unmute member:<member> reason:<reason>

    Parameters
    ----------
    member : discord.Member
        "Member to unmute"
    reason : str
        "Reason for unmute"
    """
    member = await mods_and_above_member_resolver(ctx, member)
    db_guild = guild_service.get_guild()

    if not member.timed_out:
        raise commands.BadArgument("This user is not muted.")

    await member.remove_timeout()

    try:
        # Cancel any scheduled auto-unmute; best-effort — e.g. the mute may
        # have been applied outside the bot, so no task exists.
        ctx.tasks.cancel_unmute(member.id)
    except Exception:
        pass

    case = Case(
        _id=db_guild.case_id,
        _type="UNMUTE",
        mod_id=ctx.author.id,
        mod_tag=str(ctx.author),
        reason=reason,
    )
    # Persist the case and bump the guild-wide case counter.
    guild_service.inc_caseid()
    user_service.add_case(member.id, case)

    log = prepare_unmute_log(ctx.author, member, case)

    await ctx.respond(embed=log, delete_after=10)
    dmed = await notify_user(member, f"You have been unmuted in {ctx.guild.name}", log)
    await submit_public_log(ctx, db_guild, member, log, dmed)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Ban a user", permissions=slash_perms.mod_and_up())
async def ban(self, ctx: BlooContext, user: Option(discord.Member, description="User to ban"), reason: Option(str, description="Reason for ban")):
    """Bans a user (mod only)

    Example usage
    --------------
    /ban user:<user> reason:<reason>

    Parameters
    ----------
    user : discord.Member
        "The user to be banned, doesn't have to be part of the guild"
    reason : str
        "Reason for ban"
    """
    user = await mods_and_above_external_resolver(ctx, user)

    # Neutralize markdown/mentions so the reason cannot ping or restyle logs.
    reason = escape_markdown(reason)
    reason = escape_mentions(reason)

    db_guild = guild_service.get_guild()
    # A plain discord.User (rather than a Member) means the target is not in
    # the guild — presumably what the external resolver returns in that case.
    member_is_external = isinstance(user, discord.User)

    # if the ID given is of a user who isn't in the guild, try to fetch the profile
    if member_is_external:
        async with ctx.typing():
            if self.bot.ban_cache.is_banned(user.id):
                raise commands.BadArgument("That user is already banned!")

    self.bot.ban_cache.ban(user.id)
    log = await add_ban_case(ctx, user, reason, db_guild)

    if not member_is_external:
        # DM before banning — once banned, the bot can no longer message them.
        await notify_user(user, f"You have been banned from {ctx.guild.name}", log)
        await user.ban(reason=reason)
    else:
        # hackban for user not currently in guild
        # Bug fix: pass the reason through so the audit log records it,
        # matching the in-guild ban path above.
        await ctx.guild.ban(discord.Object(id=user.id), reason=reason)

    await ctx.respond(embed=log, delete_after=10)
    await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Unban a user", permissions=slash_perms.mod_and_up())
async def unban(self, ctx: BlooContext, user: Option(discord.Member, description="User to unban"), reason: Option(str, description="Reason for unban")) -> None:
    """Unbans a user (must use ID) (mod only)

    Example usage
    --------------
    /unban user:<userid> reason:<reason>

    Parameters
    ----------
    user : discord.Member
        "ID of the user to unban"
    reason : str
        "Reason for unban"
    """
    # Resolve the raw ID to a user object.
    user = await user_resolver(ctx, user)
    if ctx.guild.get_member(user.id) is not None:
        raise commands.BadArgument(
            "You can't unban someone already in the server!")
    reason = escape_markdown(reason)
    reason = escape_mentions(reason)

    # Fast path: consult the local ban cache before hitting the API.
    if not self.bot.ban_cache.is_banned(user.id):
        raise commands.BadArgument("That user isn't banned!")

    try:
        await ctx.guild.unban(discord.Object(id=user.id), reason=reason)
    except discord.NotFound:
        # Cache said banned but Discord disagrees — surface a clear error.
        raise commands.BadArgument(f"{user} is not banned.")

    self.bot.ban_cache.unban(user.id)

    db_guild = guild_service.get_guild()
    case = Case(
        _id=db_guild.case_id,
        _type="UNBAN",
        mod_id=ctx.author.id,
        mod_tag=str(ctx.author),
        reason=reason,
    )
    # Persist the case and bump the guild-wide case counter.
    guild_service.inc_caseid()
    user_service.add_case(user.id, case)

    log = prepare_unban_log(ctx.author, user, case)
    await ctx.respond(embed=log, delete_after=10)
    await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Purge channel messages", permissions=slash_perms.mod_and_up())
async def purge(self, ctx: BlooContext, limit: Option(int, description="Number of messages to remove", min_value=1, max_value=100)) -> None:
    """Purges messages from current channel (mod only)

    Example usage
    --------------
    /purge limit:<number of messages>

    Parameters
    ----------
    limit : int
        "Number of messages to purge, 1..100 (Option enforces the bounds;
        the checks below are kept as defense in depth)"
    """
    if limit <= 0:
        raise commands.BadArgument(
            "Number of messages to purge must be greater than 0")
    # Clamp to the advertised maximum.
    limit = min(limit, 100)

    # Bug fix: channel.purge() returns the list of deleted messages, so we
    # report the true count from a single call instead of fetching history
    # separately first (which double-fetched and could race with new
    # messages arriving between the fetch and the purge).
    msgs = await ctx.channel.purge(limit=limit)

    await ctx.respond(f'Purged {len(msgs)} messages.', delete_after=10)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Lift a warn", permissions=slash_perms.mod_and_up())
async def liftwarn(self, ctx: BlooContext, user: Option(discord.Member, description="User to lift warn of"), case_id: Option(int, autocomplete=liftwarn_autocomplete), reason: Option(str)) -> None:
    """Marks a warn as lifted and remove points. (mod only)

    Example usage
    --------------
    /liftwarn user:<user> case_id:<case ID> reason:<reason>

    Parameters
    ----------
    user : discord.Member
        "User to remove warn from"
    case_id : int
        "The ID of the case for which we want to remove points"
    reason : str
        "Reason for lifting warn"
    """
    user = await mods_and_above_external_resolver(ctx, user)

    # retrieve user's case with given ID
    cases = user_service.get_cases(user.id)
    case = cases.cases.filter(_id=case_id).first()

    reason = escape_markdown(reason)
    reason = escape_mentions(reason)

    # sanity checks: case must exist, be a warn, and not already be lifted
    if case is None:
        raise commands.BadArgument(
            message=f"{user} has no case with ID {case_id}")
    elif case._type != "WARN":
        raise commands.BadArgument(
            message=f"{user}'s case with ID {case_id} is not a warn case.")
    elif case.lifted:
        raise commands.BadArgument(
            message=f"Case with ID {case_id} already lifted.")

    u = user_service.get_user(id=user.id)
    # case.punishment stores the warn's point value as a string.
    if u.warn_points - int(case.punishment) < 0:
        raise commands.BadArgument(
            message=f"Can't lift Case #{case_id} because it would make {user.mention}'s points negative.")

    # passed sanity checks, so update the case in DB
    case.lifted = True
    case.lifted_reason = reason
    case.lifted_by_tag = str(ctx.author)
    case.lifted_by_id = ctx.author.id
    case.lifted_date = datetime.now()
    cases.save()

    # remove the warn points from the user in DB
    user_service.inc_points(user.id, -1 * int(case.punishment))

    dmed = True
    # prepare log embed, send to #public-mod-logs, user, channel where invoked
    log = prepare_liftwarn_log(ctx.author, user, case)
    dmed = await notify_user(user, f"Your warn has been lifted in {ctx.guild}.", log)

    await ctx.respond(embed=log, delete_after=10)
    await submit_public_log(ctx, guild_service.get_guild(), user, log, dmed)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Edit case reason", permissions=slash_perms.mod_and_up())
async def editreason(self, ctx: BlooContext, user: Option(discord.Member), case_id: Option(int, autocomplete=liftwarn_autocomplete), new_reason: Option(str)) -> None:
    """Edits a case's reason and the embed in #public-mod-logs. (mod only)

    Example usage
    --------------
    /editreason user:<user> case_id:<case ID> new_reason:<reason>

    Parameters
    ----------
    user : discord.Member
        "User to edit case of"
    case_id : int
        "The ID of the case for which we want to edit reason"
    new_reason : str
        "New reason"
    """
    user = await mods_and_above_external_resolver(ctx, user)

    # retrieve user's case with given ID
    cases = user_service.get_cases(user.id)
    case = cases.cases.filter(_id=case_id).first()

    new_reason = escape_markdown(new_reason)
    new_reason = escape_mentions(new_reason)

    # sanity checks
    if case is None:
        raise commands.BadArgument(
            message=f"{user} has no case with ID {case_id}")

    # Update the stored case; the old reason is kept for the log embed.
    old_reason = case.reason
    case.reason = new_reason
    case.date = datetime.now()
    cases.save()

    dmed = True
    log = prepare_editreason_log(ctx.author, user, case, old_reason)
    dmed = await notify_user(user, f"Your case was updated in {ctx.guild.name}.", log)

    public_chan = ctx.guild.get_channel(
        guild_service.get_guild().channel_public)

    # Scan the recent public-log messages for the embed belonging to this
    # case and patch its "Reason" field in place.
    found = False
    async with ctx.typing():
        async for message in public_chan.history(limit=200):
            if message.author.id != ctx.me.id:
                continue
            if len(message.embeds) == 0:
                continue
            embed = message.embeds[0]
            if embed.footer.text == discord.Embed.Empty:
                continue
            # The case tag ("#<id>") is expected as the second
            # whitespace-separated token of the footer text.
            if len(embed.footer.text.split(" ")) < 2:
                continue
            if f"#{case_id}" == embed.footer.text.split(" ")[1]:
                for i, field in enumerate(embed.fields):
                    if field.name == "Reason":
                        embed.set_field_at(
                            i, name="Reason", value=new_reason)
                await message.edit(embed=embed)
                found = True
                # NOTE(review): no break here, so the full 200-message window
                # is always scanned (and any duplicate embed would also be
                # edited) — confirm whether that is intentional.

    if found:
        await ctx.respond(f"We updated the case and edited the embed in {public_chan.mention}.", embed=log, delete_after=10)
    else:
        await ctx.respond(f"We updated the case but weren't able to find a corresponding message in {public_chan.mention}!", embed=log, delete_after=10)

    # Re-shape the embed for the public log, pinging the user only if the DM failed.
    log.remove_author()
    log.set_thumbnail(url=user.display_avatar)
    await public_chan.send(user.mention if not dmed else "", embed=log)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Remove warn points from a user", permissions=slash_perms.mod_and_up())
async def removepoints(self, ctx: BlooContext, user: Option(discord.Member), points: Option(int), reason: Option(str)) -> None:
    """Removes warnpoints from a user. (mod only)

    Example usage
    --------------
    /removepoints user:<user> points:<points> reason:<reason>

    Parameters
    ----------
    user : discord.Member
        "User to remove warn points from"
    points : int
        "Amount of points to remove"
    reason : str
        "Reason for removing points"
    """
    # Bug fix (above): the slash command description was a copy-paste of
    # editreason's "Edit case reason"; it now describes this command.
    user = await mods_and_above_external_resolver(ctx, user)

    reason = escape_markdown(reason)
    reason = escape_mentions(reason)

    if points < 1:
        raise commands.BadArgument("Points can't be lower than 1.")

    u = user_service.get_user(id=user.id)
    if u.warn_points - points < 0:
        raise commands.BadArgument(
            message=f"Can't remove {points} points because it would make {user.mention}'s points negative.")

    # passed sanity checks, so remove the warn points from the user in DB
    user_service.inc_points(user.id, -1 * points)

    db_guild = guild_service.get_guild()
    case = Case(
        _id=db_guild.case_id,
        _type="REMOVEPOINTS",
        mod_id=ctx.author.id,
        mod_tag=str(ctx.author),
        punishment=str(points),
        reason=reason,
    )

    # increment DB's max case ID for next case
    guild_service.inc_caseid()
    # add case to db
    user_service.add_case(user.id, case)

    # prepare log embed, send to #public-mod-logs, user, channel where invoked
    log = prepare_removepoints_log(ctx.author, user, case)
    dmed = await notify_user(user, f"Your points were removed in {ctx.guild.name}.", log)

    await ctx.respond(embed=log, delete_after=10)
    await submit_public_log(ctx, db_guild, user, log, dmed)
@unmute.error
@mute.error
@liftwarn.error
@unban.error
@ban.error
@warn.error
@warn_rc.error
@warn_msg.error
@purge.error
@kick.error
@roblox.error
@editreason.error
@removepoints.error
async def info_error(self, ctx: BlooContext, error):
    """Shared error handler for all moderation commands above.

    Expected/user-facing errors are echoed back to the invoker; anything
    else is reported generically and logged with a traceback.
    """
    # Unwrap the original exception from pycord's invoke wrapper.
    if isinstance(error, discord.ApplicationCommandInvokeError):
        error = error.original

    expected_errors = (
        commands.MissingRequiredArgument,
        PermissionsFailure,
        commands.BadArgument,
        commands.BadUnionArgument,
        commands.MissingPermissions,
        commands.BotMissingPermissions,
        commands.MaxConcurrencyReached,
        HTTPException,
        commands.NoPrivateMessage,
    )
    if isinstance(error, expected_errors):
        await ctx.send_error(error)
    else:
        await ctx.send_error("A fatal error occured. Tell <@109705860275539968> about this.")
        logger.error(traceback.format_exc())
def setup(bot):
    """Extension entry point: register the ModActions cog with the bot."""
    bot.add_cog(ModActions(bot))
| 1.828125 | 2 |
feature_eng.py | vikrantpailkar/Loan_Default_Prediction | 2 | 12772011 | <reponame>vikrantpailkar/Loan_Default_Prediction
# -*- coding: utf-8 -*-
"""
Created on 02/02/2019
Author: Vikrant
"""
from trim_data import ext_num_from_sub_grade
from trim_data import drop_emp_title
from trim_data import fill_na_annual_inc
from trim_data import drop_zip_code
from trim_data import fill_na_delinq_2yrs
from trim_data import drop_earliest_cr_line
from trim_data import fill_na_inq_last_6mths
from trim_data import fill_na_open_acc
from trim_data import fill_na_pub_rec
from trim_data import fill_na_revol_util
from trim_data import fill_na_total_acc
from trim_data import drop_out_prncp
from trim_data import drop_out_prncp_inv
from trim_data import drop_total_rec_late_fee
from trim_data import drop_recoveries
from trim_data import drop_collection_recovery_fee
from trim_data import drop_last_pymnt_d
from trim_data import drop_collections_12_mths_ex_med
from trim_data import drop_policy_code
from trim_data import drop_application_type
from trim_data import drop_acc_now_delinq
from trim_data import drop_tot_coll_amt
from trim_data import drop_tot_cur_bal
from trim_data import fill_na_total_rev_hi_lim
from trim_data import drop_url
from trim_data import drop_pymnt_plan
from trim_data import drop_issue_d
from trim_data import drop_addr_state
from trim_data import drop_last_credit_pull_d
def trim_features(loan):
    """Run the full feature-cleaning pipeline on *loan* in place.

    Each step either derives/fills a column or drops one that is unused
    downstream; the step order matches the original hand-written sequence.
    """
    pipeline = (
        ext_num_from_sub_grade,
        drop_emp_title,
        fill_na_annual_inc,
        drop_zip_code,
        fill_na_delinq_2yrs,
        drop_earliest_cr_line,
        fill_na_inq_last_6mths,
        fill_na_open_acc,
        fill_na_pub_rec,
        fill_na_revol_util,
        fill_na_total_acc,
        drop_pymnt_plan,
        drop_url,
        drop_total_rec_late_fee,
        drop_out_prncp,
        drop_out_prncp_inv,
        drop_recoveries,
        drop_collection_recovery_fee,
        drop_last_pymnt_d,
        drop_collections_12_mths_ex_med,
        drop_policy_code,
        drop_application_type,
        drop_acc_now_delinq,
        drop_tot_coll_amt,
        drop_tot_cur_bal,
        fill_na_total_rev_hi_lim,
        drop_issue_d,
        drop_addr_state,
        drop_last_credit_pull_d,
    )
    for step in pipeline:
        step(loan)
| 1.289063 | 1 |
perses/storage/storage.py | mikemhenry/perses | 0 | 12772012 | """
Storage layer for perses automated molecular design.
TODO
----
* Add write_sampler_state(modname, sampler_state, iteration)
* Generalize write_quantity to handle units
* Add data access routines for reading to isolate low-level storage layer
"""
__author__ = '<NAME>'
################################################################################
# IMPORTS
################################################################################
import os, os.path
import sys, math
import numpy as np
import copy
import time
import netCDF4 as netcdf
import pickle
import json
import mdtraj
from simtk import unit
import codecs
################################################################################
# LOGGER
################################################################################
import logging
logger = logging.getLogger(__name__)
################################################################################
# STORAGE
################################################################################
class NetCDFStorage(object):
    """NetCDF storage layer.

    Variables are written into a group named ``/<envname>/<modname>/``; both
    names are ``None`` on the base class and are bound by
    :class:`NetCDFStorageView`.
    """

    def __init__(self, filename, mode='w'):
        """Create NetCDF storage layer, creating or appending to an existing file.

        Parameters
        ----------
        filename : str
            Name of storage file to bind to.
        mode : str, optional, default='w'
            File open mode, 'w' for (over)write, 'a' for append.
        """
        self._filename = filename
        self._ncfile = netcdf.Dataset(self._filename, mode=mode)
        self._envname = None
        self._modname = None

        # Create standard dimensions shared by all stored variables.
        if 'iterations' not in self._ncfile.dimensions:
            # Unlimited dimension so iterations can be appended indefinitely.
            self._ncfile.createDimension('iterations', size=None)
        if 'spatial' not in self._ncfile.dimensions:
            self._ncfile.createDimension('spatial', size=3)

    def _find_group(self):
        """Retrieve the group for the bound env/module names, creating it if it does not exist."""
        groupname = '/'
        if self._envname is not None:
            groupname += self._envname + '/'
        if self._modname is not None:
            groupname += self._modname + '/'
        # createGroup returns the existing group when it is already present.
        ncgrp = self._ncfile.createGroup(groupname)
        return ncgrp

    @staticmethod
    def _encode_string(string, encoding='ascii'):
        """Encode `string` to bytes, returning it unchanged if it cannot be encoded.

        Bug fix: declared as a staticmethod — the original definition had no
        ``self`` parameter, so calling it through an instance would have bound
        the instance to ``string`` and failed.
        """
        try:
            return string.encode(encoding)
        except UnicodeEncodeError:
            return string

    def sync(self):
        """Flush write buffer."""
        self._ncfile.sync()

    def close(self):
        """Close the storage layer."""
        self._ncfile.close()

    def write_configuration(self, varname, positions, topology, iteration=None, frame=None, nframes=None):
        """Write a configuration (or one of a sequence of configurations) to be stored as a native NetCDF array

        Parameters
        ----------
        varname : str
            The variable name to be stored
        positions : simtk.unit.Quantity of size [natoms,3] with units compatible with angstroms
            The positions to be written
        topology : md.Topology object
            The corresponding Topology object
        iteration : int, optional, default=None
            The local iteration for the module, or `None` if this is a singleton
        frame : int, optional, default=None
            If these coordinates are part of multiple frames in a sequence, the frame number
        nframes : int, optional, default=None
            If these coordinates are part of multiple frames in a sequence, the total number of frames in the sequence
        """
        ncgrp = self._find_group()

        if ((nframes is not None) and (frame is None)) or ((nframes is None) and (frame is not None)):
            # Bug fix: corrected the 'nfranes' typo in the error message.
            raise Exception("Both 'nframes' and 'frame' must be used together.")

        def dimension_name(iteration, suffix):
            # NOTE(review): this parameter shadows the outer `iteration` and is
            # in fact always called with the (already suffixed) varname, so the
            # dimension name embeds the variable name twice. Preserved as-is
            # because existing files depend on these exact names — confirm
            # before renaming.
            dimension_name = ''
            if self._envname: dimension_name += self._envname + '_'
            if self._modname: dimension_name += self._modname + '_'
            dimension_name += varname + '_' + suffix + '_' + str(iteration)
            return dimension_name

        # Iteration-specific variables get a suffixed name.
        if iteration is not None:
            varname += '_' + str(iteration)

        if varname not in ncgrp.variables:
            # Create dimensions
            if (frame is not None):
                frames_dimension_name = dimension_name(varname, 'frames')
                ncdim = self._ncfile.createDimension(frames_dimension_name, nframes)
            natoms = topology.n_atoms
            atoms_dimension_name = dimension_name(varname, 'atoms')
            ncdim = self._ncfile.createDimension(atoms_dimension_name, natoms)

            # Create variables
            # TODO: Handle cases with no iteration but with specified frames
            if (iteration is not None) and (frame is not None):
                ncgrp.createVariable(varname, np.float32, dimensions=(frames_dimension_name, atoms_dimension_name, 'spatial'), chunksizes=(1,natoms,3))
            elif (iteration is not None):
                ncgrp.createVariable(varname, np.float32, dimensions=(atoms_dimension_name, 'spatial'), chunksizes=(natoms,3))
            else:
                ncgrp.createVariable(varname, np.float32, dimensions=(atoms_dimension_name, 'spatial'), chunksizes=(natoms,3))

        # Write the Topology once per configuration (or once per frame sequence).
        if (frame is None) or (frame == 0):
            topology_varname = varname + '_topology'
            if (iteration is not None):
                topology_varname += '_' + str(iteration)
            self.write_object(topology_varname, topology, iteration=iteration)

        # Write positions, stripped of units (stored in angstroms).
        # TODO: Handle cases with no iteration but with specified frames
        positions_unit = unit.angstroms
        if (frame is not None):
            ncgrp.variables[varname][frame,:,:] = positions[:,:] / positions_unit
        else:
            # NOTE(review): this rebinds the variables-dict entry rather than
            # slice-assigning into the variable (``[:,:] = ...``), which is the
            # usual netCDF4 idiom — confirm against the read path before changing.
            ncgrp.variables[varname] = positions[:,:] / positions_unit

    def write_object(self, varname, obj, iteration=None):
        """Serialize a Python object, encoding as pickle when storing as string in NetCDF.

        Parameters
        ----------
        varname : str
            The variable name to be stored
        obj : object
            The object to be serialized
        iteration : int, optional, default=None
            The local iteration for the module, or `None` if this is a singleton
        """
        ncgrp = self._find_group()

        if varname not in ncgrp.variables:
            if iteration is not None:
                ncgrp.createVariable(varname, str, dimensions=('iterations',), chunksizes=(1,))
            else:
                ncgrp.createVariable(varname, str, dimensions=(), chunksizes=(1,))

        # base64-encode the pickle so it can be stored as a NetCDF string.
        pickled = codecs.encode(pickle.dumps(obj), "base64").decode()
        if iteration is not None:
            ncgrp.variables[varname][iteration] = pickled
        else:
            # NOTE(review): rebinds the dict entry instead of writing into the
            # variable; see the matching note in write_configuration.
            ncgrp.variables[varname] = pickled

    def get_object(self, envname, modname, varname, iteration=None):
        """Get the serialized Python object.

        Parameters
        ----------
        envname : str
            The name of the environment for the variable
        modname : str
            The name of the module for the variable
        varname : str
            The variable name to be stored
        iteration : int, optional, default=None
            The local iteration for the module, or `None` if this is a singleton

        Returns
        -------
        obj : object
            The retrieved object
        """
        nc_path = "/{envname}/{modname}/{varname}".format(envname=envname, modname=modname, varname=varname)
        if iteration is not None:
            pickled = self._ncfile[nc_path][iteration]
        else:
            pickled = self._ncfile[nc_path][0]
        # Reverse of write_object: base64-decode, then unpickle.
        obj = pickle.loads(codecs.decode(pickled.encode(), "base64"))
        return obj

    def write_quantity(self, varname, value, iteration=None):
        """Write a floating-point number

        Parameters
        ----------
        varname : str
            The variable name to be stored
        value : float
            The floating-point value to be written
        iteration : int, optional, default=None
            The local iteration for the module, or `None` if this is a singleton
        """
        ncgrp = self._find_group()

        if varname not in ncgrp.variables:
            if iteration is not None:
                ncgrp.createVariable(varname, 'f8', dimensions=('iterations',), chunksizes=(1,))
            else:
                ncgrp.createVariable(varname, 'f8', dimensions=(), chunksizes=(1,))

        if iteration is not None:
            ncgrp.variables[varname][iteration] = value
        else:
            # NOTE(review): rebinds the dict entry; see write_configuration.
            ncgrp.variables[varname] = value

    def write_array(self, varname, array, iteration=None):
        """Write a numpy array as a native NetCDF array

        Parameters
        ----------
        varname : str
            The variable name to be stored
        array : numpy.array of arbitrary dimension
            The numpy array to be written
        iteration : int, optional, default=None
            The local iteration for the module, or `None` if this is a singleton
        """
        ncgrp = self._find_group()

        def dimension_name(dimension_index):
            # Per-axis dimension names are prefixed with env/module/variable
            # names so they stay unique at file scope.
            dimension_name = ''
            if self._envname: dimension_name += self._envname + '_'
            if self._modname: dimension_name += self._modname + '_'
            dimension_name += varname + '_' + str(dimension_index)
            return dimension_name

        if varname not in ncgrp.variables:
            # Create dimensions
            dimensions = list()
            if iteration is not None:
                dimensions.append('iterations')
            for (dimension_index, size) in enumerate(array.shape):
                ncdim = self._ncfile.createDimension(dimension_name(dimension_index), size)
                dimensions.append(dimension_name(dimension_index))
            dimensions = tuple(dimensions)

            # Create variables
            if iteration is not None:
                ncgrp.createVariable(varname, array.dtype, dimensions=dimensions, chunksizes=((1,) + array.shape))
            else:
                ncgrp.createVariable(varname, array.dtype, dimensions=dimensions, chunksizes=array.shape)

        # Check dimensions: the shape must match what was first written.
        expected_shape = list()
        for (dimension_index, size) in enumerate(array.shape):
            expected_shape.append(self._ncfile.dimensions[dimension_name(dimension_index)].size)
        expected_shape = tuple(expected_shape)
        if expected_shape != array.shape:
            # Bug fix: the original message interpolated the undefined names
            # `envname`/`modname`, so this path raised NameError instead of
            # the intended Exception.
            raise Exception("write_array called for /%s/%s/%s with different dimension (%s) than initially called (%s); dimension must stay constant." % (self._envname, self._modname, varname, str(array.shape), str(expected_shape)))

        if iteration is not None:
            ncgrp.variables[varname][iteration] = array
        else:
            # NOTE(review): rebinds the dict entry; see write_configuration.
            ncgrp.variables[varname] = array
################################################################################
# BOUND STORAGE VIEWS THAT ENCAPSULATE ENVIRONMENT NAMES AND MODULE NAMES
################################################################################
class NetCDFStorageView(NetCDFStorage):
    """NetCDF storage view with bound environment and module names."""

    def __init__(self, storage, envname=None, modname=None):
        """Initialize a view of the storage with a specific environment and module name.

        Parameters
        ----------
        storage : NetCDFStorage
            The underlying storage (or another view) to share the file with.
        envname : str, optional, default=None
            Set the name of the environment this module is attached to.
        modname : str, optional, default=None
            Set the name of the module in the code writing the variable
        """
        # Share the open dataset with the parent storage/view.
        self._filename = storage._filename
        self._ncfile = storage._ncfile
        # Inherit the parent's bindings, overriding with any names given here.
        self._envname = envname if envname else storage._envname
        self._modname = modname if modname else storage._modname
| 2.203125 | 2 |
services/docService/app/models.py | anaquin135/modularCPQ | 0 | 12772013 | from datetime import datetime
from app import db
class DOCUMENT(db.Model):
    """Document record with a render template."""

    # Primary key.
    id = db.Column(db.Integer, nullable=False, primary_key=True)
    # Human-readable description; defaults flag records missing one.
    description = db.Column(db.String(300), nullable=False, default='Missing Description')
    # Soft-delete / visibility flag.
    isActive = db.Column(db.Boolean, nullable=False, default=True)
    # Template identifier — presumably a path or template name; TODO confirm
    # against the code that renders documents.
    template = db.Column(db.String(300), nullable=False)
class TERMS(db.Model):
    """Terms-and-conditions record with a render template."""

    # Primary key.
    id = db.Column(db.Integer, nullable=False, primary_key=True)
    # Human-readable description; defaults flag records missing one.
    description = db.Column(db.String(300), nullable=False, default='Missing Description')
    # Soft-delete / visibility flag.
    isActive = db.Column(db.Boolean, nullable=False, default=True)
    # Template identifier — presumably a path or template name; TODO confirm.
    template = db.Column(db.String(300), nullable=False)
    # Which part numbers these terms apply to; 'ALL' appears to mean every
    # PN — presumably a delimited list otherwise; verify against consumers.
    enabledPNs = db.Column(db.Text, nullable=False, default='ALL')
| 2.421875 | 2 |
snake/helpers/queue.py | JacobLiu001/slitherin | 83 | 12772014 | <filename>snake/helpers/queue.py
class Queue:
    """Simple FIFO queue backed by a list.

    New items are inserted at index 0 and the oldest item is popped from the
    end, so elements are dequeued in the order they were enqueued (and
    ``initial_values`` are dequeued back-to-front of the given sequence:
    the last element of ``initial_values`` comes out first).
    """

    def __init__(self, initial_values):
        """Seed the queue from *initial_values*.

        Bug fix: the sequence is copied so later mutation of the caller's
        list can no longer silently change the queue's contents.
        """
        self.queue = list(initial_values)

    def enqueue(self, val):
        """Add *val* to the back of the queue."""
        # Note: insert(0, ...) is O(n); acceptable for the small queues this
        # helper is used for.
        self.queue.insert(0, val)

    def dequeue(self):
        """Remove and return the element at the front, or None if empty."""
        if self.is_empty():
            return None
        else:
            return self.queue.pop()

    def size(self):
        """Return the number of queued elements."""
        return len(self.queue)

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return self.size() == 0
main.py | devlights/excel_shape_position_adjust | 1 | 12772015 | #################################################################
# 指定されたフォルダ配下のExcelを開いていき、画像が指定位置に貼付けされていないファイルを出力or調整します。
#
# 実行には、以下のライブラリが必要です.
# - win32com
# - $ python -m pip install pywin32
#
# [参考にした情報]
# - https://www.sejuku.net/blog/23647
#################################################################
import argparse
# noinspection SpellCheckingInspection
def go(target_dir: str, base_position: float, report_only: bool):
import pathlib
import pywintypes
import win32com.client
excel_dir = pathlib.Path(target_dir)
if not excel_dir.exists():
print(f'target directory not found [{target_dir}]')
return
try:
excel = win32com.client.Dispatch('Excel.Application')
excel.Visible = True
for f in excel_dir.glob('**/*.xlsx'):
abs_path = str(f)
try:
wb = excel.Workbooks.Open(abs_path)
wb.Activate()
except pywintypes.com_error as err:
print(err)
continue
try:
sheets_count = wb.Sheets.Count
for sheet_index in range(0, sheets_count):
ws = wb.Worksheets(sheet_index + 1)
ws.Activate()
for sh in ws.Shapes:
if base_position <= sh.Left:
if report_only:
print(f'{abs_path}-{ws.Name}')
else:
sh.Left = base_position
if not report_only:
wb.Save()
wb.Saved = True
finally:
wb.Close()
finally:
excel.Quit()
if __name__ == '__main__':
    # CLI entry point. Usage/help strings are runtime text and are kept
    # verbatim (in Japanese) from the original tool.
    parser = argparse.ArgumentParser(
        usage='python main.py -d /path/to/excel/dir -p base-left-position(e.g. 100.0) [-r]',
        description='指定されたフォルダ配下のExcelを開いていき、画像が左端に貼付けされていないファイルを出力します。',
        add_help=True
    )
    # Target directory to scan (required).
    parser.add_argument('-d', '--directory', help='対象ディレクトリ', required=True)
    # Threshold/target value for Shape.Left.
    parser.add_argument('-p', '--position', help='基準となるShape.Leftの値', type=float, default=100.0)
    # Report-only mode: print findings without modifying any workbook.
    parser.add_argument('-r', '--report', help='情報のみ出力して変更はしない', action='store_true')
    args = parser.parse_args()

    go(args.directory, args.position, args.report)
| 3.171875 | 3 |
mmdet/models/roi_heads/wsod_embedding_head.py | dyabel/wsod-mmdet | 6 | 12772016 | import torch
from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
from mmdet.core import multiclass_nms,bbox_select_per_class
from mmdet.core.evaluation import bbox_overlaps
from mmdet.models.losses import accuracy
from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
import time
@HEADS.register_module()
class WsodEmbedHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Simplest base roi head including one bbox head and one mask head."""
def __init__(self,
             bbox_roi_extractor=None,
             bbox_head=None,
             contrast_head=None,
             mask_roi_extractor=None,
             mask_head=None,
             shared_head=None,
             train_cfg=None,
             test_cfg=None):
    """Build the WSOD RoI head from its component config dicts.

    NOTE(review): `contrast_head` is accepted but currently unused — the
    call to ``init_contrast_head`` below is commented out; confirm whether
    it should be re-enabled.
    """
    super(WsodEmbedHead,self).__init__(
        bbox_roi_extractor=bbox_roi_extractor,
        bbox_head=bbox_head,
        mask_roi_extractor=mask_roi_extractor,
        mask_head=mask_head,
        shared_head=shared_head,
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    # self.init_contrast_head(contrast_head)
def init_assigner_sampler(self):
    """Initialize assigner and sampler.

    Both stay None when no train config is provided (e.g. at test time).
    """
    self.bbox_assigner = None
    self.bbox_sampler = None
    if not self.train_cfg:
        return
    self.bbox_assigner = build_assigner(self.train_cfg.assigner)
    self.bbox_sampler = build_sampler(self.train_cfg.sampler, context=self)
def init_bbox_head(self, bbox_roi_extractor, bbox_head):
    """Initialize ``bbox_head``"""
    # Build the RoI feature extractor and the bbox head from their config dicts.
    self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
    self.bbox_head = build_head(bbox_head)
# def init_embedding_head(self,embedding_head):
# self.embedding_head = build_head(embedding_head)
def init_contrast_head(self,contrast_head):
    """Build the contrastive head from its config dict.

    Not called from the visible ``__init__`` (the call there is commented out).
    """
    self.contrast_head = build_head(contrast_head)
def init_mask_head(self, mask_roi_extractor, mask_head):
    """Initialize ``mask_head``.

    When no dedicated mask RoI extractor config is given, the bbox branch's
    extractor is reused and ``share_roi_extractor`` is set accordingly.
    """
    self.share_roi_extractor = mask_roi_extractor is None
    if self.share_roi_extractor:
        self.mask_roi_extractor = self.bbox_roi_extractor
    else:
        self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
    self.mask_head = build_head(mask_head)
def init_weights(self, pretrained):
"""Initialize the weights in head.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
if self.with_bbox:
self.bbox_roi_extractor.init_weights()
self.bbox_head.init_weights()
if self.with_mask:
self.mask_head.init_weights()
if not self.share_roi_extractor:
self.mask_roi_extractor.init_weights()
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats = self.bbox_head.double_fc_forward(bbox_feats)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
if self.with_bbox:
bbox_results = self._bbox_forward_strong_branch2(bbox_feats)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def match(self,bboxes1=None,bboxes2=None,labels1=None,labels2=None):
flag = False
for i,box1 in enumerate(bboxes1):
if labels1[i] != labels2[i]:
return False
for box2 in bboxes2:
if bbox_overlaps(box1.unsqueeze(0).cpu().numpy(),box2.unsqueeze(0).cpu().numpy())[0][0] > 0.5:
flag = True
break
if not flag: return False
return True
#duyu
#TODO online augmentation
@torch.no_grad()
def OAM_Confidence(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
max_iter=30,
gt_bboxes_ignore=None,
gt_masks=None):
if not self.with_bbox:
raise Exception
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
bbox_results, oam_bboxes, oam_labels = self._bbox_forward_train_strong(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
oam_bboxes = [oam_bboxes[:, :4]]
oam_labels = [oam_labels]
#begin iter
k = 0
T = max_iter
count = 0
while k < max_iter:
k += 1
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
oam_bboxes[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
oam_bboxes[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
bbox_results, oam_bboxes_next, oam_labels_next = self._bbox_forward_train_strong(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
oam_bboxes_next = [oam_bboxes_next[:,:4]]
oam_labels_next = [oam_labels_next]
if self.match(bboxes1=oam_bboxes_next[0],bboxes2=oam_bboxes[0],labels1=oam_labels_next[0],labels2=oam_labels[0]):
count += 1
if count == 3:
T = k
k = max_iter + 1
break
else:
count = 0
oam_bboxes,oam_labels = oam_bboxes_next,oam_labels_next
return T
#duyu
#TODO Double pass seems not work well. Here the first pass is removed
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
# losses_first_pass,oam_bboxes,oam_labels = self.forward_train_first_pass(x,img_metas,proposal_list,gt_bboxes,gt_labels,gt_bboxes_ignore,
# gt_masks=None)
losses_second_pass = self.forward_train_second_pass(x,img_metas,proposal_list,gt_bboxes,gt_labels,gt_bboxes_ignore,
gt_masks=None)
losses = dict()
# losses.update(losses_first_pass)
losses.update(losses_second_pass)
return losses
#duyu
def forward_train_first_pass(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results_strong,bbox_results_weak,oam_bboxes,oam_labels = \
self._bbox_forward_train_first_pass(x,sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results_strong['loss_bbox_strong_fp'])
losses.update(bbox_results_weak['loss_bbox_weak_fp'])
# mask head forward and loss
#TODO
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results_strong['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses,oam_bboxes,oam_labels
def forward_train_second_pass(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
assert num_imgs == 2
#assign for strong image
assign_result = self.bbox_assigner.assign(
proposal_list[0], gt_bboxes[0], gt_bboxes_ignore[0],
gt_labels[0])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[0],
gt_bboxes[0],
gt_labels[0],
feats=[lvl_feat[0][None] for lvl_feat in x])
sampling_results.append(sampling_result)
#assign for weak image
assign_result = self.bbox_assigner.assign(
proposal_list[1], gt_bboxes[1], gt_bboxes_ignore[1],
gt_labels=None)
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[1],
gt_bboxes[1],
gt_labels=None,
feats=[lvl_feat[1][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# print('#'*100)
# print(sampling_results)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results_weak_branch1, bbox_results_strong_branch1, bbox_results_weak_branch2, bbox_results_strong_branch2 = \
self._bbox_forward_train_second_pass(x, sampling_results,
gt_bboxes, gt_labels,
img_metas,gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(bbox_results_weak_branch1['loss_bbox_weak_branch1_sp'])
losses.update(bbox_results_strong_branch1['loss_bbox_strong_branch1_sp'])
losses.update(bbox_results_weak_branch2['loss_bbox_weak_branch2'])
losses.update(bbox_results_strong_branch2['loss_bbox_strong_branch2'])
# losses.update(contrastive_losses)
# mask head forward and loss
# TODO
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results_strong_branch1['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses
#duyu
def _bbox_forward_strong_branch1(self,bbox_feats):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
cls_score, bbox_pred = self.bbox_head.forward_strong_branch1(bbox_feats)
bbox_results = dict(
cls_score = cls_score, bbox_pred = bbox_pred, bbox_feats = bbox_feats)
return bbox_results
#duyu
def _bbox_forward_strong_branch2(self,bbox_feats):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
cls_score, bbox_pred = self.bbox_head.forward_strong_branch2(bbox_feats)
bbox_results = dict(
cls_score = cls_score, bbox_pred = bbox_pred, bbox_feats = bbox_feats)
return bbox_results
#duyu
def _bbox_forward_weak(self,bbox_feats):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
cls_proposal_mat = self.bbox_head.forward_weak(bbox_feats)
bbox_results = dict(
cls_proposal_mat = cls_proposal_mat, bbox_feats=bbox_feats)
return bbox_results
#duyu
def _bbox_forward_train_first_pass(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
"""Run forward function and calculate loss for box head in training."""
torch_device = gt_labels[0].get_device()
x_strong = tuple([torch.unsqueeze(xx[0],0) for xx in x])
x_weak = tuple([torch.unsqueeze(xx[1],0) for xx in x])
rois_strong = bbox2roi([res.bboxes for res in [sampling_results[0]]])
rois_weak = bbox2roi([res.bboxes for res in [sampling_results[1]]])
bbox_feats_strong = self.bbox_roi_extractor(
x_strong[:self.bbox_roi_extractor.num_inputs], rois_strong)
bbox_feats_strong = self.bbox_head.double_fc_forward(bbox_feats_strong)
bbox_feats_weak = self.bbox_roi_extractor(
x_weak[:self.bbox_roi_extractor.num_inputs], rois_weak)
bbox_feats_weak = self.bbox_head.double_fc_forward(bbox_feats_weak)
if self.with_shared_head:
bbox_feats_strong = self.shared_head(bbox_feats_strong)
bbox_feats_weak = self.shared_head(bbox_feats_weak)
#caculate loss_strong_branch1
bbox_targets_strong = self.bbox_head.get_targets([sampling_results[0]], [gt_bboxes[0]],
[gt_labels[0]], self.train_cfg)
bbox_results_strong = self._bbox_forward_strong_branch1(bbox_feats_strong)
loss_bbox_strong = self.bbox_head.loss_strong(bbox_results_strong['cls_score'],
bbox_results_strong['bbox_pred'], rois_strong,
*bbox_targets_strong)
loss_strong = dict()
loss_strong['loss_cls_strong_branch1_fp'] = loss_bbox_strong['loss_cls_strong']
loss_strong['acc_strong_branch1_fp'] = loss_bbox_strong['acc_strong']
loss_strong['loss_bbox_strong_branch1_fp'] = loss_bbox_strong['loss_bbox_strong']
bbox_results_strong.update(loss_bbox_strong_fp=loss_strong)
oam_bboxes_strong,oam_labels_strong = bbox_select_per_class(bbox_results_strong['bbox_pred'],
bbox_results_strong['cls_score'],
gt_labels[1],
score_thr=0,
nms_cfg={'iou_threshold':0.5},
max_num=-1
)
#calculate loss_weak_branch1
bbox_targets_weak = self.bbox_head.get_targets([sampling_results[1]], [gt_bboxes[1]],
[gt_labels[1]], self.train_cfg)
bbox_results_weak = self._bbox_forward_weak(bbox_feats_weak)
bbox_results_weak_pseudo = self._bbox_forward_strong_branch1(bbox_feats_weak)
loss_bbox_weak = self.bbox_head.loss_weak(bbox_results_weak['cls_proposal_mat'],
rois_weak,
*bbox_targets_weak)
loss_weak = dict()
loss_weak['loss_img_level_fp'] = loss_bbox_weak['loss_img_level']
bbox_results_weak.update(loss_bbox_weak_fp=loss_weak)
oam_bboxes_weak,oam_labels_weak = bbox_select_per_class(bbox_results_weak_pseudo['bbox_pred'],
bbox_results_weak_pseudo['cls_score'],
gt_labels[1],
score_thr=0,
nms_cfg={'iou_threshold':0.5},
max_num=-1
)
# print('oam_labels_first_pass: ',oam_labels_weak)
oam_bboxes = []
oam_labels = []
oam_bboxes.append(oam_bboxes_strong[:,:4])
oam_bboxes.append(oam_bboxes_weak[:,:4])
oam_labels.append(oam_labels_strong.to(torch_device))
oam_labels.append(oam_labels_weak.to(torch_device))
return bbox_results_strong,bbox_results_weak,oam_bboxes,oam_labels
#duyu
def contrast_forward_train(self,
x,
strong_bboxes,
strong_labels,
oam_bboxes,
oam_labels,
img_metas,
gt_bboxes_ignore=None,
):
torch_device = strong_labels.get_device()
oam_labels = oam_labels.to(torch_device)
x_strong = tuple([torch.unsqueeze(xx[0], 0) for xx in x])
x_weak = tuple([torch.unsqueeze(xx[1], 0) for xx in x])
rois_strong = bbox2roi([strong_bboxes])
rois_weak = bbox2roi([oam_bboxes])
bbox_feats_strong = self.bbox_roi_extractor(
x_strong[:self.bbox_roi_extractor.num_inputs], rois_strong)
bbox_feats_strong = self.bbox_head.double_fc_forward(bbox_feats_strong)
bbox_feats_weak = self.bbox_roi_extractor(
x_weak[:self.bbox_roi_extractor.num_inputs], rois_weak)
bbox_feats_weak = self.bbox_head.double_fc_forward(bbox_feats_weak)
if self.with_shared_head:
bbox_feats_strong = self.shared_head(bbox_feats_strong)
bbox_feats_weak = self.shared_head(bbox_feats_weak)
contrastive_losses = self.contrast_head.forward_train(bbox_feats_strong,bbox_feats_weak,strong_labels,oam_labels)
losses = dict()
losses.update(contrastive_losses)
# losses['contrastive_loss'] = torch.tensor([0.0])
return losses
# duyu
def _bbox_forward_embedding_branch2(self, bbox_feats,hard_neg_roi_id=None,pos_roi_id=None):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
# time_start = time.time()
# cls_score, bbox_pred,min_pos_pos_dist, min_neg_neg_dist = self.bbox_head.forward_embedding(bbox_feats,hard_neg_roi_id=hard_neg_roi_id,pos_roi_id=pos_roi_id)
cls_score_fc,cls_score, bbox_pred,min_pos_pos_dist, min_neg_neg_dist = self.bbox_head.forward_embedding(bbox_feats,hard_neg_roi_id=hard_neg_roi_id,pos_roi_id=pos_roi_id)
# print(time.time()-time_start)
bbox_results = dict(
cls_score_fc=cls_score_fc,cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats,min_pos_pos_dist=min_pos_pos_dist,min_neg_neg_dist=min_neg_neg_dist)
# cls_score = cls_score, bbox_pred = bbox_pred, bbox_feats = bbox_feats, min_pos_pos_dist = min_pos_pos_dist, min_neg_neg_dist = min_neg_neg_dist)
return bbox_results
#yangyk
def get_hard_neg_target(self,rois,sampling_results):
#print([res.hard_neg_bboxes for res in sampling_results])
#print(sampling_results)
flag = True
for res in sampling_results:
if res.hard_neg_bboxes is None:
flag = False
if flag is False:
hard_neg_labels = None
hard_neg_roi_id = None
return hard_neg_labels,hard_neg_roi_id
# time_start = time.time()
hard_neg_labels = sampling_results[0].hard_neg_labels
hard_neg_roi_id = sampling_results[0].hard_neg_id
# hard_neg_roi_id = torch.cat(hard_neg_roi_id, 0)
# hard_neg_labels = torch.cat(hard_neg_labels_list, 0)
# print(time.time()-time_start)
return hard_neg_labels,hard_neg_roi_id
#duyu
def _bbox_forward_train_second_pass(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas,gt_bboxes_ignore=None):
"""Run forward function and calculate loss for box head in training."""
x_strong = tuple([torch.unsqueeze(xx[0],0) for xx in x])
x_weak = tuple([torch.unsqueeze(xx[1],0) for xx in x])
rois_strong = bbox2roi([res.bboxes for res in [sampling_results[0]]])
rois_weak = bbox2roi([res.bboxes for res in [sampling_results[1]]])
bbox_feats_strong = self.bbox_roi_extractor(
x_strong[:self.bbox_roi_extractor.num_inputs], rois_strong)
bbox_feats_strong = self.bbox_head.double_fc_forward(bbox_feats_strong)
bbox_feats_weak = self.bbox_roi_extractor(
x_weak[:self.bbox_roi_extractor.num_inputs], rois_weak)
bbox_feats_weak = self.bbox_head.double_fc_forward(bbox_feats_weak)
if self.with_shared_head:
bbox_feats_strong = self.shared_head(bbox_feats_strong)
bbox_feats_weak = self.shared_head(bbox_feats_weak)
#calculate loss_strong_branch1
bbox_targets_strong = self.bbox_head.get_targets([sampling_results[0]], [gt_bboxes[0]],
[gt_labels[0]], self.train_cfg)
#yanyk
bbox_results_strong_branch1 = self._bbox_forward_strong_branch1(bbox_feats_strong)
loss_bbox_strong_branch1 = self.bbox_head.loss_strong_branch1(bbox_results_strong_branch1['cls_score'],
bbox_results_strong_branch1['bbox_pred'], rois_strong,
*bbox_targets_strong)
loss_strong_branch1 = dict()
loss_strong_branch1['loss_cls_strong_branch1_sp'] = loss_bbox_strong_branch1['loss_cls_strong']
loss_strong_branch1['loss_bbox_strong_branch1_sp'] = loss_bbox_strong_branch1['loss_bbox_strong']
loss_strong_branch1['acc_strong_branch1_sp'] = loss_bbox_strong_branch1['acc_strong']
bbox_results_strong_branch1.update(loss_bbox_strong_branch1_sp=loss_strong_branch1)
#calculate loss_weak_branch1
bbox_results_weak_pseudo = self._bbox_forward_strong_branch1(bbox_feats_weak)
bbox_results_weak_branch1 = self._bbox_forward_weak(bbox_feats_weak)
loss_bbox_weak_branch1 = self.bbox_head.loss_weak_branch1(bbox_results_weak_branch1['cls_proposal_mat'],
gt_labels[1]
)
loss_weak_branch1 = dict()
loss_weak_branch1['loss_img_level'] = loss_bbox_weak_branch1['loss_img_level']
bbox_results_weak_branch1.update(loss_bbox_weak_branch1_sp=loss_weak_branch1)
#generate oam labels for weak image
oam_bboxes_weak, oam_labels_weak = bbox_select_per_class(bbox_results_weak_pseudo['bbox_pred'],
bbox_results_weak_pseudo['cls_score'],
gt_labels[1],
score_thr=0,
nms_cfg={'iou_threshold': 0.5},
max_num=-1
)
#contrastive_losses
# contrastive_losses = self.contrast_forward_train(x,gt_bboxes[0],gt_labels[0],oam_bboxes_weak[:,:4],
# oam_labels_weak,img_metas,gt_bboxes_ignore=gt_bboxes_ignore)
#calculate loss_strong_branch2
hard_neg_roi_labels, hard_neg_roi_id = self.get_hard_neg_target(rois_strong, [sampling_results[0]])
all_bbox_labels = bbox_targets_strong[0]
pos_roi_labels, pos_roi_id = all_bbox_labels[(all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)], \
(all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)
pos_roi_id = pos_roi_id.squeeze(1)
pos_roi_labels = pos_roi_labels.squeeze(1)
bbox_results_strong_branch2 = self._bbox_forward_embedding_branch2(bbox_feats_strong,
hard_neg_roi_id=hard_neg_roi_id,
pos_roi_id=pos_roi_id)
loss_bbox_strong_branch2 = self.bbox_head.loss_strong_branch2(
bbox_results_strong_branch2['cls_score_fc'],
bbox_results_strong_branch2['cls_score'],
bbox_results_strong_branch2['bbox_pred'],
rois_strong,
*bbox_targets_strong,
min_pos_pos_dist=bbox_results_strong_branch2['min_pos_pos_dist'],
min_neg_neg_dist=bbox_results_strong_branch2['min_neg_neg_dist'],
pos_roi_labels=pos_roi_labels,
hard_neg_roi_labels=hard_neg_roi_labels)
loss_strong_branch2 = dict()
loss_strong_branch2['loss_cls_strong_branch2'] = loss_bbox_strong_branch2['loss_cls_strong']
loss_strong_branch2['acc_strong_branch2'] = loss_bbox_strong_branch2['acc_strong']
loss_strong_branch2['acc_strong_branch2_fc'] = loss_bbox_strong_branch2['acc_fc']
loss_strong_branch2['loss_cls_strong_branch2_fc'] = loss_bbox_strong_branch2['loss_cls_fc']
loss_strong_branch2['loss_bbox_strong_branch2'] = loss_bbox_strong_branch2['loss_bbox_strong']
loss_strong_branch2['loss_embedding_strong'] = loss_bbox_strong_branch2['loss_embed_strong']
bbox_results_strong_branch2.update(loss_bbox_strong_branch2=loss_strong_branch2)
#calculate loss_weak_branch2
bbox_targets_weak_branch2 = self.bbox_head.get_targets([sampling_results[1]],oam_bboxes_weak,oam_labels_weak,self.train_cfg)
hard_neg_roi_labels, hard_neg_roi_id = self.get_hard_neg_target(rois_weak, [sampling_results[1]])
all_bbox_labels = bbox_targets_weak_branch2[0]
pos_roi_labels, pos_roi_id = all_bbox_labels[
(all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)], (
all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)
pos_roi_id = pos_roi_id.squeeze(1)
pos_roi_labels = pos_roi_labels.squeeze(1)
bbox_results_weak_branch2 = self._bbox_forward_embedding_branch2(bbox_feats_weak,
hard_neg_roi_id=hard_neg_roi_id,
pos_roi_id=pos_roi_id)
labels,label_weights,bbox_targets,bbox_weights = bbox_targets_weak_branch2
# avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
# acc_weak = accuracy(bbox_results_weak_branch2['cls_score'],labels)
loss_bbox_weak_branch2 = self.bbox_head.loss_weak_branch2(
bbox_results_weak_branch2['cls_score_fc'],
bbox_results_weak_branch2['cls_score'],
labels,
label_weights,
min_pos_pos_dist=bbox_results_weak_branch2['min_pos_pos_dist'],
min_neg_neg_dist=bbox_results_weak_branch2['min_neg_neg_dist'],
pos_roi_labels=pos_roi_labels,
hard_neg_roi_labels=hard_neg_roi_labels)
loss_weak_branch2 = dict()
loss_weak_branch2['loss_cls_weak_branch2'] = loss_bbox_weak_branch2['loss_cls_weak']
loss_weak_branch2['acc_weak_branch2'] = loss_bbox_weak_branch2['acc_weak']
loss_strong_branch2['acc_weak_branch2_fc'] = loss_bbox_strong_branch2['acc_fc']
loss_strong_branch2['loss_cls_weak_branch2_fc'] = loss_bbox_strong_branch2['loss_cls_fc']
# loss_weak_branch2['loss_embedding_weak'] = loss_bbox_weak_branch2['loss_embed_weak']
bbox_results_weak_branch2.update(loss_bbox_weak_branch2=loss_weak_branch2)
return bbox_results_weak_branch1,bbox_results_strong_branch1,bbox_results_weak_branch2,bbox_results_strong_branch2
# contrastive_losses
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head in
training."""
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_results = self._mask_forward(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
self.train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
return mask_results
def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
"""Mask head forward function used in both training and testing."""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
assert bbox_feats is not None
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
return mask_results
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
print('async_simple_test')
det_bboxes, det_labels = await self.async_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = await self.async_test_mask(
x,
img_metas,
det_bboxes,
det_labels,
rescale=rescale,
mask_test_cfg=self.test_cfg.get('mask'))
return bbox_results, segm_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
if torch.onnx.is_in_onnx_export():
if self.with_mask:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return det_bboxes, det_labels, segm_results
else:
return det_bboxes, det_labels
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head.num_classes)
for i in range(len(det_bboxes))
]
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return list(zip(bbox_results, segm_results))
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats = self.bbox_head.double_fc_forward(bbox_feats)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
bbox_results = self._bbox_forward_embedding_branch2(bbox_feats)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score_fc']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
# some detector with_reg is False, bbox_pred will be None
if bbox_pred is not None:
# the bbox prediction of some detectors like SABL is not Tensor
if isinstance(bbox_pred, torch.Tensor):
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
else:
bbox_pred = self.bbox_head.bbox_pred_split(
bbox_pred, num_proposals_per_img)
else:
bbox_pred = (None, ) * len(proposals)
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(len(proposals)):
det_bbox, det_label = self.bbox_head.get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
return det_bboxes, det_labels
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
proposal_list,
self.test_cfg)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
det_labels)
return [(bbox_results, segm_results)]
else:
return [bbox_results]
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats = self.bbox_head.double_fc_forward(bbox_feats)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
bbox_results = self._bbox_forward_embedding_branch2(bbox_feats)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels | 1.703125 | 2 |
esphome/components/am2320/sensor.py | TheEggi/esphomeyaml | 0 | 12772017 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import CONF_HUMIDITY, CONF_ID, CONF_TEMPERATURE, \
UNIT_CELSIUS, ICON_THERMOMETER, ICON_WATER_PERCENT, UNIT_PERCENT
# The AM2320 is an I2C temperature/humidity sensor.
DEPENDENCIES = ['i2c']
# C++ namespace and component class generated on the ESPHome side.
am2320_ns = cg.esphome_ns.namespace('am2320')
AM2320Component = am2320_ns.class_('AM2320Component', cg.PollingComponent, i2c.I2CDevice)
# Both sensors are optional; polls every 60s by default; the AM2320's
# fixed I2C address is 0x5C.
CONFIG_SCHEMA = cv.Schema({
    cv.GenerateID(): cv.declare_id(AM2320Component),
    cv.Optional(CONF_TEMPERATURE): sensor.sensor_schema(UNIT_CELSIUS, ICON_THERMOMETER, 1),
    cv.Optional(CONF_HUMIDITY): sensor.sensor_schema(UNIT_PERCENT, ICON_WATER_PERCENT, 1),
}).extend(cv.polling_component_schema('60s')).extend(i2c.i2c_device_schema(0x5C))
def to_code(config):
    """Translate the validated config into ESPHome C++ codegen calls.

    Registers the component and its I2C device, then attaches whichever
    of the two optional sensors (temperature, humidity) are configured.
    """
    var = cg.new_Pvariable(config[CONF_ID])
    yield cg.register_component(var, config)
    yield i2c.register_i2c_device(var, config)
    # Table-driven wiring keeps the two optional sensors symmetric.
    for conf_key, attach in ((CONF_TEMPERATURE, var.set_temperature_sensor),
                             (CONF_HUMIDITY, var.set_humidity_sensor)):
        if conf_key in config:
            sens = yield sensor.new_sensor(config[conf_key])
            cg.add(attach(sens))
| 2.21875 | 2 |
estate/settings/base.py | badri/estate | 0 | 12772018 | import ast
import os
import dj_database_url
# Absolute path to the Django project package (parent of this settings dir).
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
# NOTE(review): falls back to the literal "UNKNOWN" when SECRET_KEY is not
# set -- fine for local dev, but production should fail fast instead.
SECRET_KEY = os.environ.get("SECRET_KEY", "UNKNOWN")
# DEBUG is parsed from the env as a Python literal ("True"/"False").
DEBUG = ast.literal_eval(os.environ.get('DEBUG', 'False'))
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ROOT_URLCONF = 'estate.urls'
WSGI_APPLICATION = 'estate.wsgi.application'
# NOTE(review): wildcard host allows any Host header; acceptable only
# behind a trusted proxy/load balancer -- confirm deployment topology.
ALLOWED_HOSTS = [
    '*',
]
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Database connection is taken entirely from the DATABASE_URL env var.
DATABASES = {
    'default': dj_database_url.config(default=os.environ.get("DATABASE_URL"))
}
# Optional ElastiCache endpoint (provisioned by Terraform); when absent,
# fall back to local dummy/file caches so the app still runs.
TERRAFORM_ELASTICACHE_URL = os.environ.get("TERRAFORM_ELASTICACHE_URL")
if TERRAFORM_ELASTICACHE_URL:
    CACHES = {
        "default": {
            "BACKEND": "django.core.cache.backends.dummy.DummyCache"
        },
        "terraform": {
            "BACKEND": "django_elasticache.memcached.ElastiCache",
            "LOCATION": TERRAFORM_ELASTICACHE_URL,
        }
    }
else:
    CACHES = {
        "default": {
            "BACKEND": "django.core.cache.backends.dummy.DummyCache"
        },
        "terraform": {
            "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
            "LOCATION": "/tmp/django_cache_terraform"
        }
    }
# Static files are served by WhiteNoise with hashed, compressed assets.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'assets'),
)
| 1.757813 | 2 |
movies/entertainment_center.py | MANOJPATRA1991/fresh_tomatoes | 0 | 12772019 | <reponame>MANOJPATRA1991/fresh_tomatoes
"""This module opens the fresh_tomatoes.html in a web browser"""
import media
import my_movies
# creating instances of the Movie class
# INTERSTELLAR
# Movie data for "Interstellar".
# Fix: the plot text previously read "... uninhabitable. Ex" "Ex-NASA ..."
# — the stray trailing "Ex" on the first literal duplicated the start of
# the next one, rendering as "uninhabitable. ExEx-NASA pilot ...".
# NOTE(review): positional argument order (title, plot, genres, poster URL,
# trailer URL, rating, score, freshness, cast, director, reviews,
# reviewers) is inferred from the values — confirm against media.Movie.
interstellar = media.Movie(
    "Interstellar",
    "In the future, Earth is slowly becoming uninhabitable. "
    "Ex-NASA pilot Cooper, along with a team of researchers, "
    "is sent on a planet exploration mission to report which "
    "planet can sustain life.",
    media.Movie.MOVIE_GENRE[0] + " | " + media.Movie.MOVIE_GENRE[4],
    "http://static.tvtropes.org/pmwiki/pub/images/" +
    "interstellar_film_poster_1146.jpg",
    "https://www.youtube.com/watch?v=ePbKGoIGAXY",
    media.Movie.VALID_RATINGS[2],
    8.6,
    "71%",
    [
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"],
    "<NAME>",
    [
        "In 2001, Kubrick saw a future that was out of our hands. "
        "For Nolan, our reliance on one another is all we have got.",
        "Brainy, barmy and beautiful to behold, this is Stephen "
        "Hawkings Star Trek: a mind-bending opera of space and "
        "time with a soul wrapped up in all the science.",
        "As visually and conceptually audacious as anything Nolan "
        "has yet done, the directors ninth feature also proves "
        "more emotionally accessible than his coolly cerebral "
        "thrillers and Batman movies."
    ],
    [
        "<NAME>, <NAME>",
        "<NAME>, Empire",
        "<NAME>, Variety"
    ])
# Remaining catalog entries, one media.Movie per film, then the page build.
# NOTE(review): positional argument order (title, plot, genres, poster URL,
# trailer URL, rating, score, freshness, cast, director, reviews,
# reviewers) is inferred from the values — confirm against media.Movie.
# AVATAR
avatar = media.Movie(
    "Avatar",
    "Jake, a paraplegic marine, replaces his brother on the "
    "Na'vi inhabited Pandora for a corporate mission. He is "
    "accepted by the natives as one of their own but he must "
    "decide where his loyalties lie.",
    media.Movie.MOVIE_GENRE[0] + " | " + media.Movie.MOVIE_GENRE[3] +
    " | " + media.Movie.MOVIE_GENRE[4] + " | " +
    media.Movie.MOVIE_GENRE[5],
    "https://upload.wikimedia.org/wikipedia/en/thumb/b/b0/" +
    "Avatar-Teaser-Poster.jpg/220px-Avatar-Teaser-Poster.jpg",
    "https://www.youtube.com/watch?v=5PSNL1qE6VY",
    media.Movie.VALID_RATINGS[2],
    7.8,
    "83%",
    [
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"
    ],
    "<NAME>",
    [
        "Worth watching for fans, completists and anyone "
        "who missed it on the big screen first time around "
        "- but it wont win over any haters.",
        "What if the director of the highest-grossing movie "
        "ever made (Titanic) spent a rumored $500 million on "
        "a spectacular futuristic sci-fi epic and no one "
        "other than hardcore fanboys went to see it?",
        "Fifteen years in the making, James Camerons latest "
        "creation is an eye-popping spectacle of conflict "
        "between idyllic aliens and greedy humans saturated "
        "with environmental and spiritual themes."
    ],
    [
        "<NAME>, Empire",
        "<NAME>, Common Sense Media",
        "<NAME>, Plugged In"
    ])
# THE AVENGERS
the_avengers = media.Movie(
    "The Avengers",
    "S.H.I.E.L.D. leader <NAME> is compelled to launch "
    "the 'Avengers Initiative' when Loki poses a threat to "
    "planet Earth. Will <NAME>'s squad of superheroes "
    "prove themselves equal to the task?",
    media.Movie.MOVIE_GENRE[0] + " | " + media.Movie.MOVIE_GENRE[3] +
    " | " + media.Movie.MOVIE_GENRE[4] + " | " +
    media.Movie.MOVIE_GENRE[5],
    "https://upload.wikimedia.org/wikipedia/en/thumb/f/f9" +
    "/TheAvengers2012Poster.jpg/220px-TheAvengers2012Poster.jpg",
    "https://www.youtube.com/watch?v=eOrNdBpGMv8",
    media.Movie.VALID_RATINGS[2],
    8.1,
    "92%",
    [
        "<NAME>.",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"
    ],
    "<NAME>",
    [
        "If twos company and threes a crowd, whats four? Or six? "
        "In this case, its Marvels newest superhero movie.",
        "Quick-witted and nuanced, this movie takes the best of "
        "the genre -- iconic heroes fighting for truth and justice "
        "-- and dishes it out in a fanboy-pleasing, edge-of-your seat way.",
        "A joyous blend of heroism and humour that raises the "
        "stakes even as it maintains a firm grip on what makes "
        "the individual heroes tick."
    ],
    [
        "<NAME>, Plugged In",
        "S. <NAME>, Common Sense Media",
        "Empire"
    ])
# DOCTOR STRANGE
doctor_strange = media.Movie(
    "Doctor Strange",
    "While on a journey of physical and spiritual healing, a brilliant "
    "neurosurgeon is drawn into the world of the mystic arts.",
    media.Movie.MOVIE_GENRE[0] + " | " + media.Movie.MOVIE_GENRE[3] +
    " | " + media.Movie.MOVIE_GENRE[4] + " | " +
    media.Movie.MOVIE_GENRE[5],
    "https://upload.wikimedia.org/wikipedia/en/thumb/c/c7/" +
    "Doctor_Strange_poster.jpg/220px-Doctor_Strange_poster.jpg",
    "https://www.youtube.com/watch?v=MWRUNTLisPo",
    media.Movie.VALID_RATINGS[2],
    7.6,
    "90%",
    [
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"
    ],
    "<NAME>",
    [
        "Marvels 14th Cinematic Universe movie has all the "
        "usual action and explosions, but it also has a different "
        "type of main character -- one whos magical and appealingly "
        "flawed but willing to change.",
        "A bizarre and beautiful detour on the Marvel journey, "
        "which culminates in a mind-bending, expectation-inverting "
        "final act. Not to be watched under the influence.",
        "Aesthetically, Doctor Strange is a good movie, one of the "
        "strongest in the Marvel canon thus far. But is it a good "
        "movie? A movie suitable for you or your family? That depends."
    ],
    [
        "<NAME>, Common Sense Media",
        "<NAME>, Empire",
        "<NAME>, Plugged In"
    ])
# THE DARK KNIGHT
the_dark_knight = media.Movie(
    "The Dark Knight",
    "The Joker, a psychopath, terrorises Gotham so he can "
    "prove that even the most incorruptible people can become "
    "evil. However, Batman, Gordon and Dent stand against him.",
    media.Movie.MOVIE_GENRE[1] + " | " + media.Movie.MOVIE_GENRE[2] +
    " | " + media.Movie.MOVIE_GENRE[4] + " | " +
    media.Movie.MOVIE_GENRE[5],
    "https://upload.wikimedia.org/wikipedia/en/8/8a/Dark_Knight.jpg",
    "https://www.youtube.com/watch?v=EXeTwQWrcwY",
    media.Movie.VALID_RATINGS[2],
    9.0,
    "94%",
    [
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"
    ],
    "<NAME>",
    [
        "Heads up: a thunderbolt is about to rip into the "
        "blanket of bland we call summer movies.",
        "Ledgers performance is monumental, but The Dark Knight "
        "lives up to it. Nolan cements his position as Hollywoods "
        "premier purveyor of blockbuster smarts and the Batbike "
        "is kinda cool, too.",
        "This sequel to Batman Begins is mesmerizing and "
        "thought-provoking. But it shouldnt have been named after "
        "the good guy. This is the Jokers court, and hes not "
        "looking for a laugh."
    ],
    [
        "<NAME>, Rolling Stone",
        "<NAME>, Empire",
        "<NAME>, Plugged In"
    ])
# NIGHTCRAWLER
nightcrawler = media.Movie(
    "Nightcrawler",
    "<NAME> wants to set a score with his rival Joe Loder "
    "even as he tries to shoot sensational stories of the rich "
    "neighbourhood of the city. His quest gets a new direction "
    "after he meets Nina Romina.",
    media.Movie.MOVIE_GENRE[1] + " | " + media.Movie.MOVIE_GENRE[2] +
    " | " + media.Movie.MOVIE_GENRE[4],
    "https://resizing.flixster.com/V49PiBgbepPpFjn3J_b4ikNd6wA=/" +
    "206x305/v1.bTsxMTE4OTQ3MztqOzE3NDA5OzEyMDA7ODAwOzEyMDA",
    "https://www.youtube.com/watch?v=X8kYDQan8bw",
    media.Movie.VALID_RATINGS[3],
    7.9,
    "95%",
    [
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"
    ],
    "<NAME>",
    [
        "Sharp, dark, satirical and bone-rattlingly thrilling, "
        "with a career-peak turn from <NAME>al. "
        "Its this years Drive.",
        "Nightcrawler curves and hisses its way into your "
        "head with demonic skill.",
        "The result is a very good movie and a milestone in "
        "Gyllenhaals career but one in which the pieces dont "
        "quite fit together."
    ],
    [
        "<NAME>",
        "<NAME>, <NAME>",
        "<NAME>, The Atlantic"
    ])
# Collect every movie defined above and render the HTML page in a browser.
movies = [interstellar, avatar, the_avengers, doctor_strange,
          the_dark_knight, nightcrawler]
my_movies.open_movies_page(movies)
| 3.03125 | 3 |
LeetCode/Python3/Math/1175. Prime Arrangements.py | WatsonWangZh/CodingPractice | 11 | 12772020 | # Return the number of permutations of 1 to n so that prime numbers are at prime indices (1-indexed.)
# (Recall that an integer is prime if and only if it is greater than 1,
# and cannot be written as a product of two positive integers both smaller than it.)
# Since the answer may be large, return the answer modulo 10^9 + 7.
# Example 1:
# Input: n = 5
# Output: 12
# Explanation: For example [1,2,5,4,3] is a valid permutation,
# but [5,2,3,4,1] is not because the prime number 5 is at index 1.
# Example 2:
# Input: n = 100
# Output: 682289015
# Constraints:
# 1 <= n <= 100
# Hints:
# Solve the problem for prime numbers and composite numbers separately.
# Multiply the number of permutations of prime numbers over prime indices
# with the number of permutations of composite numbers over composite indices.
# The number of permutations equals the factorial.
import math
class Solution(object):
    """LeetCode 1175 — Prime Arrangements."""

    def numPrimeArrangements(self, n):
        """Count permutations of 1..n with every prime at a prime index.

        Primes can be permuted freely among the prime-valued positions and
        composites among the remaining positions, so with p primes <= n the
        answer is p! * (n - p)! modulo 10**9 + 7.

        :type n: int  (1 <= n <= 100)
        :rtype: int
        """
        mod = 10 ** 9 + 7

        def primes_up_to(limit):
            # Euler (linear) sieve: each composite is crossed out exactly
            # once, by its smallest prime factor -> O(limit) time/space.
            is_composite = [False] * (limit + 1)
            primes = []
            for i in range(2, limit + 1):
                if not is_composite[i]:
                    primes.append(i)
                for prime in primes:
                    if i * prime > limit:
                        break
                    is_composite[i * prime] = True
                    if i % prime == 0:
                        # `prime` divides i, so it is the smallest prime
                        # factor of every i*q with q >= prime; stop here to
                        # avoid crossing a composite out twice.
                        break
            return primes

        def factorial_mod(k):
            # k! with the product reduced modulo `mod` at every step.
            # (The original code wrote `res *= i % mod`, which reduces only
            # `i` — a no-op for i < mod — and let the product grow unbounded.)
            result = 1
            for i in range(2, k + 1):
                result = result * i % mod
            return result

        p = len(primes_up_to(n))
        return factorial_mod(p) * factorial_mod(n - p) % mod
keywords_per_year_organiser.py | Track-your-parliament/track-your-parliament-data | 0 | 12772021 | <reponame>Track-your-parliament/track-your-parliament-data
import pandas as pd
import numpy as np
import datetime
# Input: per-vote keyword/TF-IDF distributions produced by an upstream step.
PROPOSALS = './data/votes_keywords_distributions.json'
df = pd.read_json(PROPOSALS)
# Derive a calendar-year column from each vote's date.
df = df.assign(year=lambda x: x.date.dt.year)
group_by_years = df.groupby(by=['year'])
# One row per distinct year; 'keywords_list' starts as a copy of the year
# and is overwritten below with that year's top keywords.
yearly_keywords = pd.DataFrame(df.year.unique(), columns=['year'])
yearly_keywords = yearly_keywords.assign(keywords_list=lambda x: x.year)
def get_top_keywords_for_year(year):
    # Flatten all per-row keyword lists of the given year into one list.
    # NOTE(review): assumes each 'keywords' cell is an iterable of mappings
    # with 'word' and 'tfidf' keys — confirm against the producer script.
    year_keywords = []
    group_by_years.get_group(year).keywords.apply(lambda x: year_keywords.extend(x))
    # Keep the 20 highest-TF-IDF unique entries and return their words.
    return pd.DataFrame(year_keywords).drop_duplicates().sort_values(by=['tfidf'], ascending=False).head(20).word.to_numpy()
yearly_keywords.keywords_list = yearly_keywords.keywords_list.apply(lambda x: get_top_keywords_for_year(x))
# Newest year first; one JSON record per year in the output file.
yearly_keywords = yearly_keywords.sort_values(by=['year'], ascending=False)
yearly_keywords.to_json('./data/top_keywords_by_year.json', orient='records')
| 3.125 | 3 |
units/volume/u_s_tablespoons.py | putridparrot/PyUnits | 0 | 12772022 | # <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
# Conversions FROM US tablespoons to other volume units. Generated code —
# per the file header, manual changes are lost on regeneration.
def to_millilitres(value):
    """Convert US tablespoons to millilitres."""
    return value * 14.786764781249998848
def to_litres(value):
    """Convert US tablespoons to litres."""
    return value * 0.014786764781249998848
def to_kilolitres(value):
    """Convert US tablespoons to kilolitres."""
    return value * 0.000014786764781249998
def to_teaspoons(value):
    """Convert US tablespoons to (imperial) teaspoons."""
    return value * 2.4980215213991718912
def to_tablespoons(value):
    """Convert US tablespoons to (imperial) tablespoons."""
    return value * 0.83267384046639071232
def to_quarts(value):
    """Convert US tablespoons to (imperial) quarts."""
    return value * 0.013010528757287354368
def to_pints(value):
    """Convert US tablespoons to (imperial) pints."""
    return value * 0.026021057514574708736
def to_gallons(value):
    """Convert US tablespoons to (imperial) gallons."""
    return value * 0.003252632189321838592
def to_fluid_ounces(value):
    """Convert US tablespoons to (imperial) fluid ounces."""
    return value * 0.52042115029149417472
def to_u_s_teaspoons(value):
    """Convert US tablespoons to US teaspoons (3 tsp per tbsp)."""
    return value * 3.0
def to_u_s_quarts(value):
    """Convert US tablespoons to US quarts (64 tbsp per quart)."""
    return value / 64.0
def to_u_s_pints(value):
    """Convert US tablespoons to US pints (32 tbsp per pint)."""
    return value / 32.0
def to_u_s_gallons(value):
    """Convert US tablespoons to US gallons (256 tbsp per gallon)."""
    return value / 256.0
def to_u_s_fluid_ounces(value):
    """Convert US tablespoons to US fluid ounces (2 tbsp per fl oz)."""
    return value / 2.0
def to_u_s_cups(value):
    """Convert US tablespoons to US cups (16 tbsp per cup)."""
    return value / 16.0
| 2.546875 | 3 |
pyavreceiver/command.py | JPHutchins/pyavreceiver | 2 | 12772023 | """Define commands."""
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Tuple, Union
from pyavreceiver import const
from pyavreceiver.error import AVReceiverInvalidArgumentError
from pyavreceiver.functions import identity
class CommandValues:
    """Dict-like container of the values a command accepts.

    Values are reachable as attributes (``cv.power``), as items
    (``cv["power"]``) and through the usual dict-style helpers. Missing
    names raise AVReceiverInvalidArgumentError; item assignment only takes
    effect for names that are not already present.
    """

    __slots__ = ("_values",)

    def __init__(self, values: dict):
        self._values = values
        # Re-store the "min"/"max" bounds when they are present and not
        # None (kept verbatim from the original initialization logic).
        for bound in ("min", "max"):
            if self._values.get(bound) is not None:
                self._values[bound] = self._values.get(bound)

    def __repr__(self) -> str:
        # Full mapping, including None entries.
        return str(self._values)

    def __str__(self) -> str:
        # Only the names that actually have a value.
        defined = {key for key in self._values if self._values[key] is not None}
        return str(defined)

    def get(self, name) -> Union[int, str, float]:
        """Dict-style get(): the value for *name*, or None."""
        return self._values.get(name)

    def keys(self) -> list:
        """Dict-style keys() view."""
        return self._values.keys()

    def update(self, _dict):
        """Dict-style update() — overwrites existing entries."""
        self._values.update(_dict)

    def items(self) -> Sequence[Tuple[str, Union[int, str, float]]]:
        """Dict-style items() view."""
        return self._values.items()

    def values(self) -> List[str]:
        """Dict-style values() view."""
        return self._values.values()

    def __getattr__(self, name: str) -> Union[int, str, float]:
        try:
            return self._values[name]
        except KeyError:
            raise AVReceiverInvalidArgumentError

    def __getitem__(self, name: str) -> Union[int, str, float]:
        try:
            return self._values[name]
        except KeyError:
            raise AVReceiverInvalidArgumentError

    def __setitem__(self, name: str, val):
        """Store *val* only when *name* is not yet present."""
        if name not in self._values:
            self._values[name] = val
class Command(ABC):
    """Abstract base class all receiver command types derive from."""
class TelnetCommand(Command, ABC):
    """Define the telnet command interface.

    A command carries a name, its message group, the set of legal values,
    a QoS level (which selects the retry count) and a sequence number that
    serves as its identity (hash/equality).
    """

    # Slots keep per-instance memory low for the many commands created.
    __slots__ = (
        "_name",
        "_group",
        "_values",
        "_val_pfx",
        "_func",
        "_zero",
        "_val",
        "_valid_strings",
        "_message",
        "_qos",
        "_sequence",
        "_retries",
    )
    def __init__(
        self,
        *,
        name: str = None,
        group: str = None,
        values: CommandValues = None,
        val_pfx: str = "",
        func: Callable = identity,
        zero: int = 0,
        val: Union[float, int, str] = None,
        valid_strings: list = None,
        message: str = None,
        qos: int = 0,
        sequence: int = -1,
    ):
        # Keyword-only construction; attributes mirror the parameters.
        self._name = name
        self._group = group
        self._values = values
        self._val_pfx = val_pfx
        self._func = func
        self._zero = zero
        self._val = val
        self._valid_strings = valid_strings
        self._message = message
        self._qos = qos
        self._sequence = sequence
        self._retries = const.DEFAULT_RETRY_SCHEMA[qos]  # qos defines number of retries
    def __hash__(self):
        # Identity is the dispatcher-assigned sequence number.
        return self._sequence
    def __eq__(self, other):
        # Equal iff the other object carries the same sequence number.
        try:
            return self._sequence == other._sequence
        except AttributeError:
            return False
    def __repr__(self):
        # NOTE(review): assumes self._values is set (constructor or
        # init_values); repr of a command without values would raise.
        args = list(self._values.keys())
        for i, arg in enumerate(args):
            if arg in ("min", "max"):
                args[i] = f"{arg}: {self._values[arg]}"
        return (
            f"{self.__class__.__name__}, name: {self._name}, group: {self._group},"
            f"val: {self._val}, args: {args}"
        )
    @abstractmethod
    def set_val(self, val: Union[int, float, str], qos: int = None, sequence: int = -1):
        """Format the command with the given argument and return it."""
    @abstractmethod
    def set_query(self, qos: int = None) -> str:
        """Format the command as a query and return it."""
    def init_values(self, values: CommandValues) -> None:
        """Initialize the values attribute with *values*."""
        self._values = values
    def set_sequence(self, sequence) -> None:
        """Set the sequence number used as hash and id."""
        self._sequence = sequence
    def lower_qos(self):
        """Lower the QoS level by one."""
        # NOTE(review): _retries is not recomputed here; it keeps the value
        # derived from the constructor-time qos — confirm this is intended.
        self._qos -= 1
    def raise_qos(self):
        """Raise the QoS level by one."""
        self._qos += 1
    @property
    def group(self) -> str:
        """The group portion of the message."""
        return self._group
    @property
    def message(self) -> str:
        """The complete message; group + argument.

        Raises a bare Exception when no message has been formatted yet.
        """
        if not self._message:
            raise Exception
        return self._message
    @property
    def name(self) -> str:
        """The name of the command."""
        return self._name
    @property
    def retries(self) -> int:
        """The number of retries to attempt if the command fails."""
        return self._retries
    @property
    def val(self) -> str:
        """The argument of the command."""
        return self._val
    @property
    def values(self) -> list:
        """Return the valid argument values."""
        return self._values
    @property
    def qos(self) -> int:
        """Return the QoS level."""
        return self._qos
| 3.1875 | 3 |
app/accounts/admin.py | bladewing/bahnplan-next | 0 | 12772024 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from accounts.models import Player
class PlayerInline(admin.StackedInline):
    # Edit the related Player record inline on the User admin page.
    model = Player
class UserAdmin(BaseUserAdmin):
    # Django's stock UserAdmin, extended with the inline Player editor.
    inlines = (PlayerInline,)
# Replace the default User admin with the Player-aware one.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Player)
| 1.945313 | 2 |
libraries/botbuilder-core/botbuilder/core/state_property_info.py | awaemmanuel/botbuilder-python | 1 | 12772025 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC
class StatePropertyInfo(ABC):
    """Abstract description of a state property."""

    @property
    def name(self):
        """Name of the state property; concrete subclasses must provide it."""
        raise NotImplementedError()
workalendar/usa/pennsylvania.py | taiyeoguns/workalendar | 405 | 12772026 | <gh_stars>100-1000
from ..registry_tools import iso_register
from .core import UnitedStates
@iso_register('US-PA')
class Pennsylvania(UnitedStates):
    """Pennsylvania calendar: UnitedStates holidays plus the state extras."""
    # Feature flags, presumably consumed by the UnitedStates base class
    # when building the holiday list — confirm against core.UnitedStates.
    include_good_friday = True
    include_thanksgiving_friday = True
    include_election_day_every_year = True
| 1.601563 | 2 |
homeassistant/components/blnet/__init__.py | nielstron/home-assistant | 1 | 12772027 | """
Connect to a BL-NET via it's web interface and read and write data
TODO: as component
"""
import logging
import voluptuous as vol
from homeassistant.helpers.discovery import load_platform
from homeassistant.const import (
CONF_RESOURCE, CONF_PASSWORD, CONF_SCAN_INTERVAL, TEMP_CELSIUS,
)
from homeassistant.helpers.event import async_track_time_interval
from datetime import timedelta
from datetime import datetime
import homeassistant.helpers.config_validation as cv
# Python package required by this (legacy-style) Home Assistant component.
REQUIREMENTS = [
    'pyblnet==0.8.0'
]
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'blnet'
# configuration.yaml option keys
CONF_WEB_PORT = 'web_port'
CONF_TA_PORT = 'ta_port'
CONF_USE_WEB = 'use_web'
CONF_USE_TA = 'use_ta'
CONF_NODE = 'can_node'
# Defaults
DEFAULT_WEB_PORT = 80
DEFAULT_TA_PORT = 40000
# scan every 6 minutes per default
DEFAULT_SCAN_INTERVAL = 360
# Measurement unit per sensor domain reported by the BL-NET.
UNIT = {
    'analog': TEMP_CELSIUS,
    'speed': 'rpm',
    'power': 'kW',
    'energy': 'kWh'
}
# Frontend icon per sensor domain.
ICON = {
    'analog': 'mdi:thermometer',
    'speed': 'mdi:speedometer',
    'power': 'mdi:power-plug',
    'energy': 'mdi:power-plug'
}
# Voluptuous schema validating the component's configuration.yaml block.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_RESOURCE): cv.url,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_NODE): cv.positive_int,
        vol.Optional(CONF_SCAN_INTERVAL,
                     default=DEFAULT_SCAN_INTERVAL): cv.positive_int,
        vol.Optional(CONF_WEB_PORT, default=DEFAULT_WEB_PORT): cv.positive_int,
        vol.Optional(CONF_TA_PORT, default=DEFAULT_TA_PORT): cv.positive_int,
        vol.Optional(CONF_USE_WEB, default=True): cv.boolean,
        vol.Optional(CONF_USE_TA, default=False): cv.boolean
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the BLNET component.

    Connects to the BL-NET gateway once, stores a BLNETComm bridge in
    hass.data, schedules periodic polling and loads one platform entity
    per discovered sensor/switch. Returns False on connection or
    configuration errors.
    """
    from pyblnet import BLNET, test_blnet
    # NOTE(review): test_blnet is imported but never used here — confirm
    # whether it is needed or can be dropped from this import.
    config = config[DOMAIN]
    resource = config.get(CONF_RESOURCE)
    password = config.get(CONF_PASSWORD)
    can_node = config.get(CONF_NODE)
    scan_interval = config.get(CONF_SCAN_INTERVAL)
    web_port = config.get(CONF_WEB_PORT)
    ta_port = config.get(CONF_TA_PORT)
    use_web = config.get(CONF_USE_WEB)
    use_ta = config.get(CONF_USE_TA)
    # Initialize the BL-NET sensor
    try:
        blnet = BLNET(resource, password=password, web_port=web_port,
                      ta_port=ta_port, use_web=use_web, use_ta=use_ta)
    except (ValueError, AssertionError) as ex:
        # ValueError: host unreachable; AssertionError: invalid settings.
        if isinstance(ex, ValueError):
            _LOGGER.error("No BL-Net reached at {}".format(resource))
        else:
            _LOGGER.error("Configuration invalid: {}".format(ex))
        return False
    # set the communication entity
    hass.data["DATA_{}".format(DOMAIN)] = BLNETComm(blnet, can_node)
    # make sure the communication device gets updated once in a while
    def fetch_data(*_):
        # Returns the freshly fetched data dict (used for discovery below).
        return hass.data["DATA_{}".format(DOMAIN)].update()
    # Get the latest data from REST API and load
    # sensors and switches accordingly
    data = fetch_data()
    async_track_time_interval(hass,
                              fetch_data,
                              timedelta(seconds=scan_interval))
    i = 0
    # One 'sensor' entity per reported analog/speed/power/energy channel.
    for domain in ['analog', 'speed', 'power', 'energy']:
        for sensor_id in data[domain]:
            _LOGGER.info("Discovered {} sensor {} in use, adding".format(domain, sensor_id))
            i+=1
            disc_info = {
                'name': '{} {} {}'.format(DOMAIN, domain, sensor_id),
                'domain': domain,
                'id': sensor_id
            }
            load_platform(hass, 'sensor', DOMAIN, disc_info, config)
    # Digital channels become switches when the web interface is enabled
    # (writable), read-only sensors otherwise.
    for sensor_id in data['digital']:
        _LOGGER.info("Discovered digital sensor {} in use, adding".format(sensor_id))
        i+=1
        disc_info = {
            'name': '{} digital {}'.format(DOMAIN, sensor_id),
            'id': sensor_id,
            'domain': 'digital'
        }
        if use_web:
            component = 'switch'
        else:
            component = 'sensor'
        load_platform(hass, component, DOMAIN, disc_info, config)
    _LOGGER.info("Added overall {} sensors".format(i))
    return True
class BLNETComm(object):
    """Implementation of a BL-NET - UVR1611 communication component.

    Shared bridge between the platforms: holds the pyblnet connection,
    the CAN node to address, and a cache of entity attributes keyed by
    entity id, refreshed by update().
    """
    def __init__(self, blnet, node):
        self.blnet = blnet      # pyblnet.BLNET connection object
        self.node = node        # CAN node to address (may be None)
        # Map id -> attributes
        self.data = {}
        self._last_updated = None
    def turn_on(self, switch_id):
        """Switch the given digital output on (delegates to pyblnet)."""
        # only change active node if this is desired
        self.blnet.turn_on(switch_id, self.node)
    def turn_off(self, switch_id):
        """Switch the given digital output off (delegates to pyblnet)."""
        # only change active node if this is desired
        self.blnet.turn_off(switch_id, self.node)
    def turn_auto(self, switch_id):
        """Put the given digital output back into automatic mode."""
        # only change active node if this is desired
        self.blnet.turn_auto(switch_id, self.node)
    def last_updated(self):
        """Return the datetime of the last successful update(), or None."""
        return self._last_updated
    def update(self):
        """Get the latest data from BLNET and update the state."""
        data = self.blnet.fetch(self.node)
        # Numeric channels: value, unit (falling back to the domain default)
        # and a domain-specific icon.
        for domain in ['analog', 'speed', 'power', 'energy']:
            # iterate through the list and create a sensor for every value
            for key, sensor in data.get(domain, {}).items():
                attributes = {}
                entity_id = '{} {} {}'.format(DOMAIN, domain, key)
                attributes['value'] = sensor.get('value')
                attributes['unit_of_measurement'] = sensor.get('unit_of_measurement',
                                                               UNIT[domain])
                attributes['friendly_name'] = sensor.get('name')
                attributes['icon'] = ICON[domain]
                self.data[entity_id] = attributes
        # iterate through the list and create a sensor for every value
        for key, sensor in data.get('digital', {}).items():
            attributes = {}
            entity_id = '{} digital {}'.format(DOMAIN, key)
            attributes['friendly_name'] = sensor.get('name')
            attributes['mode'] = sensor.get('mode')
            attributes['value'] = sensor.get('value')
            # Change the symbol according to current mode and setting
            # Automated switch => gear symbol
            if sensor.get('mode') == 'AUTO':
                attributes['icon'] = 'mdi:settings'
            # Nonautomated switch, toggled on => switch on ('EIN' = German "on")
            elif sensor.get('mode') == 'EIN':
                attributes['icon'] = 'mdi:toggle-switch'
            # Nonautomated switch, toggled off => switch off
            else:
                attributes['icon'] = 'mdi:toggle-switch-off'
            self.data[entity_id] = attributes
        # save that the data was updated now
        self._last_updated = datetime.now()
        return data
| 2.34375 | 2 |
sdk/python/pulumi_aws_native/pinpointemail/configuration_set.py | AaronFriel/pulumi-aws-native | 29 | 12772028 | <gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ConfigurationSetArgs', 'ConfigurationSet']
# Generated by the Pulumi SDK Generator (see the file header) — keep edits
# minimal; regeneration overwrites this file.
@pulumi.input_type
class ConfigurationSetArgs:
    def __init__(__self__, *,
                 delivery_options: Optional[pulumi.Input['ConfigurationSetDeliveryOptionsArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 reputation_options: Optional[pulumi.Input['ConfigurationSetReputationOptionsArgs']] = None,
                 sending_options: Optional[pulumi.Input['ConfigurationSetSendingOptionsArgs']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationSetTagsArgs']]]] = None,
                 tracking_options: Optional[pulumi.Input['ConfigurationSetTrackingOptionsArgs']] = None):
        """
        The set of arguments for constructing a ConfigurationSet resource.
        """
        # Only explicitly supplied (non-None) arguments are stored.
        if delivery_options is not None:
            pulumi.set(__self__, "delivery_options", delivery_options)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if reputation_options is not None:
            pulumi.set(__self__, "reputation_options", reputation_options)
        if sending_options is not None:
            pulumi.set(__self__, "sending_options", sending_options)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tracking_options is not None:
            pulumi.set(__self__, "tracking_options", tracking_options)
    @property
    @pulumi.getter(name="deliveryOptions")
    def delivery_options(self) -> Optional[pulumi.Input['ConfigurationSetDeliveryOptionsArgs']]:
        return pulumi.get(self, "delivery_options")
    @delivery_options.setter
    def delivery_options(self, value: Optional[pulumi.Input['ConfigurationSetDeliveryOptionsArgs']]):
        pulumi.set(self, "delivery_options", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="reputationOptions")
    def reputation_options(self) -> Optional[pulumi.Input['ConfigurationSetReputationOptionsArgs']]:
        return pulumi.get(self, "reputation_options")
    @reputation_options.setter
    def reputation_options(self, value: Optional[pulumi.Input['ConfigurationSetReputationOptionsArgs']]):
        pulumi.set(self, "reputation_options", value)
    @property
    @pulumi.getter(name="sendingOptions")
    def sending_options(self) -> Optional[pulumi.Input['ConfigurationSetSendingOptionsArgs']]:
        return pulumi.get(self, "sending_options")
    @sending_options.setter
    def sending_options(self, value: Optional[pulumi.Input['ConfigurationSetSendingOptionsArgs']]):
        pulumi.set(self, "sending_options", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationSetTagsArgs']]]]:
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationSetTagsArgs']]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="trackingOptions")
    def tracking_options(self) -> Optional[pulumi.Input['ConfigurationSetTrackingOptionsArgs']]:
        return pulumi.get(self, "tracking_options")
    @tracking_options.setter
    def tracking_options(self, value: Optional[pulumi.Input['ConfigurationSetTrackingOptionsArgs']]):
        pulumi.set(self, "tracking_options", value)
# Module-level deprecation notice, emitted once on import.
warnings.warn("""ConfigurationSet is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class ConfigurationSet(pulumi.CustomResource):
    # Class-body warning fires once, when this generated class is defined.
    warnings.warn("""ConfigurationSet is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 delivery_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetDeliveryOptionsArgs']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 reputation_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetReputationOptionsArgs']]] = None,
                 sending_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetSendingOptionsArgs']]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationSetTagsArgs']]]]] = None,
                 tracking_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetTrackingOptionsArgs']]] = None,
                 __props__=None):
        """
        Resource Type definition for AWS::PinpointEmail::ConfigurationSet
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[ConfigurationSetArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Resource Type definition for AWS::PinpointEmail::ConfigurationSet
        :param str resource_name: The name of the resource.
        :param ConfigurationSetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above and forward to
        # _internal_init, which does the actual registration.
        resource_args, opts = _utilities.get_resource_args_opts(ConfigurationSetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 delivery_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetDeliveryOptionsArgs']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 reputation_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetReputationOptionsArgs']]] = None,
                 sending_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetSendingOptionsArgs']]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationSetTagsArgs']]]]] = None,
                 tracking_options: Optional[pulumi.Input[pulumi.InputType['ConfigurationSetTrackingOptionsArgs']]] = None,
                 __props__=None):
        pulumi.log.warn("""ConfigurationSet is deprecated: ConfigurationSet is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: populate props from the arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ConfigurationSetArgs.__new__(ConfigurationSetArgs)
            __props__.__dict__["delivery_options"] = delivery_options
            __props__.__dict__["name"] = name
            __props__.__dict__["reputation_options"] = reputation_options
            __props__.__dict__["sending_options"] = sending_options
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tracking_options"] = tracking_options
        super(ConfigurationSet, __self__).__init__(
            'aws-native:pinpointemail:ConfigurationSet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ConfigurationSet':
        """
        Get an existing ConfigurationSet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Props start as None and are filled from provider state on lookup.
        __props__ = ConfigurationSetArgs.__new__(ConfigurationSetArgs)
        __props__.__dict__["delivery_options"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["reputation_options"] = None
        __props__.__dict__["sending_options"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["tracking_options"] = None
        return ConfigurationSet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="deliveryOptions")
    def delivery_options(self) -> pulumi.Output[Optional['outputs.ConfigurationSetDeliveryOptions']]:
        return pulumi.get(self, "delivery_options")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="reputationOptions")
    def reputation_options(self) -> pulumi.Output[Optional['outputs.ConfigurationSetReputationOptions']]:
        return pulumi.get(self, "reputation_options")
    @property
    @pulumi.getter(name="sendingOptions")
    def sending_options(self) -> pulumi.Output[Optional['outputs.ConfigurationSetSendingOptions']]:
        return pulumi.get(self, "sending_options")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ConfigurationSetTags']]]:
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="trackingOptions")
    def tracking_options(self) -> pulumi.Output[Optional['outputs.ConfigurationSetTrackingOptions']]:
        return pulumi.get(self, "tracking_options")
| 1.65625 | 2 |
FEV_KEGG/KEGG/NUKA.py | ryhaberecht/FEV-KEGG | 0 | 12772029 | from FEV_KEGG.Graph import SubstanceGraphs
from FEV_KEGG.Graph.Elements import ReactionID, EcNumber
from FEV_KEGG.Graph.SubstanceGraphs import SubstanceEcGraph, SubstanceReactionGraph
from FEV_KEGG.KEGG.File import cache
from FEV_KEGG.KEGG.Organism import Organism
from FEV_KEGG.settings import verbosity as init_verbosity
class NUKA(object):
    """
    A hypothetical 'complete' organism - NUKA - which possesses all EC numbers
    known to all metabolic KEGG pathways.

    Conversions to other graph types are not possible, because as a
    hypothetical organism, NUKA has no genes.

    Attributes
    ----------
    self.nameAbbreviation : str
    """

    def __init__(self):
        self.nameAbbreviation = 'NUKA'

    @property
    @cache(folder_path = 'NUKA/graph', file_name = 'SubstanceReactionGraph')
    def substanceReactionGraph(self) -> SubstanceReactionGraph:
        """
        NUKA's substance-reaction graph.

        Returns
        -------
        SubstanceReactionGraph
            Contains all substrates/products and all reactions known to KEGG's metabolic pathways.

        Raises
        ------
        HTTPError
            If any underlying organism, pathway, or gene does not exist.
        URLError
            If connection to KEGG fails.

        Note
        ----
        This SubstanceReactionGraph can **NOT** be converted into a SubstanceGeneGraph, as the pathways do not contain gene information!
        """
        # 'ec' is not an organism abbreviation; it merely requests pathways with
        # EC numbers as edges, i.e. the full pathways not specific to any species.
        mockOrganism = Organism('ec')
        pathwaysSet = mockOrganism.getMetabolicPathways(includeOverviewMaps = False)
        substanceReactionGraph = SubstanceGraphs.Conversion.KeggPathwaySet2SubstanceReactionGraph(pathwaysSet, localVerbosity = 0)
        substanceReactionGraph.name = 'Substance-Reaction NUKA'
        if init_verbosity > 0:
            print('calculated ' + substanceReactionGraph.name)
        return substanceReactionGraph

    @property
    @cache(folder_path = 'NUKA/graph', file_name = 'SubstanceEcGraph')
    def substanceEcGraph(self) -> SubstanceEcGraph:
        """
        NUKA's substance-EC graph.

        Returns
        -------
        SubstanceEcGraph
            Contains all substrates/products and all EC numbers known to KEGG's metabolic pathways.

        Raises
        ------
        HTTPError
            If any underlying organism, pathway, or gene does not exist.
        URLError
            If connection to KEGG fails.
        """
        return self._SubstanceReactionGraph2SubstanceEcGraph(self.substanceReactionGraph)

    def _SubstanceReactionGraph2SubstanceEcGraph(self, speciesSubstanceReactionGraph: SubstanceReactionGraph) -> SubstanceEcGraph:
        """
        Converts NUKA's substance-reaction graph into a substance-EC graph.
        Uses pathway information embedded into the graph object.

        Parameters
        ----------
        speciesSubstanceReactionGraph : SubstanceReactionGraph
            NUKA's substance-reaction graph.

        Returns
        -------
        SubstanceEcGraph
            NUKA's substance-EC graph.

        Warnings
        --------
        This function is special to NUKA and **MUST NOT** be used anywhere else!
        """
        # shallow-copy old graph to new graph
        graph = SubstanceEcGraph(speciesSubstanceReactionGraph.underlyingRawGraph)
        graph.name = 'Substance-EC NUKA'

        # replacement map: reaction -> {EC numbers}
        replacementDict = {}

        # for each embedded pathway, get the list of 'enzyme' entries
        for pathway in speciesSubstanceReactionGraph.pathwaySet:
            ecEntryList = [e for e in pathway.entries.values() if e.type == 'enzyme']

            # for each EC number, get the reactions in which it is involved
            for ecEntry in ecEntryList:
                reactionIDList = ecEntry.reaction.split()
                if not reactionIDList:  # skip EC numbers not associated with any reaction
                    continue
                ecNumberList = ecEntry.name.split()

                # associate each reaction with the set of its EC numbers
                for reactionID in reactionIDList:
                    reactionName = reactionID.split(':', 1)[1]
                    reaction = ReactionID(reactionName)

                    ecNumberSet = {EcNumber(ecNumberString.replace('ec:', ''))
                                   for ecNumberString in ecNumberList}

                    # merge into the replacement dict; setdefault replaces the
                    # original fragile `== None` / `__class__ != set` check
                    replacementDict.setdefault(reaction, set()).update(ecNumberSet)

        # copy the edge list so we do not mutate the graph while iterating it
        edgeList = list(graph.getEdges())

        # replace reaction edges with EC-number edges, using the replacement dict
        for edge in edgeList:
            substrate, product, reaction = edge

            # delete old edge
            graph.removeEdge(substrate, product, reaction, False)

            # add new edges, according to the replacement dict
            for ecNumber in replacementDict[reaction]:
                graph.addEC(substrate, product, ecNumber, False)

        if init_verbosity > 0:
            print('calculated ' + graph.name)

        return graph
| 2.25 | 2 |
eggo/resources/download_mapper.py | laserson/eggo | 32 | 12772030 | #! /usr/bin/env python
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# standard lib only makes it easier to run
import re
import os
import sys
import json
from subprocess import check_call
from os.path import join as pjoin
from hashlib import md5
def sanitize(dirty):
    """Return *dirty* with URI/filename-unsafe characters replaced by '_'.

    Replaces '/', '\\', ';', ':', '?', '=' (inspired by datacache).  Results
    longer than 150 characters are shortened to an MD5 prefix of the original
    string plus its last 114 characters, keeping names unique but bounded.
    """
    clean = re.sub(r'/|\\|;|:|\?|=', '_', dirty)
    if len(clean) > 150:
        # md5 requires bytes; encoding makes this correct on Python 3 as well
        # (the sibling uri_to_sanitized_filename already encodes)
        prefix = md5(dirty.encode('utf-8')).hexdigest()
        clean = prefix + clean[-114:]
    return clean
def uri_to_sanitized_filename(source_uri, decompress=False):
    """Build a deterministic cache filename for *source_uri*.

    The name is the URI's MD5 digest joined with its sanitized form
    (inspired by datacache).  When ``decompress`` is true, a trailing
    ``.gz`` extension is dropped to match the decompressed payload.
    """
    checksum = md5(source_uri.encode('utf-8')).hexdigest()
    filename = '{digest}.{sanitized_uri}'.format(
        digest=checksum, sanitized_uri=sanitize(source_uri))
    if not decompress:
        return filename
    stem, extension = os.path.splitext(filename)
    return stem if extension == '.gz' else filename
# Hadoop-streaming mapper: each stdin line is "<key>\t<json resource dict>".
# For every resource, stream the URL through curl (optionally gunzip) straight
# into HDFS, then emit "<dest_path>\t1" as dummy mapper output.
for line in sys.stdin:
    resource = json.loads(line.split('\t', 1)[1])

    # compute dest filename (deterministic, sanitized; see
    # uri_to_sanitized_filename above)
    staging_path = os.environ['STAGING_PATH']
    decompress = resource['compression'] in ['gzip']
    dest_name = uri_to_sanitized_filename(resource['url'],
                                          decompress=decompress)
    dest_path = pjoin(staging_path, dest_name)

    # construct dnload cmd (straight into HDFS)
    # NOTE(review): the URL is interpolated into a shell=True pipeline; if
    # resource URLs can be untrusted this is a shell-injection risk -- confirm.
    pipeline = ['curl -L {0}'.format(resource['url'])]
    if resource['compression'] == 'gzip':
        pipeline.append('gunzip')
    pipeline.append('hadoop fs -put - {0}'.format(dest_path))

    # ensure staging path exists
    check_call('hadoop fs -mkdir -p {0}'.format(staging_path), shell=True)

    # execute dnload
    cmd = ' | '.join(pipeline)
    check_call(cmd, shell=True)

    # dummy output
    sys.stdout.write('{0}\t1\n'.format(dest_path))
| 2.125 | 2 |
scraper/schema.py | pekasen/DBoeS-Automatization | 0 | 12772031 | <reponame>pekasen/DBoeS-Automatization
# Configure schema: 'Column_name': ['List', 'of', 'synonyms']
columns_with_synonyms = {
    'Name': ['Mitglied des Landtages'],
    'Fraktion': ['Partei',
                 'Fraktion (ggf. Partei)',
                 ],
    'Wahlkreis': ['Landtagswahlkreis der Direktkandidaten',
                  'Landtagswahlkreis',
                  'Wahlkreis/Liste',
                  ],
    'Kommentar': ['Anmerkung',
                  'Anmerkungen',
                  'Bemerkungen',
                  ],
    'Bild': ['Foto'],
    'Wikipedia-URL': []
}

# Automatically generate schema and synonym map for scraper (dev only)
schema = list(columns_with_synonyms)

# Map every synonym back to its canonical column name.
schema_map = {synonym: column
              for column, synonyms in columns_with_synonyms.items()
              for synonym in synonyms}
| 2.3125 | 2 |
sumo/tools/build/wix.py | iltempe/osmosi | 0 | 12772032 | <filename>sumo/tools/build/wix.py
#!/usr/bin/env python
"""
@file wix.py
@author <NAME>
@author <NAME>
@date 2011
@version $Id$
Builds the installer based on the nightly zip.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2011-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
import optparse
import subprocess
import zipfile
import os
import tempfile
import glob
import shutil
# Default location of the nightly build zip that gets repackaged.
INPUT_DEFAULT = r"O:\Daten\Sumo\daily\sumo-win32-svn.zip"
# Default name of the generated MSI installer.
OUTPUT_DEFAULT = "sumo.msi"
# WiX toolset binaries; honours the WIX environment variable when set.
WIX_DEFAULT = "%sbin" % os.environ.get(
    "WIX", r"D:\Programme\Windows Installer XML v3.5\\")
# Glob pattern for the WiX source (*.wxs) templates shipped with SUMO.
WXS_DEFAULT = os.path.join(
    os.path.dirname(__file__), "..", "..", "build", "wix", "*.wxs")
# License text embedded into the installer UI.
LICENSE = os.path.join(
    os.path.dirname(__file__), "..", "..", "build", "wix", "License.rtf")

# Files excluded from the harvested fragments; each match also skips the
# two following lines of heat.exe output (see buildFragment).
SKIP_FILES = ["osmWebWizard.py", "sumo-gui.exe",
              "netedit.exe", "start-command-line.bat"]
def buildFragment(wixBin, sourceDir, targetLabel, tmpDir, log=None):
    """Harvest *sourceDir* with WiX heat.exe and rewrite the raw fragment.

    The raw fragment's "SourceDir" placeholders are replaced with the real
    source directory, and files listed in SKIP_FILES are filtered out (each
    match drops the matching line plus the two following lines of heat
    output).  Returns the path of the rewritten fragment file.
    """
    base = os.path.basename(sourceDir)
    subprocess.call([os.path.join(wixBin, "heat.exe"), "dir", sourceDir,
                     "-cg", base, "-gg", "-dr", targetLabel, "-sreg",
                     "-out", os.path.join(tmpDir, base + "RawFragment.wxs")],
                    stdout=log, stderr=log)
    fragmentPath = os.path.join(tmpDir, base + "Fragment.wxs")
    # context managers ensure both handles are closed even if a write fails
    with open(os.path.join(tmpDir, base + "RawFragment.wxs")) as fragIn, \
         open(fragmentPath, "w") as fragOut:
        skip = 0
        for line in fragIn:
            for s in SKIP_FILES:
                if s in line:
                    skip = 3
            if skip == 0:
                fragOut.write(line.replace("SourceDir", sourceDir))
            else:
                skip -= 1
    return fragmentPath
def buildMSI(sourceZip=INPUT_DEFAULT, outFile=OUTPUT_DEFAULT,
             wixBin=WIX_DEFAULT, wxsPattern=WXS_DEFAULT,
             license=LICENSE, log=None):
    """Build the SUMO MSI installer from a nightly zip.

    Unpacks *sourceZip* into a temp dir, harvests fragments for the binary
    and documentation directories via buildFragment, rewrites the *.wxs
    templates (license path, bitmap paths, nightly binary path), then runs
    candle.exe and light.exe to produce *outFile*.
    """
    tmpDir = tempfile.mkdtemp()
    # close the archive explicitly instead of leaking the handle
    with zipfile.ZipFile(sourceZip) as archive:
        archive.extractall(tmpDir)
    sumoRoot = glob.glob(os.path.join(tmpDir, "sumo-*"))[0]
    fragments = [buildFragment(wixBin, os.path.join(
        sumoRoot, d), "INSTALLDIR", tmpDir, log) for d in ["bin", "data", "tools"]]
    for d in ["userdoc", "pydoc", "javadoc", "tutorial", "examples"]:
        fragments.append(
            buildFragment(wixBin, os.path.join(sumoRoot, "docs", d), "DOCDIR", tmpDir, log))
    for wxs in glob.glob(wxsPattern):
        with open(wxs) as wxsIn:
            with open(os.path.join(tmpDir, os.path.basename(wxs)), "w") as wxsOut:
                for l in wxsIn:
                    l = l.replace("License.rtf", license)
                    dataDir = os.path.dirname(license)
                    for data in ["bannrbmp.bmp", "dlgbmp.bmp"]:
                        l = l.replace(data, os.path.join(dataDir, data))
                    wxsOut.write(
                        l.replace(r"O:\Daten\Sumo\Nightly", os.path.join(sumoRoot, "bin")))
                fragments.append(wxsOut.name)
    subprocess.call([os.path.join(wixBin, "candle.exe"),
                     "-o", tmpDir + "\\"] + fragments,
                    stdout=log, stderr=log)
    wixObj = [f.replace(".wxs", ".wixobj") for f in fragments]
    subprocess.call([os.path.join(wixBin, "light.exe"),
                     "-ext", "WixUIExtension", "-o", outFile] + wixObj,
                    stdout=log, stderr=log)
    shutil.rmtree(tmpDir, True)  # comment this out when debugging
if __name__ == "__main__":
    # command-line front end for buildMSI(); all options fall back to the
    # module-level defaults above
    optParser = optparse.OptionParser()
    optParser.add_option("-n", "--nightly-zip", dest="nightlyZip",
                         default=INPUT_DEFAULT, help="full path to nightly zip")
    optParser.add_option("-o", "--output", default=OUTPUT_DEFAULT,
                         help="full path to output file")
    optParser.add_option(
        "-w", "--wix", default=WIX_DEFAULT, help="path to the wix binaries")
    optParser.add_option(
        "-x", "--wxs", default=WXS_DEFAULT, help="pattern for wxs templates")
    optParser.add_option(
        "-l", "--license", default=LICENSE, help="path to the license")
    (options, args) = optParser.parse_args()
    buildMSI(options.nightlyZip, options.output,
             options.wix, options.wxs, options.license)
| 2.015625 | 2 |
bcs-ui/backend/iam/permissions/perm.py | ZhongmingFan/bk-bcs | 0 | 12772033 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from abc import ABC
from typing import Dict, List, Optional, Type, Union
import attr
from django.conf import settings
from .client import IAMClient
from .exceptions import AttrValidationError, PermissionDeniedError
from .request import ActionResourcesRequest, IAMResource, ResourceRequest
logger = logging.getLogger(__name__)
def validate_empty(instance, attribute, value):
    """attrs validator rejecting empty attribute values.

    See https://www.attrs.org/en/20.2.0/init.html#callables for the
    validator callable signature.
    """
    if value:
        return
    raise AttrValidationError(f"{attribute.name} must not be empty")
@attr.dataclass
class ResCreatorAction:
    """Payload for the new-resource creator-authorization API."""

    creator: str        # username of the resource creator
    project_id: str
    resource_type: str

    def __attrs_post_init__(self):
        # system id registered with BK-IAM
        self.system = settings.BK_IAM_SYSTEM_ID

    def to_data(self) -> Dict:
        """Serialize to the dict shape expected by the IAM client."""
        return {'creator': self.creator, 'system': self.system, 'type': self.resource_type}
@attr.s(kw_only=True)
class PermCtx:
    """
    Permission-check context.

    note: because ``force_raise`` carries a default, attrs requires every
    attribute added by subclasses to define a default as well.
    """

    username = attr.ib(validator=[attr.validators.instance_of(str), validate_empty])
    # when True, skip the actual check and raise straight away as "no permission"
    force_raise = attr.ib(validator=[attr.validators.instance_of(bool)], default=False)

    @classmethod
    def from_dict(cls, init_data: Dict) -> 'PermCtx':
        """Build a context from a plain dict (e.g. ``attr.asdict`` output)."""
        return cls(username=init_data['username'], force_raise=init_data['force_raise'])

    def validate_resource_id(self):
        """Raise AttrValidationError when no valid resource instance id is set."""
        if not self.resource_id:
            raise AttrValidationError('missing valid resource_id')

    @property
    def resource_id(self) -> str:
        """Resource instance id registered with IAM; '' means instance-independent."""
        return ''

    def get_parent_chain(self) -> List[IAMResource]:
        # no parent resources by default; subclasses override
        return []
class Permission(ABC, IAMClient):
    """
    Base class for permissions backed by BK-IAM.
    """

    resource_type: str = ''
    perm_ctx_cls: Type[PermCtx] = PermCtx
    resource_request_cls: Type[ResourceRequest] = ResourceRequest
    parent_res_perm: Optional['Permission'] = None  # permission object of the parent resource

    def can_action(self, perm_ctx: PermCtx, action_id: str, raise_exception: bool, use_cache: bool = False) -> bool:
        """
        Check the user's permission for ``action_id``.

        :param perm_ctx: permission-check context
        :param action_id: resource action id
        :param raise_exception: whether to raise when permission is denied
        :param use_cache: use the local cache (1 min TTL); intended for
            non-sensitive actions such as 'view'
        """
        if perm_ctx.force_raise:
            self._raise_permission_denied_error(perm_ctx, action_id)

        is_allowed = self._can_action(perm_ctx, action_id, use_cache)
        if raise_exception and not is_allowed:
            self._raise_permission_denied_error(perm_ctx, action_id)
        return is_allowed

    def can_multi_actions(self, perm_ctx: PermCtx, action_ids: List[str], raise_exception: bool) -> bool:
        """
        Check several actions against a single resource of one type.

        :param perm_ctx: permission-check context
        :param action_ids: list of resource action ids
        :param raise_exception: whether to raise when any permission is denied
        :return: True only when every action is allowed; otherwise False or raises
        """
        perm_ctx.validate_resource_id()

        # perms looks like {'project_view': True, 'project_edit': False}
        if perm_ctx.force_raise:
            perms = {action_id: False for action_id in action_ids}
        else:
            res_request = self.make_res_request(perm_ctx)
            perms = self.resource_inst_multi_actions_allowed(
                perm_ctx.username, action_ids, resources=res_request.make_resources(perm_ctx.resource_id)
            )

        return self._can_multi_actions(perm_ctx, perms, raise_exception)

    def resources_actions_allowed(
        self, username: str, action_ids: List[str], res_ids: Union[List[str], str], res_request: ResourceRequest
    ):
        """
        Check whether the user may perform several actions on several
        resources.  The current SDK only supports resources of a single type.

        :return: e.g. {'0ad86c25363f4ef8adcb7ac67a483837': {'project_view': True, 'project_edit': False}}
        """
        return self.batch_resource_multi_actions_allowed(username, action_ids, res_request.make_resources(res_ids))

    def grant_resource_creator_actions(self, creator_action: ResCreatorAction):
        """
        On resource creation, grant the creator the actions associated with
        the new resource.

        note: the concrete actions come from the permission model's
        resource_creator_actions field
        """
        return self.iam._client.grant_resource_creator_actions(None, creator_action.creator, creator_action.to_data())

    def has_parent_resource(self) -> bool:
        return self.parent_res_perm is not None

    def make_res_request(self, perm_ctx: PermCtx) -> ResourceRequest:
        return self.resource_request_cls.from_dict(attr.asdict(perm_ctx))

    def _can_action(self, perm_ctx: PermCtx, action_id: str, use_cache: bool = False) -> bool:
        res_id = perm_ctx.resource_id

        if res_id:  # tied to a concrete resource instance
            res_request = self.make_res_request(perm_ctx)
            resources = res_request.make_resources(res_id)
            return self.resource_inst_allowed(perm_ctx.username, action_id, resources, use_cache)

        # instance-independent with no parent resource: check by resource type
        if not self.has_parent_resource():
            return self.resource_type_allowed(perm_ctx.username, action_id, use_cache)

        # instance-independent but with a parent resource: check against the parent
        p_perm_ctx = self.parent_res_perm.perm_ctx_cls.from_dict(attr.asdict(perm_ctx))
        res_request = self.parent_res_perm.make_res_request(p_perm_ctx)
        resources = res_request.make_resources(p_perm_ctx.resource_id)
        return self.resource_inst_allowed(perm_ctx.username, action_id, resources, use_cache)

    def _can_multi_actions(self, perm_ctx: PermCtx, perms: Dict[str, bool], raise_exception: bool) -> bool:
        # collect a denial message and a request item for every disallowed action
        messages = []
        action_request_list = []

        for action_id, is_allowed in perms.items():
            if is_allowed:
                continue
            try:
                self._raise_permission_denied_error(perm_ctx, action_id)
            except PermissionDeniedError as e:
                messages.append(e.message)
                action_request_list.extend(e.action_request_list)

        if not messages:
            return True

        if not raise_exception:
            return False

        raise PermissionDeniedError(
            message=';'.join(messages), username=perm_ctx.username, action_request_list=action_request_list
        )

    def _raise_permission_denied_error(self, perm_ctx: PermCtx, action_id: str):
        """Raise PermissionDeniedError; its username and action_request_list
        can be used to build the permission-application redirect link."""
        res_id = perm_ctx.resource_id
        resources = None
        resource_type = self.resource_type
        parent_chain = None

        if res_id:
            resources = [res_id]
            parent_chain = perm_ctx.get_parent_chain()
        elif self.has_parent_resource():
            resource_type = self.parent_res_perm.resource_type
            p_perm_ctx = self.parent_res_perm.perm_ctx_cls.from_dict(attr.asdict(perm_ctx))
            resources = [p_perm_ctx.resource_id]
            parent_chain = p_perm_ctx.get_parent_chain()

        raise PermissionDeniedError(
            f"no {action_id} permission",
            username=perm_ctx.username,
            action_request_list=[ActionResourcesRequest(action_id, resource_type, resources, parent_chain)],
        )
| 1.914063 | 2 |
okl4_kernel/okl4_2.1.1-patch.9/tools/magpie-parsers/src/magpieparsers/mig/__init__.py | CyberQueenMara/baseband-research | 77 | 12772034 | <reponame>CyberQueenMara/baseband-research
from magpieparsers.mig.parser import parse_to_pt

# The canonical lowercase __all__ so "from ... import *" honours it;
# the original misspelled __ALL__ had no effect on star imports.
__all__ = ['parse_to_pt']
| 1.210938 | 1 |
_test/_test_multi_gaussian_distribution.py | hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators | 0 | 12772035 | <reponame>hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators<filename>_test/_test_multi_gaussian_distribution.py
# Test multi gaussian distribution
import torch
import torch.distributions as td
import matplotlib.pyplot as plt
from icecream import ic  # NOTE(review): imported but never used

# two univariate normals: N(0, 1) and N(5, 1)
g1 = td.Normal(torch.tensor([0.0]), torch.tensor([1.]))
g2 = td.Normal(torch.tensor([5.0]), torch.tensor([1.]))

# evaluation grid of 201 points over [-10, 10]
ls = torch.linspace(-10, 10, 201)
print(ls)
g1_t = g1.log_prob(ls)
g2_t = g2.log_prob(ls)

# product of the two densities: exp(log p1 + log p2)
g12_t = torch.exp(g1_t + g2_t)
# g12_s = torch.exp(g1_t) + torch.exp(g2_t)

# sum of the two densities via logsumexp over the stacked log-probs
g1_u = torch.unsqueeze(g1_t, dim=1)
g2_u = torch.unsqueeze(g2_t, dim=1)
g12_s = torch.exp(torch.logsumexp(torch.stack([g1_u, g2_u], dim=2), dim=2))

print(g12_t)

# four panels: both log-densities, the product, and the sum
figure, ax = plt.subplots(1, 4, figsize=(12, 3))
# for i in range(4):
#     ax[i].set_ylim(0, 3)
ax[0].plot(ls, g1_t)
ax[0].set_title("log_prob(p1)")
ax[1].plot(ls, g2_t)
ax[1].set_title("log_prob(p2)")
ax[1].set_xlim(-10, 10)
ax[2].plot(ls, g12_t)
ax[2].set_title("exp(sum log_i(x))")
ax[3].plot(ls, g12_s)
ax[3].set_title("exp(logsumexp(x))")
plt.show()
| 2.21875 | 2 |
tools/get_graph.py | zeng-ziyin/RG_GCN | 0 | 12772036 | <gh_stars>0
import torch
import torch.nn as nn
from torch_geometric.nn import knn_graph
class RandomDrop(nn.Module):
    """Randomly keep a subset of each point's k neighbours.

    edge_index: (2, batch_size, num_points, k).  When ``stochastic`` is set,
    only ``int(k * random_rate)`` neighbours survive along the last axis --
    a random permutation's prefix during training, the leading entries
    otherwise.
    """

    def __init__(self, k=20, random_rate=1.0, stochastic=True, isTrain=True):
        super(RandomDrop, self).__init__()
        self.random_rate = random_rate
        self.stochastic = stochastic
        self.k = k
        self.isTrain = isTrain

    def forward(self, edge_index):
        if not self.stochastic:
            return edge_index
        keep = int(self.k * self.random_rate)
        if self.isTrain:
            chosen = torch.randperm(self.k)[:keep]
            return edge_index[:, :, :, chosen]
        return edge_index[:, :, :, :keep]
class GetKnnGraph(nn.Module):
    """
    Find the neighbors' indices based on (optionally randomly dropped) kNN.

    Wraps torch_geometric's knn_graph per batch element and applies
    RandomDrop to the resulting (2, B, N, k) index tensor.
    """

    def __init__(self, k=20, stochastic=True, random_rate=1.0, isTrain=True):
        super(GetKnnGraph, self).__init__()
        self.stochastic = stochastic
        self.k = k
        self._random = RandomDrop(k, random_rate, stochastic, isTrain)
        self.knn = knn_graph
        self.random_rate = random_rate

    def forward(self, x):
        # x: (B, C, N, 1) -> squeeze the trailing singleton to (B, C, N)
        x = x.squeeze(-1)
        B, C, N = x.shape
        edge_index = []
        for i in range(B):
            # knn_graph expects (num_points, features): transpose each item
            edgeindex = self.knn(x[i].contiguous().transpose(1, 0).contiguous(), self.k)
            # truncate to exactly N*k edges before reshaping
            # NOTE(review): assumes knn_graph yields at least N*k edges;
            # fewer would make the view below fail -- confirm.
            if edgeindex.size(1) != N * self.k:
                edgeindex = edgeindex[:, :N * self.k]
            edgeindex = edgeindex.view(2, N, self.k)
            edge_index.append(edgeindex)
        edge_index = torch.stack(edge_index, dim=1)  # (2, B, N, k)
        return self._random(edge_index)
| 2.734375 | 3 |
apps/vision/trials.py | dlooto/driver-vision | 2 | 12772037 | # coding=utf-8
# !/usr/bin/env python
#
# Copyright (C) 2014 NianNian TECH Co., Ltd. All rights reserved.
# Created on Oct 22, 2015, by Junn
#
# import Queue
import random
import maths
from config import *
from vision.models import RoadModel
import copy
cached_real_roads = RoadModel.objects.get_all_roads(is_real=True)   # cache real road names (fetched once at import time)
cached_kana_roads = RoadModel.objects.get_all_roads(is_real=False)  # cache fake road names
class Shape(object):
    """Base class for movable shapes.

    Inherited by the concrete watch-point / single-board / multi-board
    classes.  (Added later to avoid code duplication between WatchPoint
    and Board.)
    """

    type_label = ''

    def set_move_scheme(self, move_scheme):
        self.move_scheme = move_scheme
        self.move_scheme.print_direction(self.type_label)

        # keep the original move_scheme so board/watch-point gluing can be undone
        self.original_move_scheme = copy.deepcopy(move_scheme)

    def set_move_velocity(self, velocity):
        """Overridden as a no-op in static trials."""
        self.move_scheme.set_velocity(velocity)

    def get_move_velocity(self):
        """Overridden as a no-op in static trials."""
        return self.move_scheme.get_velocity()

    def get_move_direction(self):
        return self.move_scheme.get_direction()

    def get_border_xy(self):
        """Return the shape's current boundary coordinates; subclasses override.

        @return: 4-element list: leftmost x, rightmost x, top y and bottom y
            boundary values of the shape.
        """
        return []

    def is_left_over(self, xy_boarder):
        """Whether the shape crossed the trial window's left edge."""
        return xy_boarder[0] <= 0

    def is_right_over(self, xy_boarder):
        """Whether the shape crossed the trial window's right edge."""
        return xy_boarder[1] >= FACE_SIZE['w']

    def is_up_over(self, xy_boarder):
        """Whether the shape crossed the trial window's top edge."""
        return xy_boarder[2] <= 0

    def is_down_over(self, xy_boarder):
        """Whether the shape crossed the trial window's bottom edge."""
        return xy_boarder[3] >= FACE_SIZE['h']

    def close_to_edge(self):
        """Whether the shape reached a window edge; returns (flag, edge mark)."""
        xy_boarder = self.get_border_xy()
        if self.is_left_over(xy_boarder):
            return True, 'L'
        if self.is_right_over(xy_boarder):
            return True, 'R'
        if self.is_up_over(xy_boarder):
            return True, 'U'
        if self.is_down_over(xy_boarder):
            return True, 'D'
        return False, ''

        # return self.is_left_over(xy_boarder) or self.is_right_over(xy_boarder) or \
        #        self.is_up_over(xy_boarder) or self.is_down_over(xy_boarder)

    def random_move_direction(self):
        """Randomly switch to another move direction while moving smoothly
        (used during the dynamic sensitivity threshold)."""
        self.move_scheme.random_direction()

    # def reverse_move_direction(self):
    #     '''During the dynamic sensitivity threshold, reverse the motion when
    #     the shape crosses a border.  Note: only handles the four axis-aligned
    #     directions.'''
    #
    #     xy_boarder = self.get_border_xy()
    #     if self.is_left_over(xy_boarder):
    #         self.move_scheme.change_to('right')
    #         print 'Left Over, Reversed to Right'
    #     elif self.is_right_over(xy_boarder):
    #         self.move_scheme.change_to('left')
    #         print 'Right Over, Reversed to Left'
    #     elif self.is_up_over(xy_boarder):
    #         self.move_scheme.change_to('down')
    #         print 'Up Over, Reversed to Down'
    #     elif self.is_down_over(xy_boarder):
    #         self.move_scheme.change_to('up')
    #         print 'Down Over, Reversed to Up'
    #     else:
    #         pass

    def handle_edge(self):
        """During smooth motion, reverse direction once a window edge is near.

        To handle a board initially overlapping an edge:
        left edge   -> grad_x forced positive
        right edge  -> grad_x forced negative
        top edge    -> grad_y forced positive
        bottom edge -> grad_y forced negative
        """
        to_edge, edge_direct = self.close_to_edge()
        if not to_edge:
            return
        self.move_scheme.reverse_direction(edge_direct)

    def move(self):
        self.handle_edge()
        self._do_move()

    def _do_move(self):
        pass
class WatchPoint(Shape):
    """The fixation (watch) point shown during trials."""

    type_label = u'注视点'
    default_set = WATCH_POINT_SET

    def __init__(self, pos=default_set['pos'], radius=default_set['radius'],
                 fill=default_set['fill'], outline=default_set['outline']):
        # original coordinates; during circular motion this value stays the
        # original centre fed into the motion computation
        self.pos = pos
        self.radius = radius    # circle radius
        self.fill = fill        # fill colour
        self.outline = outline  # outline colour

        # centre coordinates while moving; the watch point is drawn here
        self.move_pos = pos
        self.is_glued = False  # glued to the board (set when the two meet)

    def _do_move(self):
        """Advance one step (edge reversal is handled by Shape.move/handle_edge)."""
        self.move_pos = self.move_scheme.new_pos(self.move_pos)

    def get_border_xy(self):
        """Overwrite: coordinate order is left, right, top, bottom."""
        x0, y0 = self.move_pos
        return [x0 - self.radius, x0 + self.radius, y0 - self.radius,
                y0 + self.radius]

    def is_crossed_with(self, board):  # kept for future use
        """
        Whether the watch point intersects the board.

        Algorithm:
        A = r + width/2, B = r + height/2  (r: circle radius; width/height: board size)
        dx = |xw0 - xb0|, dy = |yw0 - yb0|  (absolute centre offsets)
        They intersect iff dx <= A and dy <= B.
        """
        dx, dy = abs(self.move_pos[0] - board.pos[0]), abs(
            self.move_pos[1] - board.pos[1])
        dw, dh = self.radius + board.width * 1.0 / 2, self.radius + board.height * 1.0 / 2
        return dx <= dw and dy <= dh

    def deglue(self):
        """Detach from the board (undo gluing) and restore the original scheme."""
        if not self.is_glued:
            return
        self.copy_move_scheme(self.original_move_scheme)
        self.is_glued = False

    def copy_move_scheme(self, a_move_scheme):
        """Copy fields instead of re-building move_scheme (avoids cyclic construction)."""
        self.move_scheme.copy_fields(a_move_scheme)
class BaseBoard(Shape):
    """Shared base class for the single-board and multi-board variants."""

    type_label = u'路牌'

    def __init__(self):
        self.is_glued = False

    def is_same_direction_with(self, direction):
        """Whether the user's judged direction matches the board's current one.

        @param direction: 1-up, 2-down, 3-left, 4-right
        @return: True when they match, otherwise False
        """
        return self.move_scheme.get_direction() == direction

    def change_items_velocity(self, is_left_algo):
        # staircase velocity change for the dynamic sensitivity threshold,
        # clamped to VELO_BORDER on both sides
        v0 = self.get_move_velocity()
        if is_left_algo:
            if v0 * VELO_PARAM['left'] > VELO_BORDER['max']:
                self.set_move_velocity(VELO_BORDER['max'])
            else:
                self.set_move_velocity(v0 * VELO_PARAM['left'])
        else:
            if v0 * VELO_PARAM['right'] < VELO_BORDER['min']:
                self.set_move_velocity(VELO_BORDER['min'])
            else:
                self.set_move_velocity(v0 * VELO_PARAM['right'])

    def restore_size(self):
        """Restore board size (incl. road names) for the single-board size
        threshold; overridden as a no-op for multi boards."""
        pass

    def is_crossed_with(self, wpoint):
        """Whether the board intersects the watch point; subclasses implement."""
        pass

    def glue_with(self, wpoint):
        """Glue with the watch point (done once the two intersect)."""
        if wpoint.is_glued and self.is_glued:
            return
        if self.is_crossed_with(wpoint):
            # copy rather than share the scheme so later changes stay independent
            # (do not assign self.move_scheme directly)
            wpoint.copy_move_scheme(self.move_scheme)
            wpoint.is_glued, self.is_glued = True, True

    def deglue(self):
        """Detach from the watch point."""
        if not self.is_glued:
            return
        self.is_glued = False
class ItemQueue():
    """Queue used for adding and removing road names or boards.

    Default rule: adding a board/road takes the seat with the smallest
    spacing (a dequeue via bget); removing one gives the seat back with the
    largest spacing (an enqueue via put).
    """

    def __init__(self, board, maxsize=8):
        """
        @param maxsize: queue capacity
        """
        self.maxsize = maxsize
        self._queue = []
        self.board = board  # single board or multi board

    def empty(self):
        """Return True when the queue holds no items."""
        return not self._queue

    def clear(self):
        """Drop all queued items."""
        self._queue = []

    def get(self):
        pass

    def put(self, item):
        """Enqueue *item*; raise when the queue is already full."""
        if self.qsize() >= self.maxsize:
            print(self._queue)  # parenthesized form works on Python 2 and 3
            raise Exception(
                '%s beyond largest size: %s' % (self.qsize(), self.maxsize))
        self._queue.append(item)

    def qsize(self):
        """Return the number of queued items."""
        return len(self._queue)

    def bget(self):
        """Remove and return the seat closest (smallest spacing) to the target.

        Spacing is measured by the board's calc_queue_space(); ties keep the
        earliest-queued seat, matching the original linear scan.
        """
        min_seat = min(self._queue, key=self.board.calc_queue_space)
        self._queue.remove(min_seat)
        return min_seat
class Board(BaseBoard):
'''单个路牌'''
def __init__(self, e, a, road_size, width=BOARD_SIZE['w'],
             height=BOARD_SIZE['h'],
             space_scale=False):
    """
    @param e: board eccentricity (distance between board centre and watch point)
    @param a: horizontal angle (degrees) between the board-centre/watch-point line
    @param road_size: road-name size
    """
    BaseBoard.__init__(self)

    # helper state
    # road_que = Queue.Queue(maxsize=8)  # for adding/removing road names in the count threshold
    self.road_que = ItemQueue(self, maxsize=8)  # seats queue for the road-name count threshold
    self.pos = None          # board centre coordinates
    self.road_dict = {}      # road names, key/value: 'A'/Road()
    self.target_seat = None  # seat mark of the target road name
    self.space_scale = space_scale

    # keep the initial sizes unmodified so they can be restored later
    self.original_width = width
    self.original_height = height
    self.original_road_size = road_size

    self.width = width
    self.height = height
    self.road_size = road_size  # uniform size of all road names

    # seat-coordinate reference frame, scaled from the 140px-wide baseline
    self.road_seat_refer = scale_refer(width / 140.0)
    self.reset_pos(e, a)

    self.prompt_road_dict = {}
    self._load_prompt_roads(road_size)  # prompt-road coordinates depend on self.pos
def __str__(self):
    """Render as "<pos>, (<width>, <height>)"."""
    return '{0}, ({1}, {2})'.format(self.pos, self.width, self.height)
def restore_size(self):
    """Restore board and road-name sizes to their initial values."""
    self.width = self.original_width
    self.height = self.original_height
    self.road_size = self.original_road_size
    self.change_seat_refer()
def reset_pos(self, e, a, wp_pos=WATCH_POINT_SET['pos']):
    """Reset the board-centre coordinates.  Once the centre changes, every
    road-name coordinate changes after the roads are reloaded.

    e: distance between the board centre and the watch point
    a: horizontal angle (degrees) of the board-centre/watch-point line
    wp_pos: watch-point coordinates
    """
    self.pos = self.calc_pos(e, a, wp_pos)
def reset_size(self, is_left_algo):
    """Rescale the board for a staircase step, clamped to BOARD_SIZE_BORDER.

    A left step multiplies width/height by SIZE_PARAM['left'] (clamped at
    the minimum); a right step multiplies by SIZE_PARAM['right'] (clamped
    at the maximum).  Road names scale proportionally with the width.
    """
    original_width = self.width
    if is_left_algo:
        if self.width * SIZE_PARAM['left'] < BOARD_SIZE_BORDER['min'][0] or \
                self.height * SIZE_PARAM['left'] < \
                BOARD_SIZE_BORDER['min'][1]:
            self.width, self.height = BOARD_SIZE_BORDER['min']
        else:
            self.width, self.height = self.width * SIZE_PARAM[
                'left'], self.height * SIZE_PARAM['left']
    else:
        if self.width * SIZE_PARAM['right'] > BOARD_SIZE_BORDER['max'][0] or \
                self.height * SIZE_PARAM['right'] > \
                BOARD_SIZE_BORDER['max'][1]:
            self.width, self.height = BOARD_SIZE_BORDER['max']
        else:
            self.width, self.height = self.width * SIZE_PARAM[
                'right'], self.height * SIZE_PARAM['right']

    # a board-size change scales the road names proportionally
    self.road_size = self.road_size * self.width * 1.0 / original_width
def reset_pos_xy(self, pos):
    """Reset the board centre directly from an (x, y) tuple.

    Alternative to reset_pos().  After the centre changes, subsequent logic
    needs to reload the roads so their coordinates are recomputed.
    @param pos: (x, y) coordinate tuple
    """
    self.pos = pos
def clear_queue(self):
    """Reset the seat queue for the next staircase run of the count threshold."""
    if not self.road_que.empty():
        self.road_que.clear()

    # single-board count threshold: up to 8 road names; seats not initially
    # shown are queued so they can be handed out later
    rest_seats = set(ALLOWED_ROAD_SEATS) - set(self.get_road_seats())
    for s in rest_seats:
        self.road_que.put(s)
def calc_pos(self, e, a, wp_pos):
    """Compute the board-centre coordinates from the initial e and a values.

    @param e: distance between the board centre and the watch point
    @param a: horizontal angle (degrees) of the centre/watch-point line
    """
    rad = math.radians(a)
    x0, y0 = wp_pos
    return (x0 - e * math.cos(rad), y0 - e * math.sin(rad))
def set_spared_road_seats(self, road_seats_item):
    """Set the target seats to cycle through.

    Added for the multi-board trials.
    """
    self.spared_road_seats = road_seats_item[0]    # seat marks on this board
    self.spared_target_seats = road_seats_item[1]  # target seats to cycle through
def load_roads(self, road_seats, target_seat, road_size):
    """(Re)load all road names on the board.  Names are randomly re-drawn
    from the word pool and Road objects are re-created from scratch."""
    self.road_dict.clear()
    modeled_roads = self.generate_random_roads(len(road_seats))
    for mark in road_seats:
        road_model = random.choice(modeled_roads)
        self.road_dict[mark] = Road(road_model.name,
                                    self.pos_xx(mark, road_size),
                                    is_real=road_model.is_real,
                                    size=road_size)
        self.road_dict[mark].is_target = True if mark == target_seat else False
        modeled_roads.remove(road_model)
    self.target_seat = target_seat
def load_roads_lean(self, road_size):
    """Load all road names for the multi-board trials.  Names are randomly
    re-drawn from the word pool and Road objects re-created; no target
    road name is marked."""
    self.road_dict.clear()
    modeled_roads = self.generate_random_roads(len(self.spared_road_seats))
    for mark in self.spared_road_seats:
        road_model = random.choice(modeled_roads)
        self.road_dict[mark] = Road(road_model.name,
                                    self.pos_xx(mark, road_size),
                                    is_real=road_model.is_real,
                                    size=road_size)
        modeled_roads.remove(road_model)
def _load_prompt_roads(self, road_size):
    """Load road names for all 8 seats; used to prompt the target seat."""
    self.prompt_road_dict.clear()
    modeled_roads = self.generate_random_roads(8)
    for mark in ALLOWED_ROAD_SEATS:
        road_model = random.choice(modeled_roads)
        self.prompt_road_dict[mark] = Road('%s: %s' % (mark, road_model.name),
                                           self.pos_xx(mark, road_size),
                                           is_real=road_model.is_real,
                                           size=road_size)
        modeled_roads.remove(road_model)
def flash_road_names(self):
    """Refresh only the displayed names; the Road objects are kept and the
    target / flanker positions are NOT changed."""
    road_seats = self.get_road_seats()
    modeled_roads = self.generate_random_roads(len(road_seats))
    for mark in road_seats:
        road_model = random.choice(modeled_roads)
        self.road_dict[mark].name = road_model.name
        # Keep the real/fake flag in sync with the new name (bug fix noted
        # by the original author).
        self.road_dict[mark].is_real = road_model.is_real
        modeled_roads.remove(road_model)
def generate_random_roads(self, road_num):
    """Build a list of ``road_num`` distinct random road models.

    Each element exposes ``name`` and ``is_real``. Rule: for an even count
    real and fake names are half/half; for an odd count the fake names get
    one extra.

    @param road_num: number of road names to generate
    @return: list of road models (real names first, then fake ones)
    """
    if road_num == 1:
        return random.sample(cached_kana_roads, 1)
    # Floor division keeps the sample size an int under Python 3; the
    # original "/" relied on Python 2 integer division.
    num = road_num // 2
    real_roads = random.sample(cached_real_roads, num)  # half real names
    if road_num % 2 == 1:  # odd count: fakes get the extra slot
        num += 1
    real_roads.extend(random.sample(cached_kana_roads, num))
    return real_roads
def get_ee(self, target_seat, wpoint):
    """Eccentricity: distance from the target road to the watch point.

    @param target_seat: seat mark of the target, e.g. 'A'
    @param wpoint: watch-point object (must expose ``pos``)
    """
    return maths.dist(self.road_dict[target_seat].pos, wpoint.pos)

def get_angle(self, target_seat, wpoint):
    """Angle of the line joining the target road and the watch point,
    measured clockwise.

    @param target_seat: seat mark of the target, e.g. 'A'
    @param wpoint: watch-point object (must expose ``pos``)
    """
    return maths.angle(self.road_dict[target_seat].pos, wpoint.pos)
def get_road_spacings(self):
    """Lazily compute and cache the target/flanker spacing list."""
    cached = getattr(self, 'road_spacings', None)
    if not cached:
        self.road_spacings = self.calc_target_flanker_spacings()
    return self.road_spacings

def get_item_spacings(self):
    """Spacings between the target and each flanker; polymorphic with
    MultiBoard.get_item_spacings."""
    return self.calc_target_flanker_spacings()

def calc_target_flanker_spacings(self):
    """Distance from the current target road to every flanker road.

    @return: list of spacing values, one per flanker
    """
    target_road = self.get_target_road()
    return [flanker.dist_with(target_road)
            for flanker in self.get_flanker_roads()]
def update_flanker_poses(self, is_left_algo):
    """Update every flanker's coordinates for the spacing staircase.

    Rule: the target road stays fixed; each flanker moves toward or away
    from it along the line joining the two points (target as origin).

    @param is_left_algo: passed through to Road.update_pos to select the
        shrink/grow rule.
    """
    target_road = self.road_dict[self.target_seat]
    # Build a list excluding the target instead of calling remove() on the
    # return of get_road_seats(): under Python 3 that may be a dict view,
    # which has no remove().
    flanker_seats = [s for s in self.get_road_seats()
                     if s != self.target_seat]
    for flanker_seat in flanker_seats:
        self.road_dict[flanker_seat].update_pos(target_road.pos, is_left_algo)
def update_flanker_spacings(self, is_left_algo, update_all=True):
    """Update flanker coordinates relative to the target, i.e. update the
    target/flanker spacings.

    @param is_left_algo: staircase rule; True = left branch of the flow
        chart (normally shrinks spacing), False = right branch (grows it)
    @param update_all: True = change every spacing uniformly;
        False = change only the extreme one: shrink the largest spacing
        under the left rule, grow the smallest under the right rule
    """
    if update_all:  # uniform change of every flanker spacing
        self.update_flanker_poses(is_left_algo)
        return
    target_road = self.get_target_road()
    space_dict = self.get_flanker_space_dict()
    if is_left_algo:  # pick the largest spacing to shrink
        mflanker, mval = self.get_max_space_flanker(space_dict)
    else:  # pick the smallest spacing to grow
        mflanker, mval = self.get_min_space_flanker(space_dict)
    # Collect every road whose spacing (nearly) equals the extreme value;
    # only mval is used below, mflanker itself is intentionally unused.
    equal_space_roads = []  # roads sharing the extreme spacing
    for road, space in space_dict.items():
        if abs(space - mval) < 0.01:
            equal_space_roads.append(road)
    for flanker in equal_space_roads:
        flanker.update_pos(target_road.pos, is_left_algo)
def get_flanker_space_dict(self):
    """Map each flanker road to its distance from the target road.

    Used to pick the largest / smallest spacing.
    @return: {flanker Road: spacing}
    """
    target_road = self.get_target_road()
    return {flanker: flanker.dist_with(target_road)
            for flanker in self.get_flanker_roads()}
def get_max_space_flanker(self, space_dict):
    """Return the flanker with the LARGEST distance to the target.

    @param space_dict: {flanker: spacing} mapping
    @return: (flanker, spacing); (None, 0.0) when no spacing is positive
    """
    mval = 0.0
    mflanker = None
    for flanker, space in space_dict.items():
        if space > mval:
            mval = space
            mflanker = flanker
    return mflanker, mval

def get_min_space_flanker(self, space_dict):
    """Return the flanker with the SMALLEST distance to the target.

    (Doc fix: the original docstring wrongly said "largest".)
    @param space_dict: {flanker: spacing} mapping
    @return: (flanker, spacing); (None, inf) for an empty mapping
    """
    mval = float("inf")  # replaces the 99999999 magic sentinel
    mflanker = None
    for flanker, space in space_dict.items():
        if space < mval:
            mval = space
            mflanker = flanker
    return mflanker, mval
def update_flanker_numbers(self, is_left_algo):
    """
    Update the number of flanker roads; mutates ``road_dict``.

    When flankers are removed their seat marks are queued on ``road_que``;
    when flankers are added the marks are taken back from the queue.
    @param is_left_algo: True adds 2 flankers, False removes 1.
    (Doc fix: the original claimed to return the updated count, but the
    method returns None.)
    """
    if is_left_algo:  # +2
        self.add_flankers()
    else:
        self.decr_flankers()

def calc_queue_space(self, seat):
    """Distance between the target road and the given (empty) seat
    position; used to order the quantity-threshold queue."""
    return maths.dist(self.get_target_road().pos,
                      self.pos_xx(seat, self.road_size))
def add_flankers(self):
    """Add flankers two at a time, starting from the queued seat(s)
    nearest to the target.

    Seat marks come from ``road_que``; if fewer than two marks remain the
    sign is already full and nothing happens (a message is printed).
    """
    if self.road_que.qsize() < 2:
        print(
            '\nAlready max flankers on board: %s' % int(len(self.road_dict) - 1))
        return
    road_model1, road_model2 = self.generate_random_roads(2)
    # bget() presumably pops the "best" (nearest) seat — TODO confirm
    # against ItemQueue.
    seat1, seat2 = self.road_que.bget(), self.road_que.bget()
    self.road_dict[seat1] = Road(road_model1.name,
                                 self.pos_xx(seat1, self.road_size),
                                 is_real=road_model1.is_real,
                                 size=self.road_size
                                 )
    self.road_dict[seat2] = Road(road_model2.name,
                                 self.pos_xx(seat2, self.road_size),
                                 is_real=road_model2.is_real,
                                 size=self.road_size
                                 )
def decr_flankers(self):
    """Remove one flanker — the one farthest from the target road.

    The freed seat mark is pushed onto ``road_que`` so it can be reused;
    at least one flanker (plus the target) is always kept.
    """
    if len(self.road_dict) == 2:  # target + one flanker: nothing to remove
        print(
            '\nAlready min flankers on board: %s' % int(len(self.road_dict) - 1))
        return
    space_dict = self.get_flanker_space_dict()
    max_flanker, max_space = self.get_max_space_flanker(space_dict)
    # Iterate over a snapshot: popping from road_dict while iterating the
    # live dict view is unsafe under Python 3.
    for seat, road in list(self.road_dict.items()):
        if seat != self.target_seat and road == max_flanker:
            self.road_que.put(seat)
            self.road_dict.pop(seat)
            break
def update_items_size(self, is_left_algo):
    """Update the road-name size; polymorphic with MultiBoard, called by
    the staircase algorithm for the size threshold.

    NOTE: when spacing scales with size (``self.space_scale`` is truthy),
    resizing the names also rescales the sign itself and every name
    position — the sign visually inflates or shrinks.
    @param is_left_algo: chooses the left/right resize rule.
    """
    self.update_road_size(is_left_algo)
    self.road_size = self.get_road_size()
    if self.space_scale:
        factor = self.road_size * 1.0 / self.original_road_size
        self.width, self.height = self.original_width * factor, self.original_height * factor
        self.change_seat_refer()

def change_seat_refer(self):
    """Rebuild the seat coordinate reference frame after the sign was
    rescaled, then recompute every road position in the new frame."""
    # The frame scales with the sign width; 140.0 is presumably the
    # reference width — TODO confirm against scale_refer.
    self.road_seat_refer = scale_refer(self.width / 140.0)
    for seat, road in self.road_dict.items():
        road.pos = self.pos_xx(seat, self.road_size)
def update_road_size(self, is_left_algo):
    """Resize every road name on the sign.

    @param is_left_algo: chooses the left/right resize rule
        (see Road.reset_size).
    """
    # dict.values() works on both Python 2 and 3; the original used the
    # Python-2-only viewvalues().
    for road in self.road_dict.values():
        road.reset_size(is_left_algo)
def get_road_size(self):
    """Current road-name size (taken from the target road)."""
    return self.get_target_road().size

def get_item_size(self):
    """Road-name size as a string; polymorphic with
    MultiBoard.get_item_size."""
    return str(self.get_road_size())  # idiom: str() instead of '%s' %

def get_road_seats(self):
    """Seat marks currently occupied on the sign.

    Materialised as a real list (not a Python 3 dict view) because callers
    mutate the result with remove().
    """
    return list(self.road_dict.keys())

def count_flanker_items(self):
    """Number of flanker roads (everything except the target)."""
    return len(self.get_road_seats()) - 1
def _do_move(self):
    """Move the sign according to ``self.move_scheme`` and shift every
    road name by the same offset."""
    new_pos = self.move_scheme.new_pos(self.pos)
    # Offset applied to each road so names travel with the sign.
    dx, dy = new_pos[0] - self.pos[0], new_pos[1] - self.pos[1]
    self.reset_pos_xy(new_pos)
    for road in self.road_dict.values():
        road.pos = road.pos[0] + dx, road.pos[1] + dy

def dist_with(self, a_board):
    """Distance between the centres of this sign and ``a_board``.

    Delegates to maths.dist; the original docstring claimed 2-decimal
    rounding, but no rounding happens here — TODO confirm in maths.dist.
    @param a_board: the other sign
    @return: the distance value
    """
    return maths.dist(self.pos, a_board.pos)
def update_pos(self, target_pos, is_left_algo):
    """Recompute this item's position from the spacing change: the target
    stays fixed and this item moves toward/away along the joining line.

    @param target_pos: (x, y) of the target item
    @param is_left_algo: True -> new distance = r * SPACING_PARAM['left'],
                         False -> new distance = r + SPACING_PARAM['right']
    NOTE(review): raises ZeroDivisionError if this item coincides with the
    target (r == 0).
    """
    x0, y0 = target_pos
    x, y = self.pos
    r = maths.dist((x0, y0), (x, y))  # current distance between the points
    if is_left_algo:
        r1 = r * SPACING_PARAM['left']
    else:
        r1 = r + SPACING_PARAM['right']
    x = x0 - r1 * (x0 - x) / r * 1.0
    y = y0 - r1 * (y0 - y) / r * 1.0
    self.pos = round(x, 2), round(y, 2)
def is_target_road_real(self):
    """True when the target road carries a real (non-fake) name."""
    return self.get_target_road().is_real

def get_target_road(self):
    """The Road object currently marked as the target."""
    return self.road_dict[self.target_seat]

def get_flanker_roads(self):
    """All roads on the sign except the target.

    Materialised as a list: under Python 3 dict.values() is a view without
    remove(), which the original code relied on.
    @return: list of flanker Road objects
    """
    target_road = self.get_target_road()
    roads = list(self.road_dict.values())
    roads.remove(target_road)
    return roads
## Coordinate-system helpers for the sign and its road-name seats.
def pos_xx(self, mark, s):
    """Centre coordinates of seat A..H relative to the sign centre.

    @param mark: seat mark (letter, case-insensitive) used to dispatch to
        the matching pos_<mark> helper
    @param s: road-name size (text height)
    """
    handler = getattr(self, 'pos_%s' % mark.lower())
    return handler(s)

def pos_a(self, s=0):  # default keeps the pos_xx dispatch uniform
    refer = self.road_seat_refer
    return self.pos[0] - refer['left_x'], self.pos[1] + refer['a_y']

def pos_b(self, s):
    ax, ay = self.pos_a(s)
    return ax, ay + s + self.road_seat_refer['blank_y']

def pos_c(self, s):
    bx, by = self.pos_b(s)
    return bx, by + s + self.road_seat_refer['blank_y']

def pos_d(self, s=0):
    refer = self.road_seat_refer
    return self.pos[0] + refer['right_x'], self.pos[1] + refer['a_y']

def pos_e(self, s):
    dx, dy = self.pos_d(s)
    return dx, dy + s + self.road_seat_refer['blank_y']

def pos_f(self, s):
    ex, ey = self.pos_e(s)
    return ex, ey + s + self.road_seat_refer['blank_y']

def pos_g(self, s):
    return self.pos[0], self.pos[1] - self.road_seat_refer['g_y']

def pos_h(self, s):
    gx, gy = self.pos_g(s)
    return gx, gy + s + self.road_seat_refer['blank_y']
def get_border_xy(self):
    """Override: sign bounding box as [left_x, right_x, top_y, bottom_y]."""
    cx, cy = self.pos
    half_w = self.width * 1.0 / 2
    half_h = self.height * 1.0 / 2
    return [cx - half_w, cx + half_w, cy - half_h, cy + half_h]

def is_crossed_with(self, wpoint):
    """
    Does the sign overlap the (circular) watch point?

    Rule: with A = r + width/2 and B = r + height/2 (r = watch-point
    radius), the two intersect iff |xw - xb| <= A and |yw - yb| <= B.
    """
    dx = abs(wpoint.move_pos[0] - self.pos[0])
    dy = abs(wpoint.move_pos[1] - self.pos[1])
    max_dx = wpoint.radius + self.width * 1.0 / 2
    max_dy = wpoint.radius + self.height * 1.0 / 2
    return dx <= max_dx and dy <= max_dy
# def draw(self, canvas):
# '''将路牌绘制在屏幕上, 同是包含注视点'''
#
# #绘制注视点
# self.watch_point.draw(canvas)
#
# #绘制路牌
# self.tk_id = canvas.create_rectangle_pro(
# self.pos[0], self.pos[1], self.width, self.height, fill=board_color,
# outline=board_color
# )
# canvas.widget_list[self.tk_id] = self
# self._draw_roads(canvas)
#
# canvas.update()
# def _draw_roads(self, canvas):
# for road in self.road_dict.values():
# road.draw(canvas)
# def _erase_roads(self, canvas):
# for road in self.road_dict.values():
# road.erase(canvas)
# def erase(self, canvas):
# '''擦除路牌, 开始下一个1.6s的显示. 擦除路牌同时擦除所有路名'''
# self._erase_roads(canvas)
# canvas.delete(self.tk_id)
class MultiBoard(BaseBoard):
    """Multi-sign container: internally manages several single signs."""

    def __init__(self, param):
        """Initialise the container from the experiment parameters.

        Parameter fields used:
        - board_size: largest of the sign sizes, as (w, h)
        - board_range: layout, 'H' horizontal / 'V' vertical
        - road_size: initial road-name size (on the largest sign)
        - pre_board_num: initial number of signs shown for the quantity threshold
        - board_space: spacing between signs
        - board_scale: per-sign scale factor (default 1)

        Initialises the sign repository, the displayed signs, and the
        target-prompt signs with their road names.
        """
        BaseBoard.__init__(self)
        # Helper queue of sign marks, used when stepping the sign count.
        self.board_que = ItemQueue(self, maxsize=6)
        # Helper used to prompt the target position; same structure as board_dict.
        self.prompt_board_dict = {}
        self.board_size = param.get_board_size()
        self.board_range = param.board_range
        self.pre_board_num = param.pre_board_num
        self.road_size = param.road_size  # largest road-name size
        self.board_space = param.board_space
        self.board_scale = param.board_scale
        # board_repos stores every initialised sign; board_dict holds the
        # signs currently displayed/controlled, e.g. {'B1': b1, 'B2': b2}.
        # Initialised-but-hidden sign marks live in board_que.
        self.board_repos = self._generate_boards(param)
        self.board_dict = {}
        self.reload_boards()  # fill board_dict from the repository
        self._init_prompt_boards(param.get_board_size(), param.board_range,
                                 param.road_size,
                                 param.board_space, param.board_scale)

    def reload_boards(self):
        """Reload every sign after each target change; needed for the
        sign-quantity threshold."""
        self.board_dict.clear()
        for k, board in self.board_repos.items():
            self.board_dict[k] = board
        # Positions are then set according to the current contents of
        # board_dict (count, layout, ...).

    def reset_boards(self, eccent, angle, ):
        """Reset the sign coordinates.

        @param eccent: eccentricity of the largest sign (the reference sign)
        @param angle: angle of the largest sign
        """
        board_marks = ALLOWED_BOARD_MARKS
        prev_board = None
        for i in range(len(board_marks)):
            curr_board = self.board_dict[board_marks[i]]
            curr_board.reset_pos(eccent, angle)
            if prev_board:  # True for the 2nd/3rd sign
                curr_board.reset_pos_xy(self._next_board_pos(
                    prev_board.pos,
                    self.board_range,
                    self.board_space
                ))
            prev_board = curr_board  # advance the "previous" pointer

    def load_roads(self, target_board_key, target_seat):
        """Load all road names on every sign and mark the target.

        @param target_board_key: mark of the target sign
        @param target_seat: seat of the target road on that sign
        """
        for iboard in self.board_dict.values():
            if iboard == self.board_dict[target_board_key]:  # the target sign
                iboard.load_roads(iboard.spared_road_seats, target_seat,
                                  iboard.road_size)
            else:
                iboard.load_roads_lean(iboard.road_size)

    def _init_prompt_boards(self, board_size, board_range, road_size, board_space,
                            board_scale):
        """Initialise the target-prompt signs for each staircase run and
        load their road names."""
        self.prompt_board_dict.clear()
        prev_board = None
        for i in range(3):
            width, height = board_size[0] * board_scale ** i, board_size[
                1] * board_scale ** i
            # Bug fix: scale from the ORIGINAL road_size each iteration.
            # The original reassigned the parameter cumulatively, giving
            # scale**3 on the third sign instead of scale**2 and diverging
            # from _generate_boards.
            curr_road_size = road_size * board_scale ** i
            curr_board = Board(200, 0, curr_road_size, width=width,
                               height=height)  # first sign: watch point shifted 200 left
            if prev_board:  # 2nd/3rd sign
                curr_board.reset_pos_xy(self._next_board_pos(
                    prev_board.pos,
                    board_range,
                    board_space
                ))
            curr_board._load_prompt_roads(curr_road_size)  # reload names after repositioning
            self.prompt_board_dict[ALLOWED_BOARD_MARKS[i]] = curr_board
            prev_board = curr_board  # advance

    def _generate_boards(self, param):
        """Generate the initial sign repository; every sign is loaded."""
        board_dict = {}
        prev_board = None
        road_seats_list = param.get_multi_road_seats()
        board_size = param.get_board_size()
        for i in range(len(road_seats_list)):  # all signs initialised together
            width = board_size[0] * param.board_scale ** i
            height = board_size[1] * param.board_scale ** i
            road_size = param.road_size * param.board_scale ** i
            curr_board = Board(200, 0, road_size, width=width,
                               height=height)  # first sign: watch point shifted 200 left
            if prev_board:  # 2nd/3rd sign
                curr_board.reset_pos_xy(self._next_board_pos(
                    prev_board.pos,
                    param.board_range,
                    param.board_space
                ))
            curr_board.set_spared_road_seats(road_seats_list[i])
            board_dict[ALLOWED_BOARD_MARKS[i]] = curr_board
            prev_board = curr_board  # advance
        return board_dict

    def _next_board_pos(self, prev_board_pos, board_range, board_space):
        """Next sign-centre position when laying out the signs.

        @param prev_board_pos: centre of the previously placed sign
        @param board_range: layout, 'H' horizontal / 'V' vertical
        @param board_space: spacing between signs
        @return: (x, y) of the next sign centre
        """
        if board_range == 'H':  # horizontal
            return prev_board_pos[0] + board_space, prev_board_pos[1]
        else:  # vertical
            return prev_board_pos[0], prev_board_pos[1] + board_space

    def calc_ee(self, wpoint):
        """Eccentricity of the target sign: distance from its centre to
        the watch point.

        @param wpoint: watch-point object
        """
        key, target_board = self.get_target_board()
        return maths.dist(target_board.pos, wpoint.pos)

    def calc_angle(self, wpoint):
        """Angle (clockwise) of the line joining the target-sign centre
        and the watch point.

        @param wpoint: watch-point object
        """
        key, target_board = self.get_target_board()
        return maths.angle(target_board.pos, wpoint.pos)

    def set_target_board(self, board_key):
        """Set the target-sign mark ('B1'/'B2'/'B3')."""
        self.target_board_key = board_key

    def get_target_board(self, target_board_key=None):
        """Return (mark, Board) of the requested or current target sign."""
        if target_board_key:
            return target_board_key, self.board_dict[target_board_key]
        return self.target_board_key, self.board_dict[
            self.target_board_key]  # the target sign is always present

    def get_spared_target_seats(self, board_key):
        """Seats to poll as targets on the given sign."""
        return self.board_repos[board_key].spared_target_seats

    def get_target_road(self):
        """The target Road on the target sign."""
        key, board = self.get_target_board()
        return board.get_target_road()

    def get_target_name(self, target_board_key=None):
        """Name of the target road (optionally on a specific sign)."""
        key, board = self.get_target_board(target_board_key)
        return board.get_target_road().name

    def get_flanker_boards(self):
        """All displayed signs except the target sign.

        Materialised as a list: under Python 3 dict.values() is a view
        without remove(), which the original code relied on.
        """
        flanker_boards = list(self.board_dict.values())
        key, iboard = self.get_target_board()
        flanker_boards.remove(iboard)
        return flanker_boards

    def count_flanker_items(self):
        """Number of flanker signs (everything except the target)."""
        return len(self.board_dict) - 1

    def get_item_size(self):
        """Target-sign size as "width,height"; polymorphic with Board.

        @return: "width,height"
        """
        key, tboard = self.get_target_board()
        return '%s,%s' % (tboard.width, tboard.height)

    def get_item_spacings(self):
        """Target/flanker spacings; polymorphic with Board objects."""
        return self._calc_item_spacings()

    def _calc_item_spacings(self):
        """Distance from the target sign to every flanker sign.

        @return: list of spacing values
        """
        key, iboard = self.get_target_board()
        spacings = []
        for board in self.get_flanker_boards():
            spacings.append(board.dist_with(iboard))
        return spacings

    def clear_queue(self):  # TODO: must not wipe boards whose positions were already reset
        """Empty the queue for the next quantity-threshold staircase round.

        board_dict must NOT be cleared here because the sign positions
        have already been reset; instead signs are removed one by one
        until pre_board_num remain, and the freed marks are queued.
        """
        if not self.board_que.empty():
            self.board_que.clear()
        # Keep pre_board_num signs (including the target) before the
        # staircase loop starts.
        while len(self.board_dict) > self.pre_board_num:
            self.decr_board()
        # Queue the marks of the remaining (hidden) signs.
        rest_marks = set(ALLOWED_BOARD_MARKS) - set(self.board_dict.keys())
        for m in rest_marks:
            self.board_que.put(m)

    def update_flanker_numbers(self, is_left_algo):
        """Add (left rule) or remove (right rule) one sign."""
        if is_left_algo:
            self.incre_board()
        else:
            self.decr_board()

    def calc_queue_space(self, seat):
        """Ordering key for board_que: sign-index gap to the target,
        e.g. target B1 -> B3 has gap 2, B5 has gap 4."""
        return abs(int(seat[1]) - int(self.target_board_key[1]))

    def incre_board(self):  # currently tailored to the 3-sign case
        """Add one sign to board_dict — the queued one closest to the
        target sign."""
        if self.board_que.qsize() < 1:
            return
        key = self.board_que.bget()
        self.board_dict[key] = self.board_repos[key]

    def decr_board(self):  # generic: works for more than 3 signs
        """Remove the sign farthest from the target; its mark is queued.

        At least two signs (target + one flanker) are always kept.
        """
        if len(self.board_dict) == 2:  # keep at least 2 signs
            return
        # Find the flanker sign farthest from the target.
        max_space = 0.0
        max_fboard = None
        key, tboard = self.get_target_board()
        for fboard in self.get_flanker_boards():
            space = tboard.dist_with(fboard)
            if space > max_space:
                max_space = space
                max_fboard = fboard
        # Iterate a snapshot: popping while iterating the live dict view
        # is unsafe under Python 3.
        for key, board in list(self.board_dict.items()):
            if key == self.target_board_key:  # never remove the target sign
                continue
            if board == max_fboard:
                self.board_dict.pop(key)
                self.board_que.put(key)
                return

    def update_flanker_spacings(self, is_left_algo, update_all=True):
        """Update the target/flanker sign spacings (multi-sign critical
        spacing); in essence updates flanker-sign coordinates with the
        target as origin.

        @param is_left_algo: True = left branch (normally shrinks the
            spacing), False = right branch (grows it)
        @param update_all: True = change every spacing uniformly;
            False = change only the extreme one (shrink the largest under
            the left rule, grow the smallest under the right rule)
        """
        if update_all:
            self.update_flanker_poses(is_left_algo)
            return
        space_dict = {}
        key, iboard = self.get_target_board()
        for flanker in self.get_flanker_boards():
            space_dict[flanker] = flanker.dist_with(iboard)
        if is_left_algo:  # pick the largest spacing to shrink
            mval = 0.0
            for space in space_dict.values():
                if space > mval:
                    mval = space
        else:  # pick the smallest spacing to grow
            mval = float("inf")  # replaces the 99999999 magic sentinel
            for space in space_dict.values():
                if space < mval:
                    mval = space
        # Collect every sign whose spacing (nearly) equals the extreme value.
        equal_space_boards = []  # signs sharing the extreme spacing
        for board, space in space_dict.items():
            if abs(space - mval) < 0.01:
                equal_space_boards.append(board)
        for flanker_board in equal_space_boards:
            flanker_board.update_pos(iboard.pos, is_left_algo)
            flanker_board.load_roads_lean(flanker_board.road_size)

    def update_flanker_poses(self, is_left_algo):
        """Update all flanker-sign coordinates for the spacing staircase.

        Rule: the target sign stays fixed; each flanker moves toward or
        away from it along the joining line (target as origin).
        """
        key, iboard = self.get_target_board()
        for flanker_board in self.get_flanker_boards():
            flanker_board.update_pos(iboard.pos, is_left_algo)
            # Names must be reloaded after the sign moved.
            flanker_board.load_roads_lean(flanker_board.road_size)

    def update_items_size(self, is_left_algo):
        """Update the sign sizes.

        Two possible rules: 1. signs resize independently; 2. size and
        spacing change proportionally. Only rule 1 is implemented for now;
        whether resizing should affect other parameters is TODO.
        @param is_left_algo: chooses the left/right resize rule
        """
        for board in self.board_dict.values():
            board.reset_size(is_left_algo)
            board.load_roads_lean(board.road_size)

    def flash_road_names(self):
        """Refresh the names on every sign without replacing Road objects."""
        for board in self.board_dict.values():
            board.flash_road_names()

    def is_target_road_real(self):
        """True when the target road (on the target sign) has a real name."""
        key, iboard = self.get_target_board()
        return iboard.is_target_road_real()

    def _do_move(self):
        """Move the target sign via its move scheme and shift every other
        sign (and all road names) by the same offset."""
        key, iboard = self.get_target_board()
        new_pos = self.move_scheme.new_pos(iboard.pos)
        dx, dy = new_pos[0] - iboard.pos[0], new_pos[1] - iboard.pos[1]  # shared offset
        # Reposition the target sign and its names.
        iboard.reset_pos_xy(new_pos)
        for road in iboard.road_dict.values():
            road.pos = road.pos[0] + dx, road.pos[1] + dy
        # Reposition the flanker signs and their names.
        flanker_boards = self.get_flanker_boards()
        for fboard in flanker_boards:
            fboard.reset_pos_xy((fboard.pos[0] + dx, fboard.pos[1] + dy))
            for road in fboard.road_dict.values():
                road.pos = road.pos[0] + dx, road.pos[1] + dy

    def get_border_xy(self):
        """Override: bounding box of ALL signs as
        [left_x, right_x, top_y, bottom_y]."""
        # With more than 3 signs these may be 4 different signs.
        l_board, r_board, u_board, d_board = None, None, None, None
        x_min, x_max, y_min, y_max = 99999, 0, 9999, 0
        for board in self.board_dict.values():
            x, y = board.pos
            if x < x_min:  # leftmost sign
                x_min = x
                l_board = board
            if x > x_max:  # rightmost sign
                x_max = x
                r_board = board
            if y < y_min:  # topmost sign
                y_min = y
                u_board = board
            if y > y_max:  # bottommost sign
                y_max = y
                d_board = board
        return [x_min - l_board.width * 1.0 / 2, x_max + r_board.width * 1.0 / 2,
                y_min - u_board.height * 1.0 / 2, y_max + d_board.height * 1.0 / 2]

    def is_crossed_with(self, wpoint):
        """Does ANY sign overlap the watch point? Each sign is tested with
        the single-sign rule."""
        for board in self.board_dict.values():
            if board.is_crossed_with(wpoint):
                return True
        return False
class Road(object):
    """A single road name displayed on a sign."""

    def __init__(self, name, pos, size=15, is_target=False, is_real=False):
        self.name = name            # displayed text
        self.pos = pos              # (x, y) centre of the text
        self.size = size            # text height
        self.is_target = is_target  # is this the target item?
        self.is_real = is_real      # real road name vs fake (kana) name

    def __str__(self):
        return u'%s, %s, %s' % (self.name, self.pos, self.is_target)

    def dist_with(self, a_road):
        """Distance between the two road-name centres."""
        return maths.dist(self.pos, a_road.pos)

    def update_pos(self, target_pos, is_left_algo):
        """Recompute this road's position from the spacing change: the
        target stays fixed and this road moves toward/away along the
        joining line. Same rule as the sign method of the same name; a
        shared helper could be extracted later.

        @param target_pos: (x, y) of the target item
        @param is_left_algo: True -> new distance = r * SPACING_PARAM['left'],
                             False -> new distance = r + SPACING_PARAM['right']
        """
        tx, ty = target_pos
        x, y = self.pos
        r = maths.dist((tx, ty), (x, y))  # current distance between the names
        if is_left_algo:
            new_r = r * SPACING_PARAM['left']
        else:
            new_r = r + SPACING_PARAM['right']
        x = tx - new_r * (tx - x) / r * 1.0
        y = ty - new_r * (ty - y) / r * 1.0
        self.pos = round(x, 2), round(y, 2)

    def reset_size(self, is_left_algo):
        """Rescale the name, clamped to the SIZE_BORDER limits."""
        if is_left_algo:
            shrunk = self.size * SIZE_PARAM['left']
            self.size = shrunk if shrunk >= SIZE_BORDER[0] else SIZE_BORDER[0]
        else:
            grown = self.size * SIZE_PARAM['right']
            self.size = grown if grown < SIZE_BORDER[1] else SIZE_BORDER[1]

    # def draw(self, canvas):
    #     '''Render this name on screen via the canvas.'''
    #     road_font = DEFAULT_ROAD_FONT[0], self.size
    #     road_color = TARGET_ROAD_COLOR if self.is_target else DEFAULT_ROAD_COLOR
    #     self.tk_id = canvas.create_text(self.pos, text=self.name, fill=road_color, font=road_font)
    #     canvas.widget_list[self.tk_id] = self

    # def erase(self, canvas):
    #     '''Erase this name from the canvas.'''
    #     canvas.delete(self.tk_id)
| 2.296875 | 2 |
# -*- coding: utf-8 -*-
"""
Editor de Spyder
Este es un archivo temporal.
"""
#import pdb
def biseccion(f, a=-100, b=100, epsilon=0.001, max_iter=100):
    """Approximate a root of ``f`` on [a, b] via bisection.

    NOTE(review): assumes f(a) and f(b) bracket a root (opposite signs);
    this is not validated, and if the loop stops because ``max_iter`` was
    reached the printed "solution" may not satisfy the tolerance.

    @param f: continuous function of one variable
    @param a, b: interval endpoints
    @param epsilon: tolerance on |f(root)|
    @param max_iter: iteration cap
    @return: the midpoint approximation of the root
    """
    mid = (a + b) / 2
    iterations = 0  # iteration counter
    while abs(f(mid)) >= epsilon and iterations < max_iter:
        if f(a) * f(mid) > 0:
            # f(a) and f(mid) share a sign: shrink the interval from the left
            a = mid
        else:
            # otherwise shrink it from the right
            b = mid
        # Next guess: midpoint of the reduced search interval.
        mid = (a + b) / 2
        iterations += 1
    print('Iteraciones: ', iterations)
    print('Solución encontrada: f(%0.4f) = %0.4f' % (mid, f(mid)))
    return mid
# Demo: approximate the golden ratio, the positive root of x^2 - x - 1.
# (PEP 8 E731: a named function instead of a lambda assignment; also
# drops the dataset-artifact junk fused to the final line.)
def f(x):
    """Polynomial whose positive root is the golden ratio phi."""
    return x ** 2 - x - 1


approx_phi = biseccion(f, a=1, b=2, epsilon=1e-6)
print(approx_phi)
tests/endpoints/test_transactions.py | tmnhat2001/yelp-fusion-api | 0 | 12772039 | <reponame>tmnhat2001/yelp-fusion-api<filename>tests/endpoints/test_transactions.py
from yelpfusion.endpoints.transactions import Transactions
def test_transaction_search(requests_mock):
mock_api = requests_mock.get("https://api.yelp.com/v3/transactions/pickup/search", json={})
Transactions("test-key").search("pickup", location="Markham")
assert mock_api.called is True
| 1.96875 | 2 |
accelbyte_py_sdk/api/lobby/operations/friends/__init__.py | AccelByte/accelbyte-python-sdk | 0 | 12772040 | <reponame>AccelByte/accelbyte-python-sdk
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-lobby-server."""
__version__ = "staging"
__author__ = "AccelByte"
__email__ = "<EMAIL>"
# pylint: disable=line-too-long
from .add_friends_without_con_a5cd59 import AddFriendsWithoutConfirmation
from .get_list_of_friends import GetListOfFriends
from .get_user_friends_updated import GetUserFriendsUpdated
from .get_user_incoming_friends import GetUserIncomingFriends
from .get_user_outgoing_friends import GetUserOutgoingFriends
from .user_accept_friend_request import UserAcceptFriendRequest
from .user_cancel_friend_request import UserCancelFriendRequest
from .user_get_friendship_status import UserGetFriendshipStatus
from .user_reject_friend_request import UserRejectFriendRequest
from .user_request_friend import UserRequestFriend
from .user_unfriend_request import UserUnfriendRequest
| 1.414063 | 1 |
gui.py | charan123g/imageprocessing | 0 | 12772041 | import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy as np
import cv2
#load the trained model to classify sign
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from pickle import dump, load
from tensorflow.keras.preprocessing.image import load_img, img_to_array
# Feature extractor: InceptionV3 truncated at its penultimate layer, so
# each image is encoded as a fixed-length vector.
# NOTE(review): the variable is named vgg_model but actually wraps
# InceptionV3 — consider renaming.
base_model = InceptionV3(weights = 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
vgg_model = Model(base_model.input, base_model.layers[-2].output)
def preprocess_img(img_path):
    """Load an image file and prepare it for InceptionV3.

    @param img_path: path to the image on disk
    @return: preprocessed array with a leading batch dimension
    """
    # InceptionV3 expects 299x299 input
    img = load_img(img_path, target_size = (299, 299))
    x = img_to_array(img)
    # Add a leading batch dimension
    x = np.expand_dims(x, axis = 0)
    x = preprocess_input(x)
    return x
def encode(image):
    """Encode one image into a feature vector using the extractor model.

    @param image: path to an image file
    @return: 1-D numpy feature vector (batch dimension removed)
    """
    image = preprocess_img(image)
    vec = vgg_model.predict(image)
    # Drop the batch dimension: (1, d) -> (d,)
    vec = np.reshape(vec, (vec.shape[1]))
    return vec
# Load the word<->index vocabularies produced at training time.
# Use context managers so the file handles are closed; the original left
# both files open.
with open("wordtoix.pkl", "rb") as pickle_in:
    wordtoix = load(pickle_in)
with open("ixtoword.pkl", "rb") as pickle_in:
    ixtoword = load(pickle_in)
max_length = 74  # maximum caption length used during training
def greedy_search(pic):
    """Decode a caption for the encoded image by greedy (argmax) search.

    @param pic: image feature vector of shape (1, 2048)
    @return: the generated caption without the start/end tokens
    """
    start = 'startseq'
    for i in range(max_length):
        # Map the partial caption to known vocabulary indices.
        seq = [wordtoix[word] for word in start.split() if word in wordtoix]
        seq = pad_sequences([seq], maxlen = max_length)
        yhat = model.predict([pic, seq])
        yhat = np.argmax(yhat)  # most probable next word
        word = ixtoword[yhat]
        start += ' ' + word
        if word == 'endseq':
            break
    final = start.split()
    # Strip 'startseq' and the last token. NOTE(review): this also drops
    # the final real word when the loop ends at max_length without
    # generating 'endseq'.
    final = final[1:-1]
    final = ' '.join(final)
    return final
def beam_search(image, beam_index = 3):
    """Decode a caption with beam search of width ``beam_index``.

    @param image: image feature vector of shape (1, 2048)
    @param beam_index: beam width (number of candidate captions kept)
    @return: the best caption without the start/end tokens
    """
    start = [wordtoix["startseq"]]
    # Each beam entry is [token-index list, accumulated probability].
    # start_word[0][0] = indices of the partial caption
    # start_word[0][1] = its accumulated probability
    start_word = [[start, 0.0]]
    while len(start_word[0][0]) < max_length:
        temp = []
        for s in start_word:
            par_caps = pad_sequences([s[0]], maxlen=max_length)
            e = image
            preds = model.predict([e, np.array(par_caps)])
            # Take the top <beam_index> next-word predictions.
            word_preds = np.argsort(preds[0])[-beam_index:]
            # Extend each candidate so it can be fed through the model again.
            for w in word_preds:
                next_cap, prob = s[0][:], s[1]
                next_cap.append(w)
                prob += preds[0][w]  # NOTE(review): raw probabilities are summed, not log-probs
                temp.append([next_cap, prob])
        start_word = temp
        # Sort candidates by accumulated probability (ascending).
        start_word = sorted(start_word, reverse=False, key=lambda l: l[1])
        # Keep only the top <beam_index> candidates.
        start_word = start_word[-beam_index:]
    start_word = start_word[-1][0]  # best candidate
    intermediate_caption = [ixtoword[i] for i in start_word]
    final_caption = []
    for i in intermediate_caption:
        if i != 'endseq':
            final_caption.append(i)
        else:
            break
    final_caption = ' '.join(final_caption[1:])  # drop 'startseq'
    return final_caption
# Trained captioning model (image features + text decoder).
model = load_model('new-model-1.h5')
# --- GUI setup ---
top=tk.Tk()
top.geometry('800x600')
top.title('Image Caption Generator')
top.configure(background='#CDCDCD')
# One label per caption variant (greedy, beam width 3, beam width 5).
label2=Label(top,background='#CDCDCD', font=('arial',15))
label1=Label(top,background='#CDCDCD', font=('arial',15))
label=Label(top,background='#CDCDCD', font=('arial',15))
sign_image = Label(top)  # image preview area
def classify(file_path):
    """Generate captions for the chosen image and show them in the UI.

    Runs greedy search plus beam search with widths 3 and 5, writing each
    caption into its own label. (Removed the unused "global label_packed"
    declaration — that name is never defined or used.)

    @param file_path: path of the image selected by the user
    """
    enc = encode(file_path)
    image = enc.reshape(1, 2048)
    pred = greedy_search(image)
    print(pred)
    label.configure(foreground='#000', text= 'Greedy: ' + pred)
    label.pack(side=BOTTOM,expand=True)
    beam_3 = beam_search(image)
    print(beam_3)
    label1.configure(foreground='#011638', text = 'Beam_3: ' + beam_3)
    label1.pack(side = BOTTOM, expand = True)
    beam_5 = beam_search(image, 5)
    print(beam_5)
    label2.configure(foreground='#228B22', text = 'Beam_5: ' + beam_5)
    label2.pack(side = BOTTOM, expand = True)
def show_classify_button(file_path):
    """Place the "Generate" button that triggers captioning of file_path."""
    classify_b=Button(top,text="Generate",command=lambda: classify(file_path),padx=10,pady=5)
    classify_b.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
    classify_b.place(relx=0.79,rely=0.46)
def upload_image():
    """Ask the user for an image, preview it, and show the Generate button.

    Best-effort: failures (cancelled dialog, unreadable file) are ignored
    so the UI stays responsive.
    """
    try:
        file_path=filedialog.askopenfilename()
        uploaded=Image.open(file_path)
        uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))
        im=ImageTk.PhotoImage(uploaded)
        sign_image.configure(image=im)
        # Keep a reference so Tk does not garbage-collect the photo.
        sign_image.image=im
        label.configure(text='')
        label1.configure(text='')
        label2.configure(text='')
        show_classify_button(file_path)
    except Exception:
        # Narrowed from a bare except: KeyboardInterrupt / SystemExit now
        # propagate instead of being swallowed.
        pass
# "Upload" button wired to the file chooser.
upload=Button(top,text="Upload an image",command=upload_image,padx=10,pady=5)
upload.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
#label2.pack(side = BOTTOM, expand = True)
# Window title banner.
heading = Label(top, text="Image Caption Generator",pady=20, font=('arial',22,'bold'))
heading.configure(background='#CDCDED',foreground='#FF6348')
heading.pack()
# Enter the Tk event loop.
top.mainloop()
add_jira_label.py | shnax0210/add-jira-label | 0 | 12772042 | <filename>add_jira_label.py<gh_stars>0
import requests
class JiraLabelAdder:
    """Adds a label to every Jira ticket matched by a JQL query."""

    def __init__(self, login, password, jira_base_url, batch_size=50):
        """
        @param login: Jira basic-auth user
        @param password: Jira basic-auth password
        @param jira_base_url: e.g. "https://jira.example.com" (no trailing slash)
        @param batch_size: page size used when querying the search API
        """
        self.login = login
        self.password = password
        self.jira_base_url = jira_base_url
        self.batch_size = batch_size

    def add_label(self, jql, label):
        """Add ``label`` to every ticket matching ``jql``.

        @return: list of per-ticket result dicts with keys
            'ticket', 'label', 'success', 'message'
        """
        with requests.Session() as session:
            session.auth = (self.login, self.password)
            return [self.__add_label(session, ticket, label) for ticket in self.__read_tickets(session, jql)]

    def __add_url_prefix(self, relative_path):
        """Resolve a relative REST path against the Jira base URL."""
        return self.jira_base_url + relative_path

    @staticmethod
    def __check_if_there_are_more_tickets_for_updates(search_result):
        """Return (has_more, next_start_at) for a paged search response."""
        next_start_at = search_result['startAt'] + search_result['maxResults']
        return next_start_at < search_result['total'], next_start_at

    def __read_tickets(self, session, query):
        """Yield every ticket matching ``query``, paging through the
        Jira search API in ``batch_size`` chunks."""
        are_there_more_tickets_for_updates = True
        start_at = 0
        while are_there_more_tickets_for_updates:
            search_result = session.get(self.__add_url_prefix('/rest/api/latest/search'),
                                        params={'jql': query, "startAt": start_at, "maxResults": self.batch_size}).json()
            for ticket in search_result['issues']:
                yield ticket
            are_there_more_tickets_for_updates, start_at = self.__check_if_there_are_more_tickets_for_updates(search_result)

    @staticmethod
    def __create_add_label_result(ticket, label, is_success, message):
        """Build one entry of the list returned by add_label."""
        return {
            'ticket': ticket['key'],
            'label': label,
            'success': is_success,
            'message': message
        }

    @staticmethod
    def __is_label_present(ticket, label):
        """True when the ticket already carries ``label``."""
        ticket_fields = ticket['fields']
        return 'labels' in ticket_fields and ticket_fields['labels'] is not None and label in ticket_fields['labels']

    def __add_label(self, session, ticket, label):
        """Add the label to one ticket.

        Returns a result dict; HTTP failures are reported in the result
        rather than raised.
        """
        if self.__is_label_present(ticket, label):
            return self.__create_add_label_result(ticket, label, False, "The label is already present")
        response = session.put(self.__add_url_prefix('/rest/api/latest/issue/' + ticket['key']),
                               json={"update": {"labels": [{"add": label}]}})
        if response.ok:
            return self.__create_add_label_result(ticket, label, True, "The label added successfully")
        else:
            # Message fix: the original was missing the space before the
            # status code ("...with status400").
            return self.__create_add_label_result(ticket, label, False,
                                                  "Request to add the label failed with status " + str(response.status_code))
| 2.609375 | 3 |
Python/Reals/river_in_brazil/count_bracket.py | Mr-Perfection/coding_practice | 0 | 12772043 | """
for a string with '(' find the count of complete '()' ones, '(()))" does not count, if does not have full brackets, return -1
time & space: O(n), n = length of S
"""
def count_brackets(S):
    """Count the matched '()' pairs in S.

    Returns -1 as soon as a ')' appears with no open '(' to match it.
    Unclosed '(' characters left over at the end are simply ignored.
    """
    open_count = 0
    pairs = 0
    for ch in S:
        if ch == '(':
            open_count += 1
        elif ch == ')':
            # A closer with nothing open means the string is invalid.
            if open_count == 0:
                return -1
            open_count -= 1
            pairs += 1
    return pairs
# Quick smoke test: "(())()" contains three matched pairs.
test = "(())()"
print(count_brackets(test))
| 4 | 4 |
fuzzysets/sets/base.py | StiliyanDr/fuzzy-sets | 1 | 12772044 | import abc
import operator
from fuzzysets import utils
class Domain(abc.ABC):
    """
    Abstract base class representing the domain of a fuzzy set.
    """
    @abc.abstractmethod
    def __iter__(self):
        """
        :returns: a generator yielding the domain's elements, always in
        the same order from one call to the next.
        """
        pass
    @abc.abstractmethod
    def __contains__(self, item):
        pass
    @abc.abstractmethod
    def __eq__(self, other):
        pass
    def __ne__(self, other):
        # Derived from __eq__ so subclasses need override only one.
        return not (self == other)
class FuzzySet(abc.ABC):
    """
    An abstract class for fuzzy set.
    """
    def __init__(self, domain, degrees):
        """
        :param domain: an instance of type Domain.
        :param degrees: a NumPy array of floats in the range [0, 1] -
        the corresponding membership degrees.
        :raises ValueError: if the degrees are invalid.
        """
        self.__set_degrees(degrees)
        self.__domain = domain
        # Lazily computed caches for the derived crisp sets below.
        self.__core = None
        self.__support = None
        self.__cross_over_points = None
    def __set_degrees(self, degrees):
        # Vectorised validation: every entry must lie in [0, 1].
        if (utils.is_membership_degree_v(degrees).all()):
            self.__degrees = degrees
        else:
            raise ValueError("Membership degrees must be "
                             "floats between 0 and 1!")
    def _degree_at(self, i):
        # Positional access for subclasses; i indexes the degrees array.
        return self.__degrees[i]
    @abc.abstractmethod
    def mu(self, x):
        """
        :param x: an element of the domain.
        :returns: the membership degree of `x`, if it is within the
        domain, otherwise 0.
        """
        pass
    @property
    def domain(self):
        """
        :returns: an instance of type Domain - the set's domain.
        """
        return self.__domain
    @property
    def range(self):
        """
        :returns: a generator of floats in the range [0, 1] - the set's
        range.
        """
        return (i for i in self.__degrees)
    def __iter__(self):
        """
        :returns: an generator of pairs (x, d), where x is an element
        of the domain and d is its membership degree.
        """
        # Relies on the Domain contract that iteration order is stable.
        return zip(self.domain, self.range)
    @property
    def core(self):
        """
        :returns: an immutable set of all the elements whose membership
        degree is 1.
        """
        if (self.__core is None):
            self.__core = frozenset(x for x, d in self if (d == 1.))
        return self.__core
    @property
    def support(self):
        """
        :returns: an immutable set of all the elements whose membership
        degree is positive.
        """
        if (self.__support is None):
            self.__support = frozenset(x for x, d in self if (d > 0.))
        return self.__support
    @property
    def cross_over_points(self):
        """
        :returns: an immutable set of all the elements whose membership
        degree is 0.5.
        """
        if (self.__cross_over_points is None):
            self.__cross_over_points = frozenset(
                x for x, d in self if (d == 0.5)
            )
        return self.__cross_over_points
    def alpha_cut(self, alpha):
        """
        :param alpha: a float between 0 and 1.
        :returns: a set of the elements whose membership degree is
        greater or equal to `alpha`.
        """
        alpha = utils.to_float_if_int(alpha)
        utils.validate_alpha(alpha)
        return {x for x, d in self if (d >= alpha)}
    @property
    def height(self):
        """
        :returns: the highest membership degree in the set, 0.0 if it is
        empty.
        """
        # `initial` makes max() well-defined for an empty degrees array.
        return self.__degrees.max(initial=0.0)
    def __eq__(self, other):
        """
        :param other: a value.
        :returns: a boolean value indicating whether `other` is a fuzzy
        set of the same type which has the same domain and membership
        degrees.
        """
        return (isinstance(other, self.__class__) and
                self.__pointwise_comparison(other, operator.eq))
    def __pointwise_comparison(self, other, p, reduction=all):
        # Compares mu(x) of both sets over one shared domain; `p` is a
        # binary predicate and `reduction` folds the per-point results.
        return (self.domain == other.domain and
                reduction(p(self.mu(x), other.mu(x))
                          for x in self._select_between_domains(other)))
    def _select_between_domains(self, other):
        """
        This method is invoked whenever two FS's have equal domains and
        one of them is needed, in case it matters which one it is.
        """
        return self.domain
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        """
        Checks whether the fuzzy set is a proper subset of `other`.
        :param other: an instance of the same FuzzySet subclass.
        :returns: a boolean value indicating whether `other` has the
        same domain and its membership degrees are greater or equal
        to the fuzzy set's membership degrees, with at least one of them
        being greater.
        :raises TypeError: if `other` is not an instance of the same
        class.
        """
        self.__class__.__verify_has_same_class(other)
        # <= everywhere, and strictly < in at least one point.
        return (self.__pointwise_comparison(other, operator.le) and
                self.__pointwise_comparison(other, operator.lt, any))
    @classmethod
    def __verify_has_same_class(cls, other):
        if (not isinstance(other, cls)):
            raise TypeError(
                f"Expected an instance of {cls.__name__!r}!"
            )
    def __gt__(self, other):
        """
        Checks whether the fuzzy set is a proper superset of `other`.
        :raises TypeError: if `other` is not an instance of the same
        class.
        """
        self.__class__.__verify_has_same_class(other)
        return other < self
    def __le__(self, other):
        """
        Checks whether the fuzzy set is a subset of `other`.
        :raises TypeError: if `other` is not an instance of the same
        class.
        """
        self.__class__.__verify_has_same_class(other)
        return self.__pointwise_comparison(other, operator.le)
    def __ge__(self, other):
        """
        Checks whether the fuzzy set is a superset of `other`.
        :raises TypeError: if `other` is not an instance of the same
        class.
        """
        self.__class__.__verify_has_same_class(other)
        return other <= self
    def __norm(self, other, norm):
        # Shared implementation of t_norm/s_norm: combine the two sets
        # pointwise with `norm` into a new set over the same domain.
        self.__verify_has_same_class_and_domain(other)
        return self.__class__._from_domain(
            self._select_between_domains(other),
            mu=lambda x: norm(self.mu(x), other.mu(x))
        )
    def __verify_has_same_class_and_domain(self, other):
        self.__class__.__verify_has_same_class(other)
        if (self.domain != other.domain):
            raise ValueError(f"Domains differ: {self.domain} "
                             f"!= {other.domain}")
    @classmethod
    @abc.abstractmethod
    def _from_domain(cls, domain, mu):
        """
        :param domain: an instance of Domain.
        :param mu: a callable that takes elements of `domain` and
        returns floats in the range [0, 1] (not assumed).
        """
        pass
    def t_norm(self, other, norm=min):
        """
        Finds the t-norm of the fuzzy set and `other`.
        :param other: an instance of the same FuzzySet class.
        :param norm: a callable that takes two membership degrees
        (floats between 0 and 1) and returns a membership degree. This
        callable (denoted by I below) must also satisfy the following
        axioms:
         1) boundary condition:
            I(1, 1) = 1; I(0, 0) = 0; I(0, 1) = 0; I(1, 0) = 0
         2) commutativity:
            I(a, b) = I(b, a)
         3) I is monotonic:
            If a' <= a and b' <= b, then I(a', b') <= I(a, b)
         4) associativity
            I(a, I(b, c)) = I(I(a, b), c)
        Defaults to min.
        :returns: an instance of the same FuzzySet class.
        :raises TypeError: if `other` is not an instance of the same
        class.
        :raises ValueError: if the supplied callable does not return
        membership degrees.
        """
        return self.__norm(other, norm)
    def s_norm(self, other, norm=max):
        """
        Finds the s-norm of the fuzzy set and `other`.
        :param other: an instance of the same FuzzySet class.
        :param norm: a callable that takes two membership degrees
        (floats between 0 and 1) and returns a membership degree. This
        callable (denoted by U below) must also satisfy the following
        axioms:
         1) boundary condition:
            U(1, 1) = 1; U(0, 0) = 0; U(0, 1) = 1; U(1, 0) = 1
         2) commutativity:
            U(a, b) = U(b, a)
         3) U is monotonic:
            If a' <= a and b' <= b, then U(a', b') <= U(a, b)
         4) associativity
            U(a, U(b, c)) = U(U(a, b), c)
        Defaults to max.
        :returns: an instance of the same FuzzySet class.
        :raises TypeError: if `other` is not an instance of the same
        class.
        :raises ValueError: if the supplied callable does not return
        membership degrees.
        """
        return self.__norm(other, norm)
    def complement(self, comp=utils.complement):
        """
        Finds the complement of the fuzzy set.
        :param comp: a callable that takes a membership degree (float
        between 0 and 1) and returns a membership degree. This callable
        (denoted by C below) must also satisfy the following axioms:
         1) boundary condition:
            C(0) = 1; C(1) = 0
         2) if a <= b then C(a) >= C(b)
        Defaults to `1 - x`.
        :returns: an instance of the same FuzzySet class.
        :raises ValueError: if the supplied callable does not return
        membership degrees.
        """
        return self.__class__._from_domain(
            self.domain,
            mu=lambda x: comp(self.mu(x))
        )
    def __repr__(self):
        return f"{self.__class__.__name__}({self.domain})"
    def __str__(self):
        """
        :returns: a str in the format:
        <x 0>/<d 0> + ... + <x n>/<d n>
        where <x i> and <d i> are the elements of the set and their
        membership degrees, respectively.
        """
        return " + ".join(f"{x}/{d:.2f}" for x, d in self)
def t_norm(a, b, norm=min):
    """Module-level convenience wrapper; equivalent to ``a.t_norm(b, norm)``."""
    return a.t_norm(b, norm=norm)
def s_norm(a, b, norm=max):
    """Module-level convenience wrapper; equivalent to ``a.s_norm(b, norm)``."""
    return a.s_norm(b, norm=norm)
def complement(a, comp=utils.complement):
    """Module-level convenience wrapper; equivalent to ``a.complement(comp)``."""
    return a.complement(comp=comp)
def alpha_cut(a, alpha):
    """Module-level convenience wrapper; equivalent to ``a.alpha_cut(alpha)``."""
    return a.alpha_cut(alpha=alpha)
| 3.75 | 4 |
pex/proto/ldap/listener.py | EntySec/pex | 0 | 12772045 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from .tools import LDAPTools
from pex.string import String
from twisted.application import service
from twisted.internet.endpoints import serverFromString
from twisted.internet.protocol import ServerFactory
from twisted.python.components import registerAdapter
from twisted.python import log
from ldaptor.inmemory import fromLDIFFile
from ldaptor.interfaces import IConnectedLDAPEntry
from ldaptor.protocols.ldap.ldapserver import LDAPServer
from ldaptor.protocols.ldap import distinguishedname, ldaperrors
from ldaptor import delta, entry
from ldaptor.protocols import pureldap, pureber
class Handler(LDAPServer):
    """LDAP server protocol that answers every search request with a
    javaNamingReference entry whose javaCodeBase points back at our
    HTTP host (appears intended for JNDI-style payload delivery --
    confirm against callers)."""
    def __init__(self, host, port, payload):
        # host/port: where the HTTP code base (the reply URL) is served.
        self.string_tools = String()
        self.host = host
        self.port = int(port)
        self.payload = payload
        LDAPServer.__init__(self)
    def handle_LDAPSearchRequest(self, request, controls, reply):
        # The search base object carries a base64-encoded command from
        # the client; the decoded value is currently unused below.
        command = request.baseObject.decode()
        command = self.string_tools.base64_decode(command.encode())
        class_name = 'Main'
        reply_url = f'http://{self.host}:{str(self.port)}/'
        # Attributes forming a Java naming reference that tells the
        # client to fetch class `Main` from our reply URL.
        attr = [
            ("javaClassName", [class_name]),
            ("objectClass", ["javaNamingReference"]),
            ("javaCodeBase", [reply_url]),
            ("javaFactory", [class_name])
        ]
        reply(
            pureldap.LDAPSearchResultEntry(
                objectName="",
                attributes=attr
            )
        )
        return pureldap.LDAPSearchResultDone(resultCode=ldaperrors.Success.resultCode)
class Factory(ServerFactory):
    """Builds Handler protocol instances for incoming LDAP connections.

    :param host: address used in the Handler's reply URL.
    :param port: port used in the Handler's reply URL.
    :param root: optional root LDAP entry (stored, unused by Handler).
    :param payload: payload forwarded to each Handler (new, optional,
        backward-compatible; previously buildProtocol crashed because
        Handler.__init__ requires a payload argument).
    """
    protocol = Handler
    def __init__(self, host, port, root=None, payload=None):
        self.root = root
        # Fixed: `super(ServerFactory).__init__()` built an unbound super
        # object and never initialised the base class.
        super().__init__()
        self.host = host
        self.port = int(port)
        self.payload = payload
        # buildProtocol reads this; it was previously never defined,
        # raising AttributeError on the first connection.
        self.debug = False
    def buildProtocol(self, addr):
        # Handler.__init__ signature is (host, port, payload); the
        # payload argument was previously missing (TypeError).
        proto = self.protocol(self.host, self.port, self.payload)
        proto.debug = self.debug
        proto.factory = self
        return proto
class LDAPListen:
    """Placeholder LDAP listener: listen/stop/accept are unimplemented
    stubs (each try body is `pass`, so the except branch is unreachable
    and every method returns None)."""
    def __init__(self, host, port, methods={}):
        # NOTE(review): `methods` is accepted but never stored; the
        # mutable default is harmless only because it is unused.
        self.http_tools = LDAPTools()
        self.handler = Handler
        self.host = host
        self.port = int(port)
        self.sock = None
    def listen(self):
        # Stub - not yet implemented.
        try:
            pass
        except Exception:
            return False
    def stop(self):
        # Stub - not yet implemented.
        try:
            pass
        except Exception:
            return False
    def accept(self):
        # Stub - not yet implemented.
        try:
            pass
        except Exception:
            return False
class LDAPListener:
    """Convenience namespace for constructing LDAP listeners."""
    @staticmethod
    def listen_http(host, port):
        """Create an LDAPListen bound to the given host and port."""
        return LDAPListen(host=host, port=port)
| 1.601563 | 2 |
tests/unit/providers/aws/test_AWSConfigBuilder.py | romsok24/epiphany | 0 | 12772046 | from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder
from cli.src.helpers.objdict_helpers import dict_to_objdict
def test_get_resource_group_should_set_proper_values_to_model():
    builder = InfrastructureBuilder(
        [get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')]
    )
    result = builder.get_resource_group()
    # Names are lower-cased and prefixed by the builder.
    assert result.specification.name == 'prefix-testcluster-rg'
    assert result.specification.cluster_name == 'testcluster'
def test_get_vpc_config_should_set_proper_values_to_model():
    builder = InfrastructureBuilder(
        [get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')]
    )
    result = builder.get_vpc_config()
    assert result.specification.name == 'prefix-testcluster-vpc'
    assert result.specification.address_pool == '10.20.0.0/22'
def test_get_default_security_group_config_should_set_proper_values_to_model():
    builder = InfrastructureBuilder(
        [get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')]
    )
    vpc_config = dict_to_objdict({'specification': {'name': 'prefix-testcluster-vpc'}})
    result = builder.get_default_security_group_config(vpc_config)
    # The security group must reference the VPC it belongs to.
    assert result.specification.vpc_name == 'prefix-testcluster-vpc'
def test_get_efs_config_should_set_proper_values_to_model():
    builder = InfrastructureBuilder(
        [get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')]
    )
    result = builder.get_efs_config()
    assert result.specification.token == 'aws-efs-token-testcluster'
    assert result.specification.name == 'prefix-testcluster-efs'
def test_get_subnet_config_should_set_proper_values_to_model():
    builder = InfrastructureBuilder([get_cluster_model(cluster_name='TestCluster')])
    component_value = dict_to_objdict({
        'address_pool': '10.20.0.0/24',
        'availability_zone': 'eu-westa',
    })
    result = builder.get_subnet(component_value, 'component', 'my-test-vpc', 1)
    assert result.specification.name == 'prefix-testcluster-component-subnet-1'
    assert result.specification.vpc_name == 'my-test-vpc'
    assert result.specification.cidr_block == '10.20.0.0/24'
    assert result.specification.availability_zone == 'eu-westa'
def test_get_security_group_should_set_proper_values_to_model():
    builder = InfrastructureBuilder([get_cluster_model(cluster_name='TestCluster')])
    subnet = dict_to_objdict({'specification': {'cidr_block': '10.21.0.0/24'}})
    result = builder.get_security_group(subnet, 'component', 'my-test-vpc', 1)
    assert result.specification.name == 'prefix-testcluster-component-security-group-1'
    assert result.specification.vpc_name == 'my-test-vpc'
    assert result.specification.cidr_block == '10.21.0.0/24'
def test_get_route_table_association_should_set_proper_values_to_model():
    builder = InfrastructureBuilder([get_cluster_model(cluster_name='TestCluster')])
    result = builder.get_route_table_association('route-table-name', 'component', 'test-subnet', 1)
    assert result.specification.name == 'prefix-testcluster-component-1-route-association'
    assert result.specification.subnet_name == 'test-subnet'
    assert result.specification.route_table_name == 'route-table-name'
def test_get_internet_gateway_should_set_proper_values_to_model():
    builder = InfrastructureBuilder([get_cluster_model(cluster_name='TestCluster')])
    result = builder.get_internet_gateway('test-vpc-name')
    assert result.specification.name == 'prefix-testcluster-internet-gateway'
    assert result.specification.vpc_name == 'test-vpc-name'
def test_get_routing_table_should_set_proper_values_to_model():
    builder = InfrastructureBuilder([get_cluster_model(cluster_name='TestCluster')])
    result = builder.get_routing_table('test-vpc-name', 'test-internet-gateway')
    assert result.specification.name == 'prefix-testcluster-route-table'
    assert result.specification.vpc_name == 'test-vpc-name'
    assert result.specification.route.gateway_name == 'test-internet-gateway'
def get_cluster_model(address_pool='10.22.0.0/22', cluster_name='EpiphanyTestCluster'):
    """Build a minimal epiphany-cluster document for the AWS builder tests."""
    cloud = {
        'vnet_address_pool': address_pool,
        'network': {'use_network_security_groups': True},
        'default_os_image': 'default',
        'use_public_ips': True,
    }
    return dict_to_objdict({
        'kind': 'epiphany-cluster',
        'provider': 'aws',
        'specification': {
            'name': cluster_name,
            'prefix': 'prefix',
            'cloud': cloud,
        },
    })
| 2.171875 | 2 |
docs/cross.py | txt/sin21 | 1 | 12772047 | import random,math
def distribution(decay, buckets):
    """Return `buckets` geometrically decaying random weights,
    normalized to sum to 1 and sorted ascending."""
    weight = random.random()
    weights = [weight]
    for _ in range(buckets - 1):
        weight = weight * decay
        weights.append(weight)
    total = sum(weights)
    return sorted(w / total for w in weights)
def run(n=1000, decay=0.99, dimensions=10, buckets=10):
    """Sample `n` products, picking one random bucket value per dimension;
    prints the per-dimension distributions, returns samples largest first."""
    dists = [distribution(decay, buckets) for _ in range(dimensions)]
    print()
    for dist in dists:
        print(dist)
    print()
    samples = [math.prod(random.choice(dist) for dist in dists) for _ in range(n)]
    return sorted(samples, reverse=True)
for x in run(decay=.9,dimensions=6,buckets=10,n=1000): print(x)
| 3.28125 | 3 |
config/production.py | javicacheiro/device-info-service | 0 | 12772048 | DEBUG = False
CONSUL_URL = 'http://consul:8500/v1/kv'
| 1.007813 | 1 |
train_2.py | pengfeidip/refindeDet_Pytorch | 12 | 12772049 | <reponame>pengfeidip/refindeDet_Pytorch<gh_stars>10-100
"""Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap
from data import CSVDataset, CSVAnnotationTransform
import torch.utils.data as data
from tqdm import tqdm
import pandas as pd
from models.refinedet import build_refinedet
from mAP import ComputemAP
import sys
import os
import time
import argparse
import numpy as np
import pickle
import csv
import cv2
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
def str2bool(v):
    """Interpret the common truthy strings "yes", "true", "t" and "1"
    (case-insensitively) as True; anything else is False."""
    return v.lower() in {"yes", "true", "t", "1"}
# Command-line configuration. Note: parse_args() runs at import time,
# so importing this module as a library will consume sys.argv.
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Evaluation')
# './backup_weights/RefineDet320_CSV_original.pth'
#./weights/RefineDet320_CSV_final.pth
parser.add_argument('--trained_model',
                    default='./focal_loss_weights/RefineDet320_CSV_55000.pth', type=str,
                    help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
                    help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
                    help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default='./results',
                    help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
                    help='Cleanup and remove results files following eval')
parser.add_argument('--input_size', default='320', choices=['320', '512'],
                    type=str, help='RefineDet320 or RefineDet512')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)
# Pick the default tensor type: CUDA floats when a GPU is available and
# --cuda was requested, CPU floats otherwise.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't using \
              CUDA.  Run with --cuda for optimal eval speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
# BGR channel means subtracted during preprocessing (BaseTransform).
dataset_mean = (104, 117, 123)
class Timer(object):
    """A simple wall-clock timer accumulating elapsed intervals.

    Attributes: total_time (sum of all intervals), calls (number of
    toc() calls), start_time, diff (last interval), average_time.
    """
    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
    def tic(self):
        # time.time (not time.clock) so multithreading is not normalized away.
        self.start_time = time.time()
    def toc(self, average=True):
        """Stop the current interval; return the average (default) or
        the last interval's duration."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def test_net(net, dataset, gt_file):
    """Evaluate detections previously dumped to pred.csv against `gt_file`.

    NOTE(review): `net` is unused here; predictions are read from disk
    rather than produced by a forward pass - confirm this is intended.
    """
    preds = pd.read_csv('pred.csv', header=None).values.tolist()
    print('Evaluating detections')
    evaluate_detections(preds, dataset, gt_file)
def evaluate_detections(preds, dataset, gt_file):
    """Compute and print per-class AP and overall mAP at IoU 0.5."""
    with open(gt_file) as f:
        labels = list(csv.reader(f))
    print("Computing the mAp ing...... please wait a moment")
    compute = ComputemAP(preds=preds, labels=labels,
                         class_list=list(dataset.classes.keys()),
                         iou_thresh=0.5, use_cuda=False)
    mAP, ap_list = compute()
    for ap in ap_list:
        print(ap)
    print(mAP)
if __name__ == '__main__':
    # load net
    num_classes = len(labelmap) + 1                      # +1 for background
    # NOTE(review): evaluation runs on the trainval split, not a held-out
    # test set - confirm this is intentional.
    gt_file = "./csv/voc/0712_trainval.csv"
    dataset = CSVDataset(csv_file=gt_file,
                         classes_file='./csv/voc/classes.csv',
                         transform=BaseTransform(int(args.input_size),
                                                 dataset_mean))
    net = build_refinedet('test', int(args.input_size), dataset.num_classes)
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(net, dataset, gt_file)
| 2.03125 | 2 |
examples/small.py | bvarjavand/ngl-bot | 0 | 12772050 | from ngubot.utils.base import BaseGame
import pyautogui as pag
import time
game = BaseGame()
def clean(game):
    """Merge and boost the standard equipment/inventory slots, then
    consume the Infinity Cube with a right-click."""
    gear = ["Head", "Chest", "Legs", "Boots", "Weapon", "Accessory1"]
    merge_slots = gear + ["0_%d" % i for i in range(8)]
    boost_slots = gear + ["0_0", "0_1", "0_2", "0_6", "3_9"]
    use_path = True
    for slot in merge_slots:
        game.move(slot, use_path)
        pag.press("d")
        # Only the first move needs pathing to the inventory screen.
        use_path = False
    for slot in boost_slots:
        game.move(slot, use_path)
        pag.press("a")
    game.move("InfCube", False)
    pag.click(
        game._shift(game.coords["Inventory"]["InfCube"]["Button"]), button="right"
    )
# Main automation loop: one full rebirth cycle per iteration.
# NOTE(review): the bare "# N, M" comments below look like expected
# in-game progress markers at each checkpoint - unverified.
while True:
    game.click("IdleAttackPlus", True)
    clean(game)
    game.click("Nuke", True)
    game.click("Right Arrow", True)
    for _ in range(4):
        game.click("Right Arrow", False)
    game.click("ShoulderPlus", True)
    game.click("TackPlus", True)
    time.sleep(300)
    # 330, 5.5
    game._focus_window()
    clean(game)
    game.click("ShoulderPlus", True)
    game.click("TackPlus", True)
    time.sleep(300)
    # 750, 12.5
    game._focus_window()
    clean(game)
    game.click("Nuke", True)
    # Reset the zone selection to the leftmost, then step right again.
    game.click("Left Arrow", True)
    for _ in range(6):
        game.click("Left Arrow", False)
    for _ in range(6):
        game.click("Right Arrow", False)
    pag.press("r")
    game.click("TMEPlus", True)
    game.click("TackPlus", True)
    time.sleep(300)
    # 1100, 18.3
    game._focus_window()
    clean(game)
    time.sleep(300)
    # 1420, 23.6
    game._focus_window()
    clean(game)
    game.click("Nuke", True)
    game.click("Left Arrow", True)
    for _ in range(6):
        game.click("Left Arrow", False)
    for _ in range(6):
        game.click("Right Arrow", False)
    pag.press("r")
    game.click("ShoulderPlus", True)
    game.click("TackPlus", True)
    time.sleep(300)
    # 1750, 29.1 (adv training unlocked)
    game._focus_window()
    clean(game)
    pag.press("r")
    game.click("Adv. Training", False)
    game.click("PowerPlus", False)
    pag.press("t")
    game.click("CutsPlus")
    time.sleep(240)
    # ~ 2000, 33.3
    game._focus_window()
    clean(game)
    # 0, 0
    # End of cycle: feed the pet and rebirth.
    game.click("Feed Me", True)
    game.click("Really", True)
    game.click("RebirthYeah", False)
    time.sleep(1)
| 2.703125 | 3 |