max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tools/extract/screener.py | renoneto/swing_trading | 8 | 12770451 | <reponame>renoneto/swing_trading<filename>tools/extract/screener.py
from bs4 import BeautifulSoup
from requests import Session
import pandas as pd
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '\
'AppleWebKit/537.36 (KHTML, like Gecko) '\
'Chrome/75.0.3770.80 Safari/537.36'}
def finviz_pull(stocks_table, url='https://finviz.com/screener.ashx?v=111&f=cap_midover&ft=4&o=volume'):
"""
Scrapes stock symbols from the Finviz screener page at the given URL.
Each row's symbol, company name, industry, sector and market cap are collected
and loaded into the provided `stocks_table` via its `load_data` method.
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# Extract data from Finviz - parse html
screener = s.get(url)
soup = BeautifulSoup(screener.text, 'html.parser')
# Figure out number of stocks
total_stocks_element = soup.find(class_ = 'count-text').text[7:]
stop_position = total_stocks_element.find(' ')
total_stocks = int(total_stocks_element[:stop_position])
# Empty list to store stocks
my_stocks = []
# Pages and number of stocks
page = 1
stocks_imported = 0
while stocks_imported < total_stocks:
# Create new url
new_url = url + '&r=' + str(page)
# Pull data and parse html
stock_data = s.get(new_url)
soup = BeautifulSoup(stock_data.text, 'html.parser')
# Table with stocks
table_element_1 = soup.find_all(class_='table-dark-row-cp')
table_element_2 = soup.find_all(class_='table-light-row-cp')
table_element = table_element_1 + table_element_2
# For each line extract the symbol, name and industry
for idx, row in enumerate(table_element):
# Creating table with all 'a' elements
symbol_table = row.find_all('a')
# Symbol
symbol = symbol_table[1].text
# Name
symbol_name = symbol_table[2].text
# Industry
symbol_industry = symbol_table[3].text
# Sector
symbol_sector = symbol_table[4].text
# Market Cap
symbol_marketcap = symbol_table[6].text
# Append all
my_stocks.append([symbol, symbol_name, symbol_industry, symbol_sector, symbol_marketcap])
stocks_imported += 1
if stocks_imported == total_stocks:
print(f"Total of {stocks_imported} stocks imported")
print('Done loading')
# Add 20 to page number, to go to next page. Each page contains 20 stocks.
page += 20
# Data Frame
df = pd.DataFrame(my_stocks, columns = ['symbol', 'description', 'industry', 'sector', 'market_cap'])
# Move it to the database
stocks_table.load_data(df, id_columns=['symbol'], is_replace=True)
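# --- Illustrative usage sketch (not part of the original module) ---
# `stocks_table` is assumed to be a small database wrapper exposing a
# `load_data(df, id_columns, is_replace)` method, as implied by the call above.
# The stand-in class below only prints the scraped DataFrame, for local testing.
if __name__ == '__main__':
    class _PrintOnlyTable:
        def load_data(self, df, id_columns, is_replace):
            print(df.head())
            print('%d rows scraped' % len(df))

    finviz_pull(_PrintOnlyTable())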
| 3.25 | 3 |
resample/mov.py | VenoMpie/pyrescene | 18 | 12770452 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 pyReScene
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Docs for a quicker understanding:
# http://wiki.multimedia.cx/index.php?title=QuickTime_container
# http://code.google.com/p/mp4parser/
import os
import struct
from rescene.utility import is_rar
from rescene.rarstream import RarStream
BE_LONG = struct.Struct('>L') # unsigned long: 4 bytes
BE_LONGLONG = struct.Struct('>Q') # unsigned long long: 8 bytes
class MovReadMode(object):
MP4, Sample, SRS = list(range(3))
# MP4 == Sample, but doesn't throw InvalidDataException
class InvalidDataException(ValueError):
pass
class Atom(object):
def __init__(self, size, object_guid):
"""size: full size of the atom (including 2 first header fields)
object_guid: the type of the atom (moov, mdat,...)"""
self.size = size
self.type = object_guid
self.raw_header = b""
self.start_pos = -1
def __repr__(self, *args, **kwargs):
return "<Atom type=%r size=%d start_pos=%d>" % (self.type,
self.size, self.start_pos)
class MovReader(object):
"""Implements a simple Reader class that reads through MP4
or MP4-SRS files one atom/box at a time.
atom: QuickTime File Format
box: ISO/IEC 14496-12:2008"""
def __init__(self, read_mode, path=None, stream=None,
archived_file_name=""):
assert path or stream
if path:
if is_rar(path):
self._mov_stream = RarStream(path, archived_file_name)
else:
self._mov_stream = open(path, 'rb')
elif stream:
self._mov_stream = stream
self._mov_stream.seek(0, 2)
self._file_length = self._mov_stream.tell()
self._mov_stream.seek(0)
self.mode = read_mode
self.read_done = True
self.current_atom = None
self.atom_type = None
def read(self):
# "Read() is invalid at this time", "MoveToChild(), ReadContents(), or
# SkipContents() must be called before Read() can be called again")
assert self.read_done or (self.mode == MovReadMode.SRS and
self.atom_type == b"mdat")
atom_start_position = self._mov_stream.tell()
self.current_atom = None
self.read_done = False
# no room for size (4B) and type (4B) of the atom
if atom_start_position + 8 > self._file_length:
return False
self._atom_header = self._mov_stream.read(8)
# 4 bytes for atom length, 4 bytes for atom type
(atom_length,) = BE_LONG.unpack_from(self._atom_header)
self.atom_type = self._atom_header[4:]
# special sizes
hsize = 8
if atom_length == 1:
# 8-byte size field after the atom type
bsize = self._mov_stream.read(8)
(atom_length,) = BE_LONGLONG.unpack(bsize)
self._atom_header += bsize
hsize += 8
elif atom_length == 0:
# print("Box without size found.")
# FoV/COMPULSiON samples have an atom that consists of just 8
# null bytes. This is the case if it is followed by an mdat
# try to make it work with those samples too
# https://code.google.com/p/mp4parser/ can not open these files!
if self.atom_type == b"\x00\x00\x00\x00":
atom_length = 8
else:
# the atom extends to the end of the file
atom_length = self._file_length - atom_start_position
# sanity check on atom length
# Skip check on mdat so we can still report expected size.
# This is only applied on samples,
# since a partial movie might still be useful.
end_offset = atom_start_position + atom_length
if (self.mode == MovReadMode.Sample and self.atom_type != b"mdat" and
end_offset > self._file_length):
raise InvalidDataException("Invalid box length at 0x%08X" %
atom_start_position)
self.current_atom = Atom(atom_length, self.atom_type)
self.current_atom.raw_header = self._atom_header
self.current_atom.start_pos = atom_start_position
self._mov_stream.seek(atom_start_position, os.SEEK_SET)
# Apple Computer reserves
# all four-character codes consisting entirely of lowercase letters.
return True
def read_contents(self):
# if read_done is set, we've already read or skipped it.
# back up and read again?
if self.read_done:
self._mov_stream.seek(self.current_atom.start_pos, os.SEEK_SET)
self.read_done = True
buff = b""
if (self.mode != MovReadMode.SRS and self.atom_type == b"mdat"):
raise NotImplementedError("Programming error: implement this "
"for mdat atoms using the chunk method. These mdat atoms "
"can become enormous and cause a MemoryError.")
# do always when it's not a SRS file
# else skip it when encountering removed data
if (self.mode != MovReadMode.SRS or self.atom_type != b"mdat"):
# skip header bytes
hl = len(self.current_atom.raw_header)
self._mov_stream.seek(hl, os.SEEK_CUR)
buff = self._mov_stream.read(self.current_atom.size - hl)
return buff
def read_contents_chunks(self, chunk_size=65536):
"""Lazy function (generator) to read a lot of data piece by piece."""
if self.atom_type != b"mdat" or self.mode == MovReadMode.SRS:
raise NotImplementedError("Only use this for 'mdat' atoms.")
self.read_done = True
# skip header bytes
hl = len(self.current_atom.raw_header)
self._mov_stream.seek(self.current_atom.start_pos + hl, os.SEEK_SET)
end_offset = self.current_atom.start_pos + self.current_atom.size
todo = self.current_atom.size - hl # to prevent ending up in a loop
while todo != 0 and self._mov_stream.tell() + todo == end_offset:
amount = end_offset - self._mov_stream.tell()
if amount > chunk_size:
amount = chunk_size
todo -= amount
yield self._mov_stream.read(amount)
def skip_contents(self):
if not self.read_done:
self.read_done = True
# do always when it's not a SRS file
# else skip it when encountering removed data
if (self.mode != MovReadMode.SRS
or self.atom_type != b"mdat"):
self._mov_stream.seek(self.current_atom.start_pos +
self.current_atom.size,
os.SEEK_SET)
def move_to_child(self):
self.read_done = True
# skip the header bytes
hl = len(self.current_atom.raw_header)
self._mov_stream.seek(hl, os.SEEK_CUR)
def close(self):
try: # close the file/stream
self._mov_stream.close()
except:
pass
def __del__(self):
try: # close the file/stream
self._mov_stream.close()
except:
pass
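# --- Illustrative usage sketch (not part of the original module) ---
# Walks the top-level atoms/boxes of an MP4/MOV sample and prints their type
# and size. The file name is a placeholder; skip_contents() is used so the
# potentially huge 'mdat' payload is never loaded into memory.
if __name__ == '__main__':
    reader = MovReader(MovReadMode.Sample, path="sample.mp4")  # hypothetical file
    try:
        while reader.read():
            print(reader.current_atom)
            reader.skip_contents()
    finally:
        reader.close()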
| 1.945313 | 2 |
PEPit/examples/unconstrained_convex_minimization/subgradient_method.py | bgoujaud/PEPit | 44 | 12770453 | from math import sqrt
from PEPit import PEP
from PEPit.functions import ConvexLipschitzFunction
def wc_subgradient_method(M, n, gamma, verbose=1):
"""
Consider the minimization problem
.. math:: f_\\star \\triangleq \\min_x f(x),
where :math:`f` is convex and :math:`M`-Lipschitz. This problem is a (possibly non-smooth) minimization problem.
This code computes a worst-case guarantee for the **subgradient** method. That is, it computes
the smallest possible :math:`\\tau(n, M, \\gamma)` such that the guarantee
.. math:: \\min_{0 \\leqslant t \\leqslant n} f(x_t) - f_\\star \\leqslant \\tau(n, M, \\gamma) \\|x_0 - x_\\star\\|
is valid, where :math:`x_t` is the output of the **subgradient** method after :math:`t\\leqslant n` steps,
and where :math:`x_\\star` is the minimizer of :math:`f`.
In short, for given values of :math:`M`, the step-size :math:`\\gamma` and the number of iterations :math:`n`,
:math:`\\tau(n, M, \\gamma)` is computed as the worst-case value of
:math:`\\min_{0 \\leqslant t \\leqslant n} f(x_t) - f_\\star` when :math:`\\|x_0-x_\\star\\| \\leqslant 1`.
**Algorithm**:
For :math:`t\\in \\{0, \\dots, n-1 \\}`
.. math::
:nowrap:
\\begin{eqnarray}
g_{t} & \\in & \\partial f(x_t) \\\\
x_{t+1} & = & x_t - \\gamma g_t
\\end{eqnarray}
**Theoretical guarantee**: The **tight** bound is obtained in [1, Section 3.2.3] and [2, Eq (2)]
.. math:: \\min_{0 \\leqslant t \\leqslant n} f(x_t) - f(x_\\star) \\leqslant \\frac{M}{\\sqrt{n+1}}\\|x_0-x_\\star\\|,
and tightness follows from the lower complexity bound for this class of problems, e.g., [3, Appendix A].
**References**: Classical references on this topic include [1, 2].
`[1] <NAME> (2003). Introductory lectures on convex optimization: A basic course.
Springer Science & Business Media.
<https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.855&rep=rep1&type=pdf>`_
`[2] <NAME>, <NAME>, <NAME> (2003). Subgradient Methods (lecture notes).
<https://web.stanford.edu/class/ee392o/subgrad_method.pdf>`_
`[3] <NAME>, <NAME> (2016). An optimal variant of Kelley's cutting-plane method.
Mathematical Programming, 160(1), 321-351.
<https://arxiv.org/pdf/1409.2636.pdf>`_
Args:
M (float): the Lipschitz parameter.
n (int): the number of iterations.
gamma (float): step-size.
verbose (int): Level of information details to print.
- -1: No verbose at all.
- 0: This example's output.
- 1: This example's output + PEPit information.
- 2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value
theoretical_tau (float): theoretical value
Example:
>>> M = 2
>>> n = 6
>>> gamma = 1 / (M * sqrt(n + 1))
>>> pepit_tau, theoretical_tau = wc_subgradient_method(M=M, n=n, gamma=gamma, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 9x9
(PEPit) Setting up the problem: performance measure is minimum of 7 element(s)
(PEPit) Setting up the problem: initial conditions (1 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 1 function(s)
function 1 : 64 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.7559825331741553
*** Example file: worst-case performance of subgradient method ***
PEPit guarantee: min_(0 \leq t \leq n) f(x_i) - f_* <= 0.755983 ||x_0 - x_*||
Theoretical guarantee: min_(0 \leq t \leq n) f(x_i) - f_* <= 0.755929 ||x_0 - x_*||
"""
# Instantiate PEP
problem = PEP()
# Declare a convex lipschitz function
func = problem.declare_function(ConvexLipschitzFunction, M=M)
# Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
xs = func.stationary_point()
fs = func(xs)
# Then define the starting point x0 of the algorithm
x0 = problem.set_initial_point()
# Set the initial constraint that is the distance between x0 and xs
problem.set_initial_condition((x0 - xs)**2 <= 1)
# Run n steps of the subgradient method
x = x0
gx, fx = func.oracle(x)
for _ in range(n):
problem.set_performance_metric(fx - fs)
x = x - gamma * gx
gx, fx = func.oracle(x)
# Set the performance metric to the function value accuracy
problem.set_performance_metric(fx - fs)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
theoretical_tau = M / sqrt(n + 1)
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of subgradient method ***')
print('\tPEPit guarantee:\t min_(0 \leq t \leq n) f(x_i) - f_* <= {:.6} ||x_0 - x_*||'.format(pepit_tau))
print('\tTheoretical guarantee:\t min_(0 \leq t \leq n) f(x_i) - f_* <= {:.6} ||x_0 - x_*||'.format(
theoretical_tau))
# Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
return pepit_tau, theoretical_tau
if __name__ == "__main__":
M = 2
n = 6
gamma = 1 / (M * sqrt(n + 1))
pepit_tau, theoretical_tau = wc_subgradient_method(M=M, n=n, gamma=gamma, verbose=1)
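# Quick sanity check on the theoretical bound (illustrative note, not part of
# the original file): with M = 2 and n = 6, tau = M / sqrt(n + 1) = 2 / sqrt(7)
# ~ 0.755929, which matches the "Theoretical guarantee" line in the docstring
# example above; the PEPit worst-case value lands slightly above it
# (~ 0.755983) up to solver accuracy.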
| 3.90625 | 4 |
BasicExperiments/MovieListTask_Builder_d1.py | djangraw/PsychoPyParadigms | 50 | 12770454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.90.1),
on Tue May 8 15:02:31 2018
Created 5/8/18 by DJ.
Updated 5/8/18 by DJ - added NetStationEEG code from https://github.com/imnotamember/PyNetstation (per Pete's instructions)
Updated 6/8/18 by DJ - made into list of movie files instead of single movie files
"""
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
# Declare movie params
params = {
'movieFileList': u'/Users/jangrawdc/Documents/Python/PsychoPyParadigms/BasicExperiments/Movies/MovieList.txt', # text file with spaces/linebreaks between movies
'imiDur': 5.0, # time between movies.
'warmUpTime': 6.0, # time before first movie
'coolDownTime': 6.0, # time after last movie
'movieSize': (640.0*3,360.0*3), # for Boldscreen
'fixCrossHeight': 0.5,
# eeg params
'isEegConnected': False, # is an EGI EEG system connected?
'tcpipAddress': '10.10.10.42',
'tcpipPort': 55513
}
# Load movies
fid = open(params['movieFileList'],'r')
movieFileText = fid.read()
movieFiles = movieFileText.split()
fid.close()
print("%d movie files read." % (len(movieFiles)))
print(movieFiles)
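# Illustrative example (not part of the original script): the movie list file
# is plain text with one path per whitespace-separated token, e.g.
#   Movies/clip01.mp4
#   Movies/clip02.mp4
#   Movies/clip03.mp4
# (file names here are placeholders; paths may be absolute or relative.)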
# === EEG === #
# === Initialize
if params['isEegConnected'] == False:
# # This will import the debugging version of the PyNetStation module,
# # which will not actually attempt a connection but will check to make sure
# # your code is properly functioning.
import egi.fake as egi # FOR TESTING WITHOUT CONNECTION TO NETSTATION COMPUTER
else:
# # This will import the single-threaded version of the PyNetStation module
import egi.simple as egi # FOR RUNNING CONNECTED TO NETSTATION COMPUTER -- USE THIS IN A REAL EXPERIMENT
# === Timing Obj
# # Create a proper timing object to reference. To retrieve the time you want later,
# # call this method using ms_localtime(), it returns the time in a millisecond format
# # appropriate for the NetStation TCP/IP protocol.
# # This is only necessary if you are in need of direct contact with the clock object that NetStation is utilizing,
# # which you don't actually need since it's working behind the scenes in the egi module.
# ms_localtime = egi.ms_localtime
# === Netstation Obj
# # Create the NetStation event-sending object. After this you can call
# # the methods via the object instance, in this case 'ns'.
ns = egi.Netstation()
# === Establish Cxn
# # The next line is for connecting the actual, single-threaded module version to the computer.
if params['isEegConnected']:
ns.connect(params['tcpipAddress'], params['tcpipPort']) # sample address and port -- change according to your network settings
# === Link Expt to Session
# # This sends some initialization info to NetStation for recording events.
ns.BeginSession()
# # This synchronizes the clocks of the stim computer and the NetStation computer.
ns.sync()
# === END EEG === #
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
# Store info about the experiment session
expName = u'MovieListTask_Builder_d1' # Name of experiment
expInfo = {'session': '001', 'participant': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=u'/Users/jangrawdc/Documents/Python/PsychoPyParadigms/BasicExperiments/TEST.psyexp',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=(1024, 768), fullscr=True, screen=0,
allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# Initialize components for Routine "Trigger"
TriggerClock = core.Clock()
TriggerText = visual.TextStim(win=win, name='TriggerText',
text=u'Waiting for trigger...\n\n(Experimenter: press t to override.)',
font=u'Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color=u'white', colorSpace='rgb', opacity=1,
depth=0.0);
# Initialize components for Routine "Fixation"
fixationClock = core.Clock()
fixCross = visual.TextStim(win=win, name='fixCross',
text=u'+',
font=u'Arial',
pos=(0, 0), height=params['fixCrossHeight'], wrapWidth=None, ori=0,
color=u'white', colorSpace='rgb', opacity=1,
depth=0.0);
# Initialize components for Routine "Movie"
MovieClock = core.Clock()
movie = []
movieDur = [0]*len(movieFiles)
for i in range(len(movieFiles)):
movie.append(visual.MovieStim3(
win=win, name='movie',
noAudio = False,
filename=movieFiles[i],
ori=0, pos=(0, 0), size=params['movieSize'], opacity=1,
depth=0.0,
))
# save out duration of this movie
movieDur[i] = movie[i].duration
# print results for debugging
print('Movie %d: loaded %s'%(i, movieFiles[i]))
print('duration: %f'%movieDur[i]);
ImiText = fixCross
# Initialize components for Routine "WaitForEnd"
WaitForEndClock = core.Clock()
WaitForEndText = visual.TextStim(win=win, name='WaitForEndText',
text=u'Please stay still until\nthe scanner noise stops.',
font=u'Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color=u'white', colorSpace='rgb', opacity=1,
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# === EEG === #
# # This starts the recording in NetStation acquisition. Equivalent to pressing the Record button.
# # If at some point you pause the experiment using the "StopRecording()" method,
# # just call this method again to restart the recording.
ns.StartRecording()
# === END EEG === #
# ------Prepare to start Routine "Trigger"-------
t = 0
TriggerClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
TriggerKey = event.BuilderKeyResponse()
# keep track of which components have finished
TriggerComponents = [TriggerText, TriggerKey]
for thisComponent in TriggerComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='WAIT', timestamp=None, label="WaitForTrigger", description="Waiting for Trigger from fMRI", pad=False)
# === END EEG === #
# -------Start Routine "Trigger"-------
while continueRoutine:
# get current time
t = TriggerClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *TriggerText* updates
if t >= 0.0 and TriggerText.status == NOT_STARTED:
# keep track of start time/frame for later
TriggerText.tStart = t
TriggerText.frameNStart = frameN # exact frame index
TriggerText.setAutoDraw(True)
# *TriggerKey* updates
if t >= 0.0 and TriggerKey.status == NOT_STARTED:
# keep track of start time/frame for later
TriggerKey.tStart = t
TriggerKey.frameNStart = frameN # exact frame index
TriggerKey.status = STARTED
# keyboard checking is just starting
win.callOnFlip(TriggerKey.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
if TriggerKey.status == STARTED:
theseKeys = event.getKeys(keyList=['t'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
TriggerKey.keys = theseKeys[-1] # just the last key pressed
TriggerKey.rt = TriggerKey.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in TriggerComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Trigger"-------
for thisComponent in TriggerComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if TriggerKey.keys in ['', [], None]: # No response was made
TriggerKey.keys=None
thisExp.addData('TriggerKey.keys',TriggerKey.keys)
if TriggerKey.keys != None: # we had a response
thisExp.addData('TriggerKey.rt', TriggerKey.rt)
thisExp.nextEntry()
# the Routine "Trigger" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "fixation"-------
t = 0
fixationClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(params['warmUpTime'])
# update component parameters for each repeat
# keep track of which components have finished
fixationComponents = [fixCross]
for thisComponent in fixationComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='FIX', timestamp=None, label="Fixation", description="Fixation Cross", pad=False)
# === END EEG === #
# -------Start Routine "fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = fixationClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixCross* updates
if t >= 0.0 and fixCross.status == NOT_STARTED:
# keep track of start time/frame for later
fixCross.tStart = t
fixCross.frameNStart = frameN # exact frame index
fixCross.setAutoDraw(True)
frameRemains = 0.0 + params['warmUpTime'] - win.monitorFramePeriod * 0.75 # most of one frame period left
if fixCross.status == STARTED and t >= frameRemains:
fixCross.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in fixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "fixation"-------
for thisComponent in fixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "Movie"-------
# update component parameters for each repeat
# keep track of which components have finished
MovieComponents = movie + [ImiText]
for thisComponent in MovieComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# ------Start Movie Loop-------
for i in range(len(movie)):
# initialize vars
t = 0
MovieClock.reset() # clock
frameN = -1
continueRoutine = True
# Update timer
if i<(len(movie)-1):
routineTimer.add(movieDur[i]+params['imiDur'])
else:
routineTimer.add(movieDur[i])
# Make sure repeated-use ImiText object is listed as "not started"
ImiText.status = NOT_STARTED
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='MOVI', timestamp=None, label="StartMovie%d"%i, description="Started Movie %d"%i, pad=False)
# === END EEG === #
# -------Start Routine "Movie"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = MovieClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *movie* updates
if t >= 0.0 and movie[i].status == NOT_STARTED:
# keep track of start time/frame for later
movie[i].tStart = t
movie[i].frameNStart = frameN # exact frame index
movie[i].setAutoDraw(True)
frameRemains = 0.0 + movieDur[i]- win.monitorFramePeriod * 0.75 # most of one frame period left
if movie[i].status == STARTED and t >= frameRemains:
movie[i].setAutoDraw(False)
# *ImiText* updates
if t >= movieDur[i] and ImiText.status == NOT_STARTED and i<(len(movie)-1):
# keep track of start time/frame for later
ImiText.tStart = t
ImiText.frameNStart = frameN # exact frame index
ImiText.setAutoDraw(True)
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='IMI', timestamp=None, label="Inter-Movie Interval", description="Inter-Movie Interval", pad=False)
# === END EEG === #
frameRemains = movieDur[i] + params['imiDur']- win.monitorFramePeriod * 0.75 # most of one frame period left
if ImiText.status == STARTED and t >= frameRemains:
ImiText.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in MovieComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Movie"-------
for thisComponent in MovieComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "fixation"-------
t = 0
fixationClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(params['coolDownTime'])
# update component parameters for each repeat
# keep track of which components have finished
fixationComponents = [fixCross]
for thisComponent in fixationComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='FIX', timestamp=None, label="Fixation", description="Fixation Cross", pad=False)
# === END EEG === #
# -------Start Routine "fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = fixationClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixCross* updates
if t >= 0.0 and fixCross.status == NOT_STARTED:
# keep track of start time/frame for later
fixCross.tStart = t
fixCross.frameNStart = frameN # exact frame index
fixCross.setAutoDraw(True)
frameRemains = 0.0 + params['coolDownTime'] - win.monitorFramePeriod * 0.75 # most of one frame period left
if fixCross.status == STARTED and t >= frameRemains:
fixCross.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in fixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "fixation"-------
for thisComponent in fixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "WaitForEnd"-------
t = 0
WaitForEndClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
EndKey = event.BuilderKeyResponse()
# keep track of which components have finished
WaitForEndComponents = [WaitForEndText, EndKey]
for thisComponent in WaitForEndComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='END', timestamp=None, label="WaitForEnd", description="Waiting for End of Scan", pad=False)
# === END EEG === #
# -------Start Routine "WaitForEnd"-------
while continueRoutine:
# get current time
t = WaitForEndClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *WaitForEndText* updates
if t >= 0.0 and WaitForEndText.status == NOT_STARTED:
# keep track of start time/frame for later
WaitForEndText.tStart = t
WaitForEndText.frameNStart = frameN # exact frame index
WaitForEndText.setAutoDraw(True)
# *EndKey* updates
if t >= 0.0 and EndKey.status == NOT_STARTED:
# keep track of start time/frame for later
EndKey.tStart = t
EndKey.frameNStart = frameN # exact frame index
EndKey.status = STARTED
# keyboard checking is just starting
win.callOnFlip(EndKey.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
if EndKey.status == STARTED:
theseKeys = event.getKeys(keyList=['q', 'escape'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
EndKey.keys = theseKeys[-1] # just the last key pressed
EndKey.rt = EndKey.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in WaitForEndComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "WaitForEnd"-------
for thisComponent in WaitForEndComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if EndKey.keys in ['', [], None]: # No response was made
EndKey.keys=None
thisExp.addData('EndKey.keys',EndKey.keys)
if EndKey.keys != None: # we had a response
thisExp.addData('EndKey.rt', EndKey.rt)
thisExp.nextEntry()
# the Routine "WaitForEnd" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# === EEG === #
# # This re-aligns the clocks between the stim computer and the NetStation computer.
# # Best to put at the start of each trial for maximal timing accuracy.
ns.sync()
# Send Message to EEG
win.callOnFlip(ns.send_event, key='DONE', timestamp=None, label="ExperimentDone", description="End of Scan", pad=False)
# === End Session
# # This method is misleading, as it merely pauses the recording in NetStation. Equivalent to the pause button.
# # It is not actually stopping the recording session. That is done by the 'EndSession()' method.
ns.StopRecording()
# # I don't typically use this, as it is closes the current "Session" in NetStation.
# # I find it easier to just pause the recording using "StopRecording()" and then
# # get ending impedance measurements before manually closing NetStation.
ns.EndSession()
# # This line ends the connection via the ns object, and should then be destroying the object itself.
# # It is good practice to use so as not to waste memory or leave TCP/IP links open, which could lead to being
# # unable to reconnect without restarting the computer running the experiment.
if params['isEegConnected']:
ns.disconnect()
# === END EEG === #
# ----------Finishing Experiment----------
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| 2.703125 | 3 |
deep_rl/actor_critic/__init__.py | jkulhanek/deep-rl-pytorch | 7 | 12770455 | <gh_stars>1-10
from .agent import ActorCriticAgent # noqa: F401
from .ppo import PPO # noqa: F401
from .a2c import A2C # noqa: F401
from .a2c import A2CDynamicBatch # noqa: F401
from .acktr import ACKTR # noqa: F401
from .a3c import A3C # noqa: F401
from .unreal import Unreal, UnrealAgent # noqa: F401
| 1.09375 | 1 |
abc/abc189/abc189c-2.py | c-yan/atcoder | 1 | 12770456 | <gh_stars>1-10
from sys import setrecursionlimit
setrecursionlimit(10 ** 6)
N, *A = map(int, open(0).read().split())
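# The function below maximizes (segment length) * (segment minimum) by divide
# and conquer on the minimum: the best value that uses the whole segment is
# min(a) * len(a), and any better value must avoid some position holding the
# minimum, so we recurse on the maximal segments between occurrences of it.
# Illustrative check (assumed, not in the original):
#   f([1, 2, 3]) = max(1*3, f([2, 3])) = max(3, 4) = 4.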
def f(a):
    n = len(a)
    m = min(a)
    result = m * n
    i = 0
    for j in range(n):
        if a[j] != m:
            continue
        if i < j:
            result = max(result, f(a[i:j]))
        i = j + 1
    if i < n:
        result = max(result, f(a[i:n]))
    return result


print(f(A))
| 2.484375 | 2 |
cli/list_train_en.py | ftnext/chatbot-lecture-2021 | 1 | 12770457 | <gh_stars>1-10
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
conversation = [
"Hello.",
"Hi there!",
"How are you doing?",
"I'm doing great.",
"That is good to hear.",
"Thank you.",
"You're welcome.",
]
if __name__ == "__main__":
    chatbot = ChatBot("Training with list bot", database_uri=None)
    trainer = ListTrainer(chatbot)
    trainer.train(conversation)

    while True:
        try:
            user_input = input("You: ")
            response = chatbot.get_response(user_input)
            print(f"Bot: {response}")
        except (KeyboardInterrupt, EOFError, SystemExit):
            break
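# Illustrative session (assumed output, not from the original file): with the
# list above, ChatterBot learns adjacent statement/response pairs, so a run
# might look like
#   You: Hello.
#   Bot: Hi there!
#   You: How are you doing?
#   Bot: I'm doing great.
# Press Ctrl-C (or Ctrl-D) to exit the loop.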
| 3.046875 | 3 |
samples/python_durable_bindings/DurableFanoutOrchestrationTrigger/__init__.py | msarm/azure-functions-durable-python | 9 | 12770458 | import logging
import azure.functions as func
import azure.durable_functions as df
def generator_function(context):
    tasks = []
    for i in range(30):
        current_task = context.df.callActivity("DurableActivity", str(i))
        tasks.append(current_task)
    results = yield context.df.task_all(tasks)
    logging.warn(f"!!! fanout results {results}")
    return results


def main(context: str):
    logging.warn("Durable Orchestration Trigger: " + context)
    orchestrate = df.Orchestrator.create(generator_function)
    logging.warn("!!!type(orchestrate) " + str(type(orchestrate)))
    result = orchestrate(context)
    logging.warn("!!!serialized json : " + result)
    logging.warn("!!!type(result) " + str(type(result)))
    return result
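# Note (illustrative commentary, not from the original sample): this is the
# classic fan-out/fan-in pattern -- 30 activity calls are scheduled first, and
# `yield context.df.task_all(tasks)` then suspends the orchestrator until every
# activity has completed, returning their results as a list.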
| 2.5625 | 3 |
uqcsbot/scripts/parking.py | dhood/uqcsbot | 38 | 12770459 | from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import loading_status
from typing import Tuple
import requests
from bs4 import BeautifulSoup as Soup
def get_pf_parking_data() -> Tuple[int, str]:
"""
Returns the HTTP status code and HTML text of the UQ P&F parking page as a tuple.
"""
page = requests.get("https://pg.pf.uq.edu.au/")
return (page.status_code, page.text)
@bot.on_command("parking")
@loading_status
def handle_parking(command: Command) -> None:
"""
`!parking [all]` - Displays how many car parks are available at UQ St. Lucia
By default, only displays casual parking availability
"""
if command.has_arg() and command.arg.lower() == "all":
permit = True
else:
permit = False
# read parking data
code, data = get_pf_parking_data()
if code != 200:
bot.post_message(command.channel_id, "Could Not Retrieve Parking Data")
return
response = ["*Available Parks at UQ St. Lucia*"]
names = {"P1": "P1 - Warehouse (14P Daily)", "P2": "P2 - Space Bank (14P Daily)",
"P3": "P3 - Multi-Level West (Staff)", "P4": "P4 - Multi-Level East (Staff)",
"P6": "P6 - Hartley Teakle (14P Hourly)", "P7": "P7 - DustBowl (14P Daily)",
"P7 UC": "P7 - Keith Street (14P Daily Capped)",
"P8 L1": "P8 - Athletics Basement (14P Daily)",
"P8 L2": "P8 - Athletics Roof (14P Daily)", "P9": "P9 - Boatshed (14P Daily)",
"P10": "P10 - UQ Centre & Playing Fields (14P Daily/14P Daily Capped)",
"P11 L1": "P11 - Conifer Knoll Lower (Staff)",
"P11 L2": "P11 - Conifer Knoll Upper (Staff)",
"P11 L3": "P11 - Conifer Knoll Roof (14P Daily Restricted)"}
def category(fill):
if fill.upper() == "FULL":
return "No"
if fill.upper() == "NEARLY FULL":
return "Few"
return fill
# find parks
table = Soup(data, "html.parser").find("table", attrs={"id": "parkingAvailability"})
rows = table.find_all("tr")[1:]
# split and join for single space whitespace
areas = [[" ".join(i.get_text().split()) for i in j.find_all("td")] for j in rows]
for area in areas:
if area[2]:
response.append(f"{category(area[2])} Carparks Available in {names[area[0]]}")
elif permit and area[1]:
response.append(f"{category(area[1])} Carparks Available in {names[area[0]]}")
bot.post_message(command.channel_id, "\n".join(response))
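# Illustrative mapping (assumed example, not in the original): the scraped cell
# text is normalised by category() before being posted, e.g.
#   "FULL"        -> "No Carparks Available in ..."
#   "Nearly Full" -> "Few Carparks Available in ..."
#   "73"          -> "73 Carparks Available in ..."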
| 2.859375 | 3 |
example_vocmax_calculation.py | toddkarin/vocmax | 5 | 12770460 | <reponame>toddkarin/vocmax<gh_stars>1-10
"""
This script shows an example calculation for calculating the maximum
string length allowed in a particular location.
The method proceeds in the following steps
- Choose module parameters
- Choose racking method
- Set maximum allowable string voltage.
- Import weather data
- Run the calculation
- Plot.
"""
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
import vocmax
import time
# ------------------------------------------------------------------------------
# Choose Module Parameters
# ------------------------------------------------------------------------------
# Option 1. If the module is in the CEC database, then can retrieve parameters.
cec_modules = vocmax.cec_modules
module_of_choice = cec_modules.keys()[0]
cec_parameters = cec_modules[module_of_choice].to_dict()
# Create SAPM parameters from CEC parameters.
sapm_parameters = vocmax.cec_to_sapm(cec_parameters)
# Calculate extra module parameters for your information:
module = {**sapm_parameters, **cec_parameters}
# AOI loss model controls reflection from glass at non-normal incidence angles.
# Can be 'ashrae' or 'no_loss'
module['aoi_model'] = 'ashrae'
module['ashrae_iam_param'] = 0.05
module['is_bifacial'] = False
"""
# Option 2. Or can build a dictionary of parameters manually. Note that in order
# to calculate MPP, it is necessary to include the CEC parameters: alpha_sc,
# a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s, and Adjust.
module = {
# Number of cells in series in each module.
'cells_in_series': 60,
# Open circuit voltage at reference conditions, in Volts.
'Voco': 37.2,
# Temperature coefficient of Voc, in Volt/C
'Bvoco': -0.127,
# Short circuit current, in Amp
'Isco': 8.09,
# Short circuit current temperature coefficient, in Amp/C
'alpha_sc': 0.0036,
# Module efficiency, unitless
'efficiency': 0.15,
# Diode Ideality Factor, unitless
'n_diode': 1.2,
# Fraction of diffuse irradiance used by the module.
'FD': 1,
# Whether the module is bifacial
'is_bifacial': True,
# Ratio of backside to frontside efficiency for bifacial modules. Only used if 'is_bifacial'==True
'bifaciality_factor': 0.7,
# AOI loss model
'aoi_model':'ashrae',
# AOI loss model parameter.
'ashrae_iam_param': 0.05
}
"""
is_cec_module = 'a_ref' in module
print('\n** Module parameters **')
print(pd.Series(module))
# ------------------------------------------------------------------------------
# Choose Racking Method
# ------------------------------------------------------------------------------
# Racking parameters for single axis tracking (fixed tilt parameters are below).
racking_parameters = {
# Racking type, can be 'single_axis' or 'fixed_tilt'
'racking_type': 'single_axis',
# The tilt of the axis of rotation with respect to horizontal, in degrees
'axis_tilt': 0,
# Compass direction along which the axis of rotation lies. Measured in
# degrees East of North
'axis_azimuth': 0,
# Maximum rotation angle of the one-axis tracker from its horizontal
# position, in degrees.
'max_angle': 90,
# Controls whether the tracker has the capability to “backtrack” to avoid
# row-to-row shading. False denotes no backtrack capability. True denotes
# backtrack capability.
'backtrack': True,
# A value denoting the ground coverage ratio of a tracker system which
# utilizes backtracking; i.e. the ratio between the PV array surface area
# to total ground area.
'gcr': 2.0 / 7.0,
# Bifacial model can be 'proportional' or 'pvfactors'
'bifacial_model': 'proportional',
# Proportionality factor determining the backside irradiance as a fraction
# of the frontside irradiance. Only used if 'bifacial_model' is
# 'proportional'.
'backside_irradiance_fraction': 0.2,
# Ground albedo
'albedo': 0.25
}
# Example racking parameters for fixed tilt (only use one racking_parameters,
# comment the other one out!)
"""
racking_parameters = {
'racking_type': 'fixed_tilt',
# Tilt of modules from horizontal.
'surface_tilt': 30,
# 180 degrees orients the modules towards the South.
'surface_azimuth': 180,
# Ground albedo
'albedo':0.25
}
"""
# Additionally, here is an example set of racking parameters for full bifacial
# modeling. Make sure 'is_bifacial' is True in the module parameters. Full
# bifacial modeling takes about 10 minutes depending on the exact configuration.
# See documentation for pvfactors for additional description of parameters.
"""
racking_parameters = {
# Racking type, can be 'single_axis' or 'fixed_tilt'
'racking_type': 'single_axis',
# The tilt of the axis of rotation with respect to horizontal, in degrees
'axis_tilt': 0,
# Compass direction along which the axis of rotation lies. Measured in
# degrees East of North
'axis_azimuth': 0,
# Maximum rotation angle of the one-axis tracker from its horizontal
# position, in degrees.
'max_angle': 90,
# Controls whether the tracker has the capability to “backtrack” to avoid
# row-to-row shading. False denotes no backtrack capability. True denotes
# backtrack capability.
'backtrack': True,
# A value denoting the ground coverage ratio of a tracker system which
# utilizes backtracking; i.e. the ratio between the PV array surface area
# to total ground area.
'gcr': 2.0 / 7.0,
# Ground albedo
'albedo':0.25,
# bifacial model can be 'pfvactors' or 'simple'
'bifacial_model': 'pvfactors',
# number of pv rows
'n_pvrows': 3,
# Index of row to use backside irradiance for
'index_observed_pvrow': 1,
# height of pvrows (measured at center / torque tube)
'pvrow_height': 1,
# width of pvrows
'pvrow_width': 1,
# azimuth angle of rotation axis
'axis_azimuth': 0.,
# pv row front surface reflectivity
'rho_front_pvrow': 0.01,
# pv row back surface reflectivity
'rho_back_pvrow': 0.03,
# Horizon band angle.
'horizon_band_angle': 15,
}
"""
# Sandia thermal model can be a string for using default coefficients or the
# parameters can be set manually. Parameters are described in [1].
#
# [1] <NAME>, <NAME>, <NAME>. Photovoltaic Array Performance
# Model. SAND2004-3535 (2004).
thermal_model = {
'named_model': 'open_rack_glass_polymer',
# Temperature of open circuit modules is higher, specify whether to include
# this effect.
'open_circuit_rise': True
}
# Or can set thermal model coefficients manually:
"""
thermal_model = {
'named_model': 'explicit',
'a':-3.56,
'b':-0.075,
'deltaT':3,
'open_circuit_rise':True
}
"""
print('\n** Racking parameters **')
print(pd.Series(racking_parameters))
# ------------------------------------------------------------------------------
# Max string length
# ------------------------------------------------------------------------------
# Max allowable string voltage, for determining string length. Typically this
# number is determined by the inverter.
string_design_voltage = 1500
# ------------------------------------------------------------------------------
# Import weather data
# ------------------------------------------------------------------------------
# Get the weather data.
print("\nImporting weather data...")
# Define the lat, lon of the location (this location is preloaded and does not
# require an API key)
lat, lon = 37.876, -122.247
# Get an NSRDB API key for any point other than the preloaded one (the
# placeholder key below will not work; request your own, which has a similar format).
api_key = '<KEY>'
# Get weather data (takes a few minutes, result is cached for quick second calls).
weather, info = vocmax.get_weather_data(lat,lon,api_key=api_key)
# Option 2: Get weather data from a series of NSRDB csv files.
"""
weather_data_directory = 'vocmax/NSRDB_sample'
weather, info = vocmax.import_nsrdb_sequence(weather_data_directory)
"""
# Make sure that the weather data has the correct fields for pvlib.
weather = weather.rename(columns={'DNI':'dni','DHI':'dhi','GHI':'ghi',
'Temperature':'temp_air',
'Wind Speed':'wind_speed'})
# ------------------------------------------------------------------------------
# Simulate system
# ------------------------------------------------------------------------------
# Run the calculation.
print('Running Simulation...')
t0 = time.time()
df = vocmax.simulate_system(weather,
info,
module,
racking_parameters,
thermal_model)
print('Simulation time: {:1.2f}'.format(time.time()-t0))
# Calculate max power voltage, only possible if using CEC database for module parameters.
if is_cec_module:
_, df['v_mp'], _ = vocmax.sapm_mpp(df['effective_irradiance'],
df['temp_cell'],
module)
# ------------------------------------------------------------------------------
# Calculate String Size
# ------------------------------------------------------------------------------
# IMPORTANT: one must add the ASHRAE design conditions spreadsheet to this
# directory in order to automatically calculate traditional values using ASHRAE design conditions.
ashrae_available = vocmax.ashrae_is_design_conditions_available()
if not ashrae_available:
print("""** IMPORTANT ** add the ASHRAE design conditions spreadsheet to this
directory in order to get ASHRAE design.""")
# Look up weather data uncertainty safety factor at the point of interest.
temperature_error = vocmax.get_nsrdb_temperature_error(info['Latitude'],info['Longitude'])
# Calculate weather data safety factor using module Voc temperature coefficient
Beta_Voco_fraction = np.abs(module['Bvoco'])/module['Voco']
weather_data_safety_factor = np.max([0, temperature_error*Beta_Voco_fraction])
# Calculate propensity for extreme temperature fluctuations.
extreme_cold_delta_T = vocmax.calculate_mean_yearly_min_temp(df.index,df['temp_air']) - df['temp_air'].min()
# Compute safety factor for extreme cold temperatures
extreme_cold_safety_factor = extreme_cold_delta_T*Beta_Voco_fraction
# Add up different contributions to obtain an overall safety factor
safety_factor = weather_data_safety_factor + 0.016
print('Total Safety Factor: {:1.1%}'.format(safety_factor))
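# Worked example with assumed numbers (for illustration only): if the NSRDB
# temperature error at this site were 2.0 C and the module had
# Bvoco = -0.127 V/C with Voco = 37.2 V, then
#   Beta_Voco_fraction         = 0.127 / 37.2   ~ 0.0034 per C
#   weather_data_safety_factor = 2.0 * 0.0034   ~ 0.7%
#   safety_factor              = 0.7% + 1.6%    ~ 2.3%
# i.e. a combined safety margin of roughly 2.3% is applied when sizing the string.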
# Calculate string length.
voc_summary = vocmax.make_voc_summary(df, info, module,
string_design_voltage=string_design_voltage,
safety_factor=safety_factor)
print('Simulation complete.')
# Make a csv file for saving simulation parameters
summary_text = vocmax.make_simulation_summary(df, info,
module,
racking_parameters,
thermal_model,
string_design_voltage,
safety_factor)
# Save the summary csv to file.
summary_file = 'out.csv'
with open(summary_file,'w') as f:
f.write(summary_text)
print('\n** Voc Results **')
print(voc_summary[[ 'max_module_voltage', 'safety_factor','string_length',
'Cell Temperature', 'POA Irradiance']].to_string())
# Calculate some IV curves if we are using CEC database.
if is_cec_module:
irradiance_list = [200,400,600,800,1000]
iv_curve = []
for e in irradiance_list:
ret = vocmax.calculate_iv_curve(e, 25, cec_parameters)
ret['effective_irradiance'] = e
iv_curve.append(ret)
# ------------------------------------------------------------------------------
# Plot results
# ------------------------------------------------------------------------------
pd.plotting.register_matplotlib_converters()
fig_width = 6
fig_height = 4
max_pos = np.argmax(np.array(df['v_oc']))
plot_width = 300
# Plot Voc vs. time
plot_key = ['v_oc','ghi','effective_irradiance','temp_air']
plot_ylabel = ['Voc (V)', 'GHI (W/m2)', 'POA Irradiance (W/m2)', 'Air Temperature (C)']
for j in range(len(plot_key)):
plt.figure(j,figsize=(fig_width,fig_height))
plt.clf()
plt.plot(df.index[max_pos-plot_width:max_pos+plot_width],
df[plot_key[j]][max_pos-plot_width:max_pos+plot_width])
ylims = np.array(plt.ylim())
plt.plot([ df.index[max_pos],df.index[max_pos]] , ylims)
plt.ylabel(plot_ylabel[j])
plt.show()
# Plot Voc histogram
plt.figure(11,figsize=(fig_width,fig_height))
plt.clf()
voc_hist_x, voc_hist_y = vocmax.make_voc_histogram(df,info)
plt.plot(voc_hist_x, voc_hist_y)
plt.xlabel('Voc (Volts)')
plt.ylabel('hrs/year')
for key in voc_summary.index:
if ('ASHRAE' in key and ashrae_available) or ('ASHRAE' not in key):
plt.plot(voc_summary['max_module_voltage'][key] * np.array([1,1]), [0,10],
label=key)
plt.show()
plt.legend()
# Plot IV curve
if is_cec_module:
plt.figure(12)
plt.clf()
for j in range(len(iv_curve)):
plt.plot(iv_curve[j]['v'], iv_curve[j]['i'])
plt.xlabel('Voltage (V)')
plt.ylabel('Current (A)')
plt.grid()
# Scatter plot of Temperature/Irradiance where Voc is highest.
plt.figure(13)
plt.clf()
cax = df['v_oc']>np.percentile(df['v_oc'],99.9)
plt.plot(df.loc[:,'effective_irradiance'], df.loc[:,'temp_cell'],'.',
label='all data')
plt.plot(df.loc[cax,'effective_irradiance'], df.loc[cax,'temp_cell'],'.',
label='Voc>P99.9')
poa_smooth = np.linspace(1,1100,200)
T_smooth = vocmax.sapm_temperature_to_get_voc(poa_smooth,
np.percentile(df['v_oc'],99.9),
Voco=module['Voco'],
Bvoco=module['Bvoco'],
diode_factor=module['n_diode'],
cells_in_series=module[
'cells_in_series'])
plt.plot(poa_smooth, T_smooth)
plt.xlabel('POA Irradiance (W/m^2)')
plt.ylabel('Cell Temperature (C)')
plt.legend(loc='upper left')
# plt.xlim([0,1000])
plt.show()
| 2.625 | 3 |
src/config/common/vnc_type_conv.py | biswajit-mandal/contrail-controller | 5 | 12770461 | <filename>src/config/common/vnc_type_conv.py
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import re
import types
from types import *
import vnc_api_service
# dictionary representation to XML representation
def dict_to_elem(wrap_tag, obj_dict):
xml_str = "<%s>" % (wrap_tag)
for fname in obj_dict:
if isinstance(obj_dict[fname], dict):
# call recursively with dict
xml_str += dict_to_elem(fname, obj_dict[fname])
else:
xml_str += "<%s>%s</%s>" % (fname, obj_dict[fname], fname)
xml_str += "</%s>" % (wrap_tag)
return xml_str
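# Illustrative example (not in the original module): for a flat dict,
#   dict_to_elem("subnet", {"prefix": "10.0.0.0", "len": 24})
# would produce (field order follows dict iteration order):
#   "<subnet><prefix>10.0.0.0</prefix><len>24</len></subnet>"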
# XML representation to dictionary representation
def elem_to_dict(obj_elem):
obj_dict = {}
for field in obj_elem.getchildren():
fname = re.sub("{.*}", "", field.tag)
if not field.text and not field.getchildren():
return {}
if not field.text:
val = elem_to_dict(field)
elif (field.text.isdigit()):
val = int(field.text)
else:
val = unicode(field.text)
obj_dict[fname] = val
return obj_dict
# convert object to dictionary representation
def obj_to_dict(obj):
obj_dict = {}
for fname in obj.__dict__:
if obj.__dict__[fname] is None:
continue
val = obj.__dict__[fname]
if isinstance(val, str):
# unicode it if field is string
val = unicode(val)
elif isinstance(obj.__dict__[fname], InstanceType):
# call recursively if field is object
val = obj_to_dict(val)
obj_dict[unicode(fname)] = val
return obj_dict
def subnet_elem_to_dict(sn_elem):
# TODO replace with autogenerated code
sn_d = {}
for field in sn_elem.getchildren():
fname = re.sub("{.*}", "", field.tag)
if (field.text.isdigit()):
sn_d[fname] = int(field.text)
else:
sn_d[fname] = unicode(field.text)
return sn_d
def subnet_obj_to_dict(sn_obj):
return obj_to_dict(sn_obj)
def subnet_dict_to_obj(sn_dict):
# construct object from dict contents
# will fail if server's obj defn has more fields than client's defn :(
#kwargs = {}
# for fname in sn_dict:
# kwargs[fname] = sn_dict[fname]
#sn_obj = vnc_api_service.subnet_s(**kwargs)
sn_obj = vnc_api_service.subnet_s()
sn_obj.__dict__.update(sn_dict)
return sn_obj
def vn_dict_to_obj(vn_dict):
# construct object from dict contents
# will fail if server's obj defn has more fields than client's defn :(
#kwargs = {}
# for fname in vn_dict:
# kwargs[fname] = vn_dict[fname]
#vn_obj = vnc_api_service.vn_s(**kwargs)
vn_obj = vnc_api_service.vn_s()
vn_obj.__dict__.update(vn_dict)
if vn_obj.vn_subnets:
# construct list of sn objs
vn_obj.vn_subnets = []
for sn_dict in vn_dict['vn_subnets']:
vn_obj.vn_subnets.append(subnet_dict_to_obj(sn_dict))
return vn_obj
def policy_dict_to_obj(policy_dict):
# construct object from dict contents
# will fail if server's obj defn has more fields than client's defn :(
#kwargs = {}
# for fname in vn_dict:
# kwargs[fname] = vn_dict[fname]
#vn_obj = vnc_api_service.vn_s(**kwargs)
policy_obj = vnc_api_service.policy_s()
policy_obj.__dict__.update(policy_dict)
return policy_obj
def sg_rule_elem_to_dict(sg_rule_elem):
return elem_to_dict(sg_rule_elem)
def sg_rule_dict_to_elem(sg_rule_d):
return dict_to_elem("sg_rule", sg_rule_d)
def sg_rule_obj_to_dict(sg_rule_obj):
return obj_to_dict(sg_rule_obj)
def policy_entry_elem_to_dict(policy_entry_elem):
return elem_to_dict(policy_entry_elem)
def policy_entry_dict_to_elem(policy_entry_d):
return dict_to_elem("policy_entry", policy_entry_d)
def policy_entry_obj_to_dict(policy_entry_obj):
return obj_to_dict(policy_entry_obj)
| 2.390625 | 2 |
rpy2json/project/medias.py | jean-plank/rpy2html | 2 | 12770462 | <filename>rpy2json/project/medias.py
from os import path
def load(GAME_BASE_DIR, res, media_type, key, media):
if media != None:
full_path = path.join(GAME_BASE_DIR, media)
if path.isfile(full_path):
res[media_type][key] = full_path
return
else:
var_name = full_path
else:
var_name = key
print('[WARNING] couldn\'t import %s %s' % (media_type[:-1], var_name))
| 2.53125 | 3 |
users/management/commands/compute_first_sighting.py | maverick-labs-pune/wikirumours | 0 | 12770463 | import csv
import datetime
import os
from django.contrib.gis.geos import Point
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from geopy import Nominatim
from countries.models import Country
from report.models import Report, Sighting, ReportedViaChoice
from users.models import User
class Command(BaseCommand):
help = "compute first sighting"
def handle(self, *args, **kwargs):
compute_first_sighting()
def compute_first_sighting():
with transaction.atomic():
reports = Report.objects.all()
# print("Total reports : " + str(len(reports)))
for index, report in enumerate(reports):
# print("Report " + str(index + 1) + "/" + str(len(reports)))
sighting = Sighting.objects.filter(report=report).order_by("heard_on").first()
if sighting:
Sighting.objects.filter(report=report).update(is_first_sighting=False)
sighting.is_first_sighting = True
sighting.save()
| 2.125 | 2 |
pyjournal.py | dwillcox/pyjournal2 | 0 | 12770464 | <gh_stars>0
#!/usr/bin/env python3
"""
a simple commandline-driven scientific journal in LaTeX managed by git
"""
import pyjournal2.main_util as main_util
if __name__ == "__main__":
tdefs = main_util.read_config()
targs = main_util.get_args(tdefs)
main_util.main(targs, tdefs)
| 1.617188 | 2 |
modules/vcd_vapp_vm_nic.py | MichalTaratuta/ansible-module-vcloud-director | 0 | 12770465 | <gh_stars>0
# Copyright © 2018 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
# !/usr/bin/python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vcd_vapp_vm_nic
short_description: Ansible Module to manage (create/delete/update) NICs in vApp VMs in vCloud Director.
version_added: "2.4"
description:
- "Ansible Module to manage (create/delete/update) NICs in vApp VMs."
options:
user:
description:
- vCloud Director user name
required: false
password:
description:
- vCloud Director user password
required: false
host:
description:
- vCloud Director host address
required: false
org:
description:
- Organization name on vCloud Director to access
required: false
api_version:
description:
- Pyvcloud API version
required: false
verify_ssl_certs:
description:
- whether to use secure connection to vCloud Director host
required: false
nic_id:
description:
- NIC ID
required: false
nic_ids:
description:
- List of NIC IDs
required: false
network:
description:
- VApp network name
required: false
vm_name:
description:
- VM name
required: true
vapp:
description:
- vApp name
required: true
vdc:
description:
- VDC name
required: true
ip_allocation_mode:
description:
- IP allocation mode (DHCP, POOL or MANUAL)
required: false
ip_address:
description:
- NIC IP address (required for MANUAL IP allocation mode)
required: false
state:
description:
- state of nic (present/absent).
- One from state or operation has to be provided.
required: true
operation:
description:
- operation on nic (update/read).
- One from state or operation has to be provided.
required: false
author:
- <EMAIL>
'''
EXAMPLES = '''
- name: Test with a message
vcd_vapp_vm_nic:
user: terraform
password: <PASSWORD>
host: csa.sandbox.org
org: Terraform
api_version: 30
verify_ssl_certs: False
vm: "vm1"
    vapp: "vapp1"
    vdc: "vdc1"
    nic_id: "2000"
    state: "present"
'''
RETURN = '''
msg: success/failure message corresponding to nic state
changed: true if resource has been changed else false
'''
from copy import deepcopy
from lxml import etree
from pyvcloud.vcd.vm import VM
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.client import E
from pyvcloud.vcd.vapp import VApp
from collections import defaultdict
from pyvcloud.vcd.client import E_RASD
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import EntityType
from ansible.module_utils.vcd import VcdAnsibleModule
from pyvcloud.vcd.exceptions import EntityNotFoundException, OperationNotSupportedException
VAPP_VM_NIC_STATES = ['present', 'absent']
VAPP_VM_NIC_OPERATIONS = ['update', 'read']
def vapp_vm_nic_argument_spec():
return dict(
vm_name=dict(type='str', required=True),
vapp=dict(type='str', required=True),
vdc=dict(type='str', required=True),
nic_id=dict(type='int', required=False),
nic_ids=dict(type='list', required=False),
ip_allocation_mode=dict(type='str', required=False),
ip_address=dict(type='str', required=False,),
network=dict(type='str', required=False),
state=dict(choices=VAPP_VM_NIC_STATES, required=False),
operation=dict(choices=VAPP_VM_NIC_OPERATIONS, required=False),
)
class VappVMNIC(VcdAnsibleModule):
def __init__(self, **kwargs):
super(VappVMNIC, self).__init__(**kwargs)
vapp_resource = self.get_resource()
self.vapp = VApp(self.client, resource=vapp_resource)
def manage_states(self):
state = self.params.get('state')
if state == "present":
return self.add_nic()
if state == "absent":
return self.delete_nic()
def manage_operations(self):
operation = self.params.get('operation')
if operation == "update":
return self.update_nic()
if operation == "read":
return self.read_nics()
def get_resource(self):
vapp = self.params.get('vapp')
vdc = self.params.get('vdc')
org_resource = Org(self.client, resource=self.client.get_org())
vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))
vapp_resource_href = vdc_resource.get_resource_href(name=vapp, entity_type=EntityType.VAPP)
vapp_resource = self.client.get_resource(vapp_resource_href)
return vapp_resource
def get_vm(self):
vapp_vm_resource = self.vapp.get_vm(self.params.get('vm_name'))
return VM(self.client, resource=vapp_vm_resource)
def get_vm_nics(self):
vm = self.get_vm()
return self.client.get_resource(vm.resource.get('href') + '/networkConnectionSection')
def add_nic(self):
'''
Error - More than 10 Nics are not permissible in vCD
'''
vm = self.get_vm()
network = self.params.get('network')
ip_address = self.params.get('ip_address')
ip_allocation_mode = self.params.get('ip_allocation_mode')
uri = vm.resource.get('href') + '/networkConnectionSection'
response = defaultdict(dict)
response['changed'] = False
new_nic_id = None
nics = self.get_vm_nics()
nics_indexes = [int(nic.NetworkConnectionIndex) for nic in nics.NetworkConnection]
nics_indexes.sort()
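        # walk the sorted indexes to find the first hole; e.g. for existing
        # indexes [0, 1, 3] the loop stops at position 2 and reuses it, while
        # a gapless [0, 1, 2] falls through to last index + 1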
for index, nic_index in enumerate(nics_indexes):
new_nic_id = nic_index + 1
if index != nic_index:
new_nic_id = index
break
if ip_allocation_mode not in ('DHCP', 'POOL', 'MANUAL'):
raise Exception('IpAllocationMode should be one of DHCP/POOL/MANUAL')
if ip_allocation_mode in ('DHCP', 'POOL'):
nic = E.NetworkConnection(
E.NetworkConnectionIndex(new_nic_id),
E.IsConnected(True),
E.IpAddressAllocationMode(ip_allocation_mode),
network=network)
else:
if not ip_address:
raise Exception('IpAddress is missing.')
nic = E.NetworkConnection(
E.NetworkConnectionIndex(new_nic_id),
E.IpAddress(ip_address),
E.IsConnected(True),
E.IpAddressAllocationMode(ip_allocation_mode),
network=network)
nics.NetworkConnection.addnext(nic)
add_nic_task = self.client.put_resource(uri, nics, EntityType.NETWORK_CONNECTION_SECTION.value)
self.execute_task(add_nic_task)
response['msg'] = {
'nic_id': new_nic_id,
'ip_allocation_mode': ip_allocation_mode,
'ip_address': ip_address
}
response['changed'] = True
return response
def update_nic(self):
'''
Following update scenarios are covered
1. MANUAL mode to DHCP
2. Update IP address in MANUAL mode
'''
vm = self.get_vm()
nic_id = self.params.get('nic_id')
network = self.params.get('network')
ip_address = self.params.get('ip_address')
ip_allocation_mode = self.params.get('ip_allocation_mode')
uri = vm.resource.get('href') + '/networkConnectionSection'
response = defaultdict(dict)
response['changed'] = False
nics = self.get_vm_nics()
        nic_indexes = [nic.NetworkConnectionIndex for nic in nics.NetworkConnection]
        if nic_id not in nic_indexes:
            raise EntityNotFoundException('Can\'t find the specified VM nic')
        nic_to_update = nic_indexes.index(nic_id)
if network:
nics.NetworkConnection[nic_to_update].set('network', network)
response['changed'] = True
if ip_allocation_mode:
allocation_mode_element = E.IpAddressAllocationMode(ip_allocation_mode)
nics.NetworkConnection[nic_to_update].IpAddressAllocationMode = allocation_mode_element
response['changed'] = True
if ip_address:
nics.NetworkConnection[nic_to_update].IpAddress = E.IpAddress(ip_address)
response['changed'] = True
if response['changed']:
update_nic_task = self.client.put_resource(uri, nics, EntityType.NETWORK_CONNECTION_SECTION.value)
self.execute_task(update_nic_task)
response['msg'] = 'Vapp VM nic has been updated.'
return response
def read_nics(self):
response = defaultdict(dict)
response['changed'] = False
nics = self.get_vm_nics()
for nic in nics.NetworkConnection:
meta = defaultdict(dict)
nic_id = str(nic.NetworkConnectionIndex)
meta['MACAddress'] = str(nic.MACAddress)
meta['IsConnected'] = str(nic.IsConnected)
meta['NetworkAdapterType'] = str(nic.NetworkAdapterType)
meta['NetworkConnectionIndex'] = str(nic.NetworkConnectionIndex)
meta['IpAddressAllocationMode'] = str(nic.IpAddressAllocationMode)
if hasattr(nic, 'IpAddress'):
meta['IpAddress'] = str(nic.IpAddress)
response['msg'][nic_id] = meta
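        # response['msg'] ends up keyed by NIC index, shaped roughly like
        # (values here are illustrative): {'0': {'MACAddress': '00:50:56:aa:bb:cc',
        # 'IsConnected': 'true', 'IpAddressAllocationMode': 'POOL', ...}}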
return response
def delete_nic(self):
vm = self.get_vm()
nic_ids = self.params.get('nic_ids')
response = defaultdict(dict)
response['changed'] = False
uri = vm.resource.get('href') + '/networkConnectionSection'
nics = self.get_vm_nics()
        # iterate over a snapshot: removing children from the live lxml
        # collection while iterating it can skip adjacent siblings
        for nic in list(nics.NetworkConnection):
            if nic.NetworkConnectionIndex in nic_ids:
                nics.remove(nic)
                nic_ids.remove(nic.NetworkConnectionIndex)
if len(nic_ids) > 0:
nic_ids = [str(nic_id) for nic_id in nic_ids]
err_msg = 'Can\'t find the specified VM nic(s) {0}'.format(','.join(nic_ids))
raise EntityNotFoundException(err_msg)
remove_nic_task = self.client.put_resource(uri, nics, EntityType.NETWORK_CONNECTION_SECTION.value)
self.execute_task(remove_nic_task)
response['msg'] = 'VM nic(s) has been deleted.'
response['changed'] = True
return response
def main():
argument_spec = vapp_vm_nic_argument_spec()
response = dict(
msg=dict(type='str')
)
module = VappVMNIC(argument_spec=argument_spec, supports_check_mode=True)
try:
if module.params.get('state'):
response = module.manage_states()
elif module.params.get('operation'):
response = module.manage_operations()
else:
raise Exception('One of the state/operation should be provided.')
except Exception as error:
response['msg'] = error
module.fail_json(**response)
module.exit_json(**response)
if __name__ == '__main__':
main()
| 1.554688 | 2 |
cse521/hw1/p1_data/old/p1_sorted1.py | interesting-courses/UW_coursework | 2 | 12770466 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 23:54:49 2018
@author: tyler
"""
import numpy as np
#%%
def postprocess_cut(supernodes_original,supernodes_f,supernode_nonempty_Q,not_loop_Q):
'''
returns : partition of original vertices of $G$ and size of coresponding cut
'''
sn = supernodes_f[supernode_nonempty_Q]
size_E = sum(not_loop_Q)
if len(sn[0])<len(sn[1]):
sn = sn[0]
else:
sn = sn[1]
for v in list(sn):
if v >= len(supernodes):
sn.remove(v)
for sn_o in supernodes_original:
if v in sn_o:
sn.add(min(sn_o))
break
return list(sn),size_E
#%%
def karger(E,G,supernodes,supernode_nonempty_Q,not_loop_Q):
# start = time.clock()
size_E = np.shape(E)[0]
size_V = sum(supernode_nonempty_Q)
f = 0
s = 0
sn0 = {}
for j in range(size_V-2):
if j%500==0:
print('===================')
print('iteration:', j)
'''
print('find endpoints: ',f)
print('find loops : ', s)
f = 0
s = 0
print('sn0 size: ', len(sn0))
sn_count = sum(map(len,supernodes[np.where(supernode_nonempty_Q)[0]]))
print('numer of vert :', sn_count)
print('numer of sns :', sum(supernode_nonempty_Q))
'''
# pick random edge
#probably can't be faster than this unless we can compile
cs = np.cumsum(not_loop_Q)
# rand_idx = np.where(cs > np.random.randint(cs[-1]))[0][0]
rand_idx = np.searchsorted(cs, np.random.randint(cs[-1]))
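        # cumsum + searchsorted draws an index uniformly at random among the
        # surviving (non-loop) edges without materialising their index list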
e0,e1 = E[rand_idx]
#find edge endopoint vertices
# start = time.clock()
supernode_nonempty_idx = np.where(supernode_nonempty_Q)[0]
for i0 in supernode_nonempty_idx:
if e0 in supernodes[i0]:
break
for i1 in supernode_nonempty_idx[::-1]:
if e1 in supernodes[i1]:
break
# f += (time.clock() - start)
# merge vertex equivalence classes
sn0 = supernodes[i0]
sn1 = supernodes[i1]
# find loops
# search for edges with one end in sn0 and one in sn1
# start = time.clock()
for i in sn0:
Gi = G[i]
for j in sn1:
Gij = Gi[j]
if Gij != -1:
if not_loop_Q[Gij]:
not_loop_Q[Gij] = False
# s += time.clock() - start
        # merge sn1 into sn0 and mark sn1 as empty
supernodes[i0] = supernodes[i0] | supernodes[i1]
supernode_nonempty_Q[i1] = False
return supernodes,supernode_nonempty_Q,not_loop_Q
#%% load data
d = np.load('b0_pre.npz')
E=d['E']
G=d['G']
supernodes_=d['supernodes_']
supernode_nonempty_Q_=d['supernode_nonempty_Q_']
del(d)
#%%
supernodes,supernode_nonempty_Q = np.copy(supernodes_),np.copy(supernode_nonempty_Q_)
not_loop_Q = np.ones(len(E),dtype='bool')
supernodes,supernode_nonempty_Q,not_loop_Q = karger(E,G,supernodes,supernode_nonempty_Q,not_loop_Q)
#%%
postprocess_cut(supernodes_,supernodes,supernode_nonempty_Q,not_loop_Q)
#%%
len(E[np.any(E==8,axis=1)])
| 2.78125 | 3 |
main.py | apl-ocean-engineering/DeepLabv3.pytorch | 0 | 12770467 | import numpy as np
import torch
from scipy.io import loadmat
import cv2
import time
import deepLabv3.deeplab as deeplab
from deepLabv3.pascal import VOCSegmentation
from deepLabv3.cityscapes import Cityscapes
from deepLabv3.utils import AverageMeter, inter_and_union, load_model
from deepLabv3.detector import Detector
from deepLabv3.argLoader import ArgLoader
def main():
assert torch.cuda.is_available()
argloader = ArgLoader()
args = argloader.args
torch.backends.cudnn.benchmark = True
if args.dataset == 'pascal':
dataset = VOCSegmentation(
args.voc_path,
train=args.train, crop_size=args.crop_size)
elif args.dataset == 'cityscapes':
dataset = Cityscapes(
args.cityscape_path,
train=args.train, crop_size=args.crop_size)
else:
raise ValueError('Unknown dataset: {}'.format(args.dataset))
model, model_fname = load_model(args, dataset.CLASSES)
detector = Detector(model)
if args.train:
detector.train(dataset, model_fname, args)
else:
torch.cuda.set_device(args.gpu)
model = model.cuda()
model.eval()
checkpoint = torch.load(model_fname % args.epochs)
state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()
if 'tracked' not in k}
model.load_state_dict(state_dict)
cmap = loadmat('data/pascal_seg_colormap.mat')['colormap']
cmap = (cmap * 255).astype(np.uint8).flatten().tolist()
inter_meter = AverageMeter()
union_meter = AverageMeter()
for i in range(len(dataset)):
prev_time = time.time()
inputs, target, fname = dataset[i]
pred = detector.inference(inputs)
mask = target.numpy().astype(np.uint8)
inter, union = inter_and_union(pred, mask, len(dataset.CLASSES))
inter_meter.update(inter)
union_meter.update(union)
print("time elapsed", time.time() - prev_time)
iou = inter_meter.sum / (union_meter.sum + 1e-10)
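        # per-class IoU = accumulated intersection / accumulated union over
        # the whole dataset; 1e-10 guards against division by zero for
        # classes that never appear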
for i, val in enumerate(iou):
print('IoU {0}: {1:.2f}'.format(dataset.CLASSES[i], val * 100))
print('Mean IoU: {0:.2f}'.format(iou.mean() * 100))
if __name__ == "__main__":
main()
| 2.125 | 2 |
tests/test_worker.py | rpSebastian/AutoCFR | 1 | 12770468 | import time
from autocfr.worker import Worker, VecWorker, GroupVecWorker
import numpy as np
import ray
class DiverContainer(Worker):
@ray.remote
def run(task):
a = task["a"]
b = task["b"]
result = {
"worker_index": task["worker_index"],
"group_index": task["group_index"],
"a": a,
"b": b,
}
try:
time.sleep(a)
out = a / b
except Exception as e:
result["status"] = "fail"
result["error"] = e
result["info"] = str(e)
else:
result["status"] = "succ"
result["out"] = out
return result
class Diver(Worker):
def run(self, task):
try:
result = self.get_result_dict(task)
a = task["a"]
b = task["b"]
time.sleep(int(a))
out = a / 0
out = a / b
except Exception as e:
result["state"] = "fail"
result["error"] = e
result["info"] = str(e)
else:
result["state"] = "succ"
result["out"] = out
return result
def test_run():
diver = Diver(1)
result = diver.run(dict(a=1, b=0))
assert result["state"] == "fail"
def atest_parallel_run():
ray.init()
vec_worker = VecWorker(3, Diver)
for i in range(10):
a = np.random.randint(low=0, high=100)
b = np.random.randint(low=0, high=100)
vec_worker.add_task(dict(a=a, b=b))
for i in range(20):
time.sleep(0.01)
result = vec_worker.get_result()
print(vec_worker.get_info())
ray.shutdown()
def atest_parallel_run_sync():
ray.init()
vec_worker = VecWorker(2, Diver)
tasks = []
for i in range(10):
a = np.random.randint(low=0, high=100)
b = np.random.randint(low=0, high=100)
tasks.append(dict(a=a, b=b))
results = vec_worker.execute_tasks(tasks)
for task, result in zip(tasks, results):
print(task["a"], task["b"], task["a"] / task["b"], result["out"])
ray.shutdown()
def atest_group_run():
ray.init()
group_vec_worker = GroupVecWorker(10, DiverContainer)
group_vec_worker.add_tasks([dict(a=3, b=4), dict(a=3, b=7), dict(a=1, b=1)])
group_vec_worker.add_tasks([dict(a=3, b=4), dict(a=5, b=0)])
group_vec_worker.add_tasks([dict(a=1, b=0), dict(a=3, b=4), dict(a=3, b=0)])
group_vec_worker.add_tasks([dict(a=1, b=4), dict(a=3, b=0), dict(a=2, b=4), ])
for i in range(20):
time.sleep(1)
print(group_vec_worker.info())
while True:
result = group_vec_worker.get_result()
if result is not None:
print(result)
else:
break
ray.shutdown()
| 2.5 | 2 |
research/cv/ArtTrack/src/model/losses.py | mindspore-ai/models | 77 | 12770469 | <reponame>mindspore-ai/models<gh_stars>10-100
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.numpy as np
from mindspore import nn
from mindspore.ops import functional as F
from mindspore.ops.operations import Abs
class HuberLossWithWeight(nn.LossBase):
"""
huber loss
"""
def __init__(self):
super(HuberLossWithWeight, self).__init__()
self.abs = Abs()
def construct(self, predictons, labels, weight=1.0, k=1.0):
diff = predictons - labels
abs_diff = self.abs(diff)
k = np.array(k)
losses = np.where(abs_diff < k, 0.5 * np.square(diff), k * abs_diff - 0.5 * k ** 2)
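        # piecewise Huber form computed above:
        #   loss(d) = 0.5 * d**2           if |d| <  k
        #   loss(d) = k * |d| - 0.5 * k**2 otherwise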
return self.get_loss(losses, weight)
class MSELossWithWeight(nn.LossBase):
"""
mse loss
"""
def construct(self, base, target, weight=1.0):
x = F.square(base - target)
return self.get_loss(x, weight)
class WeightLoss(nn.LossBase):
"""
weight loss
"""
def construct(self, loss, weight=1.0):
return self.get_loss(loss, weight)
| 2.28125 | 2 |
oauth/migrations/0003_auto_20190531_0914.py | enjoy-binbin/Django-blog | 111 | 12770470 | <reponame>enjoy-binbin/Django-blog
# Generated by Django 2.1.5 on 2019-05-31 09:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('oauth', '0002_auto_20190512_1129'),
]
operations = [
migrations.AlterModelOptions(
name='oauthconfig',
options={'ordering': ['-add_time'], 'verbose_name': '0-OAuth配置', 'verbose_name_plural': '0-OAuth配置'},
),
migrations.AlterModelOptions(
name='oauthuser',
options={'ordering': ['-add_time'], 'verbose_name': '1-Oauth用户', 'verbose_name_plural': '1-Oauth用户'},
),
]
| 1.554688 | 2 |
pirates/piratesgui/SheetFrame.py | itsyaboyrocket/pirates | 3 | 12770471 | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.SheetFrame
from pirates.piratesgui import ListFrame
from pirates.piratesgui import PiratesGuiGlobals
class SheetFrame(ListFrame.ListFrame):
__module__ = __name__
def __init__(self, w, h, title, holder=None, hideAll=True, delayedReveal=None, **kw):
ListFrame.ListFrame.__init__(self, w, h, title, holder, hideAll, delayedReveal, frameColor=(1,
1,
1,
0.5), **kw)
self.initialiseoptions(SheetFrame)
self.rowColors = {}
def createListItem(self, currItem, revealTime=0, itemType=None, columnWidths=[], color=None):
newItem = self.createNewItem(currItem, itemType, columnWidths, color)
self.items.insert(0, newItem)
itemHeight = self.getItemHeight()
y = self.getHeight() - 0.01
for guiitem in self.items:
y -= guiitem.getHeight()
print 'y = %s' % y
for gui in self.items:
if self.hideAll == False:
gui.descText.wrtReparentTo(gui)
gui.setZ(y)
gui.setX(0.01)
y += gui.getHeight()
if self.hideAll == False:
gui.descText.wrtReparentTo(self.getParent().getParent())
if hasattr(currItem, 'getChangeEvent'):
self.accept(currItem.getChangeEvent(), self._handleItemChange)
return newItem
def _createIface(self):
itemList = self.getItemList()
numRows = len(itemList)
if numRows > 0:
numColumns = len(itemList[0])
column1Width = 0.55
columnWidth = (self.getWidth() - column1Width) / (numColumns - 1)
revealTime = 0
self.createListItem(itemList[0], itemType=PiratesGuiGlobals.UIListItemType_ColumHeadings, columnWidths=[column1Width, columnWidth])
for currItemIdx in range(1, numRows):
currItem = itemList[currItemIdx]
if currItem is not None:
if len(currItem) > 0:
customColor = currItem[-1][0] == 'color' and currItem[-1][1]
else:
customColor = None
self.createListItem(currItem, revealTime, columnWidths=[column1Width, columnWidth], color=customColor)
return
def _destroyIface(self):
for gui in self.items:
gui.destroy()
self.items = []
def getItemHeight(self):
        return -1
| 2.234375 | 2 |
src/tools/pid_printer.py | rishvantsingh/kmlog | 0 | 12770472 | # Copyright 2021 - 2022, <NAME> <<EMAIL>>, Dr. <NAME> <<EMAIL>>
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# This is a small debug utility to print the pid KMLogger is
# running on to help with profiling
import os
from base.util import block_text
def print_pid():
block_text("PID")
print(os.getpid())
| 1.5625 | 2 |
src/intelliflow/core/platform/definitions/aws/kms/client_wrapper.py | amzn/rheoceros | 4 | 12770473 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
from typing import Set, Tuple
from botocore.exceptions import ClientError
from intelliflow.core.platform.definitions.aws.common import _is_trust_policy_AWS_principle_deleted
logger = logging.getLogger(__name__)
_allow_block_sid = "DONOT_DELETE_allow_use_of_the_key"
_admin_sid = "DONOT_DELETE_admin_access"
# https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html
KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS = 7
KMS_MAX_DELETION_WAITING_PERIOD_IN_DAYS = 30
def create_cmk(kms_client, policy: str, desc: str = "RheocerOS CMK") -> Tuple[str, str]:
"""Create a KMS (Symmetric) Customer Master Key
The created CMK is a Customer-managed key stored in AWS KMS.
Please note that a brand new key is 'enabled' by default.
https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
:param desc: key description
:return Tuple(KeyId, KeyArn) where:
KeyId: AWS globally-unique string ID
KeyArn: Amazon Resource Name of the CMK
"""
try:
response = kms_client.create_key(Policy=policy, Description=desc)
except ClientError:
logger.info("Couldn't create new KMS CMK (desc=%s).", desc)
raise
else:
# Return the key ID and ARN
return response["KeyMetadata"]["KeyId"], response["KeyMetadata"]["Arn"]
def schedule_cmk_deletion(kms_client, id_or_arn: str, pending_window_in_days=KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS) -> "datetime":
"""Refer
https://boto3.amazonaws.com/v1/documentation/api/1.9.42/reference/services/kms.html#KMS.Client.schedule_key_deletion
:returns <datetime> 'DeletionDate' from KMS response; by when the key will be irrevocably deleted.
"""
if pending_window_in_days < KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS or pending_window_in_days > KMS_MAX_DELETION_WAITING_PERIOD_IN_DAYS:
raise ValueError(
f"Please provide a 'pending_window_in_days' value between 7 and 30 (inclusive) for the " f"deletion of KMS CMK {id_or_arn}."
)
try:
response = kms_client.schedule_key_deletion(KeyId=id_or_arn, PendingWindowInDays=pending_window_in_days)
except ClientError as err:
if err.response["Error"]["Code"] not in ["NotFoundException", "NotFound", "ResourceNotFoundException"]:
# see it was already in PendingDeletion state
key_metadata = kms_client.describe_key(KeyId=id_or_arn)
if "KeyMetadata" in key_metadata and key_metadata["KeyMetadata"]["KeyState"] == "PendingDeletion":
return key_metadata["KeyMetadata"]["DeletionDate"]
raise
else:
return response["DeletionDate"]
def get_cmk(kms_client, id_or_arn_or_alias: str) -> Tuple[str, str]:
"""refer
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.describe_key
:return Tuple(KeyId, KeyArn) where:
KeyId: AWS globally-unique string ID
KeyArn: Amazon Resource Name of the CMK
"""
try:
response = kms_client.describe_key(KeyId=id_or_arn_or_alias)
except ClientError as err:
if err.response["Error"]["Code"] not in ["NotFoundException"]:
logger.info("Couldn't get KMS CMK (alias=%s).", id_or_arn_or_alias)
raise
return None, None
if "KeyMetadata" in response:
return response["KeyMetadata"]["KeyId"], response["KeyMetadata"]["Arn"]
return None, None
def create_alias(kms_client, alias_name: str, target_key_id: str) -> None:
try:
response = kms_client.create_alias(AliasName=alias_name, TargetKeyId=target_key_id)
except ClientError:
logger.info("Couldn't create new alias '%s' for KMS CMK.", alias_name)
raise
def update_alias(kms_client, alias_name: str, target_key_id: str) -> None:
try:
response = kms_client.update_alias(AliasName=alias_name, TargetKeyId=target_key_id)
except ClientError:
raise
def delete_alias(kms_client, alias_name: str):
try:
kms_client.delete_alias(AliasName=alias_name)
except ClientError:
logger.info("Couldn't delete KMS alias '%s'.", alias_name)
raise
def create_default_policy(account_id: str, users_to_be_added: Set[str], admins: Set[str], trust_same_account=False) -> str:
default_policy = {"Version": "2012-10-17", "Id": "IntelliFlow-CMK-policy", "Statement": []}
if admins or trust_same_account:
admin_list = list(admins)
if trust_same_account:
admin_list.append(f"arn:aws:iam::{account_id}:root")
# see https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam
default_policy["Statement"].append(
{"Sid": _admin_sid, "Effect": "Allow", "Principal": {"AWS": admin_list}, "Action": "kms:*", "Resource": "*"}
)
elif not users_to_be_added:
raise ValueError(f"Cannot risk KMS CMK lockout due to no AWS entity as a trustee in the policy.")
if users_to_be_added:
current_statements = default_policy["Statement"]
new_aws_entity_list = list(users_to_be_added)
current_statements.append(
{
"Sid": _allow_block_sid,
"Effect": "Allow",
"Principal": {"AWS": new_aws_entity_list},
"Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
"Resource": "*",
}
)
return json.dumps(default_policy)
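# Sketch of the document create_default_policy emits (the account id and role
# ARN below are made-up examples, not values from this codebase):
#   {"Version": "2012-10-17", "Id": "IntelliFlow-CMK-policy", "Statement": [
#     {"Sid": "DONOT_DELETE_admin_access", "Effect": "Allow",
#      "Principal": {"AWS": ["arn:aws:iam::111122223333:root"]},
#      "Action": "kms:*", "Resource": "*"},
#     {"Sid": "DONOT_DELETE_allow_use_of_the_key", "Effect": "Allow",
#      "Principal": {"AWS": ["arn:aws:iam::111122223333:role/app"]},
#      "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*",
#                 "kms:GenerateDataKey*", "kms:DescribeKey"],
#      "Resource": "*"}]}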
def put_cmk_policy(
kms_client,
key_id: str,
account_id: str,
users_to_be_added: Set[str] = set(),
users_to_be_removed: Set[str] = set(),
trust_same_account: bool = None,
) -> None:
default_policy = {"Version": "2012-10-17", "Id": f"IntelliFlow-CMK-{key_id}-policy", "Statement": []}
same_account_root = f"arn:aws:iam::{account_id}:root"
# https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html
# the only valid policy name is 'default'
policy_name = "default"
change_detected = False
current_policy_doc = None
try:
response = kms_client.get_key_policy(KeyId=key_id, PolicyName=policy_name)
current_policy_doc = json.loads(response["Policy"])
except ClientError as policy_error:
if policy_error.response["Error"]["Code"] not in ["NotFoundException"]:
raise
current_policy_doc = default_policy
change_detected = True
current_statements = current_policy_doc["Statement"]
new_aws_entity_set = set(users_to_be_added)
allow_block_found = False
admin_block_found = False
removed_statement_indexes = []
for i, statement in enumerate(current_statements):
sid = statement.get("Sid", None)
if sid == _allow_block_sid:
allow_block_found = True
current_aws_entity_list = statement["Principal"]["AWS"]
current_aws_entity_list = [current_aws_entity_list] if isinstance(current_aws_entity_list, str) else current_aws_entity_list
# first check deleted entities
for current_entity in current_aws_entity_list:
if _is_trust_policy_AWS_principle_deleted(current_entity):
users_to_be_removed.add(current_entity)
if users_to_be_added or users_to_be_removed:
if bool(new_aws_entity_set - set(current_aws_entity_list)) or set(current_aws_entity_list).intersection(
users_to_be_removed
):
change_detected = True
for current_aws_entity in current_aws_entity_list:
if current_aws_entity not in users_to_be_removed:
new_aws_entity_set.add(current_aws_entity)
if new_aws_entity_set:
statement["Principal"]["AWS"] = list(new_aws_entity_set)
else:
removed_statement_indexes.append(i)
elif sid == _admin_sid:
admin_block_found = True
current_admins_set = statement["Principal"]["AWS"]
current_admins_set = {current_admins_set} if isinstance(current_admins_set, str) else set(current_admins_set)
new_admins_set = set(current_admins_set)
# - clean-up if a zombie entity is still here (due to manual modifications, deletions, etc)
# - clean-up deleted users from admin list as well (if in there)
for current_entity in current_admins_set:
if _is_trust_policy_AWS_principle_deleted(current_entity) or current_entity in users_to_be_removed:
change_detected = True
new_admins_set.remove(current_entity)
if trust_same_account is not None:
if trust_same_account is True and same_account_root not in new_admins_set:
change_detected = True
new_admins_set.add(same_account_root)
elif trust_same_account is False and same_account_root in new_admins_set:
change_detected = True
new_admins_set.remove(same_account_root)
if new_admins_set != current_admins_set:
statement["Principal"]["AWS"] = list(new_admins_set)
# delete in reverse order so that iteration won't mess up
for i in sorted(removed_statement_indexes, reverse=True):
del current_statements[i]
if not allow_block_found and new_aws_entity_set:
change_detected = True
current_statements.append(
{
"Sid": _allow_block_sid,
"Effect": "Allow",
"Principal": {"AWS": list(new_aws_entity_set)},
"Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
"Resource": "*",
}
)
if not admin_block_found and trust_same_account is True:
change_detected = True
# this normally should not happen (unless the policy had some other admin level statement)
# we still want to have our own well-defined block.
default_policy["Statement"].append(
{"Sid": _admin_sid, "Effect": "Allow", "Principal": {"AWS": same_account_root}, "Action": "kms:*", "Resource": "*"}
)
if change_detected:
try:
response = kms_client.put_key_policy(
KeyId=key_id, PolicyName=policy_name, Policy=json.dumps(current_policy_doc), BypassPolicyLockoutSafetyCheck=False
)
except ClientError:
logger.info("Couldn't update policy for KMS key '%s'.", key_id)
raise
def enable_key_rotation(kms_client, key_id: str):
try:
response = kms_client.enable_key_rotation(KeyId=key_id)
except ClientError:
logger.info("Couldn't enable key rotation for KMS key '%s'.", key_id)
raise
| 1.992188 | 2 |
apis/rest/agent_checkin.py | 3lpsy/FactionAPI | 1 | 12770474 | from flask import jsonify, request
from flask_restful import Resource, reqparse
from processing import agent_checkin
from processing.user_role import authorized_groups
from logger import log
agent_checkin_parser = reqparse.RequestParser()
agent_checkin_parser.add_argument('TransportId')
agent_checkin_parser.add_argument('SourceIP')
agent_checkin_parser.add_argument('Message')
class AgentCheckinEndpoint(Resource):
@authorized_groups(['StandardRead', 'Transport'])
def get(self, agent_name):
log("AgentCheckinEndpoint:GET", "AgentID: {0}".format(agent_name))
task_obj = agent_checkin.process_agent_checkin(agent_name=agent_name,
transport_id=request.args.get('TransportId'),
                                                       source_ip=request.args.get('SourceIP'))
return jsonify(task_obj)
@authorized_groups(['StandardWrite', 'Transport'])
def post(self, agent_name):
args = agent_checkin_parser.parse_args()
log("AgentCheckinEndpoint:POST", "AgentID: %s | Args: {0}".format(agent_name, args))
task_obj = agent_checkin.process_agent_checkin(agent_name=agent_name,
transport_id=args.get('TransportId'),
                                                       source_ip=args.get('SourceIP'),
message=args.get('Message'))
        return jsonify(task_obj)
| 2.203125 | 2 |
client2.py | pabitra0177/ITR-internship | 0 | 12770475 | <reponame>pabitra0177/ITR-internship
import socket
import time
# ? HOW to check the connection does exist
# ? How to unbind
# ? socket.error: [Errno 111] Connection refused
s=socket.socket()
host='127.0.0.1'
port=1121
s.connect((host, port))  # connect() returns None, so it cannot drive a while loop
while True:
    print s.recv(1024)
    time.sleep(0.5)
| 3.21875 | 3 |
vmain.py | DBMSRmutl/meterOCR | 7 | 12770476 | import os
import sys
import subprocess
foldername = 'results'
# ensure the output directory exists before the OCR runs try to write into it
if not os.path.isdir(foldername):
    os.makedirs(foldername)
ocrFileName = 'python v1-3-arg.py'
for filename in os.listdir('/home/ubuntu/pyview/Test/pics'):
#print filename
fullFilename = (u'/home/ubuntu/pyview/Test/pics/'+filename)
#create new path for results
pathForEachResult = foldername+'/'+filename
print 'pathForEachResult = '+ pathForEachResult
#print fullFilename
qx = os.system('python v1-3-arg.py '+fullFilename+' '+pathForEachResult)
#qx = os.system('python v1-3-arg.py fullFilename pathForEachResult')
#print 'full command= '+ocrFileName+' '+fullFilename+' '+pathForEachResult
#qx = os.system(ocrFileName fullFilename pathForEachResult)
#result = subprocess.check_output(os.system('python v1-3-arg.py '+fullFilename+' '+pathForEachResult), shell=True)
#print 'qx ='+result
#upload to google drive
# /results/...
#mainResultDirectory = '0B-VyANMiyv0HUzJhTUFKdTZHRWc'
#os.system('python /home/ubuntu//uploadToGDrive.py ')
#sys.stdin.read(1)
#print output
| 2.578125 | 3 |
zoneh/threads/pusher.py | RaminAT/zoneh | 8 | 12770477 | <gh_stars>1-10
"""Pusher threads module."""
import logging
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from zoneh.captcha import captcha
from zoneh.commons import CommonThread
from zoneh.conf import get_config
from zoneh.parsers.formatter import FormattedRecord
from zoneh.utils import shallow_sleep, get_lock
CONF = get_config()
class PusherThread(CommonThread):
"""Pusher Thread Class."""
def __init__(self, push_queue, update):
"""Class constructor."""
super().__init__()
self._log = logging.getLogger(self.__class__.__name__)
self._update = update
self._lock = get_lock()
self._push_queue = push_queue
def _run(self):
"""Real thread run method."""
rec_num = 0
while self._run_trigger.is_set():
with self._lock:
self._log.debug('Captcha is active: %s', captcha.is_active)
self._log.debug('Captcha is sent: %s', captcha.is_sent)
if captcha.is_active and not captcha.is_sent:
self._send_captcha(self._update)
while self._push_queue:
try:
record = self._push_queue.pop()
except IndexError:
                        break  # queue drained between the check and the pop
rec_num += 1
self._process_record(record, rec_num)
shallow_sleep(1)
shallow_sleep(1)
def _send_captcha(self, update):
"""Send captcha image to the telegram chat."""
self._log.info('Sending captcha image to telegram')
update.message.reply_photo(photo=captcha.image, caption=captcha.caption)
captcha.is_sent = True
def _process_record(self, record, rec_num):
"""Process and send record."""
rec_formatted = FormattedRecord(record, rec_num)
keyboard = [[InlineKeyboardButton(
'Open mirror', url=rec_formatted.mirror)]]
reply_markup = InlineKeyboardMarkup(keyboard)
self._update.message.reply_html(rec_formatted.data,
reply_markup=reply_markup)
| 2.40625 | 2 |
main/change.py | PiRobotLm5G/Adeept_RaspTank | 0 | 12770478 | <filename>main/change.py<gh_stars>0
#!/usr/bin/python
import time
import smbus
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
bus = smbus.SMBus(1)
def __WriteByte(register16, addr, data):
a1 = (register16 >> 8) & 0xFF
a0 = register16 & 0xFF
try:
bus.write_i2c_block_data(addr, a1, [a0, (data & 0xFF)])
except:
raise
def change_addr(addr, new_addr, reset_pin):
VL6180X_SLAVE_DEVICE_ADDRESS = 0x0212
GPIO.setup(reset_pin, GPIO.OUT)
GPIO.output(reset_pin,0)
time.sleep(1)
GPIO.output(reset_pin,1)
time.sleep(1)
if addr != new_addr:
__WriteByte(VL6180X_SLAVE_DEVICE_ADDRESS, addr, new_addr)
#time.sleep(1)
#GPIO.output(reset_pin,0)
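# Sequence performed by change_addr (pin numbers below are the BCM values
# used in __main__, shown only as an example):
#   1. pull the sensor's reset pin low, wait, then high -> fresh power-on
#      state at the default I2C address 0x29
#   2. write the new 7-bit address into register 0x0212
#   e.g. change_addr(0x29, 0x2a, 21) moves the sensor on GPIO21 to 0x2a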
#init
if __name__ == "__main__":
tof_orig_addr = 0x29
tof_right_addr = tof_orig_addr
tof_right_pin = 9
tof_left_addr = 0x2a
tof_left_pin = 21
tof_front_addr = 0x2b
tof_front_pin = 10
#GPIO.setwarnings(False)
change_addr(tof_orig_addr, tof_left_addr, tof_left_pin)
change_addr(tof_orig_addr, tof_front_addr, tof_front_pin)
change_addr(tof_orig_addr, tof_right_addr, tof_right_pin) # power on
#sda = new_address_read(VL6180X_SLAVE_DEVICE_ADDRESS)
#print(sda,' id')
GPIO.cleanup()
#if read(VL6180X_SYSTEM_FRESH_OUT_OF_RESET) == 1:
# print('sensor is ready.')
# WriteByte(0x0207, 0x01)
# WriteByte(0x0208, 0x01)
# WriteByte(0x0096, 0x00)
# WriteByte(0x0097, 0xfd)
# WriteByte(0x00e3, 0x00)
# WriteByte(0x00e4, 0x04)
# WriteByte(0x00e5, 0x02)
# WriteByte(0x00e6, 0x01)
# WriteByte(0x00e7, 0x03)
# WriteByte(0x00f5, 0x02)
# WriteByte(0x00d9, 0x05)
# WriteByte(0x00db, 0xce)
# WriteByte(0x00dc, 0x03)
# WriteByte(0x00dd, 0xf8)
# WriteByte(0x009f, 0x00)
# WriteByte(0x00a3, 0x3c)
# WriteByte(0x00b7, 0x00)
# WriteByte(0x00bb, 0x3c)
# WriteByte(0x00b2, 0x09)
# WriteByte(0x00ca, 0x09)
# WriteByte(0x0198, 0x01)
# WriteByte(0x01b0, 0x17)
# WriteByte(0x01ad, 0x00)
# WriteByte(0x00ff, 0x05)
# WriteByte(0x0100, 0x05)
# WriteByte(0x0199, 0x05)
# WriteByte(0x01a6, 0x1b)
# WriteByte(0x01ac, 0x3e)
# WriteByte(0x01a7, 0x1f)
# WriteByte(0x0030, 0x00)
#default_settings
# Recommended : Public registers - See data sheet for more detail
#WriteByte(0x0011, 0x10); # Enables polling for 'New Sample ready' when measurement completes
#WriteByte(0x010a, 0x30); # Set the averaging sample period (compromise between lower noise and increased execution time)
#WriteByte(0x003f, 0x46); # Sets the light and dark gain (upper nibble). Dark gain should not be changed.
#WriteByte(0x0031, 0xFF); # sets the # of range measurements after which auto calibration of system is performed
#WriteByte(0x0040, 0x63); # Set ALS integration time to 100ms DocID026571 Rev 1 25/27 AN4545 SR03 settings27
#WriteByte(0x002e, 0x01); # perform a single temperature calibration of the ranging sensor
#
##Optional: Public registers - See data sheet for more detail
#WriteByte(0x001b, 0x09); # Set default ranging inter-measurement period to 100ms
#WriteByte(0x003e, 0x31); # Set default ALS inter-measurement period to 500ms
#WriteByte(0x0014, 0x24); # Configures interrupt on 'New Sample Ready threshold event'
#WriteByte(0x016, 0x00); #change fresh out of set status to 0
#
## Additional settings defaults from community
#WriteByte(VL6180X_SYSRANGE_MAX_CONVERGENCE_TIME, 0x32)
#WriteByte(VL6180X_SYSRANGE_RANGE_CHECK_ENABLES, 0x10 | 0x01)
#WriteByte16(VL6180X_SYSRANGE_EARLY_CONVERGENCE_ESTIMATE, 0x7B)
#WriteByte16(VL6180X_SYSALS_INTEGRATION_PERIOD, 0x64) #100ms
#WriteByte(VL6180X_SYSALS_ANALOGUE_GAIN, 0x20) #x40
#WriteByte(VL6180X_FIRMWARE_RESULT_SCALER, 0x01)
#main
#distance
#WriteByte(VL6180X_SYSRANGE_START, 0x01) #0x03 renzoku
#time.sleep(0.1)
#distance = read(VL6180X_RESULT_RANGE_VAL)
#WriteByte(VL6180X_SYSTEM_INTERRUPT_CLEAR, 0x07)
#print(distance,'mm')
#
#ambient_light
#WriteByte(VL6180X_SYSALS_START, 0x01)
#time.sleep(0.5)
#light = read16(VL6180X_RESULT_ALS_VAL)
#WriteByte(VL6180X_SYSTEM_INTERRUPT_CLEAR, 0x07)
#print read(VL6180X_SYSALS_ANALOGUE_GAIN)
#print read16(VL6180X_SYSALS_INTEGRATION_PERIOD)
#print(light*0.32*100/(32*100),'lux')
#Copyright (c) 2014-2015 <NAME>. All rights reserved.
| 2.765625 | 3 |
last_social_activity/social_networks/facebook.py | intelligenia/django-last-social-activity | 2 | 12770479 | <gh_stars>1-10
# -.- coding: utf-8 -.-
from __future__ import unicode_literals, absolute_import
from django.conf import settings
from dateutil import parser
import requests
try:
    from urllib.error import HTTPError
    from http.client import HTTPException
except ImportError:
    from urllib2 import HTTPError
    from httplib import HTTPException
from last_social_activity.models import SocialNetworkItemCache
class FacebookReader(object):
# To get an ACCESS TOKEN use
# https://graph.facebook.com/oauth/access_token?client_id=YOUR_APP_ID&client_secret=YOUR_APP_SECRET&grant_type=client_credentials
GET_POSTS_URL = 'https://graph.facebook.com/{0}/posts'
def __init__(self):
credentials = self._get_credentials()
self.access_token = credentials["access_token"]
self.profile = credentials["profile"]
self.api = None
def connect(self):
self.api = None
# Fetch the last num_posts posts
def get_last_posts(self, num_posts=5):
# If there is a hit, get from cache
if SocialNetworkItemCache.hit("facebook", num_posts):
return SocialNetworkItemCache.get("facebook", num_posts).response_dict
parameters = {
'access_token': self.access_token,
'fields': 'type,created_time,link,permalink_url,message,message_tags,name,picture,full_picture,source',
'limit': num_posts
}
try:
response = requests.get(FacebookReader.GET_POSTS_URL.format(self.profile), params=parameters)
posts = response.json().get('data')
        except (HTTPError, HTTPException, ValueError, requests.exceptions.RequestException) as e:
# If there is a hit, get from cache
if SocialNetworkItemCache.hit("facebook", num_posts):
return SocialNetworkItemCache.get("facebook", num_posts).response_dict
return []
for post in posts:
post['created_at'] = parser.parse(post.get('created_time')).isoformat()
SocialNetworkItemCache.create("facebook", num_posts, posts)
return posts
# Return the credentials of the Facebook account
def _get_credentials(self):
facebook_credentials = settings.LAST_SOCIAL_ACTIVITY_CREDENTIALS.get('facebook')
if not facebook_credentials:
raise AssertionError(u"Credentials not found for facebook")
if type(facebook_credentials) is dict:
return facebook_credentials
raise AssertionError(u"No other credential source is implemented at the moment")
| 2.515625 | 3 |
kive/FixtureFiles/demo/CodeResources/prelim_map.py | cfe-lab/Kive | 2 | 12770480 | <reponame>cfe-lab/Kive
#! /usr/bin/env python
"""
Shipyard-style bowtie2
Run bowtie2 on paired-end FASTQ data sets with user-supplied *.bt2
bowtie2 SAM format output to <stdout> for redirection via subprocess.Popen
Sort outputs by refname.
Convert to CSV format and write to file.
"""
import argparse
import csv
import logging
import os
import sys
import micall.core.miseq_logging as miseq_logging
import micall.core.project_config as project_config
from micall.utils.externals import Bowtie2, Bowtie2Build, LineCounter
BOWTIE_THREADS = 4 # Bowtie performance roughly scales with number of threads
BOWTIE_VERSION = '2.2.8' # version of bowtie2, used for version control
BOWTIE_PATH = 'bowtie2-align-s' # path to executable, so you can install more than one version
BOWTIE_BUILD_PATH = 'bowtie2-build-s'
# Read and reference gap open/extension penalties.
READ_GAP_OPEN = 10
READ_GAP_EXTEND = 3
REF_GAP_OPEN = 10
REF_GAP_EXTEND = 3
logger = miseq_logging.init_logging_console_only(logging.DEBUG)
line_counter = LineCounter()
def prelim_map(fastq1,
fastq2,
prelim_csv,
nthreads=BOWTIE_THREADS,
callback=None,
rdgopen=READ_GAP_OPEN,
rfgopen=REF_GAP_OPEN,
stderr=sys.stderr,
gzip=False,
work_path=''):
""" Run the preliminary mapping step.
@param fastq1: the file name for the forward reads in FASTQ format
@param fastq2: the file name for the reverse reads in FASTQ format
@param prelim_csv: an open file object for the output file - all the reads
mapped to references in CSV version of the SAM format
@param nthreads: the number of threads to use.
@param callback: a function to report progress with three optional
parameters - callback(message, progress, max_progress)
@param rdgopen: a penalty for opening a gap in the read sequence.
@param rfgopen: a penalty for opening a gap in the reference sequence.
@param stderr: where to write the standard error output from bowtie2 calls.
@param work_path: optional path to store working files
"""
try:
bowtie2 = Bowtie2(BOWTIE_VERSION, BOWTIE_PATH)
bowtie2_build = Bowtie2Build(BOWTIE_VERSION,
BOWTIE_BUILD_PATH,
logger)
except:
bowtie2 = Bowtie2(BOWTIE_VERSION, BOWTIE_PATH + '-' + BOWTIE_VERSION)
bowtie2_build = Bowtie2Build(BOWTIE_VERSION,
BOWTIE_BUILD_PATH + '-' + BOWTIE_VERSION,
logger)
# check that the inputs exist
if not os.path.exists(fastq1):
logger.error('No FASTQ found at %s', fastq1)
sys.exit(1)
if not os.path.exists(fastq2):
logger.error('No FASTQ found at %s', fastq2)
sys.exit(1)
# append .gz extension if necessary
if gzip:
if not fastq1.endswith('.gz'):
try:
os.symlink(fastq1, fastq1+'.gz')
except OSError:
# symbolic link already exists
pass
fastq1 += '.gz'
if not fastq2.endswith('.gz'):
try:
os.symlink(fastq2, fastq2+'.gz')
except OSError:
# symbolic link already exists
pass
fastq2 += '.gz'
if callback:
# four lines per read, two files
total_reads = line_counter.count(fastq1, gzip=gzip) / 2
callback(message='... preliminary mapping',
progress=0,
max_progress=total_reads)
# generate initial reference files
projects = project_config.ProjectConfig.loadDefault()
ref_path = os.path.join(work_path, 'micall.fasta')
with open(ref_path, 'w') as ref:
projects.writeSeedFasta(ref)
reffile_template = os.path.join(work_path, 'reference')
bowtie2_build.build(ref_path, reffile_template)
# do preliminary mapping
output = {}
read_gap_open_penalty = rdgopen
ref_gap_open_penalty = rfgopen
# stream output from bowtie2
bowtie_args = ['--wrapper', 'micall-0',
'--quiet',
'-x', reffile_template,
'-1', fastq1,
'-2', fastq2,
'--rdg', "{},{}".format(read_gap_open_penalty,
READ_GAP_EXTEND),
'--rfg', "{},{}".format(ref_gap_open_penalty,
REF_GAP_EXTEND),
'--no-hd', # no header lines (start with @)
'-X', '1200',
'-p', str(nthreads)]
for i, line in enumerate(bowtie2.yield_output(bowtie_args, stderr=stderr)):
if callback and i % 1000 == 0:
callback(progress=i)
refname = line.split('\t')[2] # read was mapped to this reference
if refname not in output:
output.update({refname: []})
output[refname].append(line.split('\t')[:11]) # discard optional items
fieldnames = ['qname',
'flag',
'rname',
'pos',
'mapq',
'cigar',
'rnext',
'pnext',
'tlen',
'seq',
'qual']
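    # the 11 mandatory SAM columns in spec order; optional tags were dropped
    # above when each bowtie2 line was truncated to its first 11 fields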
writer = csv.DictWriter(prelim_csv, fieldnames, lineterminator=os.linesep)
writer.writeheader()
# lines grouped by refname
for refname, lines in output.iteritems():
for line in lines:
writer.writerow(dict(zip(fieldnames, line)))
if callback:
# Track progress for second half
callback(progress=total_reads)
def main():
parser = argparse.ArgumentParser(
description='Map contents of FASTQ R1 and R2 data sets to references using bowtie2.')
parser.add_argument('fastq1', help='<input> FASTQ containing forward reads')
parser.add_argument('fastq2', help='<input> FASTQ containing reverse reads')
parser.add_argument('prelim_csv',
type=argparse.FileType('w'),
help='<output> CSV containing preliminary mapping from bowtie2 (modified SAM)')
parser.add_argument("--rdgopen", default=None, help="<optional> read gap open penalty")
parser.add_argument("--rfgopen", default=None, help="<optional> reference gap open penalty")
parser.add_argument("--gzip", action='store_true', help="<optional> FASTQs are compressed")
args = parser.parse_args()
prelim_map(fastq1=args.fastq1,
fastq2=args.fastq2,
prelim_csv=args.prelim_csv,
rdgopen=args.rdgopen,
rfgopen=args.rfgopen,
gzip=args.gzip) # defaults to False
if __name__ == '__main__':
main()
| 2.25 | 2 |
asynctest.py | uskoo/CitrusDrop | 0 | 12770481 | import json
import os
from urllib.parse import parse_qsl
import asyncio
from requests_oauthlib import OAuth1Session
from flask import Flask, jsonify, request, redirect, url_for
from flask import render_template
from citrus_drop import CitrusDrop
app = Flask(__name__)
user_drop = {
    'screen_name': 'not fetched',
    'last_update': '-',
    'profile_image_url': './static/not_found.png',
    'followers_count': 'not fetched',
    'friends_count': 'not fetched',
    'result': []
}
@app.route('/update', methods=['GET'])
def update():
global loop
title = "CitrusDrop"
page = "main"
#loop = asyncio.get_event_loop()
print("update呼ばれた")
hoge = loop.run_until_complete(update_dict())
print(hoge)
return render_template('main.html', title=title, page=page, message=user_drop, disabled="true")
async def update_dict():
title = "CitrusDrop"
page = "main"
    print('made it this far')
task = loop.create_task(asyncio.sleep(10))
await task
return render_template('main.html', title=title, page=page, message=user_drop, disabled="false")
@app.route('/')
def main():
print("呼ばれた")
disabled = "false"
print(request.args.get('disabled'))
if request.args.get('disabled'):
disabled = "true"
else:
pass
title = "CitrusDrop"
page = "main"
return render_template('main.html', title=title, page=page, message=user_drop, disabled=disabled)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()  # shared loop used by the /update route
    app.run()  # Flask's app.run() blocks and is not a coroutine, so it must not be awaited
| 2.6875 | 3 |
stRT/tdr/__init__.py | Yao-14/stAnalysis | 0 | 12770482 | from .models import *
from .widgets import *
| 1.101563 | 1 |
saau/sections/ancestry/irish.py | Mause/statistical_atlas_of_au | 0 | 12770483 | <reponame>Mause/statistical_atlas_of_au
# ancestry-irish
from . import AncestryImageProvider
from ...utils.header import render_header_to
class IrishAncestryImageProvider(AncestryImageProvider):
filename = 'irish.json'
ancestry_name = 'Irish'
def build_image(self):
return render_header_to(
self.services.fonts.get_font(),
super().build_image(),
19,
lines=[
'<b>MAP</b>',
'SHOWING THE DISTRIBUTION OF',
'<b>IRISH ANCESTRY</b>',
# !!!!
'ACCORDING TO THEIR PROPORTION TO THE AGGREGATE POPULATION',
'<i>Compiled using data from the 2011 ABS Census</i>'
]
)
| 2.375 | 2 |
rest-service/manager_rest/security/user_handler.py | TS-at-WS/cloudify-manager | 124 | 12770484 | <filename>rest-service/manager_rest/security/user_handler.py
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import string
from flask import current_app
from itsdangerous import BadSignature, SignatureExpired
from ..storage.idencoder import get_encoder
from cloudify.constants import CLOUDIFY_API_AUTH_TOKEN_HEADER
from manager_rest.storage.models import User
from manager_rest.manager_exceptions import NotFoundError
from manager_rest.storage import user_datastore, get_storage_manager
from manager_rest.execution_token import (set_current_execution,
get_current_execution_by_token,
get_execution_token_from_request)
ENCODED_ID_LENGTH = 5
def user_loader(request):
"""Attempt to retrieve the current user from the request
Either from request's Authorization attribute, or from the token header
Having this function makes sure that this will work:
> from flask_security import current_user
> current_user
<manager_rest.storage.models.User object at 0x50d9d10>
:param request: flask's request
:return: A user object, or None if not found
"""
if request.authorization:
return get_user_from_auth(request.authorization)
execution_token = get_execution_token_from_request(request)
if execution_token:
execution = get_current_execution_by_token(execution_token)
set_current_execution(execution) # Sets the request current execution
return execution.creator if execution else None
token = get_token_from_request(request)
if token:
_, _, user, _, _ = get_token_status(token)
return user
api_token = get_api_token_from_request(request)
if api_token:
user, user_token_key = extract_api_token(api_token)
return user
if current_app.external_auth \
and current_app.external_auth.can_extract_user_from_request():
user = current_app.external_auth.get_user_from_request(request)
if isinstance(user, User):
return user
return None
def extract_api_token(api_token):
user_id = api_token[:ENCODED_ID_LENGTH]
user_token_key = api_token[ENCODED_ID_LENGTH:]
user_id = get_encoder().decode(user_id)
try:
user = get_storage_manager().get(User, user_id)
except NotFoundError:
return None, None
return user, user_token_key
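# Token layout assumed by extract_api_token: the first ENCODED_ID_LENGTH (5)
# characters carry the encoded user id, everything after them is the user's
# token key. The split below is illustrative, not a real token:
#   'Ab3xZ' + 'f00dfaced00dbad...'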
def get_user_from_auth(auth):
if not auth or not auth.username:
return None
if auth.username[0] not in string.ascii_letters:
return None
return user_datastore.get_user(auth.username)
def get_token_from_request(request):
token_auth_header = current_app.config[
'SECURITY_TOKEN_AUTHENTICATION_HEADER']
return request.headers.get(token_auth_header)
def get_api_token_from_request(request):
return request.headers.get(CLOUDIFY_API_AUTH_TOKEN_HEADER)
def get_token_status(token):
"""Mimic flask_security.utils.get_token_status with some changes
:param token: The token to decrypt
:return: A tuple: (expired, invalid, user, data)
"""
security = current_app.extensions['security']
serializer = security.remember_token_serializer
max_age = security.token_max_age
user, data, error = None, None, None
expired, invalid = False, False
try:
data = serializer.loads(token, max_age=max_age)
except SignatureExpired:
expired = True
except (BadSignature, TypeError, ValueError) as e:
invalid = True
error = e
if data:
user = user_datastore.find_user(id=data[0])
return expired, invalid, user, data, error
| 1.921875 | 2 |
api_service.py | gve-sw/gve_devnet_ise_expanded_grace_period_guest_portal | 0 | 12770485 | <gh_stars>0
# Copyright (c) 2021 Cisco and/or its affiliates.
# This software is licensed to you under the terms of the Cisco Sample
# Code License, Version 1.1 (the "License"). You may obtain a copy of the
# License at
# https://developer.cisco.com/docs/licenses
# All use of the material herein must be in accordance with the terms of
# the License. All rights not expressly granted by the License are
# reserved. Unless required by applicable law or agreed to separately in
# writing, software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied.
import datetime
import os
import requests
import base64
import json
from dotenv import load_dotenv
# load all environment variables
load_dotenv()
'''Global variables'''
'''ISE Instance'''
HOST = os.environ['HOST']
''' Setup ISE credintials '''
'''User ERS Admin credentials = normal ISE login user'''
ERS_USERNAME = os.environ['ERS_USERNAME']
ERS_PASSWORD = os.environ['ERS_PASSWORD']
ers_creds = str.encode(ERS_USERNAME+':'+ERS_PASSWORD)
ers_encodedAuth = bytes.decode(base64.b64encode(ers_creds))
ers_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Basic ' + ers_encodedAuth
}
'''Sponsor Account credentials = Admin > identities > users'''
SPONSOR_USERNAME = os.environ['SPONSOR_USERNAME']
SPONSOR_PASSWORD = os.environ['SPONSOR_PASSWORD']
sponsor_creds = str.encode(SPONSOR_USERNAME +':'+ SPONSOR_PASSWORD)
sponsor_encodedAuth = bytes.decode(base64.b64encode(sponsor_creds))
sponsor_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Basic ' + sponsor_encodedAuth
}
'''Sponsor Portal Info'''
SPONSOR_PORTAL_ID = os.environ['SPONSOR_PORTAL_ID']
'''Create Timestamps based on number of days'''
def getDates(days):
# get current date
fromDateObject = datetime.datetime.now()
#get days to date python lib object
delta = datetime.timedelta(days=days)
toDateObject = fromDateObject + delta
    # format as mm/dd/YYYY HH:MM
fromDate = fromDateObject.strftime("%m/%d/%Y %H:%M")
toDate = toDateObject.strftime("%m/%d/%Y %H:%M")
return fromDate, toDate
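# Illustrative output (values depend on the clock at call time; example assumed):
#     getDates(3)  ->  ('06/01/2021 10:00', '06/04/2021 10:00')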
'''
Update a guest user by its username.
'''
def updateGuestUserByName(name, days, approveStatus):
print('-------------UPDATE GUEST USER BY NAME: '+ name +'-------------------')
fromDate, toDate = getDates(days)
payload = {
"GuestUser" : {
"guestType" : "Contractor (default)",
"guestInfo" : {
},
"guestAccessInfo" : {
"validDays" : days,
"fromDate" : fromDate,
"toDate" : toDate
},
"portalId" : SPONSOR_PORTAL_ID,
"customFields" : {
"Approve" : approveStatus
}
}
}
url = HOST +":9060/ers/config/guestuser/name/" + name
method = "PUT"
response = requests.request(method, url, headers=sponsor_headers, data=json.dumps(payload), verify=False)
print('Response Code: ' + str(response.status_code))
return response.text
'''
Update a guest user by its ID.
'''
def updateGuestUserByID(userID, days):
print('---------------UPDATE GUEST USER BY ID: '+ userID +' --------------------')
fromDate, toDate = getDates(days)
payload = {
"GuestUser" : {
"guestType" : "Contractor (default)",
"guestInfo" : {
},
"guestAccessInfo" : {
"validDays" : days,
"fromDate" : fromDate,
"toDate" : toDate
},
"portalId" : SPONSOR_PORTAL_ID
}
}
url = HOST +":9060/ers/config/guestuser/"+ userID
method = "PUT"
response = requests.request(method, url, headers=sponsor_headers, data=json.dumps(payload), verify=False)
print('Response Code: ' + str(response.status_code))
return response.text
'''
Suspend a guest user by username
'''
def suspendGuestUserbyName(username):
print('------------SUSPENDED GUEST USER: '+ username +' -------------')
url = HOST +":9060/ers/config/guestuser/suspend/name/"+ username
method = "PUT"
response = requests.request(method, url, headers=sponsor_headers, data={}, verify=False)
print('Response Code: ' + str(response.status_code))
#print(response.text)
return response.text
'''
Get all guest Users
'''
def getGuestUsers():
print('-------------------GET ALL GUEST USERS-------------------')
url = HOST +":9060/ers/config/guestuser"
method = "GET"
response = requests.request(method, url, headers=sponsor_headers, data={}, verify=False)
print('Response Code: ' + str(response.status_code))
return response.text
'''
Get detailed guest user info based on username.
'''
def getGuestUserbasedOnName(name):
print('------------------GET GUEST USER BY USERNAME: '+ name +'--------------')
url = HOST +":9060/ers/config/guestuser/name/" + name
method = "GET"
response = requests.request(method, url, headers=sponsor_headers, data={}, verify=False)
print('Response Code: ' + str(response.status_code))
return response.text
'''
Get detailed guest user info based on ID.
'''
def getGuestUserByID(userID):
print('---------------GET GUEST USER BY ID: '+ userID +'--------------------')
url = HOST +":9060/ers/config/guestuser/" + userID
method = "GET"
response = requests.request(method, url, headers=sponsor_headers, data={}, verify=False)
print('Response Code: ' + str(response.status_code))
#print(response.text)
return response.text
'''
Get all sponsor portals
'''
def getSponsorPortals():
print('-----------------GET SPONSOR PORTALS----------------')
url = HOST +":9060/ers/config/sponsorportal"
method = "GET"
response = requests.request(method, url, headers=ers_headers, data={}, verify=False)
print('Response Code: ' + str(response.status_code))
return response.text
| 1.6875 | 2 |
Paper_13/Classes/Transfer_to_RAM.py | ADGiuliano/DevAndCompForRecSys2016 | 0 | 12770486 | """
@author: <NAME>
@contact: <EMAIL>
@organization: University of Padua
"""
import sqlite3
import datetime
import Pikle_operations as p_o
from Functions import Time_Calcolation as tc
from Functions import Users_dict as u_d
from Functions import Items_dict as i_d
from Functions import Users_Items_groups_dict as g_d
from Functions import Interactions_Impressions_dict as int_imp_d
# File containing the T_to_RAM class
class T_to_RAM:
    # Class initialization module
def __init__(self, d_l):
a = datetime.datetime.now();
if (d_l == True):
db_path = 'File_db/recsys16_lite.db';
else:
db_path = 'File_db/recsys16.db';
conn = sqlite3.connect(db_path);
conn.row_factory = sqlite3.Row;
c = conn.cursor();
pik = p_o.Pickle_operator(d_l);
nr_items = g_d.Nr_items(c);
if (pik.check_int_max_time_file() == False):
self.int_max_time = tc.Max_T_int(c);
pik.save_int_max_time(self.int_max_time);
else:
self.int_max_time = pik.load_int_max_time();
if (pik.check_int_max_diff_file() == False):
self.int_max_diff = self.int_max_time - tc.Min_T_int(c);
pik.save_int_max_diff(self.int_max_diff);
else:
self.int_max_diff = pik.load_int_max_diff();
        # split the interactions time range into periods
#print self.int_max_diff;
t_period = self.int_max_diff / 7;
#print t_period;
t_rest = self.int_max_diff%t_period;
#print t_rest;
period_test = tc.Min_T_int(c);
int_periods_t_lim_v = [];
for i in range(0,6):
#print period_test;
int_periods_t_lim_v.append(period_test);
period_test += t_period;
period_test += t_period + t_rest;
int_periods_t_lim_v.append(period_test);
#print int_periods_t_lim_v;
#print self.int_max_time;
        # split the impressions time range into periods
imp_periods_t_lim_v = [];
for i in range(0,len(int_periods_t_lim_v)):
tmsp = int_periods_t_lim_v[i];
week = datetime.datetime.fromtimestamp(tmsp).isocalendar()[1];
imp_periods_t_lim_v.append(week);
#print imp_periods_t_lim_v;
if (pik.check_imp_max_time_file() == False):
self.imp_max_time = tc.Max_T_imp(c);
imp_max_year = tc.Max_year_imp(c);
self.imp_max_time += (imp_max_year * 52);
pik.save_imp_max_time(self.imp_max_time);
else:
self.imp_max_time = pik.load_imp_max_time();
if (pik.check_imp_max_diff_file() == False):
imp_min_year = tc.Min_year_imp(c);
self.imp_max_diff = self.imp_max_time - (tc.Min_T_imp(c,imp_min_year)+(imp_min_year*52));
pik.save_imp_max_diff(self.imp_max_diff);
else:
self.imp_max_diff = pik.load_imp_max_diff();
if (pik.check_item_max_time_file() == False):
self.item_max_time = tc.Max_T_items(c);
pik.save_item_max_time(self.item_max_time);
else:
self.item_max_time = pik.load_item_max_time();
if (pik.check_item_min_time_file() == False):
Min_t = tc.Min_T_items(c);
pik.save_item_min_time(Min_t);
else:
Min_t = pik.load_item_min_time();
if (pik.check_item_max_diff_file() == False):
self.item_max_diff = self.item_max_time - Min_t;
pik.save_item_max_diff(self.item_max_diff);
else:
self.item_max_diff = pik.load_item_max_diff();
print "\tItems score dict";
if (pik.check_item_score_dict_file() == False):
self.items_score_d = i_d.Item_score_dict(c);
pik.save_item_score_dict(self.items_score_d);
else:
self.items_score_d = pik.load_item_score_dict();
print "\tJobroles";
if (pik.check_jobroles_list_file() == False):
jobroles_l = g_d.Jobroles_list(c);
pik.save_jobroles_list(jobroles_l);
else:
jobroles_l = pik.load_jobroles_list();
self.jobroles_d = g_d.Jobroles_d_creation(c,jobroles_l);
#print "Jobrole 996660"
#print self.jobroles_d[996660][0];
#print len(self.jobroles_d[996660][1]);
#print self.jobroles_d[996660][1];
#print self.jobroles_d[996660][2];
#return 0;
print "\tFos";
if (pik.check_fos_list_file() == False):
fos_l = g_d.Fos_list(c);
pik.save_fos_list(fos_l);
else:
fos_l = pik.load_fos_list();
self.fos_d = g_d.Fos_d_creation(c,fos_l);
#print "Fos 1:";
#print self.fos_d[1][0];
#print self.fos_d[1][1];
#print len(self.fos_d[1][1]);
#print self.fos_d[1][2];
#print self.fos_d[2][2];
#return 0;
print "\tTags";
if (pik.check_tags_list_file() == False):
tags_l = g_d.Tags_list(c,0);
pik.save_tags_list(tags_l);
else:
tags_l = pik.load_tags_list();
if (pik.check_tags_dict_file() == False):
self.tags_d = g_d.Tags_d_creation(c,tags_l,0,nr_items);
pik.save_tags_dict(self.tags_d);
else:
self.tags_d = pik.load_tags_dict();
#print "Tag NULL:"
#print self.tags_d[0][0];
#print self.tags_d[0][1];
#print self.tags_d[0][2];
print "\tTitles";
if (pik.check_titles_list_file() == False):
titles_l = g_d.Titles_list(c,0);
pik.save_titles_list(titles_l);
else:
titles_l = pik.load_titles_list();
if (pik.check_titles_dict_file() == False):
self.titles_d = g_d.Titles_d_creation(c,titles_l,0,nr_items);
pik.save_titles_dict(self.titles_d);
else:
self.titles_d = pik.load_titles_dict();
#print "Title NULL:";
#print self.titles_d[0][0];
#print self.titles_d[0][1];
#print self.titles_d[0][2];
print "\tInteractions";
if (pik.check_int_list_file() == False):
int_list = int_imp_d.int_list(c,self.int_max_time);
pik.save_int_list(int_list);
else:
int_list = pik.load_int_list();
if (pik.check_int_dist_file() == False):
self.int_d = int_imp_d.int_d_creation(c,self.int_max_time,int_list,self.int_max_diff);
pik.save_int_dist(self.int_d);
else:
self.int_d = pik.load_int_dist();
#print "Interactions list i = 14 :";
#print int_list[14];
#print "Interactions dict type=4 i=165:";
#print len(self.int_d[4]);
#print self.int_d[4][165];
print "\tImpressions";
if (pik.check_imp_dict_file() == False):
if (pik.check_item_imp_list_file() == False):
imp_items_l = int_imp_d.imp_items_list(c);
pik.save_item_imp_list(imp_items_l);
else:
imp_items_l = pik.load_item_imp_list();
if (pik.check_imp_list_file() == False):
imp_l = int_imp_d.imp_list(c);
pik.save_imp_list(imp_l);
else:
imp_l = pik.load_imp_list();
self.imp_d = int_imp_d.imp_d_creation(c,self.imp_max_time,imp_items_l,self.imp_max_diff);
pik.save_imp_dict(self.imp_d);
else:
self.imp_d = pik.load_imp_dict();
imp_l = pik.load_imp_list();
#print "Imp items list i=13:"
#print imp_items_l[13];
#print "Imp list i=156:";
#print imp_l[156];
#print "Imp dict 10128439:";
#print self.imp_d[10128439];
print "\tCountry Dict";
if (pik.check_country_dict_file() == False):
country_dict = i_d.country_dict(c);
pik.save_country_dict(country_dict);
else:
country_dict = pik.load_country_dict();
#print "Country Dict:";
#print country_dict;
#return 0;
print "\tItems";
self.items_d = i_d.Items_d_creation(c,Min_t,self.item_max_time,self.item_max_diff,country_dict,\
titles_l, tags_l, self.titles_d,\
self.tags_d, int_list, self.int_max_diff, self.imp_d,\
int_periods_t_lim_v, imp_periods_t_lim_v);
#print "Items_d[2828770]:";
#print "titles:";
#print len(self.items_d[2828770][11]);
#print self.items_d[2828770][11];
#print "tags:";
#print len(self.items_d[2828770][12]);
#print self.items_d[2828770][12];
#print "AP score:";
#print self.items_d[2828770][14];
#print "sim_cos_tit and tag";
#print self.items_d[2828770][15];
#print self.items_d[2828770][16];
#print "IKNN_SI:";
#print self.items_d[2828770][18][6][2];
#print "Last_int:";
#print self.items_d[2828770][18][6][0][3245];
#print self.items_d[2828770][18][6][0][1884];
#print self.items_d[2828770][18][6][0][7449];
#print "fos prima:";
#print self.fos_d[1][2];
#print int_periods_t_lim_v;
#b = datetime.datetime.now();
#print "Tempo totale fino ad items dict:\t" + str(b - a);
#return 0;
#self.items_d = 0;
print "\tUsers";
self.users_d = u_d.Users_d_creation(c,country_dict,fos_l,jobroles_l,self.fos_d,self.jobroles_d,\
self.int_max_diff, int_list, self.imp_d, self.items_d,\
int_periods_t_lim_v, imp_periods_t_lim_v);
pik.save_users_dict(self.users_d);
#print "fos dopo:";
#print self.fos_d[1][2];
#print "User 10866:";
#print "Fos:";
#print self.users_d[10866][9];
#print "Jobroles:";
#print self.users_d[10866][10];
#print "Nr_groups:";
#print self.users_d[10866][11];
#print "Interactions:";
#print self.users_d[10866][12];
#print "Impressions:";
#print self.users_d[10866][13];
#print "W_e:";
#print self.users_d[10866][14];
#print "RTCR_score:";
#print self.users_d[10866][15];
#print "AS_score:";
#print self.users_d[10866][16];
b = datetime.datetime.now();
print "Tempo totale creazione dict:\t" + str(b - a);
conn.commit();
conn.close();
def items_score_dict(self):
return self.items_score_d;
def users_dict(self):
return self.users_d;
def items_dict(self):
return self.items_d;
def jobroles_dict(self):
return self.jobroles_d;
def fos_dict(self):
return self.fos_d;
def tags_dict(self):
return self.tags_d;
def titles_dict(self):
return self.titles_d;
def imp_dict(self):
return self.imp_d;
def int_dict(self):
return self.int_d;
def int_m_time(self):
return self.int_max_time;
def int_m_diff(self):
return self.int_max_diff;
def imp_m_time(self):
return self.imp_max_time;
def imp_m_diff(self):
return self.imp_max_diff;
def item_m_diff(self):
        return self.item_max_diff;
| 2.4375 | 2 |
src/Kattis/2021/MSOE_Comp_Prog/Week_9/D/missinggnomes2.py | phungj/MSOE_Comp_Prog_Py | 0 | 12770487 | params = input().split(" ")
params = [int(param) for param in params]
if params[0] == params[1]:
for i in range(1, params[0] + 1):
print(input())
exit(0)
else:
all_gnomes = set(range(1, params[0] + 1))
remaining_gnomes = []
    is_sorted = True
    prev_gnome = -1
    for _ in range(params[1]):
        current_gnome = int(input())
        remaining_gnomes.append(current_gnome)
        all_gnomes.remove(current_gnome)
        if is_sorted and prev_gnome > current_gnome:
            is_sorted = False
        elif is_sorted:
            prev_gnome = current_gnome
    if is_sorted:
print('\n'.join([str(i) for i in range(1, params[0] + 1)]))
exit(0)
else:
missing = False
for missing_gnome in all_gnomes:
for remaining_gnome in remaining_gnomes:
if missing_gnome < remaining_gnome:
print(missing_gnome)
missing = True
break
else:
print(remaining_gnome)
remaining_gnomes = remaining_gnomes[1:]
if missing:
missing = False
continue
else:
print(missing_gnome)
remaining_gnomes = [str(gnome) for gnome in remaining_gnomes]
print('\n'.join(remaining_gnomes))
| 3.484375 | 3 |
sympybotics/kinematics.py | ZhangMeiHuiROS/Sym_RobotD | 117 | 12770488 |
import sympy
_id = lambda x: x
class Kinematics(object):
"""Robot symbolic Jacobians.
kinobj.J: list of link frame Jacobians - complete (6 x N):
[linear_velocity
angular_velocity] = J * joint_velocities
kinobj.Jc: list of link center-of-mass Jacobians - complete
kinobj.Jp: list of link frame Jacobians - linear velocity part only
kinobj.Jo: list of link frame Jacobians - angular velocity part only
kinobj.Jcp: list of link center-of-mass Jacobians - linear part
kinobj.Jco: list of link center-of-mass Jacobians - angular part
"""
def __init__(self, robotdef, geom, ifunc=None):
if not ifunc:
ifunc = _id
self.rbtdef = robotdef
self.geom = geom
self.dof = self.rbtdef.dof
def sym_skew(v):
return sympy.Matrix([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
if self.rbtdef._dh_convention == 'standard':
# extend z and p so that z[-1] and p[-1] return values from base
# frame
z_ext = geom.z + [sympy.Matrix([0, 0, 1])]
p_ext = geom.p + [sympy.zeros(3, 1)]
self.Jp = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jp[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jp[l][0:3, j] = ifunc(z_ext[j - 1])
else:
self.Jp[l][0:3, j] = ifunc(z_ext[j - 1].cross(
(p_ext[l] - p_ext[j - 1])).reshape(3, 1))
self.Jo = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jo[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jo[l][0:3, j] = sympy.zeros(3, 1)
else:
self.Jo[l][0:3, j] = ifunc(z_ext[j - 1])
elif self.rbtdef._dh_convention == 'modified':
self.Jp = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jp[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jp[l][0:3, j] = ifunc(geom.z[j])
else:
self.Jp[l][0:3, j] = ifunc(geom.z[j].cross(
(geom.p[l] - geom.p[j])).reshape(3, 1))
self.Jo = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jo[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jo[l][0:3, j] = sympy.zeros(3, 1)
else:
self.Jo[l][0:3, j] = ifunc(geom.z[j])
self.J = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.J[l] = self.Jp[l].col_join(self.Jo[l])
self.Jcp = list(range(self.rbtdef.dof))
self.Jco = self.Jo
for l in range(self.rbtdef.dof):
self.Jcp[l] = ifunc(self.Jp[l] - sym_skew(
geom.R[l] * sympy.Matrix(self.rbtdef.l[l])) * self.Jo[l])
self.Jc = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jc[l] = self.Jcp[l].col_join(self.Jco[l])
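
# Minimal usage sketch (hypothetical objects; a robot definition `rbtdef` and a
# geometry `geom` built elsewhere with sympybotics are assumed):
#
#     kin = Kinematics(rbtdef, geom)
#     J_ee = kin.J[-1]    # 6 x dof Jacobian of the last link frame
#     twist = J_ee * dq   # stacked [linear_velocity; angular_velocity]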
| 2.71875 | 3 |
deriv_api/easy_future.py | sasikala-binary/python-deriv-api | 2 | 12770489 | <filename>deriv_api/easy_future.py
from __future__ import annotations
import asyncio
from asyncio import Future, CancelledError, InvalidStateError
from typing import Any, Optional, TypeVar, Union, Callable
import weakref
_S = TypeVar("_S")
class EasyFuture(Future):
"""A class that extend asyncio Future class and has some more convenient methods
Just like Promise in JS or Future in Perl"""
def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None, label: Optional[str] = None) -> None:
super().__init__(loop=loop)
if not label:
label = f"Future {id(self)}"
self.label = label
@classmethod
def wrap(cls, future: Future) -> EasyFuture:
"""Wrap an Asyncio Future to a EasyFuture"""
if isinstance(future, cls):
return future
easy_future = cls(loop=future.get_loop())
easy_future.cascade(future)
weak_future = weakref.ref(future)
def cancel_cb(cb_future: Future):
            out_future = weak_future()
            if out_future is None:  # the wrapped future was garbage-collected
                return
            if cb_future.cancelled() and not out_future.done():
try:
cb_future.result()
except CancelledError as err:
out_future.cancel(*err.args)
easy_future.add_done_callback(cancel_cb)
return easy_future
def resolve(self, *args: Any) -> EasyFuture:
"""Set result on the future"""
super().set_result(*args)
return self
def reject(self, *args: Union[type, BaseException]) -> EasyFuture:
"""Set exception on the future"""
super().set_exception(*args)
return self
def is_pending(self) -> bool:
"""Check if the future is pending (not done)"""
return not self.done()
def is_resolved(self) -> bool:
"""check if the future is resolved (result set)"""
return self.done() and not self.cancelled() and not self.exception()
def is_rejected(self) -> bool:
"""check if the future is rejected (exception set)"""
return self.done() and not self.cancelled() and self.exception()
def is_cancelled(self) -> bool:
"""check if the future is cancelled"""
return self.cancelled()
def cascade(self, future: Future) -> EasyFuture:
"""copy another future result to itself"""
if self.done():
raise InvalidStateError('invalid state')
def done_callback(f: Future) -> None:
try:
result = f.result()
self.set_result(result)
except CancelledError as err:
self.cancel(*err.args)
except BaseException as err:
self.set_exception(err)
future.add_done_callback(done_callback)
return self
def then(self, then_callback: Union[Callable[[Any], Any], None], else_callback: Union[Callable[[Any], Any], None] = None) -> EasyFuture:
"""Simulate Perl Future's 'then' function.
Parameters:
then_callback: the cb function that will be called when the original Future is resolved
else_callback: the cb function that will be called when the original Future is rejected,
can be None
Both cb function should return a Future. The Future returned by the function 'then'
will have same result of cb returned Future.
"""
new_future = EasyFuture(loop=self.get_loop())
def done_callback(myself: EasyFuture) -> None:
f: Optional[EasyFuture] = None
if myself.is_cancelled():
new_future.cancel('Upstream future cancelled')
return
if myself.is_rejected() and else_callback:
f = else_callback(myself.exception())
elif myself.is_resolved() and then_callback:
f = then_callback(myself.result())
if f is None:
new_future.cascade(myself)
return
def inside_callback(internal_future: EasyFuture) -> None:
new_future.cascade(internal_future)
f.add_done_callback(inside_callback)
self.add_done_callback(done_callback)
return new_future
def catch(self, else_callback: Callable[[_S], Any]) -> EasyFuture:
"""An variant of 'then' function. it can only get an 'else_cb' which will be run when the future rejected"""
return self.then(None, else_callback)
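
# Illustrative chaining sketch (not part of the original API; assumes a running
# event loop, e.g. inside an async function):
#
#     base = EasyFuture(label='base')
#     chained = base.then(lambda value: EasyFuture().resolve(value + 1))
#     base.resolve(41)
#     # `chained` resolves to 42 once the callback's future completes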
| 2.640625 | 3 |
setup.py | ashb/gh-action-encrypted-secrets | 2 | 12770490 | # Copyright 2020 Astronomer Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from setuptools import find_namespace_packages, setup
def fpath(*parts):
return os.path.join(os.path.dirname(__file__), *parts)
def read(*parts):
return open(fpath(*parts)).read()
def desc():
return read('README.rst')
# https://packaging.python.org/guides/single-sourcing-package-version/
def find_version(*paths):
version_file = read(*paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def or_fallback(fn, *args, fallback, **kwargs):
try:
return fn(*args, **kwargs)
except Exception:
return fallback
VERSION = or_fallback(find_version, 'encryptedsecrets', '__init__.py', fallback='0.0.0-dev1')
setup(
name='encrypted-secrets',
version=VERSION,
url='https://github.com/astronomer/encrypted-python-secrets',
license='Apache2',
author='astronomerio',
author_email='<EMAIL>',
description='Store secrets in an encrypted YAML file, inspired by hiera-eyaml',
long_description=or_fallback(desc, fallback=''),
    long_description_content_type="text/x-rst",
packages=find_namespace_packages(include=('encryptedsecrets', 'encryptedsecrets.*')),
package_data={
'': ['LICENSE'],
},
include_package_data=True,
zip_safe=True,
platforms='any',
entry_points={
'console_scripts': ['encrypted-secrets = encryptedsecrets.__main__:cli']
},
install_requires=[
'encrypteddict',
'pyyaml',
'click'
],
setup_requires=[
'pytest-runner~=4.0',
],
tests_require=[
'encrypted-secrets[test]',
],
extras_require={
'test': [
'pytest',
'pytest-mock',
'pytest-flake8',
],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
],
python_requires='>=3.6',
)
| 1.929688 | 2 |
tests/cli/__init__.py | pcarranzav2/pyuavcan | 0 | 12770491 | <reponame>pcarranzav2/pyuavcan
#
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
#
import sys
import typing
import dataclasses
@dataclasses.dataclass(frozen=True)
class TransportConfig:
cli_args: typing.Sequence[str]
can_transmit: bool
TransportFactory = typing.Callable[[typing.Optional[int]], TransportConfig]
"""
This factory constructs arguments for the CLI instructing it to use a particular transport configuration.
The factory takes one argument - the node-ID - which can be None (anonymous).
"""
def _make_transport_factories_for_cli() -> typing.Iterable[TransportFactory]:
"""
Sensible transport configurations supported by the CLI to test against.
Don't forget to extend when adding support for new transports.
"""
if sys.platform == 'linux':
# CAN via SocketCAN
yield lambda nid: TransportConfig(
cli_args=(f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan0",64),local_node_id={nid})', ),
can_transmit=True,
)
# Redundant CAN via SocketCAN
yield lambda nid: TransportConfig(
cli_args=(
f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan0",8),local_node_id={nid})',
f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan1",32),local_node_id={nid})',
f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan2",64),local_node_id={nid})',
),
can_transmit=True,
)
# Serial via TCP/IP tunnel (emulation)
from tests.transport.serial import VIRTUAL_BUS_URI
yield lambda nid: TransportConfig(
cli_args=(f'--tr=Serial("{VIRTUAL_BUS_URI}",local_node_id={nid})', ),
can_transmit=True,
)
# UDP/IP on localhost (cannot transmit if anonymous)
yield lambda nid: TransportConfig(
cli_args=(f'--tr=UDP("127.0.0.{nid}/8")', ),
can_transmit=True,
) if nid is not None else TransportConfig(
cli_args=('--tr=UDP("127.255.255.255/8")', ),
can_transmit=False,
)
# Redundant UDP+Serial. The UDP transport does not support anonymous transfers.
yield lambda nid: TransportConfig(
cli_args=(
f'--tr=Serial("{VIRTUAL_BUS_URI}",local_node_id={nid})',
(f'--tr=UDP("127.0.0.{nid}/8")' if nid is not None else '--tr=UDP("127.255.255.255/8")'),
),
can_transmit=nid is not None,
)
TRANSPORT_FACTORIES = list(_make_transport_factories_for_cli())
| 2.265625 | 2 |
qbittorrent/connectors.py | CuteFwan/aqbit | 0 | 12770492 | from .errors import TorrentHashNotFound, TorrentNotValid, HttpException
import aiohttp
import asyncio
import json
class AConnector:
def __init__(self, *, base, session = None, loop = None):
self.base = base
self.loop = loop or asyncio.get_event_loop()
self.session = session
async def request(self, method, path : str, *, payload = None):
url = self.base + path
retries = 5
while retries:
async with self.session.request(method, url, data=payload) as r:
data = await r.text(encoding='utf-8')
if r.headers.get('Content-Type', None) == 'application/json':
data = json.loads(data)
if r.status == 200:
"""Everything is fine?"""
return data
elif r.status == 403:
"""Login has probably been invalidated. retry."""
await self.login(self.credentials['username'], self.credentials['password'])
elif r.status == 400:
retries -= 1
print(f'Bad Http request, retrying {retries}')
await asyncio.sleep(1)
elif r.status == 404:
raise TorrentHashNotFound(r, data)
elif r.status == 415:
raise TorrentNotValid(r, data)
else:
return r.status, data
raise HttpException(r, data)
async def login(self, username : str, password : str):
"""
Attempt to log into the web api using a username and password.
Parameters
----------
username: str
The username to log into the web api
password: str
The password to log into the web api
"""
if not self.session:
self.session = aiohttp.ClientSession(cookie_jar=aiohttp.CookieJar(unsafe=True))
payload = {
'username' : username,
'password' : password
}
self.credentials = payload
return await self.request('POST', '/auth/login', payload=self.credentials)
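
    # Illustrative usage sketch (assumed host/port; paths follow the qBittorrent
    # Web API v2 layout used above):
    #
    #     conn = AConnector(base='http://localhost:8080/api/v2')
    #     await conn.login('admin', 'adminadmin')
    #     torrents = await conn.request('GET', '/torrents/info')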
    async def logout(self):
        """Attempt to log out of the web api"""
        result = await self.request('POST', '/auth/logout')
        await self.session.close()
        return result
import requests
class RConnector:
def __init__(self, *, base, session = None):
self.base = base
self.session = session or requests.Session()
    def request(self, method, path : str, *, payload = None):
        url = self.base + path
        retries = 5
        while retries:
            r = self.session.request(method, url, data=payload)
            if r.status_code == 200:
                data = r.text
                if r.headers.get('Content-Type', None) == 'application/json':
                    return json.loads(data)
                return data
            retries -= 1  # non-200 response: retry a few times instead of looping forever
        raise HttpException(r, r.text)
def login(self, username : str, password : str):
if not self.session:
self.session = requests.Session()
payload = {
'username' : username,
'password' : password
}
self.credentials = payload
return self.request('POST', '/auth/login', payload=self.credentials)
def logout(self):
self.session.close() | 2.765625 | 3 |
tests/test_aquamonitor_integration_client.py | NIVANorge/odm2-postgres-api | 0 | 12770493 | import os
import pytest
from httpx import AsyncClient
from odm2_postgres_api.aquamonitor.aquamonitor_client import (
get_method_by_id,
get_project_stations,
get_taxonomy,
get_taxonomy_domain_id,
get_taxonomy_codes,
)
from odm2_postgres_api.aquamonitor.aquamonitor_mapping import METHODS_NIVABASE_MAP
"""
Tests that make real calls to the AquaMonitor API. Since these hit the production
API, only read operations are performed.
"""
@pytest.fixture(scope="function")
async def aquamonitor_client():
username = os.environ["AQUAMONITOR_USER"]
password = os.environ["<PASSWORD>"]
url = "https://test-aquamonitor.niva.no/AquaServices/api"
async with AsyncClient(base_url=url, auth=(username, password)) as client:
yield client
@pytest.mark.asyncio
@pytest.mark.aquamonitor_api_test
async def test_aquamonitor_client_get_method(aquamonitor_client):
method_name = "Kiselalger Relative abundance"
method_id = METHODS_NIVABASE_MAP[method_name]
method = await get_method_by_id(aquamonitor_client, method_id=method_id)
assert method
assert method.Id == method_id
assert method.Laboratory == "NIVA"
assert method.Matrix is None
assert method.MethodRef is None
assert method.Name == method_name
assert method.Unit is None
@pytest.mark.asyncio
@pytest.mark.aquamonitor_api_test
async def test_aquamonitor_client_get_stations(aquamonitor_client):
station_code = "HEDEGL06"
station = await get_project_stations(
aquamonitor_client,
project_name="Overvåkning av Glomma, Vorma og Øyeren",
station_code=station_code,
)
assert station.Name == "<NAME>"
assert station.Id == 57692
assert station.Code == station_code
assert station.Type["Text"] == "Elv"
@pytest.mark.asyncio
@pytest.mark.aquamonitor_api_test
async def test_aquamonitor_get_taxonomy(aquamonitor_client):
taxon_domain = await get_taxonomy_domain_id(aquamonitor_client, "Begroingsalger")
taxa = await get_taxonomy_codes(aquamonitor_client, domain_id=taxon_domain)
bambusina_spp = [t for t in taxa if t.Code == "BAMBUSIZ"][0]
assert bambusina_spp.Id
taxon = await get_taxonomy(aquamonitor_client, domain_name="Begroingsalger", code=bambusina_spp.Code)
# comparing taxon fetched from get_taxonomy and get_taxonomy_codes. Should be equal
assert taxon.Id == bambusina_spp.Id
assert taxon.Code == bambusina_spp.Code
assert taxon.Name == bambusina_spp.Name
# TODO: would like to assert that the two objects are equal, but they do indeed differ on domain. This fails:
# assert taxon.Domain == bambusina_spp.Domain
# assert taxon.Taxonomy == bambusina_spp.Taxonomy
| 2.421875 | 2 |
testproj/testproj/util.py | MilanPecov/drf-yasg | 2,788 | 12770494 | from django.templatetags.static import static
from django.utils.functional import lazy
static_lazy = lazy(static, str)
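# e.g. (illustrative) static_lazy('img/logo.png') defers URL resolution until the
# value is first coerced to str, which is useful at import time before the
# staticfiles storage is fully configured.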
| 1.40625 | 1 |
src/werdich_cfr/models/Cnn3D.py | awerdich/werdich-cfr | 0 | 12770495 | <reponame>awerdich/werdich-cfr
# Imports
from tensorflow.keras import layers, Model
from tensorflow.keras.layers import BatchNormalization, Conv3D, MaxPooling3D, Dense, Flatten
class Convmodel:
    kreg = None  # e.g. regularizers.l2(0.001)
pad = 'valid'
strd = None
def __init__(self, model_dict):
# NETWORK PARAMETERS AS DICTIONARY
self.im_size = model_dict['im_size']
self.n_frames = model_dict['n_frames']
self.cl_outputs = model_dict['cl_outputs']
self.filters = model_dict['filters']
self.fc_nodes = model_dict['fc_nodes']
self.pool_nodes = model_dict['pool_nodes'] # Filters in the 1x1x1 convolutional pooling layer
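
    # Example parameter dictionary (illustrative values only):
    #     model_dict = {'im_size': (200, 200, 1), 'n_frames': 30, 'cl_outputs': 3,
    #                   'filters': 32, 'fc_nodes': 256, 'pool_nodes': 64}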
def video_encoder(self):
video = layers.Input(shape = (self.n_frames, *self.im_size), name = 'video')
# Block 1
x = Conv3D(self.filters, (3, 3, 3), activation='relu')(video)
x = BatchNormalization()(x)
x = MaxPooling3D(pool_size=(1, 2, 2), strides=None)(x)
# Block 2
x = Conv3D(self.filters, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
# Block 3
x = Conv3D(self.filters, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling3D(pool_size=(1, 2, 2), strides=None)(x)
# Block 4
x = Conv3D(self.filters* 2, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
# Block 5
x = Conv3D(self.filters * 2, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling3D(pool_size=(1, 2, 2), strides=None)(x)
# Block 6
x = Conv3D(self.filters * 4, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
# Block 7
x = Conv3D(self.filters * 4, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling3D(pool_size=(2, 1, 2), strides=None)(x)
# Block 8
x = Conv3D(self.filters * 8, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
# Block 9
x = Conv3D(self.filters * 8, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
# Block 10
x = Conv3D(self.filters * 8, (3, 3, 3), activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling3D(pool_size=(2, 1, 2))(x)
# Reduce model complexity by 1x1x1 convolution
# replaces x = Dense(self.fc_nodes, activation='relu')(x)
x = Conv3D(self.pool_nodes, (1, 1, 1), activation = 'relu')(x)
# Flatten for output
x = Flatten()(x)
x = BatchNormalization()(x)
# Categorical outputs classification
net_cat = Dense(self.fc_nodes, activation='relu')(x)
net_cat = BatchNormalization()(net_cat)
class_output = Dense(self.cl_outputs, activation = 'softmax', name = 'class_output')(net_cat)
# Regression output
net_cfr = Dense(self.fc_nodes, activation='relu')(x)
net_cfr = BatchNormalization()(net_cfr)
score_output = Dense(1, name='score_output')(net_cfr)
# Combined classification (net_cat) and regression (net_cfr) outputs
model = Model(inputs=video, outputs=[class_output, score_output])
return model
| 2.296875 | 2 |
DailyProgrammer/DP20120706B.py | DayGitH/Python-Challenges | 2 | 12770496 | """
Write a program that, given an ASCII binary matrix of 0's and 1's like this:
0000000000000000
0000000000000000
0000011001110000
0000001111010000
0000011001110000
0000011011100000
0000000000110000
0000101000010000
0000000000000000
0000000000000000
0000000000000000
Outputs the smallest cropped sub-matrix that still contains all 1's (that is, remove all borders of 0's):
01100111
00111101
01100111
01101110
00000011
10100001
"""
def main():
matrix = ("0000000000000000\n"
"0000000000000000\n"
"0000011001110000\n"
"0000001111010000\n"
"0000011001110000\n"
"0000011011100000\n"
"0000000000110000\n"
"0000101000010000\n"
"0000000000000000\n"
"0000000000000000\n"
"0000000000000000")
matrix = matrix.split('\n')
top = -1
bottom = 0
right = 0
left = len(matrix[0])
for n, m in enumerate(matrix):
if '1' in m:
if top == -1:
top = n
bottom = n
left_find = m.find('1')
right_find = m.rfind('1')
if left_find < left:
left = left_find
if right_find > right:
right = right_find
for m in matrix[top:bottom+1]:
print(m[left:right+1])
if __name__ == "__main__":
main()
| 3.65625 | 4 |
resp.py | ainilili/ocr-server | 1 | 12770497 | import json
class RespVo:
def __init__(self):
self.code = 0
self.msg = ''
self.data = {} | 2.109375 | 2 |
app/qnn_builder.py | mahabubul-alam/qnn_circuits_pennylane | 10 | 12770498 | import pennylane as qml
import numpy as np
if __name__ != '__main__':
from . encoder.encoding_circuits import EncodingCircuitsPennylane
from . pqc.parametric_circuits import ParametricCircuitsPennylane
from . measurement.measurement_circuits import MeasurementCircuitsPennylane
class PennylaneQNNCircuit:
def __init__(self, enc = 1, pqc = 1, meas = 1, layers = 1, qubit = 1):
'''
initialize variables
'''
self.enc = enc
self.pqc = pqc
self.meas = meas
self.qubit = qubit
self.layers = layers
self.pqc_builder = ParametricCircuitsPennylane(pqc = self.pqc, qubit = self.qubit, layers = self.layers)
self.enc_builder = EncodingCircuitsPennylane(enc = self.enc, qubit = self.qubit)
self.meas_builder = MeasurementCircuitsPennylane(meas = self.meas, qubit = self.qubit)
def construct_qnn_circuit(self, inputs, weights0, weights1 = 0):
assert len(inputs) <= self.enc_builder.max_inputs_length()
pqc_weights_shape = self.pqc_builder.weigths_shape()
if isinstance(pqc_weights_shape[0], tuple):
assert weights0.shape == pqc_weights_shape[0]
assert weights1.shape == pqc_weights_shape[1]
else:
assert weights0.shape == pqc_weights_shape
self.enc_builder.get_encoder(inputs)
self.pqc_builder.get_pqc(weights0, weights1 = weights1)
return self.meas_builder.get_meas()
if __name__ == '__main__':
from encoder.encoding_circuits import EncodingCircuitsPennylane
from pqc.parametric_circuits import ParametricCircuitsPennylane
from measurement.measurement_circuits import MeasurementCircuitsPennylane
qnn = PennylaneQNNCircuit(enc = 5, qubit = 5, layers = 2, pqc = 19, meas = 3)
input_length = qnn.enc_builder.max_inputs_length()
weight_shape = qnn.pqc_builder.weigths_shape()
inputs = np.random.random(input_length)
dev = qml.device("default.qubit", wires = 10) #target pennylane device
qnode = qml.QNode(qnn.construct_qnn_circuit, dev) #circuit
if isinstance(weight_shape[0], tuple):
weights0 = np.random.random(weight_shape[0])
weights1 = np.random.random(weight_shape[1])
qnode(inputs, weights0, weights1)
else:
weights = np.random.random(weight_shape)
qnode(inputs, weights)
print(qnode.draw())
| 2.34375 | 2 |
engine.py | gaetanV/python | 0 | 12770499 | pointer=-1
lock=0
def setSeq(data) :
if lock == 1:
exit(2)
globals()['pointer']=-1
globals()['data']=data.split(',')
globals()['lock']=1
def raw_input() :
globals()['pointer']+=1
try:
return data[pointer]
except IndexError:
exit(0)
def open(path,mode) :
exit(3)
| 2.96875 | 3 |
raft_asyncio/server.py | aratz-lasa/py-raft | 0 | 12770500 | import asyncio
from typing import List
from . import utils
from .abc import IRaftServer
from .errors import *
from .rpc import protocol as prot
from .rpc import rpc
from .state_machine import RaftStateMachine, State, Command
ELECTION_TIMEOUT = 0.5
FLEXIBLE_PAXOS_QUORUM = 2 / 6
RPC_TIMEOUT = 1
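# Illustrative quorum arithmetic (sketch): with FLEXIBLE_PAXOS_QUORUM = 2/6 and a
# 6-node cluster, replication commits after int(6 * 2/6) = 2 acknowledgements,
# while an election needs int(6 * (1 - 2/6) + 1) = 5 votes, so the two quorums
# always intersect (the Flexible Paxos condition).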
class ClusterMember:
def __init__(self, ip: str, port: int):
self.ip = ip
self.port = port
self.id = utils.get_id(self, ip, port)
class Server(ClusterMember):
def __init__(self, ip: str, port: int, cluster):
super().__init__(ip, port)
self._cluster: List[rpc.RemoteRaftServer] = cluster or []
self._leader = None
self._leader_hbeat = asyncio.Event()
self._leader_volatile_state_data = None
self._listener_task = None
async def _start_listening(self):
self._listener_task = await asyncio.start_server(
self._handle_request, self.ip, self.port
)
async def _handle_request(self, reader, writer):
message = await prot.read_decode_msg(reader)
        try:
            if isinstance(self, RaftServer):
                await rpc.handle_request(self, (reader, writer), message)
            else:
                raise TypeError("Invalid Server instance")
        finally:
            writer.close()  # close the connection even if handling fails
class RaftServer(IRaftServer, Server, RaftStateMachine):
def __init__(
self, ip: str, port: int, cluster, state: State = State.FOLLOWER, log=None
):
Server.__init__(self, ip, port, cluster)
RaftStateMachine.__init__(self, state, log)
        self._voted_for = None  # candidateId that received a vote in the current term
        # for each server, index of the next log entry to send to that server
        self._next_indexes = {}
        # for each server, index of the highest log entry known to be replicated on that server
        self._match_indexes = {}
        # Queue where commands waiting for the commit process to start are stored
        self._commands_queue = asyncio.Queue()
        # Lock for maintaining order when several AppendEntries RPC calls are sent to the same server
        self._cluster_locks = {}
self._election_task = None
self._timeout_task = None
self._entries_task = None
self._hbeat_task = None
self._append_tasks = (
{}
) # TODO: every cluster member has a list of tasks running
# TODO: init tasks
async def update_state(self, key, value):
command = Command(key, value)
if self._leader is self:
await self._queue_command(command)
else:
while True:
try:
await self._leader.command_request(command)
break
except TermConsistencyError as error:
self._current_term = error.term
pass # TODO: do something else?
except LeaderConsistencyError as error:
                    self._leader = list(
                        filter(lambda s: s.id == error.leader_id, self._cluster)
                    )[0]
async def join_cluster(self, random_server: ClusterMember):
if random_server:
remote_server = rpc.RemoteRaftServer(random_server.ip, random_server.port)
            self._cluster, leader_id = await remote_server.get_cluster_configuration()
            self._leader = list(filter(lambda s: s.id == leader_id, self._cluster))[0]
            if self not in self._cluster:
                self._cluster.append(self)
# TODO: init configuration change
else:
pass # TODO: means it already was in the cluster, but it had crushed
else:
pass # TODO: first cluster member
async def leave_cluster(self):
        self._cluster.remove(self)
# TODO: init configuration change
async def remove_cluster_member(self, id):
        self._cluster = list(filter(lambda s: s.id != id, self._cluster))
# TODO: init configuration change
async def _run_timeout_task(self):
while True:
try:
await asyncio.wait_for(
self._leader_hbeat.wait(), timeout=ELECTION_TIMEOUT
) # TODO: random timeout
except asyncio.TimeoutError:
                self._change_state(State.CANDIDATE)  # _change_state is synchronous
finally:
self._leader_hbeat.clear()
    async def _run_entries_task(self):
        # Long-running leader task: replicate each queued command and commit it
        # once the replication quorum acknowledges.
        while True:
            command = await self._commands_queue.get()
            self._append_command(command)
            rpc_calls = []
            for server in filter(lambda s: s is not self, self._cluster):
                append_task = asyncio.create_task(
                    self._append_entry_task(server, len(self._log) - 1)
                )
                rpc_calls.append(append_task)
                self._append_tasks[server.id].append(append_task)
            committed_amount = 1  # Starts on '1' because of itself
            for rpc_call in asyncio.as_completed(rpc_calls):
                await rpc_call
                committed_amount += 1
                if committed_amount >= int(len(self._cluster) * FLEXIBLE_PAXOS_QUORUM):
                    self._commit_command(command)
                    break
async def _run_hbeat_task(self):
while True:
await self._send_hbeat()
await asyncio.sleep(
ELECTION_TIMEOUT * 0.9
) # Just in case there is high latency
async def _run_election_task(self):
self._current_term += 1
self._leader_hbeat.set()
last_log_index = self._last_applied
last_log_term = (
0 if not len(self._log) > last_log_index else self._log[last_log_index].term
)
voting_rpcs = list(
map(
lambda s: asyncio.create_task(
s.request_vote(
self._current_term, self.id, last_log_index, last_log_term
)
),
filter(lambda s: s is not self, self._cluster),
)
)
granted_votes = 1 # 1 -> its own vote
votes = 1
election_win = False
for next_vote in asyncio.as_completed(voting_rpcs, timeout=RPC_TIMEOUT):
try:
                vote = await next_vote
granted_votes += int(vote)
except asyncio.TimeoutError:
pass
votes += 1
if granted_votes >= int(
len(self._cluster) * (1 - FLEXIBLE_PAXOS_QUORUM) + 1
): # Equal because itself is not considered
election_win = True
if election_win:
self._change_state(State.LEADER)
async def _queue_command(self, command: Command):
await self._commands_queue.put(command)
async def _send_hbeat(self):
        for server in filter(lambda s: s != self, self._cluster):
task = asyncio.create_task(
server.append_entries(
self._current_term,
self.id,
self._last_applied,
self._log[self._last_applied].term,
None,
self._commit_index,
)
)
self._append_tasks[server.id].append(task)
def _change_state(self, new_state: State):
if new_state is State.FOLLOWER:
self._cancel_leader_tasks()
self._cancel_candidate_tasks()
self._timeout_task = asyncio.create_task(self._run_timeout_task())
self._state = State.FOLLOWER
elif new_state is State.LEADER:
if self._timeout_task and not self._timeout_task.cancelled():
self._timeout_task.cancel()
self._hbeat_task = asyncio.create_task(self._run_hbeat_task())
            self._entries_task = asyncio.create_task(self._run_entries_task())
self._state = State.LEADER
elif new_state is State.CANDIDATE:
self._cancel_leader_tasks()
self._cancel_candidate_tasks()
self._election_task = asyncio.create_task(self._run_election_task())
self._state = State.CANDIDATE
def _cancel_leader_tasks(self):
if self._hbeat_task and not self._hbeat_task.cancelled():
self._hbeat_task.cancel()
if self._entries_task and not self._entries_task.cancelled():
self._entries_task.cancel()
self._cancel_append_tasks()
self._cluster_locks.clear()
def _cancel_append_tasks(self):
for server_tasks in self._append_tasks.values():
for task in server_tasks:
if not task.cancelled():
task.cancel()
server_tasks.clear()
def _cancel_candidate_tasks(self):
if self._election_task and not self._election_task.cancelled():
self._election_task.cancel()
def _im_leader(self):
return self._state is State.LEADER
async def _append_entry_task(
self, server: rpc.RemoteRaftServer, entries_index: int
):
async with self._cluster_locks[server.id]:
while True:
try:
await server.append_entries(
self._current_term,
self.id,
max(entries_index - 1, 0),
self._log[max(entries_index - 1, 0)].term,
self._log[entries_index:],
self._commit_index,
)
break
except TermConsistencyError as error:
self._current_term = error.term
self._change_state(State.FOLLOWER)
break
except EntriesConsistencyError:
entries_index = max(entries_index - 1, 0)
except:
pass # Network error, so retry until it answers
| 2.3125 | 2 |
train_speaker.py | airbert-vln/airbert | 17 | 12770501 | """
Train a speaker model on R2R
"""
import logging
from typing import List, Tuple, Dict
import copy
import os
import random
import shutil
import sys
from datetime import datetime
from tqdm import tqdm
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.optim.lr_scheduler import MultiplicativeLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from apex.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, BertTokenizer
from vilbert.optimization import AdamW, WarmupLinearSchedule
from vilbert.vilbert import BertConfig
from airbert import Airbert
from utils.cli import get_parser
from utils.dataset import PanoFeaturesReader
from utils.dataset.speak_dataset import SpeakDataset
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
Batch = Dict[str, torch.Tensor]
def main():
# ----- #
# setup #
# ----- #
# command line parsing
parser = get_parser(training=True, speaker=True)
args = parser.parse_args()
# FIXME how to do it properly in bash?
args.perturbations = [p for pert in args.perturbations for p in pert.split(" ")]
# validate command line arguments
if not (args.masked_vision or args.masked_language) and args.no_ranking:
parser.error(
"No training objective selected, add --masked_vision, "
"--masked_language, or remove --no_ranking"
)
# set seed
if args.seed:
seed = args.seed
if args.local_rank != -1:
seed += args.local_rank
torch.manual_seed(seed)
np.random.seed(seed) # type: ignore
random.seed(seed)
# get device settings
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Initializes the distributed backend which will take care of synchronizing
# nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
dist.init_process_group(backend="nccl")
n_gpu = 1
# check if this is the default gpu
default_gpu = True
if args.local_rank != -1 and dist.get_rank() != 0:
default_gpu = False
if default_gpu:
logger.info(f"Playing with {n_gpu} GPUs")
# create output directory
save_folder = os.path.join(args.output_dir, f"run-{args.save_name}")
if default_gpu and not os.path.exists(save_folder):
os.makedirs(save_folder)
# ------------ #
# data loaders #
# ------------ #
tokenizer = AutoTokenizer.from_pretrained(args.bert_tokenizer)
if not isinstance(tokenizer, BertTokenizer):
raise ValueError("fix mypy")
features_reader = PanoFeaturesReader(args.img_feature)
vln_path = f"data/task/{args.prefix}R2R_train.json"
if default_gpu:
logger.info("using provided training trajectories")
logger.info(f"VLN path: {vln_path}")
if default_gpu:
logger.info("Loading train dataset")
train_dataset: Dataset = SpeakDataset(
vln_path=vln_path,
skeleton_path="np_train.json" if args.np else "",
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=default_gpu,
)
if default_gpu:
logger.info("Loading val datasets")
val_seen_dataset = SpeakDataset(
vln_path=f"data/task/{args.prefix}R2R_val_seen.json",
skeleton_path="np_val_seen.json" if args.np else "",
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=default_gpu,
)
val_unseen_dataset = SpeakDataset(
vln_path=f"data/task/{args.prefix}R2R_val_unseen.json",
skeleton_path="np_val_unseen.json" if args.np else "",
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=default_gpu,
)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
val_seen_sampler = SequentialSampler(val_seen_dataset)
val_unseen_sampler = SequentialSampler(val_unseen_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
val_seen_sampler = DistributedSampler(val_seen_dataset)
val_unseen_sampler = DistributedSampler(val_unseen_dataset)
# adjust the batch size for distributed training
batch_size = args.batch_size // args.gradient_accumulation_steps
if args.local_rank != -1:
batch_size = batch_size // dist.get_world_size()
if default_gpu:
logger.info(f"batch_size: {batch_size}")
if default_gpu:
logger.info(f"Creating dataloader")
# create data loaders
train_data_loader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
val_seen_data_loader = DataLoader(
val_seen_dataset,
sampler=val_seen_sampler,
shuffle=False,
batch_size=batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
val_unseen_data_loader = DataLoader(
val_unseen_dataset,
sampler=val_unseen_sampler,
shuffle=False,
batch_size=batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
# ----- #
# model #
# ----- #
if default_gpu:
logger.info(f"Loading model")
config = BertConfig.from_json_file(args.config_file)
config.cat_highlight = args.cat_highlight # type: ignore
config.convert_mask = True # type: ignore
if len(args.from_pretrained) == 0: # hack for catching --from_pretrained ""
model = Airbert(config)
else:
model = Airbert.from_pretrained(
args.from_pretrained, config, default_gpu=default_gpu
)
if default_gpu:
logger.info(
f"number of parameters: {sum(p.numel() for p in model.parameters())}"
)
# move/distribute model to device
model.to(device)
if args.local_rank != -1:
model = DDP(model, delay_allreduce=True)
if default_gpu:
logger.info("using distributed data parallel")
# elif n_gpu > 1:
# model = torch.nn.DataParallel(model) # type: ignore
# if default_gpu:
# logger.info("using data parallel")
# ------------ #
# optimization #
# ------------ #
# set parameter specific weight decay
no_decay = ["bias", "LayerNorm.weight", "LayerNorm.bias"]
optimizer_grouped_parameters = [
{"params": [], "weight_decay": 0.0},
{"params": [], "weight_decay": args.weight_decay},
]
for name, param in model.named_parameters():
if any(nd in name for nd in no_decay):
optimizer_grouped_parameters[0]["params"].append(param)
else:
optimizer_grouped_parameters[1]["params"].append(param)
# optimizer
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate,)
# calculate learning rate schedule
t_total = (
len(train_data_loader) // args.gradient_accumulation_steps
) * args.num_epochs
warmup_steps = args.warmup_proportion * t_total
adjusted_t_total = warmup_steps + args.cooldown_factor * (t_total - warmup_steps)
scheduler = (
WarmupLinearSchedule(
optimizer,
warmup_steps=warmup_steps,
t_total=adjusted_t_total,
last_epoch=-1,
)
if not args.no_scheduler
else MultiplicativeLR(optimizer, lr_lambda=lambda epoch: 1.0) # type: ignore
)
# --------------- #
# before training #
# --------------- #
# save the parameters
if default_gpu:
with open(os.path.join(save_folder, "config.txt"), "w") as fid:
print(f"{datetime.now()}", file=fid)
print("\n", file=fid)
print(vars(args), file=fid)
print("\n", file=fid)
print(config, file=fid)
# loggers
if default_gpu:
writer = SummaryWriter(
log_dir=os.path.join(save_folder, "logging"), flush_secs=30
)
else:
writer = None
# -------- #
# training #
# -------- #
# run training
if default_gpu:
logger.info("starting training...")
best_seen_success_rate, best_unseen_success_rate = 0, 0
for epoch in range(args.num_epochs):
if default_gpu and args.debug:
logger.info(f"epoch {epoch}")
if args.local_rank > -1:
train_data_loader.sampler.set_epoch(epoch) # type: ignore
# train for one epoch
train_epoch(
epoch,
model,
optimizer,
scheduler,
train_data_loader,
writer,
default_gpu,
args,
)
if default_gpu and args.debug:
logger.info(f"saving the model")
# save the model every epoch
model_path = os.path.join(save_folder, f"pytorch_model_{epoch + 1}.bin")
if default_gpu:
model_state = (
model.module.state_dict() # type: ignore
if hasattr(model, "module")
else model.state_dict()
)
torch.save(
{
"model_state_dict": model_state,
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"epoch": epoch,
},
model_path
)
if default_gpu and args.debug:
logger.info(f"running validation")
# run validation
global_step = (epoch + 1) * len(train_data_loader)
# run validation on the "val seen" split
with torch.no_grad():
seen_success_rate = val_epoch(
epoch,
model,
"val_seen",
val_seen_data_loader,
writer,
default_gpu,
args,
global_step,
)
if default_gpu:
logger.info(
f"[val_seen] epoch: {epoch + 1} success_rate: {seen_success_rate.item():.3f}"
)
# save the model that performs the best on val seen
if seen_success_rate > best_seen_success_rate:
best_seen_success_rate = seen_success_rate
if default_gpu:
best_seen_path = os.path.join(
save_folder, "pytorch_model_best_seen.bin"
)
shutil.copyfile(model_path, best_seen_path) # type: ignore
# run validation on the "val unseen" split
with torch.no_grad():
unseen_success_rate = val_epoch(
epoch,
model,
"val_unseen",
val_unseen_data_loader,
writer,
default_gpu,
args,
global_step,
)
if default_gpu:
logger.info(
f"[val_unseen] epoch: {epoch + 1} success_rate: {unseen_success_rate.item():.3f}"
)
# save the model that performs the best on val unseen
if unseen_success_rate > best_unseen_success_rate:
best_unseen_success_rate = unseen_success_rate
if default_gpu:
best_unseen_path = os.path.join(
save_folder, "pytorch_model_best_unseen.bin"
)
shutil.copyfile(model_path, best_unseen_path)
# -------------- #
# after training #
# -------------- #
if default_gpu:
writer.close()
def rollout(
    batch: Batch, model: nn.Module, window: int
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
    We split the batch over windows of $window tokens.
    This reduces the memory footprint of a single forward pass.
"""
# get the model input and output
instruction_length = batch["target_tokens"].shape[1]
batch_size = get_batch_size(batch)
device = get_device(batch)
inputs = get_model_input(batch)
# import ipdb
# ipdb.set_trace()
# B, N
target = get_target(batch) # inputs["instr_tokens"][:, 0]
# B, N, N
pred_mask = get_mask_predictions(batch)
    # B, N: True at [SEP] (token id 102) and [PAD] (token id 0) positions
    pad_or_sep = (batch["target_tokens"] == 102) | (batch["target_tokens"] == 0)
pad_or_sep = pad_or_sep.squeeze(1)
map_loss = torch.tensor(0.).to(device)
map_correct = torch.tensor(0.).to(device)
map_batch_size = torch.tensor(0.).to(device)
for start in range(0, instruction_length, window):
small_inputs = {
key: tensor[:, start: start+ window].flatten(0, 1) for key, tensor in inputs.items()
}
small_target = target[:, start+1:start+window+1].flatten()
output = model(**small_inputs)
# N * W * B
small_mask = pred_mask[:, start : start + window].flatten()
# N * W * B x V
predictions = output[2].view(-1, output[2].shape[-1])
# W * B x V
predictions = predictions[small_mask]
# W x B
instr = predictions.argmax(1).view(batch_size, -1)
# calculate the final loss on non-padding tokens
loss = F.cross_entropy(predictions, small_target, ignore_index=0)
# backward pass
if model.training:
loss.backward()
# calculate accuracy
# remove pad tokens and sep tokens
small_pad = pad_or_sep[0,start+1: start+window+1 ].flatten()
correct = torch.sum(instr.flatten()[small_pad] == small_target[small_pad]).detach().float()
# calculate accumulated stats
map_batch_size += batch_size
map_loss += loss.detach().float()
map_correct += correct.detach().float()
map_loss = torch.true_divide(map_loss.sum(), map_batch_size) # type: ignore
map_correct = torch.true_divide(map_correct.sum(), map_batch_size) # type: ignore
return map_batch_size.float(), map_loss.float(), map_correct.float()
def train_epoch(
epoch, model, optimizer, scheduler, data_loader, writer, default_gpu, args
) -> None:
device = next(model.parameters()).device
model.train()
batch: Batch
    for step, batch in enumerate(tqdm(data_loader, disable=False)):  # previously: disable=not default_gpu
# load batch on gpu
batch = {
k: t.cuda(device=device, non_blocking=True) if hasattr(t, "cuda") else t
for k, t in batch.items()
}
batch_size, loss, correct = rollout(batch, model, args.window)
if args.gradient_accumulation_steps > 1:
loss /= args.gradient_accumulation_steps
correct /= args.gradient_accumulation_steps
# write stats to tensorboard
if default_gpu:
global_step = step + epoch * len(data_loader)
writer.add_scalar("loss/train", loss.float(), global_step=global_step)
writer.add_scalar(
"accuracy/train",
correct.float(),
global_step=global_step,
)
writer.add_scalar(
"learning_rate/train", scheduler.get_lr()[0], global_step=global_step
)
if args.local_rank != -1:
world_size = float(dist.get_world_size())
loss /= world_size
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
dist.all_reduce(correct, op=dist.ReduceOp.SUM)
dist.all_reduce(batch_size, op=dist.ReduceOp.SUM)
if default_gpu and args.debug:
logger.info(
f"[train] step: {step + 1} "
f"loss: {loss:0.2f} "
f"accuracy: {correct / batch_size:0.2f} "
f"lr: {scheduler.get_lr()[0]:0.1e}"
)
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
def val_epoch(epoch: int, model, tag, data_loader, writer, default_gpu, args, global_step):
device = next(model.parameters()).device
# validation
model.eval()
stats = torch.zeros(3, device=device).float()
for step, batch in enumerate(data_loader):
# load batch on gpu
batch = {
k: t.cuda(device=device, non_blocking=True) if hasattr(t, "cuda") else t
for k, t in batch.items()
}
# get the model output
batch_size, loss, correct = rollout(batch, model, args.window)
# accumulate
stats[0] += loss
stats[1] += correct
stats[2] += batch_size
if default_gpu and args.debug:
logger.info(
f"[{tag}] step: {step + 1} "
f"running loss: {stats[0] / stats[2]:0.2f} "
f"running success rate: {stats[1] / stats[2]:0.2f}"
)
if args.local_rank != -1:
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
# write stats to tensorboard
if default_gpu:
writer.add_scalar(
f"loss/vce_{tag}", stats[0] / stats[2], global_step=global_step
)
writer.add_scalar(
f"accuracy/sr_{tag}", stats[1] / stats[2], global_step=global_step
)
return stats[1] / stats[2]
# ------------- #
# batch parsing #
# ------------- #
# batch format:
# 1:image_features, 2:image_locations, 3:image_mask,
# 5:image_targets_mask, 6:instr_tokens, 7:instr_mask, 8:instr_targets, 9:instr_highlights, 10:segment_ids,
# 11:co_attention_mask, 12:item_id
def get_instr_length(batch: Batch):
return batch["instr_tokens"].shape[1]
def get_instr_mask(batch: Batch) -> torch.Tensor:
return batch["instr_mask"].squeeze(1)
def get_model_input(batch: Batch) -> Dict[str, torch.Tensor]:
batch_size = get_batch_size(batch)
num_tokens = get_instr_length(batch)
# duplicate for each word token
image_features = batch["image_features"].unsqueeze(1).repeat(1, num_tokens - 1, 1, 1)
image_locations = batch["image_boxes"].unsqueeze(1).repeat(1, num_tokens - 1, 1, 1)
image_mask = batch["image_masks"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
instr_tokens = batch["instr_tokens"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
segment_ids = batch["segment_ids"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
instr_mask = batch["instr_mask"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
# create triangular masks
tri = (
torch.ones((num_tokens - 1, num_tokens))
.tril(0)
.bool()
.repeat(batch_size, 1, 1)
        .transpose(0, 1)
.reshape(-1, num_tokens)
.to(instr_mask.device)
)
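    # Illustrative note (added): for num_tokens = 4 the (3, 4) base mask is
    # [[1,0,0,0],
    #  [1,1,0,0],
    #  [1,1,1,0]]
    # before batching, i.e. the k-th unrolled copy of the instruction can only
    # attend to tokens at positions <= k (standard causal masking).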
instr_mask = torch.logical_and(instr_mask, tri) # type: ignore
# transform batch shape
co_attention_mask = batch["co_attention_mask"].view(
-1, batch["co_attention_mask"].size(2), batch["co_attention_mask"].size(3)
)
return {
"instr_tokens": instr_tokens,
"image_features": image_features,
"image_locations": image_locations,
"token_type_ids": segment_ids,
"attention_mask": instr_mask,
"image_attention_mask": image_mask,
"co_attention_mask": co_attention_mask,
}
def get_batch_size(batch: Batch):
return batch["instr_tokens"].shape[0]
def get_target(batch: Batch) -> torch.Tensor:
return batch["target_tokens"]
def get_device(batch: Batch):
return batch["instr_tokens"].device
def get_mask_predictions(batch: Batch) -> torch.Tensor:
target_length = batch["target_tokens"].shape[1]
instruction_length = get_instr_length(batch) - target_length
batch_size = get_batch_size(batch)
device = get_device(batch)
diag = torch.diag(torch.tensor([1] * instruction_length), diagonal=target_length).bool().to(device)
diag = diag[:-target_length]
diag[-1] = 0
diag = diag.repeat(batch_size, 1, 1)
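    # Illustrative note (added): torch.diag(..., diagonal=target_length) builds a
    # square matrix with ones offset target_length columns to the right; after the
    # trimming above, each row selects exactly one prediction position per copy.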
return diag
if __name__ == "__main__":
main()
| 2.25 | 2 |
problem_1.py | adamkells/project_euler | 0 | 12770502 | result = 0
for i in range(1000):
if i%3==0 or i%5==0:
result += i
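# Equivalent one-liner (illustrative):
# sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)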
print(result) | 3.78125 | 4 |
PiCN/Layers/LinkLayer/FaceIDTable/test/__init__.py | NikolaiRutz/PiCN | 0 | 12770503 | <gh_stars>0
"""Tests for the FaceIDTable"""
| 0.914063 | 1 |
cross_validation.py | JeffreyCNL/hidden_markov | 0 | 12770504 | <reponame>JeffreyCNL/hidden_markov
# this is to use cross validation to obtain the number of states
import numpy as np
from sklearn.model_selection import KFold
from hmm_class import hmm
import random
from sklearn.preprocessing import normalize
# load the obs and split into k fold.
def split_load_data(filename, k_splits):
obs_seq = np.loadtxt(filename, dtype = int)
kf = KFold(n_splits = k_splits, shuffle = False)
for train_index, test_index in kf.split(obs_seq):
obs_train, obs_test = obs_seq[train_index], obs_seq[test_index]
return obs_train, obs_test
def sts_seq_generate(N, size_data, len_obs): # N states
sts_seq = np.zeros((size_data, len_obs))
for i in range(size_data):
for j in range(len_obs):
sts_seq[i][j] = random.randint(0,N-1)
return sts_seq
def param_generate(n, obs_seq):
    size_data = len(obs_seq)     # number of observation sequences (rows)
    len_obs = len(obs_seq[0])    # length of each sequence (assumes equal lengths)
sts_seq = sts_seq_generate(n, size_data, len_obs)
return size_data, len_obs, sts_seq
def em_prob_generate(n, m): # n:# states, m: # obs
em_prob = np.zeros((n,m))
for i in range(n):
for j in range(m):
em_prob[i][j] = np.random.uniform(0,1)
em_prob = normalize(em_prob, axis = 1, norm = 'l1')
return np.asmatrix(em_prob)
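# Equivalent vectorized form (illustrative):
#   em_prob = normalize(np.random.uniform(size=(n, m)), axis=1, norm='l1')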
def trans_prob_generate(n): # n:# states
trans_prob = np.zeros((n,n))
for i in range(n):
for j in range(n):
trans_prob[i][j] = np.random.uniform(0,1)
trans_prob = normalize(trans_prob, axis = 1, norm = 'l1')
return np.asmatrix(trans_prob)
def pi_generate(n):
pi = np.zeros(n)
for i in range(n):
pi[i] = np.random.uniform(0,1)
pi = normalize([pi], axis = 1, norm = 'l1')
return np.asmatrix(pi)
def predict_obs(first_sts,ep, tp):
predicted_obs = []
next_sts = np.argmax(tp[int(first_sts),:]) # from the first to compute 2nd states
for _ in range(len_obs_test):
# from the next sts to find out the argmax of the index
        # so that we know the most likely emission output
out = np.argmax(ep[next_sts,:])
# print("obs index", out)
predicted_obs.append(uniq_obs[out]) # from the output list to take out the obs
next_sts = np.argmax(tp[next_sts,:]) # update the next sts
# print("next states: ", next_sts)
return predicted_obs
def loss_count(hidden_sts, predicted_obs):
loss = 0
hidden_sts = [int(i) for i in hidden_sts]
for i in range(len(hidden_sts)):
if hidden_sts[i] != predicted_obs[i]:
loss += 1
return loss
if __name__ == '__main__':
n = 6 # number of states
m = 4 # number of obs
k = 5 # number of fold
num_iter = 1000
tolerance = 10**(-4)
obs_train, obs_test = split_load_data('train534.dat', k)
# train data
size_train_data, len_obs_train, sts_seq_train = param_generate(n, obs_train)
# test data
size_test_data, len_obs_test, sts_seq_test = param_generate(n, obs_test)
# uniq seq of sts and obs
uniq_sts = list(np.unique(sts_seq_train)) # the function need to feed in a list of uniq states
uniq_obs = list(np.unique(obs_train))
quantities = np.ones(size_test_data)
# prob param
pi = pi_generate(n)
trans_prob = trans_prob_generate(n)
em_prob = em_prob_generate(n, m)
model = hmm(uniq_sts, uniq_obs, pi, trans_prob, em_prob) # init the model
ep, tp, sp, prob_lst, iter_count, loss_lst = model.train_hmm(obs_test, num_iter, quantities, tolerance)
hidden_sts = []
for i in range(size_train_data):
hidden_sts.append(model.viterbi(obs_train[i]))
# print("hidden states for training data:\n", hidden_sts)
# print(len(hidden_sts))
print("em_prob\n", ep)
print("trans_prob\n", trans_prob)
# predicted output from the training data
first_index = []
for i in range(len(hidden_sts)):
first_index.append(hidden_sts[i][0])
predicted_obs = []
for i in range(len(hidden_sts)):
predicted_obs.append(predict_obs(first_index[i], ep, tp))
# print("predicted obs", predicted_obs)
loss = []
for i in range(len(hidden_sts)):
loss.append(loss_count(hidden_sts[i], predicted_obs[i]))
print("total loss: ", sum(loss))
| 2.671875 | 3 |
Tesi/3_modelTester/5_costsTester.py | LucaCamerani/EcoFin-library | 9 | 12770505 | """
5_costsTester.py
Created by <NAME> at 18/02/2021, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
"""
4_portfolioTester.py
Created by <NAME> at 10/02/2021, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from EcoFin.assetAllocation.performance import Performance
from EcoFin.utils import utils
from EcoFin.assetAllocation.allocation import Allocation
# -------------------------[Set-up]-------------------------
ticker_list = [line.rstrip('\n') for line in open(r'../INDEXs/DJIA.txt')]
maturity_min = 15
base_path = r'../Export/BackTest_C'
start_date = 0
# Strategy set-up
direction = 'OPS_[OI]' # Direction driver
force = 'VIX_[CBOE]' # If None, don't use force driver
polarize = True # True or False: polarize direction component
# Portfolio set-up
buy_only = False # Set a buy only strategy that ignore negative signals
w_limit = None # Ranks best N ticker based on strategy
w_equally = False # Equally weighted mode
leverage = None # Strategy leverage (1 is no leverage, None is auto-compensation)
# Transaction costs
tc = 8 # unit in basis points
# ----------------------------------------------------------
base = ['SpotPrice']
data = {b: {} for b in base + [direction, force]}
if None in data.keys():
del data[None]
for tick in tqdm(ticker_list, desc='Importing data'):
try:
# Import data and clean-up
source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl')
source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')]
source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True)
for driver in data.keys():
data[driver][tick] = source[driver]
except:
pass
# Merge (concatenate) data and create dataframes
for driver in data.keys():
data[driver] = pd.concat(data[driver], axis=1)
    # ------------------------[Normalize direction data]------------------------
if driver == direction:
data[driver] = data[driver].sub(data[driver].mean(axis=1), axis=0)
    # ---------------------------------------------------------------------------
# Generate strategy signals
# -----------------------------------[STRATEGY SET-UP]-----------------------------------
if polarize: #
data[direction] = utils.polarizeTable(data[direction]) #
#
if force is None: #
force_v = 1 #
else: #
force_v = data[force] #
#
data['signals'] = data[direction] * force_v #
# -----------------------------------[STRATEGY SET-UP]-----------------------------------
# =====================================================================================
# FROM HERE NO 'signals data' MANIPULATION
# =====================================================================================
# [1] Compute ln-returns of benchmark
data['lnReturns'] = np.log(data['SpotPrice'].shift(-1) / data['SpotPrice'])
# [2] Compute strategy weights
allocation = Allocation(data['signals'], buyOnly=buy_only, limit=w_limit)
if w_equally:
data['weights'] = allocation.getEquallyWeights()
else:
data['weights'] = allocation.getSignalWeights()
# [3] Compute strategy ln-returns
if leverage is None:
leverage = data['SpotPrice'].shape[1]
data['strategy'] = data['lnReturns'] * data['weights'] * leverage
# Compute turnover and transaction costs
turnover = allocation.getTurnover(data['weights'])
data['costs'] = np.log(turnover.byTime * 2 * (tc/1e4) + 1)
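# Note (added): turnover * 2 counts both legs of each rebalance, tc/1e4 converts
# basis points to a fraction, and np.log(1 + cost) moves the proportional cost
# into log-return space so it can be subtracted from the strategy returns.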
data['strategy_net'] = data['strategy'].mean(axis=1) - data['costs']
# =====================================================================================
# FROM HERE NO DATA MANIPULATION
# =====================================================================================
# Create plot framework
fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True)
fig.suptitle('Strategy tester', fontsize=16)
# Plot strategy return vs. benchmark (data)
axs[0].set_title('data returns')
axs[0].plot(data['lnReturns'].mean(axis=1).cumsum(), linestyle='dotted', label='Benchmark')
axs[0].plot(data['strategy'].mean(axis=1).cumsum(), label='Strategy Gross')
axs[0].plot(data['strategy_net'].cumsum(), label='Strategy Net')
axs[0].set(ylabel='Cumulated ln-returns ($X_t$)')
axs[0].legend()
# Plot transaction costs
ax2 = axs[0].twinx()
color = 'tab:gray'
ax2.set_ylabel('Transaction Costs', color=color)
ax2.fill_between(data['costs'].index, 0, data['costs'], linewidth=.5, alpha=.2, color=color)
ax2.plot(data['costs'], linewidth=.5, alpha=.6, color=color)
ax2.set_ylim([0, data['costs'].max() * 4])
ax2.tick_params(axis='y', labelcolor=color)
# Plot evolution of weights
axs[1].set_title('Transaction costs')
axs[1].plot(turnover.byTime, color='gold', label=r'Turnover ($\gamma$)')
axs[1].axhline(turnover.mean, alpha=.6, linestyle='--', label=r'mean')
axs[1].legend()
plt.show()
| 1.828125 | 2 |
extractor/credit.py | shalev67/AdvWebProject | 1 | 12770506 | import textract
from itertools import tee
import base64
import bson
import uuid
import os
from flask import Flask, request, jsonify
import datetime
from pymongo import MongoClient
import string
import json
UPLOAD_FOLDER = '/tmp/'
ALLOWED_EXTENSIONS = {'pdf'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
client = MongoClient('mongodb://localhost:27017/')
users_collection = client.test.users
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.after_request
def cors_enabled(response):
response.headers['Access-Control-Allow-Origin'] = 'http://localhost:5000'
response.headers['Access-Control-Allow-Credentials'] = 'true'
return response
@app.route('/', methods=['GET', 'POST'])
def upload_file():
is_request_post = request.method == 'POST'
    user_cookie = request.cookies.get('currentUserId')
    is_user_cookie_string = type(user_cookie) is str
    # The original line duplicated the type check; the id is sliced with [3:-3]
    # below, so it plausibly needs more than 6 characters (assumed intent).
    is_user_len_ok = is_user_cookie_string and len(user_cookie) > 6
if is_request_post and is_user_cookie_string and is_user_len_ok:
user_id = request.cookies.get('currentUserId')[3:-3]
file_stream = request.form['file']
file_name = uuid.uuid4().hex + '.pdf'
file_path = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
with open(file_path, 'wb') as file:
file.write(base64.b64decode(file_stream[28:]))
extract_transaction_from_pdf(file_path, user_id)
return jsonify({"message": "User id : %s transaction was updated" % user_id})
else:
# TODO remove GET
return 'Hello World!'
def pairwise(iterable):
(a, b) = tee(iterable)
next(b, None)
return zip(a, b)
def remove_rtl(text):
text = text.replace('\u202a', '')
text = text.replace('\u202b', '')
text = text.replace('\u202c', '')
text = text.replace('í', 'ם')
text = text.replace('ï', 'ן')
text = text.replace('ó', 'ף')
text = text.replace('ê', 'ך')
text = text.replace('õ', 'ץ')
    # The source character was lost in encoding; under the cp1255->cp1252
    # mangling seen above, Hebrew nun (0xF0) appears as 'ð' (assumed).
    text = text.replace('ð', 'נ')
return text
def extract_credit_card_number(lines):
return lines[0][2:]
def is_timestamp(date_text):
if len(date_text) == 8:
numbers = date_text.split('/')
if len(numbers) == 3:
number1 = numbers[0]
number2 = numbers[1]
number3 = numbers[2]
is_number1 = len(number1) == 2 and number1.isdigit()
is_number2 = len(number2) == 2 and number2.isdigit()
is_number3 = len(number3) == 2 and number3.isdigit()
if all([is_number1, is_number2, is_number3]):
return True
return False
def is_price(price):
allowed = string.digits + '.' + ',' + '-'
return all(c in allowed for c in price)
class Transaction:
date = None
to = None
iska = None
class Data:
credit_card_last_digits = None
name = None
address = None
transactions = None
def add_transaction_to_user(transactions, user_id):
current_transactions = users_collection.find_one({'_id': bson.ObjectId(user_id)})['transactions']
for transaction in current_transactions:
transaction.pop('_id', None)
transactions['transactions'] = [transaction for transaction in
transactions['transactions'] if transaction not in current_transactions]
for transaction in transactions['transactions']:
transaction['_id'] = bson.objectid.ObjectId()
users_collection.update_one(
{'_id': bson.ObjectId(user_id)},
{'$push': {'transactions': {'$each': transactions['transactions']}}}
)
def extract_transaction_from_isracard_pdf(lines, user_id):
data = Data()
remove_list = [
'ה.קבע',
'',
'לא הוצג',
'סכו םהחיוב',
'בש"ח',
'}',
'ש םבית עסק',
'תאריך',
'עסקה',
'כרטיס',
'בעסקה',
'ענף',
'ש.אלחוט'
]
categories_list = [
'ביטוח',
'שרות רפואי',
'נופש ותיור',
'בתי ספר',
'פנאי/ספורט',
'שירותי רכב',
'דלק',
'מכולת/סופר',
'רהיטים',
'מסעדות/קפה',
'מוצרי חשמל',
"קניה אינט'",
"תש' רשויות",
'פארמה',
'כלי בית',
'משתלות',
'הלבשה',
'מעדניות',
'תרבות',
'שונות',
'ספרי/םדיסק',
'אבזרי אפנה',
'טוטו/פיס',
'הנעלה',
'צעצועים',
'עיתו/ןדפוס',
'מחשבים'
]
categories_fixer = {
'ביטוח': 'ביטוח',
'שרות רפואי': 'שרות רפואי',
'נופש ותיור': 'נופש ותיור',
'בתי ספר': 'בתי ספר',
'פנאי/ספורט': 'פנאי וספורט',
'שירותי רכב': 'שירותי רכב',
'דלק': 'דלק',
'מכולת/סופר': 'מכולת וסופר',
'רהיטים': 'רהיטים',
'מסעדות/קפה': 'מסעדות וקפה',
'מוצרי חשמל': 'מוצרי חשמל',
"קניה אינט'": 'קניות באינטרנט',
"תש' רשויות": 'תשלומי רשויות',
'פארמה': 'פארמה',
'כלי בית': 'כלי בית',
'משתלות': 'משתלות',
'הלבשה': 'הלבשה',
'מעדניות': 'מעדניות',
'תרבות': 'תרבות',
'שונות': 'שונות',
'ספרי/םדיסק': 'ספרים ודיסקים',
'אבזרי אפנה': 'אביזרי אופנה',
'טוטו/פיס': 'טוטו ופיס',
'הנעלה': 'הנעלה',
'צעצועים': 'צעצועים',
'עיתו/ןדפוס': 'עיתון',
'מחשבים': 'מחשבים'
}
lines = list([value for value in lines if value not in remove_list])
start_index = lines.index('עסקות שחויבו /זוכו -בארץ')
end_index = lines.index('מסגרת הכרטיס ותנאי האשראי')
lines = lines[start_index + 1:end_index]
categories = list([value for value in lines if value in categories_list])
try:
remove_index_start = lines.index('פירוט נוסף')
remove_index_end = lines.index('עסקות שחויבו /זוכו -בארץ')
except ValueError:
remove_index_start = None
remove_index_end = None
if remove_index_start and remove_index_end:
lines = lines[:remove_index_start] + lines[remove_index_end + 1:]
dates = [date for date in lines if is_timestamp(date)]
temp = list()
prices = list()
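    # Heuristic (inferred from the statement layout): numeric lines arrive in
    # runs where the first half are original amounts and the second half are
    # billed amounts; only the billed half is kept, aligned with the dates.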
for line in lines:
if is_price(line):
temp.append(line)
else:
if len(temp) and len(prices) < len(dates):
if len(temp) > len(dates) * 2:
temp = temp[:len(dates) * 2]
if len(temp) % 2 != 0:
temp = temp[:-1]
if int(len(temp)/2) + len(prices) > len(dates):
temp = temp[:-2]
prices += temp[int(len(temp)/2):]
temp = list()
lines = list([value for value in lines if value not in categories_list])
lines = list([value for value in lines if not is_price(value)])
lines = list([value for value in lines if not is_timestamp(value)])
lines = list([value for value in lines if value != 'עסקות שחויבו /זוכו -בארץ'])
lines = list([value for value in lines if value != 'סכו םעסקה'])
lines = list([value for value in lines if 'סה"כ חיוב לתאר' not in value])
businesses = lines[:len(dates) + 1]
transactions = {"transactions": []}
for date, business, price, category in zip(dates, businesses, prices, categories):
transactions['transactions'].append({
'date': date,
'business': business,
'price': price,
'category': category
})
for transaction in transactions['transactions']:
date = transaction['date']
date = date.split('/')
date[2] = '20' + date[2]
date = '/'.join(date)
transaction['price'] = transaction['price'].replace(',', '')
transaction['price'] = int(float(transaction['price']))
transaction['date'] = datetime.datetime.strptime(date, '%d/%m/%Y')
transaction['category'] = categories_fixer[transaction['category']]
add_transaction_to_user(transactions=transactions, user_id=user_id)
def extract_transaction_from_pdf(file_path, user_id):
# TODO add error message and handler for textract.exceptions.ShellError exception
text = textract.process(file_path, 'UTF-8')
decode_text = text.decode()
decode_text = remove_rtl(decode_text)
lines = decode_text.split('\n')
extract_transaction_from_isracard_pdf(lines, user_id)
if __name__ == '__main__':
app.run(port=3000, host='0.0.0.0')
| 2.25 | 2 |
utils/folder_functions.py | miniautonomous/engine_ai | 1 | 12770507 | <gh_stars>1-10
import os
import platform
import json
import glob
import tkinter
import tkinter.filedialog as file_dialog
from tkfilebrowser import askopendirnames as multi_directories
class UserPath(object):
def __init__(self, app_module=None):
"""
This class covers a bunch of generic file IO operations that the user generally
needs for various GUI applications.
Primary functions:
1) Create and initialize an App initialization or configuration files where
various default values (saved in a dictionary) are written to disk.
        2) Opens a file dialog to allow the user to select a file or folder. At
           the same time, it is used to track which folder was last used to read
           or write files or select a folder for a given 'path_tag'. The purpose
           of this tracking is to avoid requiring the user to manually browse
           back to a given location to re-select the next file or select the
           same file at a later time.
        3) Miscellaneous methods to get file and folder lists.
        NOTE: It is assumed here that an 'App' package base folder is one folder
        above this module location. In other words, this module
        'folder_functions.py' is located in a sub-folder of the 'App' package,
        e.g., utils. This implies that the 'App' configuration files, where
        various default values are saved using the 'app_config' property
        dictionary, are also located in the base 'App' folder saved in the
        'app_folder' property.
        """
# Set some properties here to make sure that they are attached to this instance
self.current_paths = []
self.num_paths = 0
self.historical_paths = {}
self.app_config = {}
self.app_module = app_module
self.app_config_file = None
self.app_folder = None # This is the base folder of the 'App'
# Properties
self.file_multi_select = False
self.file_prompt = 'Please select a File:'
self.file_type = [('all files', '.*')]
# Default data folder path specified by the user
self.user_data_folder = None
# find the os type
self.os_type = platform.system()
if self.app_module is not None:
"""
Create if needed the initialization file of this class/module that contains the
dict 'app_config'. This 'app_config' dictionary keeps track of the last location
that the user used. The following guarantees that the initialization file is
available and has some default values.
"""
# 1. Figure out the complete module path
self.app_config_file = os.path.abspath(self.app_module)
self.app_config_file, _ = os.path.splitext(self.app_config_file)
# self.app_folder, _ = os.path.split(self.app_config_file)
self.app_config_file += '_lnx.cfg'
# 2. Actually create the 'App' configuration file if needed
if not os.path.isfile(self.app_config_file):
# Initialize the dictionary key associated with the default path
self.app_config['default_path'] = os.path.expanduser('~')
self.app_config['cfg_file_path'] = self.app_config_file
# Save the dictionary to init file.
with open(self.app_config_file, 'w') as tmpFile:
json.dump(self.app_config, tmpFile)
else:
# Simply read the file if it exist
with open(self.app_config_file) as tmpFile:
self.app_config = json.load(tmpFile)
def store_current_paths(self, key_name:str):
"""
        Adds a dictionary entry to the history property "historical_paths" to save
        the object's "current_paths" property so it can be recalled later.
Parameters
----------
key_name: (str) key name used to save current_paths
"""
self.historical_paths[key_name] = self.current_paths
def apps_get_default(self, key_name: str):
"""
Returns a dictionary entry if it exists.
Parameters
----------
key_name: (str) key name
Returns
-------
key_value: (str) dictionary entry
"""
if key_name in self.app_config.keys():
key_value = self.app_config[key_name]
else:
key_value = None
return key_value
def write_default_value(self):
"""
Saves the app_config property dictionary to a text file.
"""
if self.app_module is not None:
with open(self.app_config_file, 'w') as tmp_file:
json.dump(self.app_config, tmp_file, sort_keys=True)
def select_user_data_folder(self, folder_path: str, action: str='validate', path_tag: str='default_path'):
"""
This method saves a valid path to a user selected folder in the object property
"user_data_folder". At a minimum, it puts the user home folder path in that
object property.
Parameters
----------
folder_path: (str) path to folder to save
action: (str) type of action requested by user (options are 'validate', 'confirm' and 'select'
path_tag: (str) tag to label path with
Returns
-------
"""
if folder_path is None:
folder_path = ''
# Ensure a specified folder path exists, if not, return user's home directory
if action.lower() == 'validate':
if not os.path.isdir(folder_path):
self.user_data_folder = os.path.expanduser('~')
else:
self.user_data_folder = folder_path
# Confirm a folder is available, if not query the user to confirm which folder to use
elif action.lower() == 'confirm':
if not os.path.isdir(folder_path):
self.path_select(path_tag, 'dir_select')
            if self.num_paths != 0:
self.user_data_folder = self.current_paths[0]
else:
self.user_data_folder = os.path.expanduser('~')
else:
self.user_data_folder = folder_path
# Open the file dialog (which will save the path in the object property).
elif action.lower() == 'select':
self.path_select(path_tag, 'dir_select')
            if self.num_paths != 0:
self.user_data_folder = self.current_paths[0]
else:
# Return the user home folder
self.user_data_folder = os.path.expanduser('~')
# Unsupported action
else:
self.user_data_folder = None
raise ValueError('Unsupported "action" input ',
'method: select_user_data_folder ',
'class: UserPath')
def path_select(self, path_tag: str= 'default_path', path_type: str= 'file_read'):
"""
Method that uses the input parameter path_tag as a dictionary key so that previously
selected path from an given application can be re-used. Typically, the calling
application "label" is used for this string so it is easy to recall.
Parameters
----------
path_tag: (str) label for the defined path
path_type: (str) what is the operation of the file path
Currently supports: 1) 'file_read'
2) 'file_write'
3) 'dir_select'
"""
if path_tag in self.app_config.keys():
init_dir = self.app_config[path_tag]
else:
init_dir = self.app_config['default_path']
        # Need to ensure init_dir is a valid directory in case it has been
        # renamed, deleted, or moved.
self.select_user_data_folder(init_dir)
init_dir = self.user_data_folder
diag_options = {'title': self.file_prompt, 'initialdir': init_dir}
window = tkinter.Tk()
window.wm_withdraw()
if path_type.lower() == 'file_read':
diag_options['filetypes'] = self.file_type
if self.file_multi_select:
self.current_paths = file_dialog.askopenfilenames(**diag_options)
# save the current path in a list to be consistent/compatible
self.current_paths = list(self.current_paths)
else:
# save the single path in a list directly to be consistent/compatible
self.current_paths = [file_dialog.askopenfilename(**diag_options)]
elif path_type.lower() == 'file_write':
diag_options['filetypes'] = self.file_type
# save the single path in a list directly to be consistent/compatible
self.current_paths = [file_dialog.asksaveasfilename(**diag_options)]
elif path_type.lower() == 'dir_select':
# save the single path in a list directly to be consistent/compatible
if self.file_multi_select:
self.current_paths = multi_directories(**diag_options)
else:
self.current_paths = [file_dialog.askdirectory(**diag_options)]
else:
raise ValueError('Unsupported pathType input ',
'method: path_select',
'class: UserPath')
# Check the user did NOT cancel and update the dictionary and init file
if any(map(len, self.current_paths)):
self.num_paths = len(self.current_paths)
# Find the base path of the file so it can be saved in the init file
if path_type.lower() == 'dir_select':
self.app_config[path_tag] = self.current_paths[0]
else:
# For file, remove the file name
self.app_config[path_tag] = os.path.dirname(self.current_paths[0])
else:
# User cancelled, simply default the numPath property
self.num_paths = 0
def file_paths_list(self, folder_list: list, file_type: str= '*.*'):
"""
Retrieves the full path of all the files that matches the
file type. The full file paths for all the files are saved
in the "current_paths" property of this class
Parameters
----------
folder_list: (list) list of files selected
file_type: (str) file type of selected files
"""
self.current_paths = []
for item in range(len(folder_list)):
tmp = glob.glob(folder_list[item] + '/' + file_type)
if len(tmp) == 0:
print('No matching file found in => ' + folder_list[item])
else:
self.current_paths.extend(tmp)
# Sort the list
self.current_paths.sort()
# Update the associated property
self.num_paths = len(self.current_paths)
def folders_list(self, base_path: str):
"""
This method will retrieves the full path of all the folders located in the
base_path.
The full sub-folder paths for all the folders are saved in the "current_paths"
property of this class
Parameters
----------
        base_path: (str) base path string of the directory to list
"""
if not base_path.endswith('/'):
base_path = base_path + '/'
self.current_paths = glob.glob(base_path + '*/')
self.current_paths.sort()
# Save the path structure
self.num_paths = len(self.current_paths)
for iItem, pItem in enumerate(self.current_paths):
self.current_paths[iItem] = os.path.normpath(pItem)
@staticmethod
def read_dictionary(dictionary_path):
"""
Read the path to where the a config dictionary is stored.
Parameters
----------
dictionary_path: (str) path that leads to a previosly written config.
Returns
-------
app_config: (dict) the stored config dictionary
"""
if os.path.isfile(dictionary_path):
# Simply read the file if it exist and return its content
with open(dictionary_path) as tmpFile:
return json.load(tmpFile)
@staticmethod
def file_paths_list_2(folder_list: list, file_type: str= '*.*') -> list:
"""
This method will retrieves the full path of all the files that matches the file type for all
the folder listed.
Parameters
----------
folder_list: (list) lists all folders available with given file type
file_type: (str) file type being searched for by user
Returns
-------
list_paths: (list) current list of paths for given file type
"""
list_paths = []
for item in range(len(folder_list)):
tmp = glob.glob(folder_list[item] + '/' + file_type)
if len(tmp) == 0:
print('No matching file found in => ' + folder_list[item])
else:
list_paths.extend(tmp)
return sorted(list_paths) | 3.28125 | 3 |
tests/unit_tests/test_get_pinnacle_data.py | conorreid/dst_scoring_model | 1 | 12770508 | <reponame>conorreid/dst_scoring_model
import unittest.mock as mock
import dst_scoring_model.get_pinnacle_data as get_pinnacle_data
def construct_request_content():
response = mock.MagicMock()
response.content = mock.MagicMock(return_value="hello")
return response
@mock.patch("requests.get", return_value=construct_request_content())
@mock.patch(
"json.loads",
return_value={
"events": [
{
"teams": [{"name": "hello"}, {"name": "goodbye"}],
"lines": {
"3": {
"spread": {"point_spread_home": 1},
"total": {"total_under": 1},
}
},
}
]
},
)
def test_get_lines(get, loads):
assert get_pinnacle_data.get_lines()["team_name"][0] == "hello"
| 2.515625 | 3 |
setup.py | toluaina/essync | 1 | 12770509 | <reponame>toluaina/essync<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os
import re
from setuptools import find_packages, setup
HERE = os.path.dirname(os.path.abspath(__file__))
def get_version() -> str:
filename: str = os.path.join(HERE, "pgsync", "__init__.py")
with open(filename) as fp:
contents = fp.read()
pattern = r"^__version__ = \"(.*?)\"$"
return re.search(pattern, contents, re.MULTILINE).group(1)
# Package meta-data.
NAME = "pgsync"
DESCRIPTION = "Postgres to Elasticsearch sync"
URL = "https://github.com/toluaina/pgsync"
AUTHOR = MAINTAINER = "<NAME>"
AUTHOR_EMAIL = MAINTAINER_EMAIL = "<EMAIL>"
PYTHON_REQUIRES = ">=3.7.0"
VERSION = get_version()
INSTALL_REQUIRES = []
KEYWORDS = [
"pgsync",
"elasticsearch",
"postgres",
"change data capture",
]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
]
SCRIPTS = [
"bin/pgsync",
"bin/bootstrap",
"bin/parallel_sync",
]
SETUP_REQUIRES = ["pytest-runner"]
TESTS_REQUIRE = ["pytest"]
# if building the source dist then add the sources
PACKAGES = find_packages(include=["pgsync"])
with open("README.rst") as fp:
README = fp.read()
with open("requirements/prod.txt") as fp:
INSTALL_REQUIRES = fp.read()
setup(
name=NAME,
author=AUTHOR,
license="LGPLv3",
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
python_requires=PYTHON_REQUIRES,
description=DESCRIPTION,
long_description=README,
long_description_content_type="text/markdown",
install_requires=INSTALL_REQUIRES,
include_package_data=True,
keywords=KEYWORDS,
packages=PACKAGES,
setup_requires=SETUP_REQUIRES,
scripts=SCRIPTS,
test_suite="tests",
tests_require=TESTS_REQUIRE,
url=URL,
version=VERSION,
zip_safe=False,
project_urls={
"Bug Reports": "https://github.com/toluaina/pgsync/issues",
"Funding": "https://github.com/sponsors/toluaina",
"Source": URL,
"Web": "https://pgsync.com",
"Documentation": "https://pgsync.com",
},
)
| 1.421875 | 1 |
try.py | tssga-arch/myotc | 0 | 12770510 | #!/usr/bin/env python3
import re
from pprint import pprint
import os
import yaml
import random
import string
from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt
secrets_file = '_secrets_file_'
yaml_pp_vars = dict(os.environ)
yaml_pp_vars[secrets_file] = '_secrets.yaml'
secrets = None
sshkey_re = re.compile(r'(.*)\$SSHKEY:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
# The loop below matches PWGEN placeholders, so the corresponding pattern is
# defined here as well (reconstructed by analogy with sshkey_re above).
pwgen_re = re.compile(r'(.*)\$PWGEN:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
for line in [
'One two',
'abcd $SSHKEY:linux1$',
'abcd $$PWGEN:linux1$ something',
'abcd $$PWGEN:linux1:32:MD5$',
'abcd $$PWGEN:linux1:32:SHA256$ something',
'abcd $$PWGEN:linux1:32:SHA512$',
'abcd $PWGEN:linux1:16',
'abcd $PWGEN:linux1 something',
'abcd $PWGEN:linux1:32:MD5',
'abcd $PWGEN:linux1:32:SHA256 something',
'abcd $PWGEN:linux1:32:SHA512',
'abcd $PWGEN:linux1:16$',
'abcd $PWGEN:linux1$ something',
'abcd $PWGEN:linux1:32:MD5$',
'abcd $PWGEN:linux1:32:SHA256$ something',
'abcd $PWGEN:linux1:32:SHA512$',
'end' ]:
in_line = line
mv = pwgen_re.match(line)
if mv:
if mv.group(1)[-1] == '$':
line = line[:len(mv.group(1))-1] + line[len(mv.group(1)):]
else:
store = mv.group(2)
pwlen = 12
encode = ''
for opt in mv.group(3).split(':'):
if not opt: continue
if opt == 'MD5' or opt == 'SHA256' or opt == 'SHA512':
encode = opt
elif int(opt) > 6:
pwlen = int(opt)
if secrets is None:
if os.path.isfile(yaml_pp_vars[secrets_file]):
with open(yaml_pp_vars[secrets_file],'r') as fp:
secrets = yaml.safe_load(fp)
else:
secrets = {}
if store in secrets:
passwd = secrets[store]
else:
charset = string.ascii_lowercase + string.ascii_uppercase + string.digits
passwd = ''.join(random.sample(charset, pwlen))
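      # Note (added): random.sample draws without replacement, so characters do
      # not repeat and pwlen is effectively capped at len(charset) (62 here).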
secrets[store] = passwd
with open(yaml_pp_vars[secrets_file],'w') as fp:
fp.write(yaml.dump(secrets))
if encode == 'MD5':
cpassw = md5_crypt.hash(passwd)
elif encode == 'SHA256':
cpassw = sha256_crypt.hash(passwd,rounds=5000)
elif encode == 'SHA512':
cpassw = sha512_crypt.hash(passwd,rounds=5000)
else:
cpassw = passwd
line = line[:len(mv.group(1))] + cpassw + line[len(mv.group(0)):]
print('INP: {}'.format(in_line))
print('OUT: {}'.format(line))
| 2.53125 | 3 |
setup.py | cheshire3/cheshire3 | 3 | 12770511 | """Setup file for cheshire3 package."""
from __future__ import with_statement
import sys
import os
import inspect
from warnings import warn
# Import Distribute / Setuptools
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from pkg_resources import DistributionNotFound
# Check Python version
py_version = getattr(sys, 'version_info', (0, 0, 0))
if py_version < (2, 6):
warn("Cheshire3 requires Python 2.6 or later; some code may be "
"incompatible with earlier versions.")
# Inspect to find current path
setuppath = inspect.getfile(inspect.currentframe())
setupdir = os.path.dirname(setuppath)
# Basic information
_name = 'cheshire3'
_description = ('Cheshire3 Search and Retrieval Engine and Information '
'Framework')
# Discover version number from file
with open(os.path.join(setupdir, 'VERSION.txt'), 'r') as vfh:
_version = vfh.read().strip()
_download_url = ('http://cheshire3.liv.ac.uk/download/{0}/src/{1}-{2}.tar.gz'
''.format(_version[:3], _name, _version))
# More detailed description from README
try:
fh = open(os.path.join(setupdir, 'README.rst'), 'r')
except IOError:
_long_description = ''
else:
_long_description = fh.read()
fh.close()
# Requirements
with open(os.path.join(setupdir, 'requirements.txt'), 'r') as fh:
_install_requires = fh.readlines()
_tests_require = []
# Determine python-dateutil version
if py_version < (3, 0):
dateutilstr = 'python-dateutil == 1.5'
if py_version < (2, 7):
_install_requires.append('argparse')
_tests_require.append('unittest2')
else:
dateutilstr = 'python-dateutil >= 2.0'
_install_requires.append(dateutilstr)
setup(
name=_name,
version=_version,
packages=[_name],
include_package_data=True,
package_data={'cheshire3': ['configs/*.xml', 'configs/extra/*.xml']},
exclude_package_data={'': ['README.*', '.gitignore']},
requires=['lxml(>=2.1)', 'bsddb', 'dateutil', 'argparse'],
tests_require=_tests_require,
install_requires=_install_requires,
setup_requires=['setuptools-git'],
dependency_links=[
"http://labix.org/python-dateutil",
"http://www.panix.com/~asl2/software/PyZ3950/",
"http://cheshire3.liv.ac.uk/download/latest/reqs/"
],
extras_require={
'graph': ['rdflib'],
'grid': ['PyRods'],
'datamining': ['svm'],
'lucene': ['lucene'],
'nlp': ['numpy', 'nltk >= 2.0.2'],
'sql': ['psycopg2 >= 2.5'],
'textmining': ['numpy', 'nltk >= 2.0.2'],
'web': ['pyoai', 'PyZ3950 >= 2.04', 'ZSI < 2.0']
},
test_suite="cheshire3.test.testAll.suite",
scripts=['scripts/DocumentConverter.py'],
entry_points={
'console_scripts': [
'cheshire3 = cheshire3.commands.console:main',
'cheshire3-init = cheshire3.commands.init:main',
'cheshire3-load = cheshire3.commands.load:main',
'cheshire3-register = cheshire3.commands.register:main',
'cheshire3-unregister = cheshire3.commands.unregister:main',
'cheshire3-search = cheshire3.commands.search:main',
'cheshire3-serve = cheshire3.commands.serve:main',
'icheshire3-load = cheshire3.grid.commands.load:main [grid]'
],
},
keywords="xml document search information retrieval engine data text",
description=_description,
long_description=_long_description,
author="<NAME>, et al.",
author_email="<EMAIL>",
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license="BSD",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Internet :: Z39.50",
"Topic :: Text Processing :: Indexing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Text Processing :: Markup"
],
url="http://cheshire3.liv.ac.uk /",
download_url=_download_url
)
| 2.171875 | 2 |
env/lib/python3.8/site-packages/ask_sdk_core/view_resolvers/locale_template_enumerator.py | adamash99/alexa-play-pot-of-greed | 496 | 12770512 | <gh_stars>100-1000
# -- coding: utf-8 --
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import os
import typing
from ask_sdk_runtime.view_resolvers import AbstractTemplateEnumerator
from ask_sdk_core.utils.view_resolver import split_locale
if typing.TYPE_CHECKING:
from typing import Iterator, Type
from ask_sdk_core.handler_input import HandlerInput
class LocaleTemplateEnumerator(AbstractTemplateEnumerator):
"""Enumerator to enumerate template name based on locale property.
Enumerate possible combinations of template name and given locale
from the HandlerInput.
For Example: For locale: 'en-US' and a response template name "template",
the following combinations will be generated:
template/en/US
template/en_US
template/en
template_en_US
template_en
template
"""
__instance = None
def __new__(cls):
# type: (Type[object]) -> LocaleTemplateEnumerator
"""Creating a singleton class to re-use same enumerator instance for
different locale and template values.
"""
if LocaleTemplateEnumerator.__instance is None:
LocaleTemplateEnumerator.__instance = object.__new__(cls)
return LocaleTemplateEnumerator.__instance
def __init__(self):
# type: () -> None
"""Enumerator to generate different path combinations for a given
locale to load the template.
"""
pass
def generate_combinations(self, handler_input, template_name):
# type: (HandlerInput, str) -> Iterator[str]
"""Create a generator object to iterate over different combinations
of template name and locale property.
:param handler_input: Handler Input instance with
Request Envelope containing Request.
:type handler_input: :py:class:`ask_sdk_core.handler_input.HandlerInput`
:param template_name: Template name which needs to be loaded
:type template_name: str
:return: Generator object which returns
relative paths of the template file
:rtype: Iterator[str]
"""
locale = handler_input.request_envelope.request.locale
language, country = split_locale(locale=locale)
if not language and not country:
yield template_name
else:
yield os.path.join(template_name, language, country)
yield os.path.join(template_name, (language + "_" + country))
yield os.path.join(template_name, language)
yield (template_name + "_" + language + "_" + country)
yield (template_name + "_" + language)
yield template_name
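# Illustrative usage (handler_input assumed to carry locale 'en-US'):
#   enumerator = LocaleTemplateEnumerator()
#   for candidate in enumerator.generate_combinations(handler_input, 'template'):
#       print(candidate)  # template/en/US, template/en_US, ..., template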
| 2.125 | 2 |
uw_iasystem/__init__.py | uw-it-aca/uw-restclients-iasystem | 0 | 12770513 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
import logging
from restclients_core.exceptions import DataFailureException
from uw_iasystem.dao import IASystem_DAO
from uw_iasystem.exceptions import TermEvalNotCreated
from uw_iasystem.util.thread import ThreadWithResponse
logger = logging.getLogger(__name__)
def get_resource(url, domain):
threads = []
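    # Fan the same GET out to every configured IASystem domain in parallel and
    # return the first response whose collection actually contains items.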
for dao in IASystem_DAO(domain):
t = ThreadWithResponse(target=__get_resource, args=(dao, url))
t.start()
threads.append((t, dao.service_name()))
for t, k in threads:
t.join()
if t.response is not None:
data = t.response
if data.get('collection') and\
data.get('collection').get('items'):
return t.response
if t.exception is not None:
logger.error("{}: {}".format(k, str(t.exception)))
raise t.exception
return None
def __get_resource(dao, url):
"""
Issue a GET request to IASystem with the given url
and return a response in Collection+json format.
:returns: http response with content in json
"""
headers = {"Accept": "application/vnd.collection+json"}
response = dao.getURL(url, headers)
status = response.status
logger.debug("{} ==status==> {}".format(url, status))
if status != 200:
message = str(response.data)
if status == 404:
# the URL not exists on the specific domain
return None
if status == 400:
if "Term is out of range" in message:
raise TermEvalNotCreated(url, status, message)
raise DataFailureException(url, status, message)
return json.loads(response.data)
| 2.265625 | 2 |
pytorch-official-tutorials/3_neural_networks.py | aFewThings/pytorch-tutorial | 0 | 12770514 | <filename>pytorch-official-tutorials/3_neural_networks.py
# Define NN
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel(grayscale), 6 output channels, 3x3 square convolution kernel
self.conv1 = nn.Conv2d(1, 6, 3)
self.conv2 = nn.Conv2d(6, 16, 3)
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
print(net.conv1.weight.size())
input = torch.randn(1, 1, 32, 32) # BCHW
out = net(input)
print(net.forward(input))
print(out)
net.zero_grad() # zero the gradient buffers of all parameters, then backprop with random gradients
out.backward(torch.randn(1, 10))
# Loss Function
output = net.forward(input)
target = torch.randn(10)
target = target.view(1, -1)
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
"""
.grad_fn 속성을 사용하여 loss를 역방향에서 따라가다보면, 이러한 모습의 연산 그래프를 볼 수 있습니다.
input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
-> view -> linear -> relu -> linear -> relu -> linear
-> MSELoss
-> loss
"""
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
net.zero_grad() # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
# gradient update
# 가중치(wiehgt) = 가중치(weight) - 학습율(learning rate) * 변화도(gradient)
# 간단한 업데이트 과정
learning_rate = 0.01
for f in net.parameters():
f.data.sub_(f.grad.data * learning_rate)
# Updating with an optimizer instead
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.01)
optimizer.zero_grad() # gradients accumulate unless the buffers are manually zeroed
output = net.forward(input)
loss = criterion(output, target)
loss.backward() # compute loss
optimizer.step() # update trainable variables
| 3.828125 | 4 |
djpaddle/urls.py | abroun/dj-paddle | 32 | 12770515 | <gh_stars>10-100
from django.urls import path
from . import views
app_name = "djpaddle"
urlpatterns = [
path("webhook/", views.paddle_webhook_view, name="webhook"),
path("post-checkout/", views.post_checkout_api_view, name="post_checkout_api"),
]
| 1.65625 | 2 |
visualization/basic_examples/anim_pc_csv.py | KeyueZhu/XenomatiX | 1 | 12770516 | <reponame>KeyueZhu/XenomatiX<gh_stars>1-10
import open3d as o3d
import numpy as np
import time
import csv
import os
from matplotlib import cm
# read csv files and store all frames
# play frame by frame after that
folder = "/home/mcity/xenomatix/visualization/pc_csv/"
# folder = "/home/mcity/Desktop/eastern_market_xpc/csvs/"
pcd_frames = []
for fn in os.listdir(folder):
with open(folder + fn) as csvfile:
pcd_single_frame = []
pcd_sf_colors = []
data_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in data_reader:
if 'NaN' in row[0:3]:
continue
# xyz = np.asarray([float(n) / 5000 for n in row[0:3]])
xyz = np.asarray([float(row[0]), float(row[1]), float(row[2])])
# print(xyz)
# intensity compensated by range
# refelctivity = float(row[3]) * float(row[6]) * float(row[6]) /200000^2
# intensity = int(row[3])
# rgb = cm.jet(intensity% 255)
# print(intensity, rgb)
pcd_single_frame.append(xyz)
# pcd_sf_colors.append(rgb)
# pcd = o3d.geometry.PointCloud()
pcd_points = o3d.utility.Vector3dVector(pcd_single_frame)
pcd_colors = o3d.utility.Vector3dVector(pcd_sf_colors)
pcd_frames.append(pcd_points)
vis = o3d.visualization.Visualizer()
vis.create_window()
# o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
# show xyz axis, not showing bc in animation?
opt = vis.get_render_option()
opt.show_coordinate_frame = True
# opt.background_color = np.asarray([0.5, 0.5, 0.5])
opt.point_size = 3.5
# color_op = o3d.visualization.PointColorOption
# color_op.
# opt.point_color_option = color_op
# vis.run()
# xyz = np.random.rand(100, 3)
# pcd_perm.points = o3d.utility.Vector3dVector(xyz)
# open3d needs a permenent/same object, and just update it with new data
pcd_perm = o3d.geometry.PointCloud()
pcd_perm.points = pcd_frames[0]
# o3d.visualization.draw_geometries([pcd_perm])
# print("hi")
# print(pcd_frames[0])
# geometry is the point cloud used in your animaiton
# geometry = o3d.geometry.PointCloud()
vis.add_geometry(pcd_perm)
time.sleep(1)
# for x in range(100):
for pcd_points in pcd_frames:
# for i in range(0, 5):
# print(i, pcd_list[i])
# pcd = o3d.geometry.PointCloud()
# xyz = np.random.rand(100, 3)
# pcd_perm.points = o3d.utility.Vector3dVector(xyz)
pcd_perm.points = pcd_points
# now modify the points of your geometry
# you can use whatever method suits you best, this is just an example
# geometry.points = pcd_list[i].points
vis.update_geometry(pcd_perm) # returns true/false
time.sleep(0.2)
vis.poll_events()
vis.update_renderer()
# time.sleep(5)
# vis.destroy_window() | 2.265625 | 2 |
main.py | mrkiril/HW4_probability_cache | 0 | 12770517 | import asyncio
import logging
import os
import sys
import redis
import aiohttp_jinja2
import jinja2
import uvloop
from rediscluster import RedisCluster
from aiohttp import log, web
from aiomisc import ThreadPoolExecutor
from peewee import Proxy
from peewee_asyncext import PooledPostgresqlExtDatabase
from app.api.routes import routes
from app.api.views import ArticleHandler
from app.models import db_proxy, ExtendedDBManager, Article
from settings import Config
logger = logging.getLogger(__name__)
def init_db(conf: Config) -> Proxy:
db_conf = PooledPostgresqlExtDatabase(
conf.db_name,
user=conf.db_user,
host=conf.db_host,
port=conf.db_port,
        password=conf.db_pass,
register_hstore=False,
autorollback=True,
max_connections=conf.postgres_max_connections
)
db_proxy.initialize(db_conf)
return db_proxy
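# Note (added): the peewee Proxy lets the models be declared before the database
# is configured; initialize() binds the real pooled connection at startup.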
async def on_startup(app):
conf: Config = app["conf"]
# setup templates renderer
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(conf.templates_folder))
logging.basicConfig(level=logging.DEBUG)
logging.config.dictConfig(conf.DEFAULT_LOGGING)
app["db"] = ExtendedDBManager(init_db(conf))
app["db"].database.create_tables([Article], safe=True)
# FOR SINGLE NODE REDIS
# pool = redis.ConnectionPool(max_connections=10000, host=conf.redis_host, port=conf.redis_port)
# app["redis_cli"] = redis.StrictRedis(connection_pool=pool, socket_timeout=1, socket_connect_timeout=0.5)
# Requires at least one node for cluster discovery. Multiple nodes is recommended.
startup_nodes = [
{"host": conf.redis_host, "port": conf.redis_port},
{"host": conf.redis_replica_host, "port": conf.redis_replica_port}
]
app["redis_cli"] = RedisCluster(
startup_nodes=startup_nodes, max_connections=10000, decode_responses=True,
socket_timeout=1, socket_connect_timeout=0.5
)
app["art_handler"] = ArticleHandler(
db=app["db"],
redis_cli=app["redis_cli"],
use_probabilistic_cache=conf.use_probabilistic_cache
)
async def on_cleanup(app):
app["db"].database.drop_tables([Article])
await app["db"].close()
app["executor"].shutdown(wait=False)
async def on_shutdown(app):
pass
def setup_app(conf):
app = web.Application(
client_max_size=conf.MAX_BODY_SIZE,
logger=log.access_logger,
middlewares=[]
)
app["conf"] = conf
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
app.on_shutdown.append(on_shutdown)
app.router.add_routes(routes) # setup views and routes
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=10)
app["executor"] = executor
loop.set_default_executor(executor)
return app
if __name__ == "__main__":
argv = sys.argv[1:]
uvloop.install()
conf = Config()
app = setup_app(conf)
web.run_app(app, host=conf.app_host, port=conf.app_port)
| 2.15625 | 2 |
code/004.py | i4leader/Python-Basic-Pratice-100question | 0 | 12770518 | <reponame>i4leader/Python-Basic-Pratice-100question<gh_stars>0
#!/usr/bin/python3
year = int(input("请输入年:"))
month = int(input("请输入月份:"))
day = int(input("请输入几号:"))
leapyear = [0,31,29,31,30,31,30,31,31,30,31,30,31]
normalyear = [0,31,28,31,30,31,30,31,31,30,31,30,31]
days = 0
i = month
if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
for temp in range(0,i,1):
days += leapyear[temp]
print("今年是闰年, ",end="")
else:
for temp in range(0,i,1):
days += normalyear[temp]
days = days + day
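# Cross-check (illustrative):
#   import datetime
#   assert days == datetime.date(year, month, day).timetuple().tm_yday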
print('This is day %d of the year' % days)
| 3.5625 | 4 |
master_code/otherMiscCode/tools_da_riordinare/kd3hdf5.py | brunetto/MasterThesisCode | 0 | 12770519 | #!/usr/bin/env python
import time
import tables as tb
import numpy as np
import sys
# Global variables for patching the tree.
n_traverse = 0
####################################################################################
# kd3 #
# #
####################################################################################
# Modified from the original by Anne
# Copyright <NAME> 2008
# Released under the scipy license
from heapq import heappush, heappop
import scipy.sparse
def minkowski_distance_p(x,y,p=2):
"""Compute the pth power of the L**p distance between x and y
For efficiency, this function computes the L**p distance but does
not extract the pth root. If p is 1 or infinity, this is equal to
the actual L**p distance. The python broadcasting rules guarantee
that the distance is compute among alle the possible couples.
"""
x = np.asarray(x)
y = np.asarray(y)
if p==np.inf:
return np.amax(np.abs(y-x),axis=-1)
elif p==1:
return np.sum(np.abs(y-x),axis=-1)
else:
return np.sum(np.abs(y-x)**p,axis=-1)
def minkowski_distance(x,y,p=2):
"""Compute the L**p distance between x and y"""
x = np.asarray(x)
y = np.asarray(y)
if p==np.inf or p==1:
return minkowski_distance_p(x,y,p)
else:
return minkowski_distance_p(x,y,p)**(1./p)
def min_distance_rectangle(tree_1, tree_2, p=2.):
"""Compute the minimum distance between points in the two hyperrectangles."""
tree_1_maxes = np.maximum(tree_1._v_attrs.maxes,tree_1._v_attrs.mins).astype(np.float)
tree_1_mins = np.minimum(tree_1._v_attrs.maxes,tree_1._v_attrs.mins).astype(np.float)
tree_2_maxes = np.maximum(tree_2._v_attrs.maxes,tree_2._v_attrs.mins).astype(np.float)
    tree_2_mins = np.minimum(tree_2._v_attrs.maxes,tree_2._v_attrs.mins).astype(np.float)
return minkowski_distance(0, np.maximum(0,np.maximum(tree_1_mins-tree_2_maxes,tree_2_mins-tree_1_maxes)),p)
def max_distance_rectangle(tree_1, tree_2, p=2.):
"""Compute the maximum distance between points in the two hyperrectangles."""
tree_1_maxes = np.maximum(tree_1._v_attrs.maxes,tree_1._v_attrs.mins).astype(np.float)
tree_1_mins = np.minimum(tree_1._v_attrs.maxes,tree_1._v_attrs.mins).astype(np.float)
tree_2_maxes = np.maximum(tree_2._v_attrs.maxes,tree_2._v_attrs.mins).astype(np.float)
    tree_2_mins = np.minimum(tree_2._v_attrs.maxes,tree_2._v_attrs.mins).astype(np.float)
return minkowski_distance(0, np.maximum(tree_1_maxes-tree_2_mins,tree_2_maxes-tree_1_mins),p)
class KDTree(object):
"""kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points
which can be used to rapidly look up the nearest neighbors of any
point.
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary trie, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
def __init__(self, filename, mode):
if mode == 'read' or mode == 'r':
self.h5file = tb.openFile(filename, mode = "r")
elif mode == 'append' or mode == 'a':
self.h5file = tb.openFile(filename, mode = "a")
elif mode == 'build' or mode == 'w' or mode == 'write':
self.h5file = tb.openFile(filename, mode = "w")
def data_store(self, data):
t = time.time()
self.h5file.createArray(self.h5file.root, 'data', np.asarray(data), title='data')
self.h5file.root.data._v_attrs.n_elements = data.shape[0]
self.h5file.root.data._v_attrs.m_dimensions = data.shape[1]
self.h5file.root.data._v_attrs.maxes = np.amax(data,axis=0) # maxes and mins for each coord
self.h5file.root.data._v_attrs.mins = np.amin(data,axis=0)
print self.h5file.root.data._v_attrs.n_elements, " Stored in ", time.time()-t, "seconds."
t = time.time()
self.h5file.root.data.flush()
print time.time()-t, " seconds to commit changes."
    def c_data_store(self, data):
        t = time.time()
        data = np.asarray(data)
        # createCArray expects an Atom and a shape, not the data itself;
        # the values are written into the chunked array afterwards
        carray = self.h5file.createCArray(self.h5file.root, 'data',
                                          tb.Atom.from_dtype(data.dtype),
                                          data.shape, title='data')
        carray[:] = data
        self.h5file.root.data._v_attrs.n_elements = data.shape[0]
        self.h5file.root.data._v_attrs.m_dimensions = data.shape[1]
        self.h5file.root.data._v_attrs.maxes = np.amax(data,axis=0) # maxes and mins for each coord
        self.h5file.root.data._v_attrs.mins = np.amin(data,axis=0)
        print self.h5file.root.data._v_attrs.n_elements, " Stored in ", time.time()-t, "seconds."
        t = time.time()
        self.h5file.root.data.flush()
        print time.time()-t, " seconds to commit changes."
def tree_build(self, leafsize = 300):
"""Construct a kd-tree.
Parameters:
===========
data : array-like, shape (n,k)
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : positive integer
The number of points at which the algorithm switches over to
brute-force.
"""
print "Start building the tree..."
t = time.time()
#self.tree = self.h5file.createGroup(self.h5file.root, 'tree')
self.h5file.root._v_attrs.nodetag = 0
self.leafsize = self.h5file.root._v_attrs.leafsize = int(leafsize) # elements in the leaves
if self.leafsize<1:
raise ValueError("leafsize must be at least 1")
self.__build(self.h5file.root, np.arange(self.h5file.root.data._v_attrs.n_elements),
self.h5file.root.data._v_attrs.maxes, self.h5file.root.data._v_attrs.mins,
'tree') # run the build for the tree
# Number of nodes.
self.h5file.root.tree._v_attrs.n_nodes = self.h5file.root._v_attrs.nodetag
self.h5file.createHardLink(self.h5file.root.tree, 'data', self.h5file.root.data)
### Check the definition of the most bounded particle.
# Create a leafnode at h5file.root named h5file.root.mbp
self.__leafnode(self.h5file.root, [0], 'mbp', self.h5file.root.tree.data.read()[0], self.h5file.root.tree.data.read()[0])
print "Tree built in ", time.time()-t, " seconds, for ", self.h5file.root.tree._v_attrs.children, " particles."
t = time.time()
self.h5file.flush()
print time.time()-t, " seconds to commit the changes."
#return self.h5file
def __leafnode(self, parent, idx, name, maxes, mins):
self.h5file.root._v_attrs.nodetag += 1
leaf = self.h5file.createArray(parent, name, idx, title='leafnode_'+str(self.h5file.root._v_attrs.nodetag))
leaf._v_attrs.tag = self.h5file.root._v_attrs.nodetag # unique (in this tree) index for the leaf
leaf._v_attrs.children = len(idx) # number of elements in the leaf
leaf._v_attrs.type = 'leaf'
leaf._v_attrs.maxes = maxes
leaf._v_attrs.mins = mins
#leaf.flush()
return leaf
def __innernode(self, parent, split_dim, split, name, children, maxes, mins):
self.h5file.root._v_attrs.nodetag +=1
inner = self.h5file.createGroup(parent, name, title='innernode_'+str(self.h5file.root._v_attrs.nodetag))
inner._v_attrs.split_dim = split_dim # splitting dimension
inner._v_attrs.split = split # where the split happened
inner._v_attrs.children = children # number of elements in the node
inner._v_attrs.tag = self.h5file.root._v_attrs.nodetag #unique (in this tree) index of the node
inner._v_attrs.type = 'inner'
inner._v_attrs.maxes = maxes
inner._v_attrs.mins = mins
return inner
def __build(self, parent, idx, maxes, mins, name):
if len(idx)<=self.leafsize:
self.__leafnode(parent, idx, name, maxes, mins)
else:
data = self.h5file.root.data.read()[idx] # extract the needed from all the data
#maxes = np.amax(data,axis=0)
#mins = np.amin(data,axis=0)
# Calculate for each axis the distance between the max
# and the min, then argmax return the axes with the max
# distance, so "d" is the dimension with the max range.
d = np.argmax(maxes-mins)
maxval = maxes[d] # coord of the max in the axis with max range
minval = mins[d] # coord of the min in the axis with max range
            if maxval==minval:
                # all points are identical (zero dimension); warn user?
                self.__leafnode(parent, idx, name, maxes, mins)
                return
data = data[:,d] # take all the coord in the "max range" dimension
# Sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2 # coord where to split (=half of the range)
# Tuple containing the indexes of the non zero elements in
# the 0-th dimension corresponding to the condition.
less_idx = np.nonzero(data<=split)[0]
greater_idx = np.nonzero(data>split)[0]
if len(less_idx)==0:
split = np.amin(data)
less_idx = np.nonzero(data<=split)[0]
greater_idx = np.nonzero(data>split)[0]
if len(greater_idx)==0:
split = np.amax(data)
less_idx = np.nonzero(data<split)[0]
greater_idx = np.nonzero(data>=split)[0]
if len(less_idx)==0:
# _still_ zero? all must have the same value
assert np.all(data==data[0]), "Troublesome data array: %s" % data
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes) # lower set maxes
lessmaxes[d] = split # lower set ""max_range" axis" max is the split coord
greatermins = np.copy(mins) # higher set mins
greatermins[d] = split # higher set "max_range" axis min is the split coord
inn = self.__innernode(parent, d, split, name, idx.size, maxes, mins)
self.__build(inn._v_pathname, idx[less_idx], lessmaxes, mins, 'less')
self.__build(inn._v_pathname, idx[greater_idx], maxes, greatermins, 'greater')
def __count_neighbors(self, tree_1, tree_2, r, strategy = 'log_nosqrt_sort', self_corr = False, p=2.):
"""Count how many nearby pairs can be formed.
Count the number of pairs (x1,x2) can be formed, with x1 drawn
from self and x2 drawn from other, and where distance(x1,x2,p)<=r.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
==========
other : KDTree
r : float or one-dimensional array of floats The radius to
produce a count for. Multiple radii are searched with a
single tree traversal.
p : float, 1<=p<=infinity
Which Minkowski p-norm to use
Returns
=======
result : integer or one-dimensional array of integers
The number of pairs. Note that this is internally stored in a numpy int,
and so may overflow if very large (two billion).
"""
# Self-corr check
global n_traverse
n_traverse = 0
if self_corr:
print "Self corr ", self_corr
            tot_traverse = tree_1._v_attrs.n_nodes*(tree_1._v_attrs.n_nodes-1)/2
else:
tot_traverse = tree_1._v_attrs.n_nodes*tree_2._v_attrs.n_nodes
def traverse(node1, node2, rad_idx):
global n_traverse
n_traverse += 1
if (n_traverse % 1000 == 0):
print "traverse ", n_traverse, " di ", tot_traverse, " with ", node1._v_attrs.tag, node2._v_attrs.tag
min_r = min_distance_rectangle(node1, node2, p) # min dist between the two nodes
max_r = max_distance_rectangle(node1, node2, p) # min dist between the two nodes
# Indexes of the radii enterely including the nodes
# Before there was also other strategies
if strategy == 'log_nosqrt_sort':
#print "calcola i raggi che includono i nodi"
included = r[rad_idx] > 2*np.log10(max_r)
else:
print "Something wrong checking the entirely including radii!!!"
exit()
# If self-corr and not yet checked nodes: sum the number of couples in the nodes.
if (self_corr and (node1._v_attrs.tag < node2._v_attrs.tag)):
result[rad_idx[included]] += node1._v_attrs.children*node2._v_attrs.children
# If self-corr and identical nodes, add half of the couples.
elif (self_corr and (node1._v_attrs.tag == node2._v_attrs.tag)):
result[rad_idx[included]] += node1._v_attrs.children*node2._v_attrs.children/2
# If self-corr and yet checked, drop. (now redundant)
elif (self_corr and (node1._v_attrs.tag > node2._v_attrs.tag)):
pass
# If not sef-corr, add all the couples.
else:
result[rad_idx[included]] += node1._v_attrs.children*node2._v_attrs.children
if np.all(result>=0) == False:
print "Argh!!! Negative count adding entirely included nodes!!!"
print "result ", result
print "nodes tag ", node1._v_attrs.tag, node2._v_attrs.tag
print "min_r, max_r ", min_r, max_r
print "number of particles to add ", node1._v_attrs.children*node2._v_attrs.children
exit()
# Idxs of the radii intersecting the nodes.
if strategy == 'log_nosqrt_sort':
if min_r == 0:
rad_idx = rad_idx[(0 <=r[rad_idx]) & (r[rad_idx]<=2*np.log10(max_r))]
else:
rad_idx = rad_idx[(2*np.log10(min_r)<=r[rad_idx]) & (r[rad_idx]<=2*np.log10(max_r))]
else:
print "Something wrong checking the intersecting radii!!!"
exit()
# No radii intersecting the nodes.
if len(rad_idx)==0:
return
# If the first node is a leaf
if node1._v_attrs.type == 'leaf':
# and also the second.
if node2._v_attrs.type == 'leaf':
### Open leaves and count couples ###
                    # (other strategies were supported previously)
if strategy == 'log_nosqrt_sort':
# Calculate all the possible distances.
ds = minkowski_distance_p(tree_1.data.read()[node1.read()][:,np.newaxis,:],
tree_2.data.read()[node2.read()][np.newaxis,:,:],
p).ravel()
ds.sort() # sorting all the distances
# If is self-corr and not already checked.
if (self_corr and (node1._v_attrs.tag <= node2._v_attrs.tag)):
# Self-corr and identical leaves: half of the result.
if node1._v_attrs.tag == node2._v_attrs.tag:
result[rad_idx] += (np.searchsorted(ds,r[rad_idx],side='right'))/2
# Self-corr different leaves.
else:
result[rad_idx] += (np.searchsorted(ds,r[rad_idx],side='right'))
# If not self-corr.
elif (self_corr != True):
result[rad_idx] += (np.searchsorted(ds,r[rad_idx],side='right'))
if np.all(result>=0) == False:
print "Argh!!! Negative count opening leaves"
print "result ", result
print "nodes tag ", node1._v_attrs.tag, node2._v_attrs.tag
print "min_r, max_r ", min_r, max_r
exit()
###################################
#print "Traverse result ", result
# First node is a leaf but second is not.
else:
if (self_corr and (node1._v_attrs.tag > node2.less._v_attrs.tag)):
#print "Pruning traverse on ", node1.tag, node2.less.tag
pass
else:
traverse(node1, node2.less, rad_idx)
if (self_corr and (node1._v_attrs.tag > node2.greater._v_attrs.tag)):
#print "Pruning traverse on ", node1.tag, node2.greater.tag
pass
else:
traverse(node1, node2.greater, rad_idx)
# If first node is not a leaf
else:
# but second node is
if node2._v_attrs.type == 'leaf':
if (self_corr and (node1.less._v_attrs.tag > node2._v_attrs.tag)):
#print "Pruning traverse on ", node1.less.tag, node2.tag
pass
else:
traverse(node1.less, node2, rad_idx)
if (self_corr and (node1.greater._v_attrs.tag > node2._v_attrs.tag)):
#print "Pruning traverse on ", node1.greater.tag, node2.tag
pass
else:
traverse(node1.greater, node2, rad_idx)
# Second node is not a leaf
else:
if (self_corr and (node1.less._v_attrs.tag > node2.less._v_attrs.tag)):
#print "Pruning traverse on ", node1.less.tag, node2.less.tag
pass
else:
traverse(node1.less, node2.less, rad_idx)
if (self_corr and (node1.less._v_attrs.tag > node2.greater._v_attrs.tag)):
#print "Pruning traverse on ", node1.greater.tag, node2.tag
pass
else:
traverse(node1.less, node2.greater, rad_idx)
if (self_corr and (node1.greater._v_attrs.tag > node2.less._v_attrs.tag)):
#print "Pruning traverse on ", node1.greater.tag, node2.less.tag
pass
else:
traverse(node1.greater, node2.less, rad_idx)
if (self_corr and (node1.greater._v_attrs.tag > node2.greater._v_attrs.tag)):
#print "Pruning traverse on ", node1.greater.tag, node2.greater.tag
pass
else:
traverse(node1.greater, node2.greater, rad_idx)
############## Count_neighbours "main". ##############
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=long)
traverse(tree_1, tree_2, np.arange(1))
return result[0]
elif len(np.shape(r))==1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=long)
traverse(tree_1, tree_2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
    def profile(self, r, strategy = 'log_nosqrt_sort', self_corr = False, p=2.):
        # profile counts neighbours of the most bound particle against this same
        # tree, so both nodes come from self (the original referenced an
        # undefined 'other' and ignored the keyword arguments)
        tree_1 = self.h5file.root.mbp
        tree_2 = self.h5file.root.tree
        t = time.time()
        counts = self.__count_neighbors(tree_1, tree_2, r, strategy=strategy, self_corr=self_corr, p=p)
        print "Counting done in ", time.time()-t
        return counts
    def correlation(self, other, r, strategy = 'log_nosqrt_sort', self_corr = False, p=2.):
        tree_1 = self.h5file.root.tree
        tree_2 = other.h5file.root.tree
        t = time.time()
        counts = self.__count_neighbors(tree_1, tree_2, r, strategy=strategy, self_corr=self_corr, p=p)
        print "Counting done in ", time.time()-t
        return counts
def plot_node_bound(self, node):
from enthought.mayavi import mlab
maxes = node._v_attrs.maxes
mins = node._v_attrs.mins
x_M = maxes[0]
y_M = maxes[1]
z_M = maxes[2]
x_m = mins[0]
y_m = mins[1]
z_m = mins[2]
floor_x = np.array([x_m, x_M, x_M, x_m, x_m])
floor_y = np.array([y_m, y_m, y_M, y_M, y_m])
floor_z = np.array([z_m, z_m, z_m, z_m, z_m])
roof_x = np.array([x_m, x_M, x_M, x_m, x_m])
roof_y = np.array([y_m, y_m, y_M, y_M, y_m])
roof_z = np.array([z_M, z_M, z_M, z_M, z_M])
edge1x = np.array([x_m, x_m])
edge2x = np.array([x_M, x_M])
edge3x = np.array([x_M, x_M])
edge4x = np.array([x_m, x_m])
edge1y = np.array([y_m, y_m])
edge2y = np.array([y_m, y_m])
edge3y = np.array([y_M, y_M])
edge4y = np.array([y_M, y_M])
edge1z = np.array([z_m, z_M])
edge2z = np.array([z_m, z_M])
edge3z = np.array([z_m, z_M])
edge4z = np.array([z_m, z_M])
mlab.plot3d(floor_x, floor_y, floor_z)
mlab.plot3d(roof_x, roof_y, roof_z)
mlab.plot3d(edge1x, edge1y, edge1z)
mlab.plot3d(edge2x, edge2y, edge2z)
mlab.plot3d(edge3x, edge3y, edge3z)
mlab.plot3d(edge4x, edge4y, edge4z)
def plot_node_points(self, node):
from enthought.mayavi import mlab
idxs = np.array([])
for leaf in self.h5file.walkNodes(node, classname='Array'):
idxs = np.hstack((idxs, leaf.read()))
points = self.h5file.root.data.read()[idxs.astype(int)]
mlab.points3d(points[:,0], points[:,1],points[:,2])
def close(self):
self.h5file.close()
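# Hedged usage sketch (added for illustration; the file names, point counts and
# radius grid below are assumptions, not part of the original analysis code).
# It only exercises the API defined above: store the points, build the on-disk
# tree, then count pairs between two trees for an array of radii.
def _kdtree_usage_demo():
    pts_a = np.random.random((1000, 3))
    pts_b = np.random.random((1000, 3))
    tree_a = KDTree('demo_a.h5', 'w')
    tree_a.data_store(pts_a)
    tree_a.tree_build(leafsize=300)
    tree_b = KDTree('demo_b.h5', 'w')
    tree_b.data_store(pts_b)
    tree_b.tree_build(leafsize=300)
    # the radii are interpreted by the 'log_nosqrt_sort' strategy hard-coded in
    # the traversal above (see __count_neighbors for the exact convention)
    radii = np.linspace(-4., 0.5, 10)
    counts = tree_a.correlation(tree_b, radii)
    tree_a.close()
    tree_b.close()
    return counts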
| 3.28125 | 3 |
examples/dip/blog/views.py | navycut/navycut | 13 | 12770520 | <reponame>navycut/navycut
from navycut.urls import MethodView
from navycut.http import JsonResponse
from .models import *
# write your views here.
class IndexView(MethodView):
def get(self):
return JsonResponse(message="Salve Mundi!")
class GargiView(MethodView):
def get(self):
gargi= Gargi.query.get(1)
return JsonResponse(gargi.to_dict())
def post(self):
gargi = Gargi(name=self.json.name, subject=self.json.subject, body=self.json.body)
gargi.save()
return gargi.to_dict() | 2.34375 | 2 |
src/lib/model.py | Timokleia/QCANet | 26 | 12770521 | # -*- coding: utf-8 -*-
import numpy as np
import chainer
from chainer import cuda, Function, Variable
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from src.lib.loss import softmax_dice_loss
class Model_L2(Chain):
def __init__(
self,
ndim=3,
n_class=2,
init_channel=2,
kernel_size=3,
pool_size=2,
ap_factor=2,
gpu=-1,
class_weight=np.array([1, 1]).astype(np.float32),
loss_func='F.softmax_cross_entropy'
):
self.gpu = gpu
self.pool_size = pool_size
if gpu >= 0:
self.class_weight = cuda.to_gpu(np.array(class_weight).astype(np.float32))
else:
self.class_weight = np.array(class_weight).astype(np.float32)
self.train = True
self.loss_func = loss_func
initializer = chainer.initializers.HeNormal()
super(Model_L2, self).__init__(
c0=L.ConvolutionND(ndim, 1, init_channel, kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c1=L.ConvolutionND(ndim, init_channel, int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c3=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc0=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc1=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc3=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc6=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1, initialW=initializer, initial_bias=None),
bnc0=L.BatchNormalization(init_channel),
bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 1)))
)
def _calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
syn0 = F.relu(self.bnc1(self.c1(e0)))
del e0
e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
e2 = F.relu(self.bnc2(self.c2(e1)))
syn1 = F.relu(self.bnc3(self.c3(e2)))
del e1, e2
e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
e4 = F.relu(self.bnc4(self.c4(e3)))
e5 = F.relu(self.bnc5(self.c5(e4)))
del e3, e4
d0 = F.concat([self.dc0(e5), syn1])
del e5, syn1
d1 = F.relu(self.bndc1(self.dc1(d0)))
d2 = F.relu(self.bndc2(self.dc2(d1)))
del d0, d1
d3 = F.concat([self.dc3(d2), syn0])
del d2, syn0
d4 = F.relu(self.bndc4(self.dc4(d3)))
d5 = F.relu(self.bndc5(self.dc5(d4)))
del d3, d4
d6 = self.dc6(d5)
del d5
return d6
def __call__(self, x, t=None, seg=True):
h = self._calc(x)
if seg:
pred = F.softmax(h)
del h
return pred.data
else:
#loss = eval(self.loss_func)(h, t, class_weight=self.class_weight)
loss = eval(self.loss_func)(h, t)
pred = F.softmax(h)
del h
return loss, pred.data
class Model_L3(Chain):
def __init__(
self,
ndim=3,
n_class=2,
init_channel=2,
kernel_size=3,
pool_size=2,
ap_factor=2,
gpu=-1,
class_weight=np.array([1, 1]).astype(np.float32),
loss_func='F.softmax_cross_entropy'
):
self.gpu = gpu
self.pool_size = pool_size
if gpu >= 0:
self.class_weight = cuda.to_gpu(np.array(class_weight).astype(np.float32))
else:
self.class_weight = np.array(class_weight).astype(np.float32)
self.train = True
self.loss_func = loss_func
initializer = chainer.initializers.HeNormal()
super(Model_L3, self).__init__(
c0=L.ConvolutionND(ndim, 1, init_channel, kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c1=L.ConvolutionND(ndim, init_channel, int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c3=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c6=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc0=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc1=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3) + init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc3=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc6=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc9=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1, initialW=initializer, initial_bias=None),
bnc0=L.BatchNormalization(init_channel),
bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc6=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc7=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc7=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bndc8=L.BatchNormalization(int(init_channel * (ap_factor ** 1)))
)
def _calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
syn0 = F.relu(self.bnc1(self.c1(e0)))
del e0
e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
e2 = F.relu(self.bnc2(self.c2(e1)))
syn1 = F.relu(self.bnc3(self.c3(e2)))
del e1, e2
e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
e4 = F.relu(self.bnc4(self.c4(e3)))
syn2 = F.relu(self.bnc5(self.c5(e4)))
del e3, e4
e5 = F.max_pooling_nd(syn2, self.pool_size, self.pool_size)
e6 = F.relu(self.bnc6(self.c6(e5)))
e7 = F.relu(self.bnc7(self.c7(e6)))
del e5, e6
d0 = F.concat([self.dc0(e7), syn2])
del e7, syn2
d1 = F.relu(self.bndc1(self.dc1(d0)))
d2 = F.relu(self.bndc2(self.dc2(d1)))
del d0, d1
d3 = F.concat([self.dc3(d2), syn1])
del d2, syn1
d4 = F.relu(self.bndc4(self.dc4(d3)))
d5 = F.relu(self.bndc5(self.dc5(d4)))
del d3, d4
d6 = F.concat([self.dc6(d5), syn0])
del d5, syn0
d7 = F.relu(self.bndc7(self.dc7(d6)))
d8 = F.relu(self.bndc8(self.dc8(d7)))
del d6, d7
d9 = self.dc9(d8)
del d8
return d9
def __call__(self, x, t=None, seg=True):
h = self._calc(x)
if seg:
pred = F.softmax(h)
del h
return pred.data
else:
#loss = eval(self.loss_func)(h, t, class_weight=self.class_weight)
loss = eval(self.loss_func)(h, t)
pred = F.softmax(h)
del h
return loss, pred.data
class Model_L4(Chain):
def __init__(
self,
ndim=3,
n_class=2,
init_channel=2,
kernel_size=3,
pool_size=2,
ap_factor=2,
gpu=-1,
class_weight=np.array([1, 1]).astype(np.float32),
loss_func='F.softmax_cross_entropy'
):
self.gpu = gpu
self.pool_size = pool_size
if gpu >= 0:
self.class_weight = cuda.to_gpu(np.array(class_weight).astype(np.float32))
else:
self.class_weight = np.array(class_weight).astype(np.float32)
self.train = True
self.loss_func = loss_func
initializer = chainer.initializers.HeNormal()
super(Model_L4, self).__init__(
c0=L.ConvolutionND(ndim, 1, init_channel, kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c1=L.ConvolutionND(ndim, init_channel, int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c3=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c6=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c9=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 5)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc0=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 5)), int(init_channel * (ap_factor ** 5)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc1=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4) + init_channel * (ap_factor ** 5)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc3=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3) + init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc6=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc9=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc10=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc11=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc12=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1, initialW=initializer, initial_bias=None),
bnc0=L.BatchNormalization(init_channel),
bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc6=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc7=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bnc8=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bnc9=L.BatchNormalization(int(init_channel * (ap_factor ** 5))),
bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc7=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc8=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc10=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bndc11=L.BatchNormalization(int(init_channel * (ap_factor ** 1)))
)
def _calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
syn0 = F.relu(self.bnc1(self.c1(e0)))
del e0
e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
e2 = F.relu(self.bnc2(self.c2(e1)))
syn1 = F.relu(self.bnc3(self.c3(e2)))
del e1, e2
e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
e4 = F.relu(self.bnc4(self.c4(e3)))
syn2 = F.relu(self.bnc5(self.c5(e4)))
del e3, e4
e5 = F.max_pooling_nd(syn2, self.pool_size, self.pool_size)
e6 = F.relu(self.bnc6(self.c6(e5)))
syn3 = F.relu(self.bnc7(self.c7(e6)))
del e5, e6
e7 = F.max_pooling_nd(syn3, self.pool_size, self.pool_size)
e8 = F.relu(self.bnc8(self.c8(e7)))
e9 = F.relu(self.bnc9(self.c9(e8)))
del e7, e8
d0 = F.concat([self.dc0(e9), syn3])
del e9, syn3
d1 = F.relu(self.bndc1(self.dc1(d0)))
d2 = F.relu(self.bndc2(self.dc2(d1)))
del d0, d1
d3 = F.concat([self.dc3(d2), syn2])
del d2, syn2
d4 = F.relu(self.bndc4(self.dc4(d3)))
d5 = F.relu(self.bndc5(self.dc5(d4)))
del d3, d4
d6 = F.concat([self.dc6(d5), syn1])
del d5, syn1
d7 = F.relu(self.bndc7(self.dc7(d6)))
d8 = F.relu(self.bndc8(self.dc8(d7)))
del d6, d7
d9 = F.concat([self.dc9(d8), syn0])
del d8, syn0
d10 = F.relu(self.bndc10(self.dc10(d9)))
d11 = F.relu(self.bndc11(self.dc11(d10)))
del d9, d10
d12 = self.dc12(d11)
del d11
return d12
def __call__(self, x, t=None, seg=True):
h = self._calc(x)
if seg:
pred = F.softmax(h)
del h
return pred.data
else:
#loss = eval(self.loss_func)(h, t, class_weight=self.class_weight)
loss = eval(self.loss_func)(h, t)
pred = F.softmax(h)
del h
return loss, pred.data
| 2.359375 | 2 |
asseteour/resolver/base_asset_wrapper.py | ImagineersHub/asseteour | 0 | 12770522 | <gh_stars>0
from abc import ABCMeta, abstractmethod
from typing import Dict
class BaseAssetWrapper(metaclass=ABCMeta):
@classmethod
@abstractmethod
def parse_obj(cls, payload: Dict):
pass
@classmethod
@abstractmethod
def schema_json(cls, indent=4):
pass
@abstractmethod
def json(self):
pass
@property
@abstractmethod
def publish(self):
pass
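
# Hedged sketch (illustrative only, not part of the original package): a minimal
# concrete wrapper showing how the abstract interface above could be satisfied.
# The payload handling and placeholder schema are assumptions for demonstration.
import json


class DictAssetWrapper(BaseAssetWrapper):
    def __init__(self, payload: Dict):
        self._payload = payload

    @classmethod
    def parse_obj(cls, payload: Dict):
        # build a wrapper directly from a plain dictionary payload
        return cls(payload)

    @classmethod
    def schema_json(cls, indent=4):
        # placeholder schema; a real wrapper would describe its own fields
        return json.dumps({'type': 'object'}, indent=indent)

    def json(self):
        return json.dumps(self._payload)

    @property
    def publish(self):
        # nothing is pushed anywhere in this sketch; just expose the payload
        return self._payload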
| 2.671875 | 3 |
core/domain/summary_services_test.py | Himanshu1495/oppia | 0 | 12770523 | <filename>core/domain/summary_services_test.py<gh_stars>0
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import exp_services
from core.domain import exp_services_test
from core.domain import rights_manager
from core.domain import summary_services
from core.domain import user_services
import feconf
class ExplorationDisplayableSummaries(
exp_services_test.ExplorationServicesUnitTests):
"""Test functions for getting displayable exploration summary dicts."""
ALBERT_EMAIL = '<EMAIL>'
BOB_EMAIL = '<EMAIL>'
ALBERT_NAME = 'albert'
BOB_NAME = 'bob'
USER_C_NAME = 'c'
USER_D_NAME = 'd'
USER_C_EMAIL = '<EMAIL>'
USER_D_EMAIL = '<EMAIL>'
USER_C_PROFILE_PICTURE = 'c_profile_picture'
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
EXP_ID_4 = 'eid4'
EXP_ID_5 = 'eid5'
EXPECTED_VERSION_1 = 4
EXPECTED_VERSION_2 = 2
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Albert creates EXP_ID_1.
- (2) Bob edits the title of EXP_ID_1.
- (3) Albert creates EXP_ID_2.
- (4) Albert edits the title of EXP_ID_1.
- (5) Albert edits the title of EXP_ID_2.
- (6) Bob reverts Albert's last edit to EXP_ID_1.
- Bob tries to publish EXP_ID_2, and is denied access.
- (7) Albert publishes EXP_ID_2.
- (8) Albert creates EXP_ID_3
- (9) Albert publishes EXP_ID_3
- (10) Albert deletes EXP_ID_3
- (1) User_3 (has a profile_picture) creates EXP_ID_4.
- (2) User_4 edits the title of EXP_ID_4.
- (3) User_4 edits the title of EXP_ID_4.
"""
super(ExplorationDisplayableSummaries, self).setUp()
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.signup(self.BOB_EMAIL, self.BOB_NAME)
self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id)
exp_services.update_exploration(
self.bob_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 1 title'
}], 'Changed title.')
self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id)
exp_services.update_exploration(
self.albert_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 1 Albert title'
}], 'Changed title to Albert1 title.')
exp_services.update_exploration(
self.albert_id, self.EXP_ID_2, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 2 Albert title'
}], 'Changed title to Albert2 title.')
exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2)
with self.assertRaisesRegexp(
Exception, 'This exploration cannot be published'
):
rights_manager.publish_exploration(self.bob_id, self.EXP_ID_2)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_2)
self.save_new_valid_exploration(self.EXP_ID_3, self.albert_id)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_3)
exp_services.delete_exploration(self.albert_id, self.EXP_ID_3)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_NAME)
self.signup(self.USER_D_EMAIL, self.USER_D_NAME)
user_services.update_profile_picture_data_url(
self.user_c_id, self.USER_C_PROFILE_PICTURE)
self.save_new_valid_exploration(self.EXP_ID_4, self.user_c_id)
exp_services.update_exploration(
self.user_d_id, self.EXP_ID_4, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration updated title'
}], 'Changed title once.')
exp_services.update_exploration(
self.user_d_id, self.EXP_ID_4, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration updated title again'
}], 'Changed title twice.')
self.save_new_valid_exploration(self.EXP_ID_5, self.bob_id)
def test_get_human_readable_contributors_summary(self):
contributors_summary = {self.albert_id: 10, self.bob_id: 13}
self.assertEqual({
self.ALBERT_NAME: {
'num_commits': 10,
'profile_picture_data_url': None
},
self.BOB_NAME: {
'num_commits': 13,
'profile_picture_data_url': None
}
}, summary_services.get_human_readable_contributors_summary(
contributors_summary))
contributors_summary = {self.user_c_id: 1, self.user_d_id: 2}
self.assertEqual({
self.USER_C_NAME: {
'num_commits': 1,
'profile_picture_data_url': self.USER_C_PROFILE_PICTURE
},
self.USER_D_NAME: {
'num_commits': 2,
'profile_picture_data_url': None
}
}, summary_services.get_human_readable_contributors_summary(
contributors_summary))
def test_get_displayable_exp_summary_dicts_matching_ids(self):
# A list of exp_id's are passed in:
# EXP_ID_1 -- private exploration owned by Albert
# EXP_ID_2 -- pubished exploration owned by Albert
# EXP_ID_3 -- deleted exploration
# EXP_ID_5 -- private exploration owned by Bob
# Should only return [EXP_ID_2]
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5]))
expected_summary = {
'status': u'public',
'thumbnail_bg_color': '#05a69a',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': '/images/gallery/thumbnails/Lightbulb.svg',
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'human_readable_contributors_summary': {
self.ALBERT_NAME: {
'num_commits': 2,
'profile_picture_data_url': None
}
},
'id': self.EXP_ID_2,
'category': u'A category',
'ratings': feconf.get_empty_ratings(),
'title': u'Exploration 2 Albert title',
'num_views': 0,
'objective': u'An objective'
}
self.assertIn('last_updated_msec', displayable_summaries[0])
self.assertDictContainsSubset(expected_summary,
displayable_summaries[0])
def test_get_public_and_filtered_private_summary_dicts_for_creator(self):
# If a new exploration is created by another user (Bob) and not public,
# then Albert cannot see it when querying for explorations.
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5],
editor_user_id=self.albert_id))
self.assertEqual(len(displayable_summaries), 2)
self.assertEqual(displayable_summaries[0]['id'], self.EXP_ID_1)
self.assertEqual(displayable_summaries[1]['id'], self.EXP_ID_2)
# However, if Albert is granted editor access to Bob's exploration, then
# Albert has access to the corresponding summary.
rights_manager.assign_role_for_exploration(
self.bob_id, self.EXP_ID_5, self.albert_id,
rights_manager.ROLE_EDITOR)
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5],
editor_user_id=self.albert_id))
self.assertEqual(len(displayable_summaries), 3)
self.assertEqual(displayable_summaries[0]['status'], 'private')
self.assertEqual(displayable_summaries[0]['id'], self.EXP_ID_1)
self.assertEqual(displayable_summaries[1]['status'], 'public')
self.assertEqual(displayable_summaries[1]['id'], self.EXP_ID_2)
self.assertEqual(displayable_summaries[2]['status'], 'private')
self.assertEqual(displayable_summaries[2]['id'], self.EXP_ID_5)
| 1.976563 | 2 |
MeasureCoffee.py | ciauri/CoffeeRobot | 2 | 12770524 | <reponame>ciauri/CoffeeRobot<filename>MeasureCoffee.py
import numpy as np
import argparse
import glob
import cv2
import io
import picamera
from Twitter import Twitter
import onlineCoffee
import time
def auto_canny(image, sigma=0.33):
v = np.median(image)
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
return edged
def r(image):
#image = cv2.imread('2.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (1,1), 1)
wide = cv2.Canny(blurred, 10, 50)
tight = cv2.Canny(blurred, 225, 250)
auto = auto_canny(blurred)
# show the images
return wide
def houghlines(im,h):
#im = cv2.imread('2.jpg')
#ret,gray = cv2.threshold(im,40,255,cv2.THRESH_TOZERO_INV)
#gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
#edges = cv2.Canny(gray,10,200)
def getKey(item):
return abs(item[1]-item[3])
edges = r(im)
lines = cv2.HoughLines(edges,20,np.pi/190,100)
horizontal = []
for line in lines:
for rho,theta in line:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b)) # Here i have used int() instead of rounding the decimal value, so 3.8 --> 3
y1 = int(y0 + 1000*(a)) # But if you want to round the number, then use np.around() function, then 3.8 --> 4.0
x2 = int(x0 - 1000*(-b)) # But we need integers, so use int() function after that, ie int(np.around(x))
y2 = int(y0 - 1000*(a))
#cv2.line(im,(x1,y1),(x2,y2),(0,255,0),2)
#print(str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2))
horizontal.append((x1,y1,x2,y2))
#cv2.imshow('houghlines',im)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
horizontal = sorted(horizontal,key=getKey)
i = 0
votes = 0
while True:
cv2.line(im,(horizontal[i][0],horizontal[i][1]),(horizontal[i][2],horizontal[i][3]),(200,0,0),2)
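        # average vertical position of the most horizontal detected line; image
        # y grows downward, so (1 - y/height)*100 below gives the fill level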
average = (horizontal[i][1]+horizontal[i][3])/2.0
percent = average/h
actual = 100-(percent*100)
if actual > 80:
i += 1
print(actual)
elif actual < 25:
print(actual)
votes +=1
i +=1
elif actual <30:
print("the coffee pot is getting low " + str(actual) + "% full!")
return votes,actual
else:
print("the coffee pot is " + str(actual) + "% full!")
return votes,actual
def detect():
stream = io.BytesIO()
#Get the picture (low resolution, so it should be quite fast)
#Here you can also specify other parameters (e.g.:rotate the image)
with picamera.PiCamera() as camera:
camera.resolution = (700, 525)
camera.capture(stream, format='jpeg')
buff = np.fromstring(stream.getvalue(), dtype=np.uint8)
#Now creates an OpenCV image
img = cv2.imdecode(buff, 1)
#img = cv2.imread('coffee.jpg')
face_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/coffeePot.xml')
eye_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/liquid.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.2, 500, minSize=(80,100))
for (x,y,w,h) in faces:
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray, 1.2, 10, minSize=(70,50))
return houghlines(roi_color,h)
while True:
percent = 0
votes = 0
t = Twitter()
for i in range(0,20):
numVotes,numPercent =detect()
percent += numPercent
votes += numVotes
percent /= 20
print("********* UPDATING **********")
if votes > 20:
onlineCoffee.updateCoffeeSite("We've most likely run out of coffee... I blame Kevin")
#t.tweet("We've most likely run out of coffee... I blame Kevin")
else:
onlineCoffee.updateCoffeeSite("There's plenty of coffee! It's " + str(int(percent)) + "% full!")
#t.tweet("There's plenty of coffee! It's " + str(int(percent)) + "% full!")
print(votes)
| 2.75 | 3 |
pyloess.py | jcrotinger/pyloess | 3 | 12770525 | # pylint: disable-msg=E1101
"""
Wrapper to lowess and stl routines.
LOWESS:
Initial Fortran code available at:
http://netlib.bell-labs.com/netlib/go/lowess.f.gz
initial author: <NAME>, 1979.
Simple to double precision conversion of the Fortran code by Pierre
Gerard-Marchant, 2007/03.
STL:
Initial Fortran code available at:
http://netlib.bell-labs.com/netlib/a/stl.gz
Initial Authors: <NAME>, <NAME>, <NAME>, and
<NAME>, 1990.
Simple-to-double precision conversion of the Fortran code by Pierre
Gerard-Marchant, 2007/03.
LOESS:
Initial C/Fortran package avialable at
http://netlib.bell-labs.com/netlib/a/dloess.gz
Initial authors: <NAME>, <NAME> and Shyu
Adaptation to Pyrex/Python by <NAME>, 2007/03
:author: <NAME>
:contact: pierregm_at_uga_edu
:date: $Date$
:version: $Id$
"""
__author__ = "<NAME> ($Author$)"
__version__ = '1.0'
__revision__ = "$Revision$"
__date__ = '$Date$'
import numpy
from numpy import bool_, complex_, float_, int_, str_, object_
from numpy import array, recarray, empty, fromiter, logical_not
from . import _lowess, _stl, _loess
#####---------------------------------------------------------------------------
#--- --- FLOWESS ---
#####---------------------------------------------------------------------------
def flowess(x,y,span=0.5,nsteps=2,delta=0):
"""Performs a robust locally weighted regression (lowess).
Outputs a *3xN* array of fitted values, residuals and fit weights.
:Parameters:
x : ndarray
Abscissas of the points on the scatterplot; the values in X must be
ordered from smallest to largest.
y : ndarray
Ordinates of the points on the scatterplot.
span : Float *[0.5]*
Fraction of the total number of points used to compute each fitted value.
As f increases the smoothed values become smoother. Choosing f in the range
.2 to .8 usually results in a good fit.
nsteps : Integer *[2]*
Number of iterations in the robust fit. If nsteps=0, the nonrobust fit
is returned; setting nsteps=2 should serve most purposes.
delta : Integer *[0]*
Nonnegative parameter which may be used to save computations.
If N (the number of elements in x) is less than 100, set delta=0.0;
if N is greater than 100 you should find out how delta works by reading
the additional instructions section.
:Returns:
A recarray of smoothed values ('smooth'), residuals ('residuals') and local
robust weights ('weights').
Additional instructions
-----------------------
    From the original author:
DELTA can be used to save computations. Very roughly the
algorithm is this: on the initial fit and on each of the
NSTEPS iterations locally weighted regression fitted values
are computed at points in X which are spaced, roughly, DELTA
apart; then the fitted values at the remaining points are
computed using linear interpolation. The first locally
weighted regression (l.w.r.) computation is carried out at
X(1) and the last is carried out at X(N). Suppose the
l.w.r. computation is carried out at X(I). If X(I+1) is
greater than or equal to X(I)+DELTA, the next l.w.r.
computation is carried out at X(I+1). If X(I+1) is less
than X(I)+DELTA, the next l.w.r. computation is carried out
at the largest X(J) which is greater than or equal to X(I)
but is not greater than X(I)+DELTA. Then the fitted values
for X(K) between X(I) and X(J), if there are any, are
computed by linear interpolation of the fitted values at
X(I) and X(J). If N is less than 100 then DELTA can be set
to 0.0 since the computation time will not be too great.
For larger N it is typically not necessary to carry out the
l.w.r. computation for all points, so that much computation
time can be saved by taking DELTA to be greater than 0.0.
If DELTA = Range (X)/k then, if the values in X were
uniformly scattered over the range, the full l.w.r.
computation would be carried out at approximately k points.
Taking k to be 50 often works well.
Method
------
The fitted values are computed by using the nearest neighbor
routine and robust locally weighted regression of degree 1
with the tricube weight function. A few additional features
have been added. Suppose r is FN truncated to an integer.
Let h be the distance to the r-th nearest neighbor
from X[i]. All points within h of X[i] are used. Thus if
the r-th nearest neighbor is exactly the same distance as
other points, more than r points can possibly be used for
the smooth at X[i]. There are two cases where robust
locally weighted regression of degree 0 is actually used at
X[i]. One case occurs when h is 0.0. The second case
occurs when the weighted standard error of the X[i] with
respect to the weights w[j] is less than .001 times the
range of the X[i], where w[j] is the weight assigned to the
j-th point of X (the tricube weight times the robustness
weight) divided by the sum of all of the weights. Finally,
if the w[j] are all zero for the smooth at X[i], the fitted
value is taken to be Y[i].
References
----------
<NAME>. 1978. Visual and Computational Considerations in
Smoothing Scatterplots by Locally Weighted Regression. In
Computer Science and Statistics: Eleventh Annual Symposium on the
Interface, pages 96-100. Institute of Statistics, North Carolina
State University, Raleigh, North Carolina, 1978.
<NAME>, 1979. Robust Locally Weighted Regression and
Smoothing Scatterplots. Journal of the American Statistical
Association, 74:829-836, 1979.
<NAME>, 1981. LOWESS: A Program for Smoothing Scatterplots
by Robust Locally Weighted Regression. The American Statistician,
35:54.
"""
x = array(x, copy=False, subok=True, dtype=float_)
y = array(y, copy=False, subok=True, dtype=float_)
if x.size != y.size:
raise ValueError("Incompatible size between observations and response!")
    out_dtype = [('smooth',float_), ('weights', float_), ('residuals', float_)]
    return fromiter(zip(*_lowess.lowess(x,y,span,nsteps,delta,)),
                    dtype=out_dtype).view(recarray)
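# Hedged usage sketch (synthetic data, for illustration only): flowess returns a
# recarray, so the smoothed curve, residuals and robustness weights are all
# available by field name.
def _flowess_example():
    x = numpy.linspace(0., 10., 200)
    y = numpy.sin(x) + 0.3 * numpy.random.normal(size=x.size)
    result = flowess(x, y, span=0.3, nsteps=2, delta=0.)
    return result['smooth'], result['residuals'], result['weights']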
class lowess:
"""An object for robust locally weighted regression.
:IVariables:
inputs : An object storing the inputs.
x : A (n,) ndarray of observations (sorted by increasing values).
y : A (n,) ndarray of responses (sorted by increasing x).
parameters : An object storing the control parameters.
span : Fraction of the total number of points used in the smooth.
nsteps : Number of iterations of the robust fit.
delta : Parameter used to save computation time
outputs : An object storing the outputs.
            fitted_values : A (n,) ndarray of fitted values.
            fitted_residuals : A (n,) ndarray of fitted residuals.
            robust_weights : A (n,) ndarray of robust weights.
Method
------
The fitted values are computed by using the nearest neighbor
routine and robust locally weighted regression of degree 1
with the tricube weight function. A few additional features
have been added. Suppose r is FN truncated to an integer.
Let h be the distance to the r-th nearest neighbor
from X[i]. All points within h of X[i] are used. Thus if
the r-th nearest neighbor is exactly the same distance as
other points, more than r points can possibly be used for
the smooth at X[i]. There are two cases where robust
locally weighted regression of degree 0 is actually used at
X[i]. One case occurs when h is 0.0. The second case
occurs when the weighted standard error of the X[i] with
respect to the weights w[j] is less than .001 times the
range of the X[i], where w[j] is the weight assigned to the
j-th point of X (the tricube weight times the robustness
weight) divided by the sum of all of the weights. Finally,
if the w[j] are all zero for the smooth at X[i], the fitted
value is taken to be Y[i].
References
----------
<NAME>. 1978. Visual and Computational Considerations in
Smoothing Scatterplots by Locally Weighted Regression. In
Computer Science and Statistics: Eleventh Annual Symposium on the
Interface, pages 96-100. Institute of Statistics, North Carolina
State University, Raleigh, North Carolina, 1978.
<NAME>, 1979. Robust Locally Weighted Regression and
Smoothing Scatterplots. Journal of the American Statistical
Association, 74:829-836, 1979.
<NAME>, 1981. LOWESS: A Program for Smoothing Scatterplots
by Robust Locally Weighted Regression. The American Statistician,
35:54.
"""
#............................................
class _inputs(object):
"""Inputs of the lowess fit.
:IVariables:
x : ndarray
A (n,) float ndarray of observations (sorted by increasing values).
y : ndarray
A (n,) float ndarray of responses (sorted by increasing x).
"""
def __init__(self, x, y):
x = array(x, copy=False, subok=True, dtype=float_).ravel()
y = array(y, copy=False, subok=True, dtype=float_).ravel()
if x.size != y.size:
msg = "Incompatible size between observations (%s) and response (%s)!"
raise ValueError(msg % (x.size, y.size))
idx = x.argsort()
self._x = x[idx]
self._y = y[idx]
#.....
x = property(fget=lambda self:self._x)
y = property(fget=lambda self:self._y)
#............................................
class _parameters(object):
"""Parameters of the lowess fit.
:IVariables:
span : float *[0.5]*
Fraction of the total number of points used to compute each fitted value.
As f increases the smoothed values become smoother. Choosing f in the range
.2 to .8 usually results in a good fit.
nsteps : integer *[2]*
Number of iterations in the robust fit. If nsteps=0, the nonrobust fit
is returned; setting nsteps=2 should serve most purposes.
delta : integer *[0]*
Nonnegative parameter which may be used to save computations.
If N (the number of observations) is less than 100, set delta=0.0;
if N is greater than 100 you should find out how delta works by reading
the additional instructions section.
"""
def __init__(self, span, nsteps, delta, caller):
self.activated = False
self._span = span
self._nsteps = nsteps
self._delta = delta
self._caller = caller
#.....
def _get_span(self):
"Gets the current span."
return self._span
def _set_span(self, span):
"Sets the current span, and refit if needed."
if span <= 0 or span > 1:
raise ValueError("span should be between zero and one!")
self._span = span
if self.activated:
self._caller.fit()
span = property(fget=_get_span, fset=_set_span)
#.....
def _get_nsteps(self):
"Gets the current number of iterations."
return self._nsteps
def _set_nsteps(self, nsteps):
"Sets the current number of iterations, and refit if needed."
if nsteps < 0:
raise ValueError("nsteps should be positive!")
self._nsteps = nsteps
if self.activated:
self._caller.fit()
nsteps = property(fget=_get_nsteps, fset=_set_nsteps)
#.....
def _get_delta(self):
"Gets the current delta."
return self._delta
def _set_delta(self, delta):
"Sets the current delta, and refit if needed."
if delta < 0:
raise ValueError("delta should be positive!")
self._delta = delta
if self.activated:
self._caller.fit()
delta = property(fget=_get_delta, fset=_set_delta)
#............................................
class _outputs(object):
"""Outputs of the lowess fit.
:IVariables:
fitted_values : ndarray
A (n,) ndarray of fitted values (readonly).
fitted_residuals : ndarray
A (n,) ndarray of residuals (readonly).
        robust_weights : ndarray
A (n,) ndarray of robust weights (readonly).
"""
def __init__(self, n):
self._fval = empty((n,), float_)
self._rw = empty((n,), float_)
self._fres = empty((n,), float_)
#.....
fitted_values = property(fget=lambda self:self._fval)
robust_weights = property(fget=lambda self:self._rw)
fitted_residuals = property(fget=lambda self:self._fres)
#............................................
def __init__(self, x, y, span=0.5, nsteps=2, delta=0):
"""
:Parameters:
x : ndarray
Abscissas of the points on the scatterplot; the values in X must be
ordered from smallest to largest.
y : ndarray
Ordinates of the points on the scatterplot.
span : Float *[0.5]*
Fraction of the total number of points used to compute each fitted value.
As span increases the smoothed values become smoother. Choosing span in
the range .2 to .8 usually results in a good fit.
nsteps : Integer *[2]*
Number of iterations in the robust fit. If nsteps=0, the nonrobust fit
is returned; setting nsteps=2 should serve most purposes.
delta : Integer *[0]*
Nonnegative parameter which may be used to save computations.
If N (the number of elements in x) is less than 100, set delta=0.0;
if N is greater than 100 you should find out how delta works by reading
the additional instructions section.
"""
# Check the input data .........
# Initialize the attributes ...
self.inputs = lowess._inputs(x,y)
self.parameters = lowess._parameters(span, nsteps, delta, self)
self.outputs = lowess._outputs(self.inputs._x.size)
# Force a fit .................
self.fit()
#............................................
def fit(self):
"""Computes the lowess fit. Returns a lowess.outputs object."""
(x, y) = (self.inputs._x, self.inputs._y)
# Get the parameters .....
self.parameters.activated = True
f = self.parameters._span
nsteps = self.parameters._nsteps
delta = self.parameters._delta
(tmp_s, tmp_w, tmp_r) = _lowess.lowess(x, y, f, nsteps, delta)
# Process the outputs .....
#... set the values
self.outputs.fitted_values[:] = tmp_s.flat
self.outputs.robust_weights[:] = tmp_w.flat
self.outputs.fitted_residuals[:] = tmp_r.flat
# Clean up the mess .......
del(tmp_s, tmp_w, tmp_r)
return self.outputs
#####---------------------------------------------------------------------------
#--- --- STL ---
#####---------------------------------------------------------------------------
def stl(y, np=12, ns=7, nt=None, nl=None, isdeg=1, itdeg=1, ildeg=1,
nsjump=None, ntjump=None, nljump=None, robust=True, ni=None, no=None):
"""Decomposes a time series into seasonal and trend components.
:Parameters:
y : Numerical array
Time Series to be decomposed.
np : Integer *[12]*
Period of the seasonal component.
For example, if the time series is monthly with a yearly cycle, then
np=12.
ns : Integer *[7]*
Length of the seasonal smoother.
The value of ns should be an odd integer greater than or equal to 3.
A value ns>6 is recommended. As ns increases the values of the
seasonal component at a given point in the seasonal cycle (e.g., January
values of a monthly series with a yearly cycle) become smoother.
nt : Integer *[None]*
Length of the trend smoother.
The value of nt should be an odd integer greater than or equal to 3.
A value of nt between 1.5*np and 2*np is recommended. As nt increases,
the values of the trend component become smoother.
If nt is None, it is estimated as the smallest odd integer greater
or equal to (1.5*np)/[1-(1.5/ns)]
nl : Integer *[None]*
Length of the low-pass filter.
The value of nl should be an odd integer greater than or equal to 3.
The smallest odd integer greater than or equal to np is used by default.
isdeg : Integer *[1]*
Degree of locally-fitted polynomial in seasonal smoothing.
The value is 0 or 1.
itdeg : Integer *[1]*
Degree of locally-fitted polynomial in trend smoothing.
The value is 0 or 1.
ildeg : Integer *[1]*
Degree of locally-fitted polynomial in low-pass smoothing.
The value is 0 or 1.
nsjump : Integer *[None]*
Skipping value for seasonal smoothing.
The seasonal smoother skips ahead nsjump points and then linearly
interpolates in between. The value of nsjump should be a positive
integer; if nsjump=1, a seasonal smooth is calculated at all n points.
To make the procedure run faster, a reasonable choice for nsjump is
10%-20% of ns. By default, nsjump= 0.1*ns.
ntjump : Integer *[None]*
Skipping value for trend smoothing. If None, ntjump= 0.1*nt
nljump : Integer *[None]*
Skipping value for low-pass smoothing. If None, nljump= 0.1*nl
robust : Boolean *[True]*
Flag indicating whether robust fitting should be performed.
ni : Integer *[None]*
Number of loops for updating the seasonal and trend components.
The value of ni should be a positive integer.
See the next argument for advice on the choice of ni.
If ni is None, ni is set to 1 for robust fitting, to 5 otherwise.
no : Integer *[None]*
Number of iterations of robust fitting. The value of no should
be a nonnegative integer. If the data are well behaved without
outliers, then robustness iterations are not needed. In this case
set no=0, and set ni=2 to 5 depending on how much security
you want that the seasonal-trend looping converges.
If outliers are present then no=3 is a very secure value unless
the outliers are radical, in which case no=5 or even 10 might
be better. If no>0 then set ni to 1 or 2.
If None, then no is set to 15 for robust fitting, to 0 otherwise.
Returns:
A recarray of estimated trend values ('trend'), estimated seasonal
components ('seasonal'), local robust weights ('weights') and fit
residuals ('residuals').
The final local robust weights are all 1 if no=0.
Reference
---------
<NAME>, <NAME>, <NAME> and <NAME>.
1990. STL: A Seasonal-Trend Decomposition Procedure Based on LOESS
(with Discussion). Journal of Official Statistics, 6:3-73.
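Example
-------
Illustrative sketch (assumes the compiled `_stl` extension is available;
`monthly` stands for any 1-D array of monthly observations without missing
values):
>>> res = stl(monthly, np=12, ns=7, robust=True)
>>> reconstructed = res.trend + res.seasonal + res.residuals  # equals `monthly`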
"""
ns = max(ns, 3)
if ns%2 == 0:
ns += 1
np = max(2, np)
if nt is None:
nt = max(int((1.5*np/(1.-1.5/ns))+0.5), 3)
if not nt%2:
nt += 1
if nl is None:
nl = max(3,np)
if not nl%2:
nl += 1
if nsjump is None:
nsjump = int(0.1*ns + 0.9)
if ntjump is None:
ntjump = int(0.1*nt + 0.9)
if nljump is None:
nljump = int(0.1*nl + 0.9)
if robust:
if ni is None:
ni = 1
if no is None:
no = 15
else:
if ni is None:
ni = 5
if no is None:
no = 0
if hasattr(y,'_mask') and numpy.any(y._mask):
raise ValueError("Missing values should first be filled !")
y = array(y, subok=True, copy=False).ravel()
(rw,szn,trn,work) = _stl.stl(y,np,ns,nt,nl,isdeg,itdeg,ildeg,
nsjump,ntjump,nljump,ni,no,)
dtyp = [('trend', float_), ('seasonal', float_),
('residuals', float_), ('weights', float_)]
result = fromiter(zip(trn,szn,y-trn-szn,rw), dtype=dtyp)
return result.view(recarray)
#####---------------------------------------------------------------------------
#--- --- Loess ---
#####---------------------------------------------------------------------------
loess = _loess.loess
"""
loess : locally weighted estimates. Multi-variate version
:Keywords:
x : ndarray
A (n,p) ndarray of independent variables, with n the number of observations
and p the number of variables.
y : ndarray
A (n,) ndarray of observations
weights : ndarray
A (n,) ndarray of weights to be given to individual observations in the
sum of squared residuals that forms the local fitting criterion. If not
None, the weights should be non negative. If the different observations
have non-equal variances, the weights should be inversely proportional
to the variances.
By default, an unweighted fit is carried out (all the weights are one).
surface : string ["interpolate"]
Determines whether the fitted surface is computed directly at all points
("direct") or whether an interpolation method is used ("interpolate").
The default ("interpolate") is what most users should use unless special
circumstances warrant.
statistics : string ["approximate"]
Determines whether the statistical quantities are computed exactly
("exact") or approximately ("approximate"). "exact" should only be used
for testing the approximation in statistical development and is not meant
for routine usage because computation time can be horrendous.
trace_hat : string ["wait.to.decide"]
Determines how the trace of the hat matrix should be computed. The hat
matrix is used in the computation of the statistical quantities.
If "exact", an exact computation is done; this could be slow when the
number of observations n becomes large. If "wait.to.decide" is selected,
then a default is "exact" for n < 500 and "approximate" otherwise.
This option is only useful when the fitted surface is interpolated. If
surface is "direct", an exact computation is always done for the trace.
Setting trace_hat to "approximate" for large dataset will substantially
reduce the computation time.
iterations : integer
Number of iterations of the robust fitting method. If the family is
"gaussian", the number of iterations is set to 0.
cell : integer
Maximum cell size of the kd-tree. Suppose k = floor(n*cell*span),
where n is the number of observations, and span the smoothing parameter.
Then, a cell is further divided if the number of observations within it
is greater than or equal to k. This option is only used if the surface
is interpolated.
span : float [0.75]
Smoothing factor, as a fraction of the number of points to take into
account.
degree : integer [2]
Overall degree of locally-fitted polynomial. 1 is locally-linear
fitting and 2 is locally-quadratic fitting. Degree should be 2 at most.
normalize : boolean [True]
Determines whether the independent variables should be normalized.
If True, the normalization is performed by setting the 10% trimmed
standard deviation to one. If False, no normalization is carried out.
This option is only useful for more than one variable. For spatial
coordinates predictors or variables with a common scale, it should be
set to False.
family : string ["gaussian"]
Determines the assumed distribution of the errors. The values are
"gaussian" or "symmetric". If "gaussian" is selected, the fit is
performed with least-squares. If "symmetric" is selected, the fit
is performed robustly by redescending M-estimators.
parametric_flags : sequence [ [False]*p ]
Indicates which independent variables should be conditionally-parametric
(if there are two or more independent variables). The argument should
be a sequence of booleans, with the same size as the number of independent
variables, specified in the order of the predictor group ordered in x.
drop_square : sequence [ [False]*p ]
When there are two or more independent variables and when a 2nd order
polynomial is used, "drop_square_flags" specifies those numeric predictors
whose squares should be dropped from the set of fitting variables.
The method of specification is the same as for parametric.
:Outputs:
fitted_values : ndarray
The (n,) ndarray of fitted values.
fitted_residuals : ndarray
The (n,) ndarray of fitted residuals (observations - fitted values).
enp : float
Equivalent number of parameters.
s : float
Estimate of the scale of residuals.
one_delta: float
Statistical parameter used in the computation of standard errors.
two_delta : float
Statistical parameter used in the computation of standard errors.
pseudovalues : ndarray
The (n,) ndarray of adjusted values of the response when robust estimation
is used.
trace_hat : float
Trace of the operator hat matrix.
diagonal :
Diagonal of the operator hat matrix.
robust : ndarray
The (n,) ndarray of robustness weights for robust fitting.
divisor : ndarray
The (p,) array of normalization divisors for numeric predictors.
newdata : ndarray
The (m,p) array of independent variables where the surface must be estimated.
values : ndarray
The (m,) ndarray of loess values evaluated at newdata
stderr : ndarray
The (m,) ndarray of the estimates of the standard error on the estimated
values.
residual_scale : float
Estimate of the scale of the residuals
df : integer
Degrees of freedom of the t-distribution used to compute pointwise
confidence intervals for the evaluated surface.
nest : integer
Number of new observations.
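:Example:
Illustrative sketch (assumes the compiled `_loess` extension is built; `x` is
an (n,p) array of predictors and `y` the matching (n,) response):
>>> model = loess(x, y, span=0.75, family="gaussian")
>>> model.fit()
The quantities listed under :Outputs: are then available on the fitted object.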
"""
loess_anova = _loess.anova
| 1.34375 | 1 |
python/oneflow/compatible/single_client/__init__.py | grybd/oneflow | 0 | 12770526 | <reponame>grybd/oneflow
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow._oneflow_internal
Size = oneflow._oneflow_internal.Size
device = oneflow._oneflow_internal.device
placement = oneflow._oneflow_internal.placement
locals()["dtype"] = oneflow._oneflow_internal.dtype
locals()["char"] = oneflow._oneflow_internal.char
locals()["float16"] = oneflow._oneflow_internal.float16
locals()["half"] = oneflow._oneflow_internal.float16
locals()["float32"] = oneflow._oneflow_internal.float32
locals()["float"] = oneflow._oneflow_internal.float
locals()["double"] = oneflow._oneflow_internal.double
locals()["float64"] = oneflow._oneflow_internal.float64
locals()["int8"] = oneflow._oneflow_internal.int8
locals()["int"] = oneflow._oneflow_internal.int32
locals()["int32"] = oneflow._oneflow_internal.int32
locals()["int64"] = oneflow._oneflow_internal.int64
locals()["long"] = oneflow._oneflow_internal.int64
locals()["uint8"] = oneflow._oneflow_internal.uint8
locals()["record"] = oneflow._oneflow_internal.record
locals()["tensor_buffer"] = oneflow._oneflow_internal.tensor_buffer
locals()["bfloat16"] = oneflow._oneflow_internal.bfloat16
from oneflow.compatible.single_client.framework import (
env_util,
session_context,
session_util,
)
from oneflow.core.job.job_conf_pb2 import JobConfigProto
from oneflow.core.job.job_set_pb2 import ConfigProto
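# Tear down the global foreign callback and environment set up by the main
# oneflow package on import; the environment is re-initialized below in
# single-client mode.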
oneflow._oneflow_internal.DestroyGlobalForeignCallback()
oneflow._oneflow_internal.DestroyEnv()
import time
time.sleep(1)
del time
oneflow._oneflow_internal.SetIsMultiClient(False)
session_context.OpenDefaultSession(
session_util.Session(oneflow._oneflow_internal.NewSessionId())
)
oneflow._oneflow_internal.EnableEagerEnvironment(False)
del env_util
del session_util
del session_context
import oneflow.compatible.single_client.framework.c_api_util
from oneflow.compatible.single_client.framework import (
python_callback,
register_python_callback,
)
oneflow._oneflow_internal.RegisterGlobalForeignCallback(
python_callback.global_python_callback
)
del python_callback
del register_python_callback
from oneflow.compatible.single_client.framework import watcher
oneflow._oneflow_internal.RegisterGlobalWatcher(watcher._global_watcher)
del watcher
from oneflow.compatible.single_client.eager import boxing_util
oneflow._oneflow_internal.deprecated.RegisterBoxingUtilOnlyOnce(
boxing_util._global_boxing_util
)
del boxing_util
from oneflow.compatible.single_client.ops.util import custom_op_module
oneflow._oneflow_internal.RegisterPyKernels(
custom_op_module._python_kernel_reg.kernels_
)
del custom_op_module
from oneflow.compatible.single_client.framework import register_class_method_util
register_class_method_util.RegisterMethod4Class()
del register_class_method_util
INVALID_SPLIT_AXIS = oneflow._oneflow_internal.INVALID_SPLIT_AXIS
import atexit
from oneflow.compatible.single_client.framework.session_context import (
TryCloseAllSession,
)
atexit.register(TryCloseAllSession)
del TryCloseAllSession
del atexit
import sys
__original_exit__ = sys.exit
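# Wrap sys.exit so that exiting with a non-zero status first tells the OneFlow
# master process to abort (see MasterSendAbort below) before actually exiting.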
def custom_exit(returncode):
if returncode != 0:
import oneflow
oneflow._oneflow_internal.MasterSendAbort()
__original_exit__(returncode)
sys.exit = custom_exit
del custom_exit
del sys
from oneflow.compatible.single_client.autograd import no_grad
from oneflow.compatible.single_client.advanced.distribute_ops import (
cast_to_current_logical_view,
)
from oneflow.compatible.single_client.deprecated.initializer_util import (
truncated_normal_initializer as truncated_normal,
)
from oneflow.compatible.single_client.experimental.namescope import (
deprecated_name_scope as name_scope,
)
from oneflow.compatible.single_client.framework.check_point_v2 import (
GetAllVariables as get_all_variables,
)
from oneflow.compatible.single_client.framework.check_point_v2 import Load as load
from oneflow.compatible.single_client.framework.check_point_v2 import (
LoadVariables as load_variables,
)
from oneflow.compatible.single_client.framework.check_point_v2 import save
from oneflow.compatible.single_client.framework.dtype import (
convert_oneflow_dtype_to_numpy_dtype,
dtypes,
)
from oneflow.compatible.single_client.framework.env_util import (
api_enable_eager_execution as enable_eager_execution,
)
from oneflow.compatible.single_client.framework.env_util import (
api_get_current_machine_id as current_machine_id,
)
from oneflow.compatible.single_client.framework.env_util import (
api_get_current_resource as current_resource,
)
from oneflow.compatible.single_client.framework.function_desc import (
api_current_global_function_desc as current_global_function_desc,
)
from oneflow.compatible.single_client.framework.function_util import FunctionConfig
from oneflow.compatible.single_client.framework.function_util import (
FunctionConfig as ExecutionConfig,
)
from oneflow.compatible.single_client.framework.function_util import (
FunctionConfig as function_config,
)
from oneflow.compatible.single_client.framework.function_util import (
api_oneflow_function as global_function,
)
from oneflow.compatible.single_client.framework.generator import (
create_generator as Generator,
)
from oneflow.compatible.single_client.framework.generator import manual_seed
from oneflow.compatible.single_client.framework.input_blob_def import (
DeprecatedFixedTensorDef as FixedTensorDef,
)
from oneflow.compatible.single_client.framework.input_blob_def import (
DeprecatedMirroredTensorDef as MirroredTensorDef,
)
from oneflow.compatible.single_client.framework.job_set_util import (
inter_job_reuse_mem_strategy,
)
from oneflow.compatible.single_client.framework.model import Model
from oneflow.compatible.single_client.framework.ops import api_acc as acc
from oneflow.compatible.single_client.framework.ops import (
api_hierarchical_parallel_cast as hierarchical_parallel_cast,
)
from oneflow.compatible.single_client.framework.ops import api_pack as pack
from oneflow.compatible.single_client.framework.ops import (
api_parallel_cast as parallel_cast,
)
from oneflow.compatible.single_client.framework.ops import api_repeat as repeat
from oneflow.compatible.single_client.framework.ops import api_unpack as unpack
from oneflow.compatible.single_client.framework.placement_util import (
deprecated_placement as device_prior_placement,
)
from oneflow.compatible.single_client.framework.placement_util import (
deprecated_placement as fixed_placement,
)
from oneflow.compatible.single_client.framework.scope_util import (
api_current_scope as current_scope,
)
from oneflow.compatible.single_client.framework.session_util import (
TmpInitEagerGlobalSession as InitEagerGlobalSession,
)
from oneflow.compatible.single_client.framework.session_util import (
api_clear_default_session as clear_default_session,
)
from oneflow.compatible.single_client.framework.session_util import (
api_eager_execution_enabled as eager_execution_enabled,
)
from oneflow.compatible.single_client.framework.session_util import (
api_find_or_create_module as find_or_create_module,
)
from oneflow.compatible.single_client.framework.session_util import (
api_sync_default_session as sync_default_session,
)
from oneflow.compatible.single_client.framework.tensor import Tensor
from oneflow.compatible.single_client.ops.array_ops import amp_white_identity
from oneflow.compatible.single_client.ops.array_ops import (
api_slice_update as slice_update,
)
from oneflow.compatible.single_client.ops.array_ops import (
argwhere,
broadcast_like,
cast_to_static_shape,
concat,
dim_gather,
dynamic_reshape,
elem_cnt,
expand,
expand_dims,
flatten,
gather,
gather_nd,
identity,
identity_n,
masked_fill,
nonzero,
ones,
reshape,
reshape_like,
reverse,
scatter_nd,
slice,
slice_v2,
squeeze,
stack,
sync_dynamic_resize,
tensor_scatter_nd_add,
tensor_scatter_nd_update,
transpose,
where,
zeros,
)
from oneflow.compatible.single_client.ops.assign_op import assign
from oneflow.compatible.single_client.ops.builtin_ops import BuiltinOp as builtin_op
from oneflow.compatible.single_client.ops.categorical_ordinal_encode_op import (
categorical_ordinal_encode,
)
from oneflow.compatible.single_client.ops.combined_margin_loss import (
combined_margin_loss,
)
from oneflow.compatible.single_client.ops.constant_op import (
constant,
constant_like,
constant_scalar,
ones_like,
zeros_like,
)
from oneflow.compatible.single_client.ops.count_not_finite import (
count_not_finite,
multi_count_not_finite,
)
from oneflow.compatible.single_client.ops.diag_ops import diag
from oneflow.compatible.single_client.ops.eager_nccl_ops import eager_nccl_all_reduce
from oneflow.compatible.single_client.ops.get_variable import (
api_get_variable as get_variable,
)
from oneflow.compatible.single_client.ops.initializer_util import (
constant_initializer,
empty_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_normal_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_normal_initializer as xavier_normal_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_uniform_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_uniform_initializer as xavier_uniform_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
kaiming_initializer,
ones_initializer,
random_normal_initializer,
random_uniform_initializer,
truncated_normal_initializer,
variance_scaling_initializer,
zeros_initializer,
)
from oneflow.compatible.single_client.ops.linalg import matmul
from oneflow.compatible.single_client.ops.loss_ops import ctc_loss, smooth_l1_loss
from oneflow.compatible.single_client.ops.math_ops import (
broadcast_to_compatible_with as broadcast_to_compatible_with,
)
from oneflow.compatible.single_client.ops.math_ops import cast
from oneflow.compatible.single_client.ops.math_ops import clip_by_value as clamp
from oneflow.compatible.single_client.ops.math_ops import clip_by_value as clip
from oneflow.compatible.single_client.ops.math_ops import (
clip_by_value as clip_by_scalar,
)
from oneflow.compatible.single_client.ops.math_ops import clip_by_value as clip_by_value
from oneflow.compatible.single_client.ops.math_ops import in_top_k as in_top_k
from oneflow.compatible.single_client.ops.math_ops import range
from oneflow.compatible.single_client.ops.math_ops import (
unsorted_batch_segment_sum as unsorted_batch_segment_sum,
)
from oneflow.compatible.single_client.ops.math_ops import (
unsorted_segment_sum as unsorted_segment_sum,
)
from oneflow.compatible.single_client.ops.math_ops import (
unsorted_segment_sum_like as unsorted_segment_sum_like,
)
from oneflow.compatible.single_client.ops.one_hot import one_hot
from oneflow.compatible.single_client.ops.pad import (
constant_pad2d,
pad,
pad_grad,
reflection_pad2d,
replication_pad2d,
same_padding,
zero_pad2d,
)
from oneflow.compatible.single_client.ops.partial_fc_sample import (
distributed_partial_fc_sample,
)
from oneflow.compatible.single_client.ops.sort_ops import argsort, sort
from oneflow.compatible.single_client.ops.tensor_buffer_ops import (
gen_tensor_buffer,
tensor_buffer_to_list_of_tensors,
tensor_buffer_to_tensor,
tensor_to_tensor_buffer,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_image_random_crop as image_random_crop,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_image_resize as image_resize,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_image_target_resize as image_target_resize,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
image_batch_align as image_batch_align,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
image_decode as image_decode,
)
from oneflow.compatible.single_client.ops.user_data_ops import image_flip as image_flip
from oneflow.compatible.single_client.ops.user_data_ops import (
image_normalize as image_normalize,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_bbox_flip as object_bbox_flip,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_bbox_scale as object_bbox_scale,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_segm_poly_flip as object_segmentation_polygon_flip,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_segm_poly_scale as object_segmentation_polygon_scale,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_segm_poly_to_mask as object_segmentation_polygon_to_mask,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_consistent_user_op_builder as consistent_user_op_builder,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_consistent_user_op_module_builder as consistent_user_op_module_builder,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_user_op_builder as user_op_builder,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_user_op_module_builder as user_op_module_builder,
)
from oneflow.compatible.single_client.ops.watch import Watch as watch
from oneflow.compatible.single_client.ops.watch import WatchDiff as watch_diff
from . import (
checkpoint,
config,
data,
distribute,
distributed,
env,
image,
layers,
losses,
math,
model,
optimizer,
profiler,
random,
regularizers,
saved_model,
scope,
summary,
sysconfig,
tensorrt,
train,
typing,
util,
)
| 1.53125 | 2 |
napari/plugins/__init__.py | yinawang28/napari | 0 | 12770527 | <gh_stars>0
import sys
from pluggy import HookimplMarker
from ._hook_callers import execute_hook
from .exceptions import PluginError, PluginImportError, PluginRegistrationError
from .manager import PluginManager
# Marker to be imported and used in plugins (and for own implementations)
# Note: plugins may also just import pluggy directly and make their own
# napari_hook_implementation.
napari_hook_implementation = HookimplMarker("napari")
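# Illustrative sketch (not part of this module): a plugin module would decorate
# its hook implementations with the marker above, e.g.
#
#     from napari.plugins import napari_hook_implementation
#
#     @napari_hook_implementation
#     def napari_get_reader(path):
#         ...  # return a reader callable, or None to decline the path
#
# `napari_get_reader` is used here only as an example hook name.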
# the main plugin manager instance for the `napari` plugin namespace.
plugin_manager = PluginManager()
__all__ = [
"napari_hook_implementation",
"PluginManager",
"plugin_manager",
"PluginError",
"PluginImportError",
"PluginRegistrationError",
"execute_hook",
]
| 1.828125 | 2 |
orchestration/__init__.py | gonzalorodrigo/ScSFWorkload | 1 | 12770528 | <gh_stars>1-10
import os
from time import sleep
from commonLib.DBManager import DB
from orchestration.analyzing import (AnalysisRunnerSingle,
AnalysisRunnerDelta,
AnalysisGroupRunner)
from orchestration.definition import (ExperimentDefinition,
GroupExperimentDefinition,
DeltaExperimentDefinition)
from orchestration.running import ExperimentRunner
from stats import NumericStats
def get_central_db(dbName="workload"):
"""Returns a DB object configured to access the workload analysis
central database. It gets configured through environment variables.
Args:
- dbName: database name to use if it is not configured
through environment var.
Env vars:
- ANALYSIS_DB_HOST: hostname of the system hosting the database.
- ANALYSIS_DB_NAME: database name to read from.
- ANALYSIS_DB_USER: user to be used to access the database.
- ANALYSIS_DB_PASS: password to be used to access the database.
- ANALYSIS_DB_PORT: port on which the database runs.
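Example (illustrative values):
export ANALYSIS_DB_HOST=db.example.org
export ANALYSIS_DB_NAME=workload_analysis
With those variables set, get_central_db() connects to that host and database
instead of the 127.0.0.1 / "workload" defaults.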
"""
return DB(os.getenv("ANALYSIS_DB_HOST", "127.0.0.1"),
os.getenv("ANALYSIS_DB_NAME", dbName),
os.getenv("ANALYSIS_DB_USER", "root"),
os.getenv("ANALYSIS_DB_PASS", ""),
os.getenv("ANALYSIS_DB_PORT","3306"))
def get_sim_db(hostname="127.0.0.1"):
"""Returns a DB object configured to access the internal database of
a slurm scheduler. It gets configured through environment variables.
Args:
- hostname: default hostname of the machine containing the database if not
set through an env var.
Env Vars:
- SLURM_DB_HOST: slurm database host to connect to.
- SLURM_DB_NAME: slurm database name of the slurm worker. If not set,
slurm_acct_db is used.
- SLURMDB_USER: user to be used to access the slurm database.
- SLURMDB_PASS: password to be used to access the slurm database.
- SLURMDB_PORT: port on which the slurm database runs.
"""
return DB(os.getenv("SLURM_DB_HOST", hostname),
os.getenv("SLURM_DB_NAME", "slurm_acct_db"),
os.getenv("SLURMDB_USER", None),
os.getenv("SLURMDB_PASS", None),
os.getenv("SLURMDB_PORT","3306"))
class ExperimentWorker(object):
"""This class retrieves experiment configurations, creates the corresponding
workload, configures a slurm experiment runner, runs the experiment, and
stores results in the analysis database.
Configuration of the running environment comes from the static
configuration of the ExperimentRunner class.
"""
def do_work(self, central_db_obj, sched_db_obj, trace_id=None):
"""
Args:
- central_db_obj: DB object configured to access the analysis database.
- sched_db_obj: DB object configured to access the slurm database of
an experiment worker.
- trace_id: If set to an experiment valid trace_id, it runs only the
experiment identified by trace_id.
"""
there_are_more=True
while there_are_more:
ed = ExperimentDefinition()
if trace_id:
ed.load(central_db_obj, trace_id)
ed.mark_pre_simulating(central_db_obj)
else:
there_are_more = ed.load_fresh(central_db_obj)
if there_are_more:
print(("About to run exp({0}):{1}".format(
ed._trace_id, ed._name)))
er = ExperimentRunner(ed)
if(er.do_full_run(sched_db_obj, central_db_obj)):
print(("Exp({0}) Done".format(
ed._trace_id)))
else:
print(("Exp({0}) Error!".format(
ed._trace_id)))
if trace_id:
break
def rescue_exp(self, central_db_obj, sched_db_obj, trace_id=None):
"""Retrieves the job trace from the database of an experiment worker and
stores it in the central db.
Args:
- central_db_obj: DB object configured to access the analysis database.
- sched_db_obj: DB object configured to access the slurm database of
an experiment worker.
- trace_id: trace_id of the experiment to which the rescued trace
corresponds.
"""
there_are_more=True
while there_are_more:
ed = ExperimentDefinition()
if trace_id:
ed.load(central_db_obj, trace_id)
ed.mark_simulation_done(central_db_obj)
else:
there_are_more = ed.load_next_state("simulation_failed",
"simulation_done")
if there_are_more:
print(("About to run rescue({0}):{1}".format(
ed._trace_id, ed._name)))
er = ExperimentRunner(ed)
if(er.check_trace_and_store(sched_db_obj, central_db_obj)):
er.clean_trace_file()
print(("Exp({0}) Done".format(
ed._trace_id)))
else:
print(("Exp({0}) Error!".format(
ed._trace_id)))
if trace_id:
break
class AnalysisWorker(object):
"""This class processes the results of different experiment types and stores
the final results in the analysis database.
"""
def do_work_single(self, db_obj, trace_id=None):
"""Processes single type experiment results.
Args:
- db_obj: DB object configured to access the analysis database.
- trace_id: If set to "None", it processes all experiments in
"simulation_state". If set to an integer, it will analyze the
experiment identified by trace_id.
"""
there_are_more=True
while there_are_more:
ed = ExperimentDefinition()
if trace_id:
ed.load(db_obj, trace_id)
ed.mark_pre_analyzing(db_obj)
else:
there_are_more = ed.load_pending(db_obj)
if there_are_more:
print(("Analyzing experiment {0}".format(ed._trace_id)))
er = AnalysisRunnerSingle(ed)
er.do_full_analysis(db_obj)
if trace_id:
break
def do_work_second_pass(self, db_obj, pre_trace_id):
"""Takes three experiments, and repeats the workflow analysis for
each experiment but only taking into account the first n workflows
in each one. n = minimum number of workflows across the three traces.
Args:
- db_obj: DB object configured to access the analysis database.
- trace_id: If set to an integer, it will analyze the
experiments identified by trace_id, trace_id+1, trace_id+2.
"""
there_are_more=True
while there_are_more:
ed_manifest = ExperimentDefinition()
ed_single = ExperimentDefinition()
ed_multi = ExperimentDefinition()
if pre_trace_id:
trace_id=int(pre_trace_id)
ed_manifest.load(db_obj, trace_id)
there_are_more=True
else:
there_are_more = ed_manifest.load_next_ready_for_pass(db_obj)
trace_id=int(ed_manifest._trace_id)
if there_are_more:
ed_single.load(db_obj, trace_id+1)
ed_multi.load(db_obj, trace_id+2)
ed_list=[ed_manifest, ed_single, ed_multi]
print(("Reading workflow info for traces: {0}".format(
[ed._trace_id for ed in ed_list])))
if (ed_manifest._workflow_handling!="manifest" or
ed_single._workflow_handling!="single" or
ed_multi._workflow_handling!="multi"):
print(("Incorrect workflow handling for traces "
"({0}, {1}, {2}): ({3}, {4}, {5})".format(
ed_manifest._trace_id,
ed_single._trace_id,
ed_multi._trace_id,
ed_manifest._workflow_handling,
ed_single._workflow_handling,
ed_multi._workflow_handling)
))
print ("Exiting...")
exit()
for ed in ed_list:
ed.mark_pre_second_pass(db_obj)
num_workflows=None
for ed in ed_list:
exp_wfs=self.get_num_workflows(db_obj, ed._trace_id)
if num_workflows is None:
num_workflows = exp_wfs
else:
num_workflows=min(num_workflows, exp_wfs)
print(("Final workflow count: {0}".format(num_workflows)))
for ed in ed_list:
print(("Doing second pass for trace: {0}".format(
ed._trace_id)))
er = AnalysisRunnerSingle(ed)
er.do_workflow_limited_analysis(db_obj, num_workflows)
print(("Second pass completed for {0}".format(
[ed._trace_id for ed in ed_list])))
if pre_trace_id:
break
def get_num_workflows(self, db_obj, trace_id):
result_type="wf_turnaround"
key=result_type+"_stats"
result = NumericStats()
result.load(db_obj, trace_id, key)
return int(result._get("count"))
def do_work_delta(self, db_obj, trace_id=None, sleep_time=60):
"""Processes delta type experiment results.
Args:
- db_obj: DB object configured to access the analysis database.
- sleep_time: wait time in seconds to wait between processing two delta
experiments.
"""
there_are_more=True
while there_are_more:
ed = DeltaExperimentDefinition()
if trace_id:
ed.load(db_obj, trace_id)
ed.mark_pre_analyzing(db_obj)
else:
there_are_more = ed.load_pending(db_obj)
if there_are_more:
if ed.is_it_ready_to_process(db_obj):
er = AnalysisRunnerDelta(ed)
er.do_full_analysis(db_obj)
sleep(sleep_time)
if trace_id:
break
def do_work_grouped(self, db_obj, trace_id=None, sleep_time=60):
"""Processes grouped type experiment results.
Args:
- db_obj: DB object configured to access the analysis database.
- sleep_time: wait time in seconds to wait between processing two
grouped experiments.
"""
there_are_more=True
while there_are_more:
ed = GroupExperimentDefinition()
if trace_id:
ed.load(db_obj, trace_id)
ed.mark_pre_analyzing(db_obj)
else:
there_are_more = ed.load_pending(db_obj)
if there_are_more:
if ed.is_it_ready_to_process(db_obj):
print(("Analyzing grouped experiment {0}".format(
ed._trace_id)))
er = AnalysisGroupRunner(ed)
er.do_full_analysis(db_obj)
if trace_id:
break
elif there_are_more:
print(("There are grouped experiments to be processed, but "
"their subtraces are not ready yet. Sleeping for {0}s."
"".format(sleep_time)))
# sleep(sleep_time)
else:
print("No more experiments to process, exiting.")
def do_mean_utilizatin(self, db_obj, trace_id=None):
ed = GroupExperimentDefinition()
if trace_id:
trace_id_list=[trace_id]
else:
trace_id_list=ed.get_exps_in_state(db_obj, "analysis_done")
trace_id_list+=ed.get_exps_in_state(db_obj, "second_pass_done")
print(("processing following group traces (utilization mean):{0}".format(
trace_id_list)))
for trace_id in trace_id_list:
print(("Calculating for", trace_id))
ed = GroupExperimentDefinition()
ed.load(db_obj, trace_id=trace_id)
er = AnalysisGroupRunner(ed)
er.do_only_mean(db_obj)
def do_work_grouped_second_pass(self, db_obj, pre_trace_id):
"""Takes three experiments, and repeats the workflow analysis for
each experiment but only taking into account the first n workflows
in each one. n = minimum number of workflows across the three traces.
Args:
- db_obj: DB object configured to access the analysis database.
- trace_id: If set to an integer, it will analyze the
experiments identified by trace_id, trace_id+1, trace_id+2.
"""
there_are_more=True
while there_are_more:
ed_manifest = GroupExperimentDefinition()
ed_single = GroupExperimentDefinition()
ed_multi = GroupExperimentDefinition()
if pre_trace_id:
trace_id=int(pre_trace_id)
ed_manifest.load(db_obj, trace_id)
there_are_more=True
else:
there_are_more = ed_manifest.load_next_ready_for_pass(db_obj)
trace_id=int(ed_manifest._trace_id)
if there_are_more:
ed_single.load(db_obj, trace_id+1)
ed_multi.load(db_obj, trace_id+2)
ed_list=[ed_manifest, ed_single, ed_multi]
print(("Reading workflow info for traces: {0}".format(
[ed._trace_id for ed in ed_list])))
if (ed_manifest._workflow_handling!="manifest" or
ed_single._workflow_handling!="single" or
ed_multi._workflow_handling!="multi"):
print(("Incorrect workflow handling for traces "
"({0}, {1}, {2}): ({3}, {4}, {5})".format(
ed_manifest._trace_id,
ed_single._trace_id,
ed_multi._trace_id,
ed_manifest._workflow_handling,
ed_single._workflow_handling,
ed_multi._workflow_handling)
))
print ("Exiting...")
exit()
for ed in ed_list:
ed.mark_pre_second_pass(db_obj)
list_num_workflows=[]
for (st_1, st_2, st_3) in zip(ed_manifest._subtraces,
ed_single._subtraces,
ed_multi._subtraces):
num_workflows=None
for ed_id in [st_1, st_2, st_3]:
exp_wfs=self.get_num_workflows(db_obj, ed_id)
if num_workflows is None:
num_workflows = exp_wfs
else:
num_workflows=min(num_workflows, exp_wfs)
list_num_workflows.append(num_workflows)
print(("Final workflow count: {0}".format(list_num_workflows)))
for ed in ed_list:
print(("Doing second pass for trace: {0}".format(
ed._trace_id)))
er = AnalysisGroupRunner(ed)
er.do_workflow_limited_analysis(db_obj, list_num_workflows)
print(("Second pass completed for {0}".format(
[ed._trace_id for ed in ed_list])))
if pre_trace_id:
break
| 2.421875 | 2 |
003-input.py | richardvecsey/python-basics | 3 | 12770529 | <filename>003-input.py
"""
Get input from user via console or command line
-----------------------------------------------
Output: (string)
"""
name = input('What is your name? ')
print('Hello {}!'.format(name)) | 3.8125 | 4 |
security_monkey/auditors/github/org.py | boladmin/security_monkey | 4,258 | 12770530 | # Copyright 2017 Netflix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.github.org
:platform: Unix
:synopsis: Auditor for GitHub Organizations
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from security_monkey.auditor import Auditor
from security_monkey.watchers.github.org import GitHubOrg
class GitHubOrgAuditor(Auditor):
index = GitHubOrg.index
i_am_singular = GitHubOrg.i_am_singular
i_am_plural = GitHubOrg.i_am_plural
def __init__(self, accounts=None, debug=False):
super(GitHubOrgAuditor, self).__init__(accounts=accounts, debug=debug)
def check_for_public_repo(self, org_item):
"""
Checks whether the organization has public repositories. Default score of 0. This is mostly
informational.
:param org_item:
:return:
"""
tag = "Organization contains public repositories."
if org_item.config["public_repos"] > 0:
self.add_issue(0, tag, org_item, notes="Organization contains public repositories")
def check_for_non_twofa_members(self, org_item):
"""
Alert if the org has users that don't have 2FA enabled.
Will keep this at a level of 2 -- unless there are admins without 2FA, then that is level 10!
:param org_item:
:return:
"""
tag = "Organization contains users without 2FA enabled."
owner_no_twofa = "Organization owner does NOT have 2FA enabled!"
if len(org_item.config["no_2fa_members"]) > 0:
self.add_issue(2, tag, org_item, notes="Organization contains users without 2FA enabled")
for notwofa in org_item.config["no_2fa_members"]:
if notwofa in org_item.config["owners"]:
self.add_issue(10, owner_no_twofa, org_item, notes="Organization OWNER: {} does NOT "
"have 2FA enabled!".format(notwofa))
| 2.546875 | 3 |
hivs_pp/migrations/0012_allow_blank_gender_field_on_delivery.py | tehamalab/hivs | 0 | 12770531 | # Generated by Django 2.0.7 on 2018-10-25 16:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hivs_pp', '0011_rename_field_confidential_to_is_confidential_on_service'),
]
operations = [
migrations.AlterField(
model_name='delivery',
name='gender',
field=models.ForeignKey(blank=True, help_text="If client profile is set this can be overwritten based on the client's profile.", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pp_deliveries', to='hivs_utils.Gender', verbose_name='client gender'),
),
]
| 1.59375 | 2 |
core/WxMsgModel.py | NextStand/WechatBiz | 1 | 12770532 | <gh_stars>1-10
# coding:utf-8
"""
WeChat message templates
"""
__author__ = 'BLUE'
__time__ = 'Wed May 22 2019 11:14:55 GMT+0800'
# ------------------------------------------------------------------------
from .Exceptions import ParseError
from .WechatBase import WxBizMsgTypeBase, WxBizCrmEventBase, WxBizContactUserBase, WxBizContactPartyBase, WxBizPicEventBase
MESSAGE_TYPES = {}
def handle_for_type(type):
def register(f):
MESSAGE_TYPES[type] = f
return f
return register
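# Illustrative sketch (not part of the original module): a caller can dispatch a
# parsed message dict through the registry filled in by @handle_for_type, e.g.
#
#     msg_cls = MESSAGE_TYPES.get(msg_type_key, UnknownMessage)
#     msg = msg_cls(parsed_msg)
#
# `msg_type_key` is assumed to be the message-type or event string extracted
# from the payload, and `parsed_msg` the dict parsed from the decrypted XML.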
@handle_for_type('text')
class TextMessage(WxBizMsgTypeBase):
""" Text message """
def __init__(self, message):
self.Content = message.pop('Content', '')
super(TextMessage, self).__init__(message)
@handle_for_type('image')
class ImageMessage(WxBizMsgTypeBase):
""" Image message """
def __init__(self, message):
try:
self.PicUrl = message.pop('PicUrl')
self.MediaId = message.pop('MediaId')
except KeyError:
raise ParseError()
super(ImageMessage, self).__init__(message)
@handle_for_type('voice')
class VoiceMessage(WxBizMsgTypeBase):
""" Voice message """
def __init__(self, message):
try:
self.MediaId = message.pop('MediaId')
except KeyError:
raise ParseError()
super(VoiceMessage, self).__init__(message)
@handle_for_type('video')
class VideoMessage(WxBizMsgTypeBase):
""" Video message """
def __init__(self, message):
try:
self.MediaId = message.pop('MediaId')
self.ThumbMediaId = message.pop('ThumbMediaId')
except KeyError:
raise ParseError()
super(VideoMessage, self).__init__(message)
@handle_for_type('location')
class LocationMessage(WxBizMsgTypeBase):
""" Location message """
def __init__(self, message):
try:
Location_X = message.pop('Location_X')
Location_Y = message.pop('Location_Y')
self.location = (float(Location_X), float(Location_Y))
self.Scale = int(message.pop('Scale'))
self.Label = message.pop('Label')
except KeyError:
raise ParseError()
super(LocationMessage, self).__init__(message)
@handle_for_type('link')
class LinkMessage(WxBizMsgTypeBase):
""" Link message """
def __init__(self, message):
try:
self.Title = message.pop('Title')
self.Description = message.pop('Description')
self.PicUrl = message.pop('PicUrl')
except KeyError:
raise ParseError()
super(LinkMessage, self).__init__(message)
@handle_for_type('subscribe')
class SubscribeEvent(WxBizMsgTypeBase):
""" Subscribe (follow) event """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
except KeyError:
raise ParseError(u'关注事件消息解析失败')
super(SubscribeEvent, self).__init__(message)
@handle_for_type('unsubscribe')
class UnSubscribeEvent(WxBizMsgTypeBase):
""" Unsubscribe (unfollow) event """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
except KeyError:
raise ParseError(u'取消关注事件消息解析失败')
super(UnSubscribeEvent, self).__init__(message)
@handle_for_type('location')
class LocationEvent(WxBizMsgTypeBase):
""" Location report event """
def __init__(self, message):
try:
self.Location_Y = float(message.pop('Location_Y'))
self.Location_X = float(message.pop('Location_X'))
self.Label = message.pop('Label')
self.Scale = int(message.pop('Scale'))
except KeyError:
raise ParseError(u'上报地理位置事件消息解析失败')
super(LocationEvent, self).__init__(message)
@handle_for_type('batch_job_result')
class BatchJobEvent(WxBizMsgTypeBase):
""" Asynchronous batch job completion push event """
def __init__(self, message):
try:
self.JobId = message.pop('JobId')
self.JobType = message.pop('JobType')
self.ErrCode = message.pop('ErrCode')
self.ErrMsg = message.pop('ErrMsg')
except KeyError:
raise ParseError(u'异步任务完成推送事件消息解析失败')
super(BatchJobEvent, self).__init__(message)
@handle_for_type('create_user')
class CreateUserEvent(WxBizContactUserBase):
""" Member created event """
def __init__(self, message):
try:
super(CreateUserEvent, self).__init__(message)
except KeyError:
raise ParseError(u'新增成员事件消息解析失败')
@handle_for_type('update_user')
class UpdateUserEvent(WxBizContactUserBase):
""" Member updated event """
def __init__(self, message):
try:
self.NewUserID = message.pop('NewUserID')
except KeyError:
raise ParseError(u'更新成员事件消息解析失败')
super(UpdateUserEvent, self).__init__(message)
@handle_for_type('delete_user')
class DeleteUserEvent(WxBizMsgTypeBase):
""" Member deleted event """
def __init__(self, message):
try:
self.UserID = message.pop('UserID', None)
except KeyError:
raise ParseError(u'删除成员事件消息解析失败')
super(DeleteUserEvent, self).__init__(message)
@handle_for_type('create_party')
class CreatePartyEvent(WxBizContactPartyBase):
""" Department created event """
def __init__(self, message):
try:
self.Order = message.pop('Order')
except KeyError:
raise ParseError(u'新增部门事件消息解析失败')
super(CreatePartyEvent, self).__init__(message)
@handle_for_type('update_party')
class UpdatePartyEvent(WxBizContactPartyBase):
""" Department updated event """
def __init__(self, message):
try:
super(UpdatePartyEvent, self).__init__(message)
except KeyError:
raise ParseError(u'更新部门事件消息解析失败')
@handle_for_type('delete_party')
class DeletePartyEvent(WxBizMsgTypeBase):
""" Department deleted event """
def __init__(self, message):
try:
self.Id = message.pop('Id')
except KeyError:
raise ParseError(u'删除部门事件消息解析失败')
super(DeletePartyEvent, self).__init__(message)
@handle_for_type('update_tag')
class UpdateTagEvent(WxBizMsgTypeBase):
""" Tag membership change event """
def __init__(self, message):
try:
self.TagId = message.pop('TagId')
self.AddUserItems = message.pop('AddUserItems')
self.DelUserItems = message.pop('DelUserItems')
self.AddPartyItems = message.pop('AddPartyItems')
self.DelPartyItems = message.pop('DelPartyItems')
except KeyError:
raise ParseError(u'标签成员变更事件消息解析失败')
super(UpdateTagEvent, self).__init__(message)
@handle_for_type('click')
class ClickEvent(WxBizMsgTypeBase):
""" Menu click event (pull message) """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
except KeyError:
raise ParseError(u'点击菜单拉取消息事件消息解析失败')
super(ClickEvent, self).__init__(message)
@handle_for_type('view')
class ViewEvent(WxBizMsgTypeBase):
""" Menu click event (redirect to URL) """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
except KeyError:
raise ParseError(u'点击菜单跳转链接事件消息解析失败')
super(ViewEvent, self).__init__(message)
@handle_for_type('scancode_push')
class ScancodePushEvent(WxBizMsgTypeBase):
""" Menu scan-code push event """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
self.ScanCodeInfo = message.pop('ScanCodeInfo')
self.ScanType = self.ScanCodeInfo[0].get('ScanType')
self.ScanResult = self.ScanCodeInfo[0].get('ScanResult')
except KeyError:
raise ParseError(u'扫码推事件消息解析失败')
super(ScancodePushEvent, self).__init__(message)
@handle_for_type('scancode_waitmsg')
class ScancodeWaitEvent(WxBizMsgTypeBase):
""" Menu scan-code push event with a "message receiving" prompt box """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
self.ScanCodeInfo = message.pop('ScanCodeInfo')
self.ScanType = self.ScanCodeInfo[0].get('ScanType')
self.ScanResult = self.ScanCodeInfo[0].get('ScanResult')
except KeyError:
raise ParseError(u'扫码推事件且弹出“消息接收中”提示框事件消息解析失败')
super(ScancodeWaitEvent, self).__init__(message)
@handle_for_type('pic_sysphoto')
class PicSysPhotoEvent(WxBizPicEventBase):
""" Menu event: open the system camera to take and send a photo """
def __init__(self, message):
try:
super(PicSysPhotoEvent, self).__init__(message)
except KeyError:
raise ParseError(u'弹出系统拍照发图事件消息解析失败')
@handle_for_type('pic_photo_or_album')
class PicPhotoOrAlbumEvent(WxBizPicEventBase):
""" Menu event: take a photo or pick one from the album to send """
def __init__(self, message):
print(123)
try:
super(PicPhotoOrAlbumEvent, self).__init__(message)
except KeyError:
raise ParseError(u'弹出拍照或者相册发图事件消息解析失败')
@handle_for_type('pic_weixin')
class PicWeixinEvent(WxBizPicEventBase):
""" Menu event: open the WeChat album picker to send a photo """
def __init__(self, message):
print('*'*30)
try:
super(PicWeixinEvent, self).__init__(message)
except KeyError:
raise ParseError(u'弹出拍照或者相册发图事件消息解析失败')
@handle_for_type('location_select')
class LocalSelectEvent(WxBizMsgTypeBase):
""" Menu event: open the location picker """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
self.SendLocationInfo = message.pop('SendLocationInfo')
self.Location_X = self.SendLocationInfo[0].get('Location_X')
self.Location_Y = self.SendLocationInfo[0].get('Location_Y')
self.Scale = self.SendLocationInfo[0].get('Scale')
self.Label = self.SendLocationInfo[0].get('Label')
self.Poiname = self.SendLocationInfo[0].get('Poiname')
except KeyError:
raise ParseError(u'弹出地理位置选择器事件消息解析失败')
super(LocalSelectEvent, self).__init__(message)
@handle_for_type('open_approval_change')
class ProvalChangeEvent(WxBizMsgTypeBase):
""" Approval status change notification event """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
self.ApprovalInfo = message.pop('ApprovalInfo')
except KeyError:
raise ParseError(u'审核状态通知事件消息解析失败')
super(ProvalChangeEvent, self).__init__(message)
@handle_for_type('taskcard_click')
class TaskCardEvent(WxBizMsgTypeBase):
""" Task card event """
def __init__(self, message):
try:
self.EventKey = message.pop('EventKey')
self.TaskId = message.pop('TaskId')
except KeyError:
raise ParseError(u'任务卡片事件消息解析失败')
super(TaskCardEvent, self).__init__(message)
@handle_for_type('add_external_contact')
class AddExternalEvent(WxBizCrmEventBase):
""" External contact added event """
def __init__(self, message):
try:
self.State = message.pop('State')
except KeyError:
raise ParseError(u'添加外部联系人事件消息解析失败')
super(AddExternalEvent, self).__init__(message)
@handle_for_type('del_external_contact')
class DelExternalEvent(WxBizCrmEventBase):
""" External contact deleted event """
def __init__(self, message):
super(DelExternalEvent, self).__init__(message)
class UnknownMessage(WxBizMsgTypeBase):
def __init__(self, message):
self.MsgType = 'unknown'
super(UnknownMessage, self).__init__(message)
| 2.1875 | 2 |
molsysmt/attic/fix.py | dprada/molsysmt | 3 | 12770533 | <filename>molsysmt/attic/fix.py
from .multitool import get_form
from .multitool import get as _get
from ._private_tools.exceptions import *
_chain_IDs=['A','B','C','D','E','F','G','H','I','J','K','L','M',
'N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
def fix_chains(item,chains=None):
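"""Assign chain IDs to the molecules of a parmed Structure or GromacsTopologyFile:
one chain letter per protein molecule, plus a shared chain for water and another
for ions."""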
in_form = get_form(item)
if in_form=='parmed.Structure' or in_form=='parmed.GromacsTopologyFile':
tmp_molecules, tmp_types=_get(item,molecules=True,molecule_type=True)
n_proteins=0
with_water=False
with_ions=False
chain={}
for type_molecule in tmp_types:
if type_molecule=='protein':
n_proteins+=1
elif type_molecule=='water':
with_water=True
elif type_molecule=='ion':
with_ions=True
ii=0
if n_proteins>0:
ii=n_proteins
chain['protein']=_chain_IDs[:ii]
ii+=-1
if with_water:
ii+=1
chain['water']=_chain_IDs[ii]
if with_ions:
ii+=1
chain['ion']=_chain_IDs[ii]
n_proteins=0
for ii in range(len(tmp_molecules)):
if tmp_types[ii]=='protein':
chain_molecule=chain['protein'][n_proteins]
n_proteins+=1
else:
chain_molecule=chain[tmp_types[ii]]
for atom_idx in tmp_molecules[ii]:
item.atoms[atom_idx].residue.chain=chain_molecule
pass
def fix(item, missing_atoms=True, missing_residues=True, nonstandard_residues=True,
missing_terminals=True, missing_loops=False, missing_hydrogens=True,
pH=7.4, to_form=None, engine_fix='PDBFixer', engine_hydrogens='PDBFixer',
engine_loops='Modeller', verbose=False):
"""fix_pdb_structure(item, missing_atoms=True, missing_residues=True, nonstandard_residues=True,
missing_terminals=True, missing_loops=False, missing_hydrogens=True,
pH=7.4, to_form=None, engine_fix='PDBFixer', engine_hydrogens='PDBFixer',
engine_loops='Modeller', verbose=False):
Fixing missing atoms, residues, terminals or loops in the molecular model coming from a pdb file.
This method fixes the possible missing atoms, residues, loops or terminals in a molecular
model. The result is a new molecular model, in the desired supported form, with those elements
fixed.
Parameters
----------
item: molecular model
Molecular model in any supported form by MolSysMT.
arg2: type, default='value'
Paragraph with explanation.
Returns
-------
object: type
Paragraph with explanation.
Examples
--------
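A minimal sketch (hypothetical call; assumes this attic module's `fix` is
importable and that PDBFixer is installed):
>>> fixed_item = fix('181L', missing_hydrogens=True, pH=7.4,
...                  to_form='pdbfixer.PDBFixer')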
See Also
--------
:func:`molsysmt.load`
Notes
-----
Todo
----
Warning
-------
The method has been tested with the following input forms: pdbid, pdbfile, pdbfixer.PDBFixer
and openmm.Modeller.
"""
from .tools.forms import digest as digest_forms
from ._private_tools.engines import digest_engine as digest_engines
from .multitool import convert
form_in, form_out = digest_forms(item, to_form)
engine_fix = digest_engines(engine_fix)
engine_hydrogens = digest_engines(engine_hydrogens)
engine_loops = digest_engines(engine_loops)
tmp_item = None
if engine_fix=='PDBFixer':
tmp_item = convert(item, to_form='pdbfixer.PDBFixer')
if missing_residues:
tmp_item.findMissingResidues()
if missing_atoms:
tmp_item.findMissingAtoms()
if nonstandard_residues:
tmp_item.findNonstandardResidues()
if verbose:
print('Missing residues:', tmp_item.missingResidues)
print('Non standard residues:', tmp_item.nonstandardResidues)
print('Missing atoms', tmp_item.missingAtoms)
print('Missing terminals:', tmp_item.missingTerminals)
tmp_item.addMissingAtoms()
if verbose:
print('Missing residues or atoms reported fixed.')
if missing_hydrogens:
from .protonation import add_missing_hydrogens
tmp_item = add_missing_hydrogens(tmp_item, pH=pH, engine=engine_hydrogens, verbose=verbose)
if missing_loops:
from .model_loops import add_loop
tmp_item = add_loop(tmp_item, engine=engine_loops)
tmp_item = convert(tmp_item, to_form=form_out)
return tmp_item
| 2.28125 | 2 |
plugins/beacon/setup.py | ernadhalilovic/alerta-contrib | 0 | 12770534 | from setuptools import setup, find_packages
version = '0.0.1'
setup(
name="alerta-beacon",
version=version,
description='Alerta plugin for Beacon',
url='https://github.com/ernadhalilovic/alerta-contrib',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
py_modules=['alerta_beacon'],
install_requires=[
'requests'
],
include_package_data=True,
zip_safe=True,
entry_points={
'alerta.plugins': [
'beacon = alerta_beacon:ServiceIntegration'
]
}
)
| 1.125 | 1 |
niapy/tests/test_task.py | hrnciar/NiaPy | 0 | 12770535 | <filename>niapy/tests/test_task.py
# encoding=utf8
from unittest import TestCase
import numpy as np
from numpy.random import default_rng
from niapy.benchmarks import Benchmark
from niapy.task import StoppingTask, ThrowingTask
from niapy.util import full_array, FesException, GenException, RefException
class MyBenchmark(Benchmark):
def __init__(self):
super().__init__(-10, 10)
def function(self):
def evaluate(D, x):
return sum(x ** 2)
return evaluate
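# The test cases below exercise the task classes against this simple
# sphere-like benchmark.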
class StoppingTaskBaseTestCase(TestCase):
r"""Test case for testing `Task`, `StoppingTask` and `CountingTask` classes.
Date:
April 2019
Author:
<NAME>
See Also:
* :class:`niapy.util.Task`
* :class:`niapy.util.CountingTask`
* :class:`niapy.util.StoppingTask`
"""
def setUp(self):
self.D = 6
self.Lower, self.Upper = [2, 1, 1], [10, 10, 2]
self.task = StoppingTask(dimension=self.D, lower=self.Lower, upper=self.Upper)
def test_dim_ok(self):
self.assertEqual(self.D, self.task.dimension)
self.assertEqual(self.D, self.task.dimension)
def test_lower(self):
self.assertTrue(np.array_equal(full_array(self.Lower, self.D), self.task.lower))
self.assertTrue(np.array_equal(full_array(self.Lower, self.D), self.task.lower))
def test_upper(self):
self.assertTrue(np.array_equal(full_array(self.Upper, self.D), self.task.upper))
self.assertTrue(np.array_equal(full_array(self.Upper, self.D), self.task.upper))
def test_range(self):
self.assertTrue(
np.array_equal(full_array(self.Upper, self.D) - full_array(self.Lower, self.D), self.task.range))
self.assertTrue(
np.array_equal(full_array(self.Upper, self.D) - full_array(self.Lower, self.D), self.task.range))
def test_ngens(self):
self.assertEqual(np.inf, self.task.max_iters)
def test_nfess(self):
self.assertEqual(np.inf, self.task.max_evals)
def test_stop_cond(self):
self.assertFalse(self.task.stopping_condition())
def test_stop_condi(self):
self.assertFalse(self.task.stopping_condition_iter())
def test_eval(self):
self.assertRaises(AttributeError, lambda: self.task.eval([]))
def test_evals(self):
self.assertEqual(0, self.task.evals)
def test_iters(self):
self.assertEqual(0, self.task.iters)
def test_next_iter(self):
self.assertEqual(None, self.task.next_iter())
def test_is_feasible(self):
self.assertFalse(self.task.is_feasible(full_array([1, 2, 3], self.D)))
class StoppingTaskTestCase(TestCase):
r"""Test case for testing `Task`, `StoppingTask` and `CountingTask` classes.
Date:
April 2019
Author:
<NAME>
See Also:
* :class:`niapy.util.Task`
* :class:`niapy.util.CountingTask`
* :class:`niapy.util.StoppingTask`
"""
def setUp(self):
self.D, self.nFES, self.nGEN = 10, 10, 10
self.t = StoppingTask(max_evals=self.nFES, max_iters=self.nGEN, cutoff_value=1, dimension=self.D,
benchmark=MyBenchmark())
def test_isFeasible(self):
x = np.full(self.D, 10)
self.assertTrue(self.t.is_feasible(x))
x = np.full(self.D, -10)
self.assertTrue(self.t.is_feasible(x))
x = default_rng().uniform(-10, 10, self.D)
self.assertTrue(self.t.is_feasible(x))
x = np.full(self.D, -20)
self.assertFalse(self.t.is_feasible(x))
x = np.full(self.D, 20)
self.assertFalse(self.t.is_feasible(x))
def test_nextIter(self):
for i in range(self.nGEN):
self.assertFalse(self.t.stopping_condition())
self.t.next_iter()
self.assertTrue(self.t.stopping_condition())
def test_stopCondI(self):
for i in range(self.nGEN):
self.assertFalse(self.t.stopping_condition_iter(), msg='Error at %s iteration!!!' % i)
self.assertTrue(self.t.stopping_condition_iter())
def test_eval(self):
x = np.ones(self.D)
for i in range(self.nFES):
self.assertAlmostEqual(self.t.eval(x), self.D, msg='Error at %s iteration!!!' % i)
self.assertTrue(self.t.stopping_condition())
def test_eval_over_nFES(self):
x = np.ones(self.D)
for i in range(self.nFES):
self.t.eval(x)
self.assertEqual(np.inf, self.t.eval(x))
self.assertTrue(self.t.stopping_condition())
def test_eval_over_nGEN(self):
x = np.ones(self.D)
for i in range(self.nGEN):
self.t.next_iter()
self.assertEqual(np.inf, self.t.eval(x))
self.assertTrue(self.t.stopping_condition())
def test_nFES_count(self):
x = np.ones(self.D)
for i in range(self.nFES):
self.t.eval(x)
self.assertEqual(self.t.evals, i + 1, 'Error at %s. evaluation' % (i + 1))
def test_nGEN_count(self):
x = np.ones(self.D)
for i in range(self.nGEN):
self.t.next_iter()
self.assertEqual(self.t.iters, i + 1, 'Error at %s. iteration' % (i + 1))
def test_stopCond_evals(self):
x = np.ones(self.D)
for i in range(self.nFES - 1):
self.t.eval(x)
self.assertFalse(self.t.stopping_condition())
self.t.eval(x)
self.assertTrue(self.t.stopping_condition())
def test_stopCond_iters(self):
x = np.ones(self.D)
for i in range(self.nGEN - 1):
self.t.next_iter()
self.assertFalse(self.t.stopping_condition())
self.t.next_iter()
self.assertTrue(self.t.stopping_condition())
def test_stopCond_refValue(self):
x = np.ones(self.D)
for i in range(self.nGEN - 5):
self.assertFalse(self.t.stopping_condition())
self.assertEqual(self.D, self.t.eval(x))
self.t.next_iter()
x = np.zeros(self.D)
self.assertEqual(0, self.t.eval(x))
self.assertTrue(self.t.stopping_condition())
self.assertEqual(self.nGEN - 5, self.t.iters)
def test_print_conv_one(self):
r1, r2 = [], []
for i in range(self.nFES):
x = np.full(self.D, 10 - i)
r1.append(i + 1), r2.append(self.t.eval(x))
t_r1, t_r2 = self.t.return_conv()
self.assertTrue(np.array_equal(r1, t_r1))
self.assertTrue(np.array_equal(r2, t_r2))
def test_print_conv_two(self):
r1, r2 = [], []
for i in range(self.nFES):
x = np.full(self.D, 10 - i if i not in (3, 4, 5) else 4)
r1.append(i + 1), r2.append(self.t.eval(x))
t_r1, t_r2 = self.t.return_conv()
self.assertTrue(np.array_equal(r2, t_r2))
self.assertTrue(np.array_equal(r1, t_r1))
class ThrowingTaskTestCase(TestCase):
r"""Test case for testing `ThrowingTask` class.
Date:
April 2019
Author:
<NAME>
See Also:
* :class:`niapy.util.ThrowingTask`
"""
def setUp(self):
self.D, self.nFES, self.nGEN = 10, 10, 10
self.t = ThrowingTask(dimension=self.D, max_evals=self.nFES, max_iters=self.nGEN, cutoff_value=0,
benchmark=MyBenchmark())
def test_isFeasible(self):
x = np.full(self.D, 10)
self.assertTrue(self.t.is_feasible(x))
x = np.full(self.D, -10)
self.assertTrue(self.t.is_feasible(x))
x = default_rng().uniform(-10, 10, self.D)
self.assertTrue(self.t.is_feasible(x))
x = np.full(self.D, -20)
self.assertFalse(self.t.is_feasible(x))
x = np.full(self.D, 20)
self.assertFalse(self.t.is_feasible(x))
def test_nextIter(self):
for i in range(self.nGEN):
self.assertFalse(self.t.stopping_condition())
self.t.next_iter()
self.assertTrue(self.t.stopping_condition())
def test_stopCondI(self):
for i in range(self.nGEN):
self.assertFalse(self.t.stopping_condition_iter())
self.assertTrue(self.t.stopping_condition_iter())
def test_eval(self):
x = np.ones(self.D)
for i in range(self.nFES):
self.assertAlmostEqual(self.t.eval(x), self.D, msg='Error at %s iteration!!!' % i)
self.assertRaises(FesException, lambda: self.t.eval(x))
def test_eval_over_nFES(self):
x = np.ones(self.D)
for i in range(self.nFES):
self.t.eval(x)
self.assertRaises(FesException, lambda: self.t.eval(x))
def test_eval_over_nGEN(self):
x = np.ones(self.D)
for i in range(self.nGEN):
self.t.next_iter()
self.assertRaises(GenException, lambda: self.t.eval(x))
def test_nFES_count(self):
x = np.ones(self.D)
for i in range(self.nFES):
self.t.eval(x)
self.assertEqual(self.t.evals, i + 1, 'Error at %s. evaluation' % (i + 1))
def test_nGEN_count(self):
x = np.ones(self.D)
for i in range(self.nGEN):
self.t.next_iter()
self.assertEqual(self.t.iters, i + 1, 'Error at %s. iteration' % (i + 1))
def test_stopCond_evals(self):
x = np.ones(self.D)
for i in range(self.nFES - 1):
self.t.eval(x)
self.assertFalse(self.t.stopping_condition())
self.t.eval(x)
self.assertTrue(self.t.stopping_condition())
def test_stopCond_iters(self):
x = np.ones(self.D)
for i in range(self.nGEN - 1):
self.t.next_iter()
self.assertFalse(self.t.stopping_condition())
self.t.next_iter()
self.assertTrue(self.t.stopping_condition())
def test_stopCond_refValue(self):
x = np.ones(self.D)
for i in range(self.nGEN - 5):
self.assertFalse(self.t.stopping_condition())
self.assertEqual(self.D, self.t.eval(x))
self.t.next_iter()
x = np.zeros(self.D)
self.assertEqual(0, self.t.eval(x))
self.assertRaises(RefException, lambda: self.t.eval(x))
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 2.484375 | 2 |
torchfes/colvar/coordination.py | AkihideHayashi/torchfes1 | 0 | 12770536 | from math import inf
from typing import Dict, Tuple, List
import torch
from torch import nn, Tensor
import pointneighbor as pn
from ..adj import get_adj_sft_spc, vec_sod
from .. import properties as p
def ravel1(idx: List[Tensor], siz: List[int]):
return pn.fn.ravel1(
torch.stack(idx), torch.tensor(siz, device=idx[0].device), dim=0)
def smap(a, c, d, rd):
return (1.0 + c * rd ** a) ** d
def smap_c(a, b):
return 2 ** (a / b) - 1
def smap_d(a, b):
return - b / a
class Smap(nn.Module):
a: Tensor
b: Tensor
c: Tensor
d: Tensor
r0: Tensor
d0: Tensor
def __init__(self, d0, r0, a, b):
super().__init__()
self.register_buffer('d0', d0)
self.register_buffer('r0', r0)
self.register_buffer('a', a)
self.register_buffer('b', b)
self.register_buffer('c', smap_c(self.a, self.b))
self.register_buffer('d', smap_d(self.a, self.b))
def forward(self, eij, dst):
rd = (dst - self.d0[eij]) / self.r0[eij]
ret = smap(self.a[eij], self.c[eij], self.d[eij], rd
).masked_fill(eij < 0, 0.0)
return ret.masked_fill(rd < 0, 1.0)
def rational_almost(rd, nn, nd):
num = 1 - rd.pow(nn)
den = 1 - rd.pow(nd)
return num / den
def rational_singularity(rd, nn, nd):
return 0.5 * nn * (2 + (nn - nd) * (rd - 1)) / nd
def _no_nan(x: Tensor):
return (x == x).all()
class Rational(nn.Module):
def __init__(self, d0, r0, nn, nd):
super().__init__()
self.register_buffer('d0', d0)
self.register_buffer('r0', r0)
self.register_buffer('nn', nn)
self.register_buffer('nd', nd)
self.eps = 1e-2
def forward(self, eij, dst):
assert _no_nan(dst)
rd = (dst - self.d0[eij]) / self.r0[eij]
nn = self.nn[eij]
nd = self.nd[eij]
rat_almost = rational_almost(rd, nn, nd)
rat_singul = rational_singularity(rd, nn, nd)
sing = (rd < 1 + self.eps) & (rd > 1 - self.eps)
rat = torch.where(sing, rat_singul, rat_almost)
ret = rat.masked_fill(rd < 0, 1.0)
assert _no_nan(ret)
ret.masked_fill_(eij < 0, 0.0)
return ret
def mollifier_inner(x, a, rc):
return (
1
- torch.exp(- a / (rc * rc - (rc - x) ** 2)) / torch.exp(-a / rc / rc)
)
def mollifier_outer(x: Tensor, rc: Tensor) -> Tensor:
return (x <= 0).to(x)
def mollifier(x: Tensor, a: Tensor, rc: Tensor):
mask = (x > 1e-6) & (x < rc - 1e-6)
outer = mollifier_outer(x, rc)
inner = mollifier_inner(x[mask], a[mask], rc[mask])
return outer.masked_scatter(mask, inner)
class Mollifier(nn.Module):
a: Tensor
d0: Tensor
rc: Tensor
def __init__(self, a, d0, rc):
super().__init__()
self.register_buffer('a', a)
self.register_buffer('d0', d0)
self.register_buffer('rc', rc)
def forward(self, eij, dst):
rc = self.rc[eij]
d0 = self.d0[eij]
a = self.a[eij]
return mollifier(dst - d0, a, rc - d0)
class Coordination(nn.Module):
elm: Tensor
coef: Tensor
def __init__(self, mod, numel: int, rc: float,
items: Dict[Tuple[int, int], Dict[str, float]]):
super().__init__()
elm = -torch.ones([numel, numel], dtype=torch.long)
dic: Dict[str, List[float]] = {}
n = 0
for n, ((i, j), prp) in enumerate(items.items()):
elm[i, j] = n
elm[j, i] = n
for key, val in prp.items():
if key not in dic:
dic[key] = []
dic[key].append(val)
self.register_buffer('elm', elm)
self.mod = mod(**{key: torch.tensor(val) for key, val in dic.items()})
self.rc = rc
self.n = n + 1
self.pbc = torch.full([self.n], inf)
def forward(self, inp: Dict[str, Tensor]):
adj = get_adj_sft_spc(inp, p.coo, self.rc)
n, i, j = pn.coo2_n_i_j(adj)
ei = inp[p.elm][n, i]
ej = inp[p.elm][n, j]
eij = self.elm[ei, ej]
adapt = eij >= 0
_, sod = vec_sod(inp, adj)
dis: Tensor = sod[adapt].sqrt()
eij = eij[adapt]
coords = self.mod(eij, dis)
n_bch, _ = inp[p.elm].size()
idx = ravel1([n[adapt], eij], [n_bch, self.n])
coord = torch.zeros([n_bch * self.n],
dtype=sod.dtype, device=sod.device)
coord.index_add_(0, idx, coords)
ret = coord.view([n_bch, self.n]) / 2
return ret
class SlabCoordination(nn.Module):
elm: Tensor
coef: Tensor
wz: Tensor
wr: Tensor
pbc: Tensor
def __init__(self, mod, numel: int, rc: float,
items: Dict[
Tuple[int, int],
Tuple[List[float], Dict[str, float]]], dim: int = 2):
super().__init__()
elm = -torch.ones([numel, numel], dtype=torch.long)
dic: Dict[str, List[float]] = {}
wz = []
wr = []
n = 0
for n, ((i, j), (coef, prp)) in enumerate(items.items()):
elm[i, j] = n
for key, val in prp.items():
if key not in dic:
dic[key] = []
dic[key].append(val)
wz.append(coef[0])
wr.append(coef[1])
self.register_buffer('elm', elm)
self.mod = mod(**{key: torch.tensor(val) for key, val in dic.items()})
self.rc = rc
self.n = n + 1
self.register_buffer('pbc', torch.full([self.n], inf))
self.register_buffer('wz', 0.5 / torch.tensor(wz).pow(2))
self.register_buffer('wr', 0.5 / torch.tensor(wr).pow(2))
self.dim = dim
def forward(self, inp: Dict[str, Tensor]):
num_bch = inp[p.elm].size(0)
adj = get_adj_sft_spc(inp, p.coo, self.rc)
n, i, j = pn.coo2_n_i_j(adj)
ei = inp[p.elm][n, i]
ej = inp[p.elm][n, j]
eij = self.elm[ei, ej]
adapt = eij >= 0
vec, sod = vec_sod(inp, adj)
n, i, j = n[adapt], i[adapt], j[adapt]
vec, sod = vec[adapt], sod[adapt]
ei, ej, eij = ei[adapt], ej[adapt], eij[adapt]
zij = -vec[:, self.dim]
wij = torch.exp(-self.wz[eij] * zij) * torch.exp(-self.wr[eij] * sod)
i_max = i.max() + 5
ni = n * i_max + i
unique, idx, cou = torch.unique_consecutive(
ni, return_inverse=True, return_counts=True)
cum = pn.fn.cumsum_from_zero(cou)
den = torch.zeros_like(unique, dtype=wij.dtype)
den.index_add_(0, idx, wij)
num = torch.zeros_like(unique, dtype=wij.dtype)
num.index_add_(0, idx, wij * zij)
zij_ = num / den
eij_ = eij[cum]
n_ = n[cum]
cij_ = self.mod(eij_, zij_)
idx_ = n_ * self.n + eij_
ret = torch.zeros([num_bch * self.n],
device=n.device, dtype=cij_.dtype)
ret.index_add_(0, idx_, cij_)
return ret.view([num_bch, self.n])
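# --- Editor's note: hedged sanity-check sketch ---
# The smap()/smap_c()/smap_d() helpers above implement a switching function of the
# form s(rd) = (1 + c * rd**a) ** d with c = 2**(a/b) - 1 and d = -b/a, so that
# s(0) = 1 and s(1) = 0.5. The values below are illustrative only and are not part
# of the original module.
if __name__ == '__main__':
    a = torch.tensor([4.0])
    b = torch.tensor([8.0])
    c = smap_c(a, b)
    d = smap_d(a, b)
    rd = torch.tensor([0.0, 1.0, 2.0])
    print(smap(a, c, d, rd))  # roughly tensor([1.0000, 0.5000, 0.0172])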
| 2.15625 | 2 |
tests/wheel_google_cloud.py | gflaherty/rules_pyz | 26 | 12770537 | import google
print 'google:', google, google.__path__
import google.cloud
print 'google.cloud:', google.cloud
import google.cloud.datastore
def main():
# tests "extras" dependencies, as well as native dependencies
# google.cloud is also an "implicit" namespace package: the tool needs to make it a "real"
# package
    print 'datastore:', google.cloud.datastore
if __name__ == '__main__':
main()
| 2.09375 | 2 |
npt/datasets/income.py | jacobkimmel/non-parametric-transformers | 302 | 12770538 | <filename>npt/datasets/income.py
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from npt.datasets.base import BaseDataset
class IncomeDataset(BaseDataset):
def __init__(self, c):
super().__init__(
fixed_test_set_index=-99762)
self.c = c
def load(self):
"""KDD Income Dataset
Possibly used in VIME and TabNet.
There are multiple datasets called income.
https://archive.ics.uci.edu/ml/datasets/census+income
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
The KDD One is significantly larger than the other one.
        We will take the KDD one. Both TabNet and VIME are not super explicit about
which dataset they use.
TabNet cite Oza et al "Online Bagging and Boosting", which use the
bigger one. So we will start with that.
(But there is no full TabNet Code to confirm.)
Binary classification.
Target in last column.
        299,285 rows.
42 attributes. Use get_num_cat_auto to assign.
1 target
"""
# Load data from https://www.openml.org/d/4535
data_home = Path(self.c.data_path) / self.c.data_set
data = fetch_openml('Census-income', version=1, data_home=data_home)
# target in 'data'
self.data_table = data['data']
if isinstance(self.data_table, np.ndarray):
pass
elif isinstance(self.data_table, pd.DataFrame):
self.data_table = self.data_table.to_numpy()
self.N = self.data_table.shape[0]
self.D = self.data_table.shape[1]
# Target col is the last feature
# last column is target (V42)
# (binary classification, if income > or < 50k)
self.num_target_cols = []
self.cat_target_cols = [self.D - 1]
self.num_features, self.cat_features = BaseDataset.get_num_cat_auto(
self.data_table, cutoff=55)
print('income num cat features')
print(len(self.num_features))
print(len(self.cat_features))
# TODO: add missing entries to sanity check
self.missing_matrix = np.zeros((self.N, self.D), dtype=np.bool_)
self.is_data_loaded = True
self.tmp_file_or_dir_names = ['openml'] | 2.984375 | 3 |
src/library/requirements.py | dylanhogg/crazy-awesome-python | 13 | 12770539 | <filename>src/library/requirements.py
import urllib.request
import urllib.error
from pathlib import Path
from loguru import logger
def save_content(repopath, branch, filename, content):
folder = "data/"
Path(folder).mkdir(parents=True, exist_ok=True)
out_filename = folder + repopath.replace("/", "~") + f"~{filename}"
with open(out_filename, "w") as f:
f.write(content)
logger.info(f"Saved file {out_filename}")
def safe_get_url(repopath, branch, filename):
try:
url = f"https://raw.githubusercontent.com/{repopath}/{branch}/{filename}"
resource = urllib.request.urlopen(url)
charset = resource.headers.get_content_charset()
return resource.read().decode(charset).strip()
except urllib.error.HTTPError as ex:
return ""
def get_requirements(repopath):
filenames = [
"requirements.txt",
"setup.py", # TODO: needs postprocessing for install_requires etc.
"pyproject.toml" # TODO: postprocessing for [build-system] / requires | [tool.poetry] / packages etc
]
saved_filenames = []
for branch in ["master"]: # NOTE: master redirects to main as at Sep 2021.
for filename in filenames:
content = safe_get_url(repopath, branch, filename)
if len(content) > 0:
save_content(repopath, branch, filename, content)
saved_filenames.append(filename)
return saved_filenames # TODO: return list of tuples (repopath, branch, filename, local_filename)
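# --- Editor's note: hedged usage sketch ---
# Illustration of how get_requirements() might be called. The repository path below is
# only an example; the result depends on which dependency files exist on that repo's
# master branch, network access is required, and downloaded files are written to data/.
if __name__ == "__main__":
    found = get_requirements("psf/requests")  # placeholder repo path
    print("Dependency files found: {}".format(found))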
| 2.96875 | 3 |
libraries/botbuilder-core/tests/test_private_conversation_state.py | Fl4v/botbuilder-python | 388 | 12770540 | <filename>libraries/botbuilder-core/tests/test_private_conversation_state.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.core import MemoryStorage, TurnContext, PrivateConversationState
from botbuilder.core.adapters import TestAdapter
from botbuilder.schema import Activity, ChannelAccount, ConversationAccount
RECEIVED_MESSAGE = Activity(
text="received",
type="message",
channel_id="test",
conversation=ConversationAccount(id="convo"),
from_property=ChannelAccount(id="user"),
)
class TestPrivateConversationState(aiounittest.AsyncTestCase):
async def test_should_load_and_save_state_from_storage(self):
storage = MemoryStorage()
adapter = TestAdapter()
context = TurnContext(adapter, RECEIVED_MESSAGE)
private_conversation_state = PrivateConversationState(storage)
# Simulate a "Turn" in a conversation by loading the state,
# changing it and then saving the changes to state.
await private_conversation_state.load(context)
key = private_conversation_state.get_storage_key(context)
state = private_conversation_state.get(context)
assert state == {}, "State not loaded"
assert key, "Key not found"
state["test"] = "foo"
await private_conversation_state.save_changes(context)
# Check the storage to see if the changes to state were saved.
items = await storage.read([key])
assert key in items, "Saved state not found in storage."
assert items[key]["test"] == "foo", "Missing test value in stored state."
| 2.421875 | 2 |
school/migrations/0010_auto_20200816_0326.py | threecoolcat/ThreeCoolCat | 6 | 12770541 | <filename>school/migrations/0010_auto_20200816_0326.py
# Generated by Django 3.0.7 on 2020-08-16 03:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0008_auto_20200816_0303'),
]
operations = [
# migrations.AlterField(
# model_name='teacher',
# name='courses',
# field=models.ManyToManyField(related_name='CoursesOfTeacher', through='school.TeacherWithCourse', to='school.Course', verbose_name='课程'),
# ),
# migrations.AlterModelTable(
# name='teacherwithcourse',
# table='teacher_course',
# ),
]
| 1.648438 | 2 |
src/h02_learn/dataset/__init__.py | devpouya/GeneralizedEasyFirstParser | 0 | 12770542 | <reponame>devpouya/GeneralizedEasyFirstParser
from os import path
from torch.utils.data import DataLoader
from h01_data import load_vocabs, load_embeddings, get_ud_fname, get_oracle_actions#,get_oracle_actions_small,get_ud_fname_small
from utils import constants
from .syntax import SyntaxDataset, LanguageBatchSampler
from transformers import BertTokenizer, BertTokenizerFast
from transformers import AutoTokenizer,RobertaTokenizerFast
def generate_batch(batch,transition_system):
r"""
    Since the sentences have different lengths, a custom function
    generate_batch() is used to pad each field to the longest example
    in the batch. The function is passed to 'collate_fn' in
    torch.utils.data.DataLoader. The input to 'collate_fn' is a list of
    dataset entries with the size of batch_size, and the 'collate_fn'
    function packs them into a padded mini-batch.
Pay attention here and make sure that 'collate_fn' is declared
as a top level def. This ensures that the function is available
in each worker.
    Output:
        (text, pos): padded token-id and POS-tag tensors for the batch.
        (heads, rels): padded head-index and relation-label tensors (padding value -1).
        (transitions, relations_in_order): padded oracle transition sequences and the
            relations in the order they are produced.
        text_mappings: padded word-to-subword index mappings (padding value -1).
    """
tensor = batch[0][0][0]
# for entry in batch:
# print(entry[2])
batch_size = len(batch)
max_length_text = max([len(entry[0][0]) for entry in batch])
max_length = max([len(entry[0][1]) for entry in batch])
map_length = max([len(entry[3][0]) for entry in batch])
max_length_actions = max([len(entry[2][0]) for entry in batch])
text = tensor.new_zeros(batch_size, max_length_text)
text_mappings = tensor.new_ones(batch_size, map_length) * -1
pos = tensor.new_zeros(batch_size, max_length)
heads = tensor.new_ones(batch_size, max_length) * -1
rels = tensor.new_ones(batch_size, max_length) * -1
if transition_system == constants.agenda:
transitions = tensor.new_ones(batch_size, max_length_actions, 2) * -1
else:
transitions = tensor.new_ones(batch_size, max_length_actions) * -1
relations_in_order = tensor.new_zeros(batch_size, max_length)
for i, sentence in enumerate(batch):
sent_len = len(sentence[0][0])
pos_len = len(sentence[0][1])
map_len = len(sentence[3][0])
text_mappings[i, :map_len] = sentence[3][0]
text[i, :sent_len] = sentence[0][0]
pos[i, :pos_len] = sentence[0][1]
heads[i, :pos_len] = sentence[1][0]
rels[i, :pos_len] = sentence[1][1]
for i, sentence in enumerate(batch):
num_actions = len(sentence[2][0])
transitions[i, :num_actions] = sentence[2][0]
num_rels = len(sentence[2][1])
relations_in_order[i, :num_rels] = sentence[2][1]
return (text, pos), (heads, rels), (transitions, relations_in_order), text_mappings
def get_data_loader(fname, transitions_file, transition_system, tokenizer, batch_size, rel_size, shuffle):
dataset = SyntaxDataset(fname, transitions_file, transition_system, tokenizer, rel_size)
#print(dataset.language_starts)
#sampler = LanguageBatchSampler(batch_size=batch_size,language_start_indicies=dataset.language_starts, shuffle=shuffle)
#return DataLoader(dataset, collate_fn=lambda batch: generate_batch(batch,transition_system)), dataset.max_rel
#return DataLoader(dataset, collate_fn=lambda batch: generate_batch(batch,transition_system), batch_sampler=sampler), dataset.max_sent_len
return DataLoader(dataset, collate_fn=lambda batch: generate_batch(batch,transition_system)), dataset.max_sent_len
def get_data_loaders(data_path, all_languages, batch_size, batch_size_eval, transitions=None, transition_system=None,
bert_model=None, is_easy_first=True):
all_fnames_train = []
all_fnames_test = {}
all_fnames_dev = {}
all_transitions_train = []
all_transitions_test = {}
all_transitions_dev = {}
max_rels_size = 0
for language in all_languages:
src_path = path.join(data_path, constants.UD_PATH_PROCESSED, language)
#vocabs = load_vocabs(src_path)
#_,_,rels = vocabs
#if rels.size > max_rels_size:
# max_rels_size = rels.size
(fname_train, fname_dev, fname_test) = get_ud_fname(src_path)
all_fnames_train.append(fname_train)
all_fnames_test[language] = fname_test
all_fnames_dev[language] = fname_dev
#all_fnames_test.append(fname_test)
#all_fnames_dev.append(fname_dev)
transitions_train, transitions_dev, transitions_test = None, None, None
if transitions is not None:
if transition_system == "AGENDA-PARSER":
is_agenda=True
else:
is_agenda=False
(transitions_train, transitions_dev, transitions_test) = get_oracle_actions(src_path, transitions,is_easy_first)
all_transitions_train.append(transitions_train)
all_transitions_test[language] = transitions_test
all_transitions_dev[language] = transitions_dev
#all_transitions_test.append(transitions_test)
#all_transitions_dev.append(transitions_dev)
if len(all_languages) > 1:
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
else:
l = all_languages[0]
if l == "eu":
tokenizer = AutoTokenizer.from_pretrained("ixa-ehu/berteus-base-cased")
elif l == "ko":
tokenizer = AutoTokenizer.from_pretrained("kykim/bert-kor-base")
elif l == "hu":
tokenizer = AutoTokenizer.from_pretrained("SZTAKI-HLT/hubert-base-cc")
elif l == "af":
tokenizer = RobertaTokenizerFast.from_pretrained("jannesg/takalane_afr_roberta", add_prefix_space=True)
elif l == "la":
tokenizer = AutoTokenizer.from_pretrained("cook/cicero-similis")
elif l == "ur":
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-ur-cased")
elif l == "da":
tokenizer = BertTokenizer.from_pretrained("Maltehb/danish-bert-botxo")
elif l == "ga":
tokenizer = BertTokenizer.from_pretrained("DCU-NLP/bert-base-irish-cased-v1")
elif l == "lt":
tokenizer = BertTokenizer.from_pretrained("Geotrend/bert-base-lt-cased")
elif l == "qhe":
tokenizer = BertTokenizer.from_pretrained("monsoon-nlp/hindi-bert")
elif l == "sl":
tokenizer = BertTokenizer.from_pretrained("EMBEDDIA/sloberta")
else:
tokenizer = BertTokenizer.from_pretrained("Geotrend/bert-base-nl-cased")
#tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
#tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#tokenizer = AutoTokenizer.from_pretrained("ixa-ehu/berteus-base-cased")
trainloader, _ = get_data_loader(all_fnames_train, all_transitions_train, transition_system, tokenizer,
batch_size,max_rels_size,
shuffle=True)
devloader_lang_map = {}
testloader_lang_map = {}
for language in all_languages:
devloader, _ = get_data_loader(all_fnames_dev[language], all_transitions_dev[language], transition_system, tokenizer,
batch_size_eval,max_rels_size,
shuffle=False)
testloader, _ = get_data_loader(all_fnames_test[language], all_transitions_test[language], transition_system, tokenizer,
batch_size_eval,max_rels_size,
shuffle=False)
devloader_lang_map[language] = devloader
testloader_lang_map[language] = testloader
#max_sent_len = max(max_sent_len_dev, max_sent_len_test, max_sent_len_train)
return trainloader, devloader_lang_map, testloader_lang_map, max_rels_size
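# --- Editor's note: hedged usage sketch ---
# Illustration only: the data path, language list and oracle name below are placeholders,
# and the call assumes the preprocessed UD data and oracle transition files produced by
# the h01_data step already exist on disk.
if __name__ == '__main__':
    trainloader, devloaders, testloaders, max_rels = get_data_loaders(
        data_path='data',                 # placeholder
        all_languages=['en'],             # placeholder
        batch_size=16,
        batch_size_eval=32,
        transitions='oracle',             # placeholder oracle name
        transition_system=constants.agenda)
    print('train batches:', len(trainloader), 'max relations:', max_rels)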
| 2.375 | 2 |
buildroot/support/testing/infra/emulator.py | rbrenton/hassos | 617 | 12770543 | <reponame>rbrenton/hassos
import pexpect
import infra
class Emulator(object):
def __init__(self, builddir, downloaddir, logtofile, timeout_multiplier):
self.qemu = None
self.downloaddir = downloaddir
self.logfile = infra.open_log_file(builddir, "run", logtofile)
        # We use elastic runners on the cloud to run our tests. Those runners
# can take a long time to run the emulator. Use a timeout multiplier
# when running the tests to avoid sporadic failures.
self.timeout_multiplier = timeout_multiplier
# Start Qemu to boot the system
#
# arch: Qemu architecture to use
#
# kernel: path to the kernel image, or the special string
# 'builtin'. 'builtin' means a pre-built kernel image will be
# downloaded from ARTEFACTS_URL and suitable options are
# automatically passed to qemu and added to the kernel cmdline. So
# far only armv5, armv7 and i386 builtin kernels are available.
# If None, then no kernel is used, and we assume a bootable device
# will be specified.
#
# kernel_cmdline: array of kernel arguments to pass to Qemu -append option
#
# options: array of command line options to pass to Qemu
#
def boot(self, arch, kernel=None, kernel_cmdline=None, options=None):
if arch in ["armv7", "armv5"]:
qemu_arch = "arm"
else:
qemu_arch = arch
qemu_cmd = ["qemu-system-{}".format(qemu_arch),
"-serial", "stdio",
"-display", "none"]
if options:
qemu_cmd += options
if kernel_cmdline is None:
kernel_cmdline = []
if kernel:
if kernel == "builtin":
if arch in ["armv7", "armv5"]:
kernel_cmdline.append("console=ttyAMA0")
if arch == "armv7":
kernel = infra.download(self.downloaddir,
"kernel-vexpress")
dtb = infra.download(self.downloaddir,
"vexpress-v2p-ca9.dtb")
qemu_cmd += ["-dtb", dtb]
qemu_cmd += ["-M", "vexpress-a9"]
elif arch == "armv5":
kernel = infra.download(self.downloaddir,
"kernel-versatile")
qemu_cmd += ["-M", "versatilepb"]
qemu_cmd += ["-kernel", kernel]
if kernel_cmdline:
qemu_cmd += ["-append", " ".join(kernel_cmdline)]
self.logfile.write("> starting qemu with '%s'\n" % " ".join(qemu_cmd))
self.qemu = pexpect.spawn(qemu_cmd[0], qemu_cmd[1:],
timeout=5 * self.timeout_multiplier,
env={"QEMU_AUDIO_DRV": "none"})
# We want only stdout into the log to avoid double echo
self.qemu.logfile_read = self.logfile
# Wait for the login prompt to appear, and then login as root with
# the provided password, or no password if not specified.
def login(self, password=None):
# The login prompt can take some time to appear when running multiple
# instances in parallel, so set the timeout to a large value
index = self.qemu.expect(["buildroot login:", pexpect.TIMEOUT],
timeout=60 * self.timeout_multiplier)
if index != 0:
self.logfile.write("==> System does not boot")
raise SystemError("System does not boot")
self.qemu.sendline("root")
if password:
self.qemu.expect("Password:")
self.qemu.sendline(password)
index = self.qemu.expect(["# ", pexpect.TIMEOUT])
if index != 0:
raise SystemError("Cannot login")
self.run("dmesg -n 1")
# Run the given 'cmd' with a 'timeout' on the target
# return a tuple (output, exit_code)
def run(self, cmd, timeout=-1):
self.qemu.sendline(cmd)
if timeout != -1:
timeout *= self.timeout_multiplier
self.qemu.expect("# ", timeout=timeout)
# Remove double carriage return from qemu stdout so str.splitlines()
# works as expected.
output = self.qemu.before.replace("\r\r", "\r").splitlines()[1:]
self.qemu.sendline("echo $?")
self.qemu.expect("# ")
exit_code = self.qemu.before.splitlines()[2]
exit_code = int(exit_code)
return output, exit_code
def stop(self):
if self.qemu is None:
return
self.qemu.terminate(force=True)
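# --- Editor's note: hedged usage sketch ---
# A minimal illustration of the boot()/login()/run()/stop() flow documented above. The
# directories are placeholders and a real run would also need a root filesystem passed
# through 'options', so this is a sketch rather than a working test.
if __name__ == "__main__":
    emulator = Emulator(builddir="/tmp/build", downloaddir="/tmp/dl",
                        logtofile=True, timeout_multiplier=1)
    emulator.boot(arch="armv5", kernel="builtin",
                  kernel_cmdline=["root=/dev/ram0"])
    emulator.login()
    output, exit_code = emulator.run("uname -a")
    print(exit_code, output)
    emulator.stop()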
| 2.296875 | 2 |
main.py | harunlakodla/colab-socket-image-pyngrok | 0 | 12770544 | <gh_stars>0
"""
The flow of our Raspberry Pi program is handled inside the loop file.
Images are streamed through webSocketsOpencvServer.
Data is sent to the microcontroller over roboSocketCom using the pyserial library.
"""
import socket,threading,_thread,asyncio
from webSocketsOpencvServer import WebSocketsOpencvServer
from Adafruit_IO import Client, RequestError, Feed
from pyngrok import ngrok,conf
def log_event_callback(log):
print(str(log))
async def roboRUN(serverHost,roboOpencvServerPort,loop):
roboOpencv = WebSocketsOpencvServer(serverHost=serverHost, serverPort=roboOpencvServerPort, camId=0)
t1 = loop.create_task(roboOpencv.socketRun())
await t1
def mainLoop():
ADAFRUIT_IO_KEY = 'your adafruit key'
ADAFRUIT_IO_USERNAME = 'your adafruit io username'
aio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
"----------------------------"
serverHost="0.0.0.0"
roboOpencvServerPort=5000
"---------------------------"
loop = asyncio.get_event_loop()
    # get the device's hostname
hostname = socket.gethostname()
    # get the device's IP address
ip_address = socket.gethostbyname(hostname)
pyngrok_config = conf.PyngrokConfig(log_event_callback=log_event_callback,
max_logs=10)
conf.set_default(pyngrok_config)
ngrok.set_auth_token("<KEY>")
# Open a ngrok tunnel to the socket
public_url = ngrok.connect(roboOpencvServerPort,"tcp").public_url
# print("ngrok tunnel \"{}\" -> \"tcp://127.0.0.1:{}/\"".format(public_url, roboOpencvServerPort))
ssh_url, port = public_url.strip("tcp://").split(":")
print(f" * ngrok tunnel available, access with `ssh root@{ssh_url} -p{port}`")
"-------------------------"
try:
server_url = aio.feeds('server-url')
except RequestError: # Doesn't exist, create a new feed
feed = Feed(name="server-url")
server_url = aio.create_feed(feed)
try:
server_port = aio.feeds('server-port')
except RequestError: # Doesn't exist, create a new feed
feed = Feed(name="server-port")
server_port = aio.create_feed(feed)
aio.send_data(server_url.key, str(ssh_url))
aio.send_data(server_port.key, str(port))
# print(ip_address)
loop.run_until_complete(roboRUN(serverHost, roboOpencvServerPort,loop))
loop.run_forever()
# loop.run_until_complete(roboRUN(serverHost, roboServerPort, roboOpencvServerPort))
if __name__ == '__main__':
mainLoop()
| 2.796875 | 3 |
azure-mgmt-eventhub/azure/mgmt/eventhub/v2015_08_01/models/__init__.py | JonathanGailliez/azure-sdk-for-python | 1 | 12770545 | <filename>azure-mgmt-eventhub/azure/mgmt/eventhub/v2015_08_01/models/__init__.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .operation_display_py3 import OperationDisplay
from .operation_py3 import Operation
from .tracked_resource_py3 import TrackedResource
from .resource_py3 import Resource
from .sku_py3 import Sku
from .namespace_create_or_update_parameters_py3 import NamespaceCreateOrUpdateParameters
from .namespace_resource_py3 import NamespaceResource
from .shared_access_authorization_rule_create_or_update_parameters_py3 import SharedAccessAuthorizationRuleCreateOrUpdateParameters
from .shared_access_authorization_rule_resource_py3 import SharedAccessAuthorizationRuleResource
from .resource_list_keys_py3 import ResourceListKeys
from .regenerate_keys_parameters_py3 import RegenerateKeysParameters
from .event_hub_create_or_update_parameters_py3 import EventHubCreateOrUpdateParameters
from .event_hub_resource_py3 import EventHubResource
from .consumer_group_create_or_update_parameters_py3 import ConsumerGroupCreateOrUpdateParameters
from .consumer_group_resource_py3 import ConsumerGroupResource
from .check_name_availability_parameter_py3 import CheckNameAvailabilityParameter
from .check_name_availability_result_py3 import CheckNameAvailabilityResult
from .namespace_update_parameter_py3 import NamespaceUpdateParameter
except (SyntaxError, ImportError):
from .operation_display import OperationDisplay
from .operation import Operation
from .tracked_resource import TrackedResource
from .resource import Resource
from .sku import Sku
from .namespace_create_or_update_parameters import NamespaceCreateOrUpdateParameters
from .namespace_resource import NamespaceResource
from .shared_access_authorization_rule_create_or_update_parameters import SharedAccessAuthorizationRuleCreateOrUpdateParameters
from .shared_access_authorization_rule_resource import SharedAccessAuthorizationRuleResource
from .resource_list_keys import ResourceListKeys
from .regenerate_keys_parameters import RegenerateKeysParameters
from .event_hub_create_or_update_parameters import EventHubCreateOrUpdateParameters
from .event_hub_resource import EventHubResource
from .consumer_group_create_or_update_parameters import ConsumerGroupCreateOrUpdateParameters
from .consumer_group_resource import ConsumerGroupResource
from .check_name_availability_parameter import CheckNameAvailabilityParameter
from .check_name_availability_result import CheckNameAvailabilityResult
from .namespace_update_parameter import NamespaceUpdateParameter
from .operation_paged import OperationPaged
from .namespace_resource_paged import NamespaceResourcePaged
from .shared_access_authorization_rule_resource_paged import SharedAccessAuthorizationRuleResourcePaged
from .event_hub_resource_paged import EventHubResourcePaged
from .consumer_group_resource_paged import ConsumerGroupResourcePaged
from .event_hub_management_client_enums import (
SkuName,
SkuTier,
NamespaceState,
AccessRights,
Policykey,
EntityStatus,
UnavailableReason,
)
__all__ = [
'OperationDisplay',
'Operation',
'TrackedResource',
'Resource',
'Sku',
'NamespaceCreateOrUpdateParameters',
'NamespaceResource',
'SharedAccessAuthorizationRuleCreateOrUpdateParameters',
'SharedAccessAuthorizationRuleResource',
'ResourceListKeys',
'RegenerateKeysParameters',
'EventHubCreateOrUpdateParameters',
'EventHubResource',
'ConsumerGroupCreateOrUpdateParameters',
'ConsumerGroupResource',
'CheckNameAvailabilityParameter',
'CheckNameAvailabilityResult',
'NamespaceUpdateParameter',
'OperationPaged',
'NamespaceResourcePaged',
'SharedAccessAuthorizationRuleResourcePaged',
'EventHubResourcePaged',
'ConsumerGroupResourcePaged',
'SkuName',
'SkuTier',
'NamespaceState',
'AccessRights',
'Policykey',
'EntityStatus',
'UnavailableReason',
]
| 1.4375 | 1 |
subtask_134/generate_submission_format_retrieval_teststd.py | i2r-simmc/i2r-simmc-2021 | 0 | 12770546 | import argparse
import json
from copy import deepcopy
import numpy as np
def write_submission_output(dialog_turn_id_data, retrieval_scores, output_submission_format_path):
"""
Write the model_scores in
"""
submission_format_output=[]
for dialog in dialog_turn_id_data:
_dialog=[]
for turn_id in range(len(dialog['turn_info'])):
_turn=dialog['turn_info'][turn_id]
assert turn_id==_turn['turn_id']
if turn_id == dialog["final_turn_id"]:
_flat_id=_turn['flat_id']
start_index = _flat_id[0]
end_index = _flat_id[1]
round_scores = retrieval_scores[start_index:end_index]
_dialog.append({"turn_id":turn_id, "scores":round_scores})
submission_format_output.append({"dialog_id":dialog["dialog_id"],
"candidate_scores":_dialog})
with open(output_submission_format_path, "w") as f_retrieval_submission_format:
json.dump(submission_format_output, f_retrieval_submission_format)
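# --- Editor's note: illustrative output shape ---
# The submission file written above is a JSON list shaped roughly like:
# [
#   {"dialog_id": 123,
#    "candidate_scores": [{"turn_id": 4, "scores": [0.1, 0.9, ...]}]}
# ]
# with one scored turn per dialog (the dialog's final turn).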
def main(args):
print("Reading: {}".format(args["dialog_turn_id_json_path"]))
with open(args["dialog_turn_id_json_path"], "r") as file_id:
dialog_turn_id_data = json.load(file_id)
print("Reading: {}".format(args["model_flat_score_path"]))
with open(args["model_flat_score_path"], "r") as f_score:
retrieval_scores = f_score.readlines()
retrieval_scores = [-float(x.strip()) for x in retrieval_scores]
write_submission_output(
dialog_turn_id_data, retrieval_scores, args["output_submission_format_path"]
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Response Retrieval Evaluation")
parser.add_argument(
"--dialog_turn_id_json_path",
default="data/furniture_train_retrieval_candidates.json",
help="Data with retrieval candidates, gt",
)
parser.add_argument(
"--model_flat_score_path",
default=None,
help="Candidate scores generated by the model",
)
parser.add_argument(
"--output_submission_format_path",
default=None,
help="generate output_submission_format",
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args) | 2.828125 | 3 |
riffraff/riffle.py | RealPolitiX/riffraff | 0 | 12770547 | <filename>riffraff/riffle.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def riffle(*arr):
"""
    Interleave multiple arrays element by element. Arrays longer than the shortest input are truncated to its length.
**Parameter**\n
*arr: array
A number of arrays
**Return**\n
riffarr: 1D array
An array with interleaving elements from each input array.
"""
arrtmp = list(map(np.ravel, arr))
arrlen = list(map(len, arrtmp))
minlen = np.min(arrlen)
if not(np.prod(arrlen == minlen)):
arrtmp = [a[:minlen] for a in arrtmp]
riffarr = np.vstack(arrtmp).reshape((-1,), order='F')
return riffarr | 3.3125 | 3 |
lang/py/cookbook/v2/source/cb2_2_29_sol_1.py | ch1huizong/learning | 0 | 12770548 | def VersionFile(file_spec, vtype='copy'):
import os, shutil
if os.path.isfile(file_spec):
# check the 'vtype' parameter
if vtype not in ('copy', 'rename'):
raise ValueError, 'Unknown vtype %r' % (vtype,)
# Determine root filename so the extension doesn't get longer
n, e = os.path.splitext(file_spec)
        # Is e a three-digit integer preceded by a dot?
if len(e) == 4 and e[1:].isdigit():
num = 1 + int(e[1:])
root = n
else:
num = 0
root = file_spec
# Find next available file version
for i in xrange(num, 1000):
new_file = '%s.%03d' % (root, i)
if not os.path.exists(new_file):
if vtype == 'copy':
shutil.copy(file_spec, new_file)
else:
os.rename(file_spec, new_file)
return True
raise RuntimeError, "Can't %s %r, all names taken"%(vtype,file_spec)
return False
if __name__ == '__main__':
import os
# create a dummy file 'test.txt'
tfn = 'test.txt'
open(tfn, 'w').close()
# version it 3 times
print VersionFile(tfn)
# emits: True
print VersionFile(tfn)
# emits: True
print VersionFile(tfn)
# emits: True
# remove all test.txt* files we just made
for x in ('', '.000', '.001', '.002'):
os.unlink(tfn + x)
# show what happens when the file does not exist
print VersionFile(tfn)
# emits: False
print VersionFile(tfn)
# emits: False
| 2.984375 | 3 |
frankenstrings/frankenstrings.py | CybercentreCanada/assemblyline-service-frankenstrings | 2 | 12770549 | """ FrankenStrings Service """
import binascii
import hashlib
import mmap
import os
import re
import traceback
from typing import Dict, Iterable, List, Optional, Set, Tuple
import magic
import pefile
from assemblyline.common.net import is_valid_domain, is_valid_email
from assemblyline.common.str_utils import safe_str
from assemblyline_v4_service.common.balbuzard.bbcrack import bbcrack
from assemblyline_v4_service.common.balbuzard.patterns import PatternMatch
from assemblyline_v4_service.common.base import ServiceBase
from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic
from assemblyline_v4_service.common.request import ServiceRequest
from assemblyline_v4_service.common.task import MaxExtractedExceeded
from frankenstrings.flarefloss import strings
class FrankenStrings(ServiceBase):
""" FrankenStrings Service """
FILETYPES = [
'application',
'document',
'exec',
'image',
'Microsoft',
'text',
]
HEXENC_STRINGS = [
b'\\u',
b'%u',
b'\\x',
b'0x',
b'&H', # hex notation in VBA
]
BBCRACK_TO_TAG = {
'NET_FULL_URI': 'network.static.uri',
}
def __init__(self, config: Optional[Dict] = None) -> None:
super().__init__(config)
# Unless patterns are added/adjusted to patterns.py, the following should remain at 7:
self.st_min_length = 7
self.sample_type = ''
self.excess_extracted = 0
def start(self) -> None:
self.log.debug("FrankenStrings service started")
# --- Support Functions ------------------------------------------------------------------------------------------------
def extract_file(self, request, data, file_name, description):
""" Adds data to a request as an extracted file
request: the request
data: the file data
        file_name: the name to give the file
        description: the description of the file to give the request
"""
if self.excess_extracted:
            # Already over maximum number of extracted files
self.excess_extracted += 1
return
try:
# If for some reason the directory doesn't exist, create it
if not os.path.exists(self.working_directory):
os.makedirs(self.working_directory)
file_path = os.path.join(self.working_directory, file_name)
with open(file_path, 'wb') as f:
f.write(data)
request.add_extracted(file_path, file_name, description)
except MaxExtractedExceeded:
self.excess_extracted += 1
except Exception:
self.log.error(f"Error extracting {file_name} from {request.sha256}: {traceback.format_exc(limit=2)}")
def ioc_to_tag(self, data: bytes, patterns: PatternMatch, res: Optional[ResultSection] = None,
taglist: bool = False, check_length: bool = False, strs_max_size: int = 0,
st_max_length: int = 300) -> Dict[str, Set[str]]:
"""Searches data for patterns and adds as AL tag to result output.
Args:
data: Data to be searched.
patterns: FrankenStrings Patterns() object.
res: AL result.
taglist: True if tag list should be returned.
check_length: True if length of string should be compared to st_max_length.
strs_max_size: Maximum size of strings list. If greater then only network IOCs will be searched.
st_max_length: Maximum length of a string from data that can be searched.
Returns: tag list as dictionary (always empty if taglist is false)
"""
tags: Dict[str, Set[str]] = {}
min_length = self.st_min_length if check_length else 4
strs: Set[bytes] = set()
just_network = False
# Flare-FLOSS ascii string extract
for ast in strings.extract_ascii_strings(data, n=min_length):
if not check_length or len(ast.s) < st_max_length:
strs.add(ast.s)
# Flare-FLOSS unicode string extract
for ust in strings.extract_unicode_strings(data, n=min_length):
if not check_length or len(ust.s) < st_max_length:
strs.add(ust.s)
if check_length and len(strs) > strs_max_size:
just_network = True
for s in strs:
st_value: Dict[str, Iterable[bytes]] = patterns.ioc_match(s, bogon_ip=True, just_network=just_network)
for ty, val in st_value.items():
if taglist and ty not in tags:
tags[ty] = set()
for v in val:
if ty == 'network.static.domain' and not is_valid_domain(v.decode('utf-8')):
continue
if ty == 'network.email.address' and not is_valid_email(v.decode('utf-8')):
continue
if len(v) < 1001:
if res:
res.add_tag(ty, safe_str(v))
if taglist:
tags[ty].add(safe_str(v))
return tags
@staticmethod
def decode_bu(data: bytes, size: int) -> bytes:
""" Convert ascii to hex.
Args:
data: Ascii string to be converted.
size: Unit size.
Returns:
Decoded data.
"""
decoded = b''
if size == 2:
while data != b'':
decoded += binascii.a2b_hex(data[2:4])
data = data[4:]
if size == 4:
while data != b'':
decoded += binascii.a2b_hex(data[4:6]) + binascii.a2b_hex(data[2:4])
data = data[6:]
if size == 8:
while data != b'':
decoded += binascii.a2b_hex(data[8:10]) + binascii.a2b_hex(data[6:8]) + \
binascii.a2b_hex(data[4:6]) + binascii.a2b_hex(data[2:4])
data = data[10:]
if size == 16:
while data != b'':
decoded += binascii.a2b_hex(data[16:18]) + binascii.a2b_hex(data[14:16]) + \
binascii.a2b_hex(data[12:14]) + binascii.a2b_hex(data[10:12]) + \
binascii.a2b_hex(data[8:10]) + binascii.a2b_hex(data[6:8]) + \
binascii.a2b_hex(data[4:6]) + binascii.a2b_hex(data[2:4])
data = data[18:]
return decoded
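    # Illustrative note (added by editor): with size=2 each token is a 2-character
    # prefix plus 2 hex digits, so decode_bu(b'\\x41\\x42', size=2) yields b'AB';
    # with size=4 the two hex bytes are swapped (little-endian), so
    # decode_bu(b'\\u0041', size=4) yields b'A\x00'.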
@staticmethod
def unicode_longest_string(listdata: List[bytes]) -> bytes:
"""Compare sizes of unicode strings.
Args:
listdata: A list of binary strings
Returns:
Result of test: Do all strings match in length?
If True, returns all strings combined.
If False, returns longest string greater than 50 bytes.
If no string longer than 50 bytes, returns empty string.
"""
maxstr = max(listdata, key=len)
newstr = b""
if all(len(i) == len(maxstr) for i in listdata):
for i in listdata:
newstr += i
return newstr
if len(maxstr) > 50:
return maxstr
return newstr
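    # Illustrative note (added by editor): unicode_longest_string([b'abcd', b'efgh'])
    # returns b'abcdefgh' because all items share the same length, whereas with mixed
    # lengths only the longest string is returned, and only if it exceeds 50 bytes.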
def decode_encoded_udata(self, request: ServiceRequest, encoding: bytes,
data: bytes, decoded_res: Dict[str, Tuple[bytes, bytes]]) -> List[str]:
"""Compare sizes of unicode strings. Some code taken from bas64dump.py @ https://DidierStevens.com.
Args:
request: AL request object (for submitting extracted files to AL when needed).
encoding: Encoding string used (i.e. '0x').
data: Data to be examined.
Returns:
List of hashes of extracted files submitted to AL and list of decoded unicode data information.
"""
decoded_list: List[Tuple[bytes, bytes]] = []
dropped: List[str] = []
qword = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{16})+')
dword = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{8})+')
word = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{4})+')
byte = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{2})+')
qbu = re.findall(qword, data)
if qbu:
qlstr = self.unicode_longest_string(qbu)
if len(qlstr) > 50:
decoded_list.append((self.decode_bu(qlstr, size=16), qlstr[:200]))
dbu = re.findall(dword, data)
if dbu:
dlstr = self.unicode_longest_string(dbu)
if len(dlstr) > 50:
decoded_list.append((self.decode_bu(dlstr, size=8), dlstr[:200]))
wbu = re.findall(word, data)
if wbu:
wlstr = self.unicode_longest_string(wbu)
if len(wlstr) > 50:
decoded_list.append((self.decode_bu(wlstr, size=4), wlstr[:200]))
bbu = re.findall(byte, data)
if bbu:
blstr = self.unicode_longest_string(bbu)
if len(blstr) > 50:
decoded_list.append((self.decode_bu(blstr, size=2), blstr[:200]))
filtered_list = filter(lambda x: len(x[0]) > 30, decoded_list)
for decoded in filtered_list:
uniq_char = set(decoded[0])
sha256hash = hashlib.sha256(decoded[0]).hexdigest()
if len(decoded[0]) >= 500:
if len(uniq_char) > 20:
dropped.append(sha256hash)
udata_file_name = f"{sha256hash[0:10]}_enchex_{safe_str(encoding)}_decoded"
self.extract_file(request, decoded[0], udata_file_name,
"Extracted unicode file during FrankenStrings analysis")
elif len(uniq_char) > 6:
decoded_res[sha256hash] = decoded
return dropped
# Base64 Parse
def b64(self, request: ServiceRequest, b64_string: bytes,
patterns: PatternMatch) -> Tuple[Dict[str, Tuple[int, bytes, bytes, bytes]], Dict[str, Set[str]]]:
"""Decode B64 data.
Args:
request: AL request object (for submitting extracted files to AL when needed).
b64_string: Possible base64 string.
patterns: FrankenStrings patterns object.
Returns:
Result information.
"""
results: Dict[str, Tuple[int, bytes, bytes, bytes]] = {}
pat: Dict[str, Set[str]] = {}
if len(b64_string) >= 16 and len(b64_string) % 4 == 0:
# noinspection PyBroadException
try:
base64data = binascii.a2b_base64(b64_string)
sha256hash = hashlib.sha256(base64data).hexdigest()
# Search for embedded files of interest
if 200 < len(base64data) < 10000000:
m = magic.Magic(mime=True)
mag = magic.Magic()
ftype = m.from_buffer(base64data)
mag_ftype = mag.from_buffer(base64data)
for file_type in self.FILETYPES:
if (file_type in ftype and 'octet-stream' not in ftype) or file_type in mag_ftype:
b64_file_name = f"{sha256hash[0:10]}_b64_decoded"
self.extract_file(request, base64data, b64_file_name,
"Extracted b64 file during FrankenStrings analysis")
results[sha256hash] = (len(b64_string), b64_string[0:50],
b"[Possible file contents. See extracted files.]", b"")
return results, pat
# See if any IOCs in decoded data
pat = self.ioc_to_tag(base64data, patterns, taglist=True)
# Filter printable characters then put in results
asc_b64 = bytes(i for i in base64data if 31 < i < 127)
if len(asc_b64) > 0:
# If patterns exists, report. If not, report only if string looks interesting
if len(pat) > 0:
results[sha256hash] = (len(b64_string), b64_string[0:50], asc_b64, base64data)
# PDF and Office documents have too many FPS
elif not self.sample_type.startswith('document/office') \
and not self.sample_type.startswith('document/pdf'):
# If data has length greater than 50, and unique character to length ratio is high
uniq_char = set(asc_b64)
if len(uniq_char) > 12 and len(re.sub(b"[^A-Za-z0-9]+", b"", asc_b64)) > 50:
results[sha256hash] = (len(b64_string), b64_string[0:50], asc_b64, base64data)
# If not all printable characters but IOCs discovered, extract to file
elif len(pat) > 0:
b64_file_name = f"{sha256hash[0:10]}_b64_decoded"
self.extract_file(request, base64data, b64_file_name,
"Extracted b64 file during FrankenStrings analysis")
results[sha256hash] = (len(b64_string), b64_string[0:50],
b"[IOCs discovered with other non-printable data. "
b"See extracted files.]", b"")
except Exception:
return results, pat
return results, pat
def unhexlify_ascii(self, request: ServiceRequest, data: bytes, filetype: str,
patterns: PatternMatch) -> Tuple[bool, Dict[str, Set[str]], Dict[str, Tuple[bytes, bytes, str]]]:
"""Plain ascii hex conversion.
Args:
request: AL request object (for submitting extracted files to AL when needed).
data: Data to examine.
filetype: request file type.
patterns: Frankenstrings patterns object.
Returns:
If a file was extracted, tags, and xor results
"""
tags: Dict[str, Set[str]] = {}
xor: Dict[str, Tuple[bytes, bytes, str]] = {}
if len(data) % 2 != 0:
data = data[:-1]
# noinspection PyBroadException
try:
binstr = binascii.unhexlify(data)
except Exception:
return False, tags, xor
# If data has less than 7 uniq chars return
uniq_char = set(binstr)
if len(uniq_char) < 7:
return False, tags, xor
# If data is greater than 500 bytes create extracted file
if len(binstr) > 500:
if len(uniq_char) < 20:
return False, tags, xor
sha256hash = hashlib.sha256(binstr).hexdigest()
asciihex_file_name = f"{sha256hash[0:10]}_asciihex_decoded"
self.extract_file(request, binstr, asciihex_file_name,
"Extracted ascii-hex file during FrankenStrings analysis")
return True, tags, xor
# Else look for patterns
tags = self.ioc_to_tag(binstr, patterns, taglist=True, st_max_length=1000)
if tags:
return False, tags, xor
# Else look for small XOR encoded strings in code files
if 20 < len(binstr) <= 128 and filetype.startswith('code/'):
xresult: List[Tuple[str, str, bytes]] = bbcrack(binstr, level='small_string')
if len(xresult) > 0:
for transform, regex, match in xresult:
if regex.startswith('EXE_'):
# noinspection PyTypeChecker
xor['file.string.blacklisted'] = (data, match, transform)
else:
# noinspection PyTypeChecker
xor[regex] = (data, match, transform)
return False, tags, xor
return False, tags, xor
# Executable extraction
def pe_dump(self, request: ServiceRequest, temp_file: str, offset: int, file_string: str, msg: str,
fail_on_except: bool = False) -> bool:
"""Use PEFile application to find the end of the file (biggest section length wins).
Args:
request: AL request object (for submitting extracted PE AL).
temp_file: Sample file with possible embedded PE.
offset: Offset of temp_file where PE file begins.
file_string: String appended to extracted PE file name.
msg: File extraction message
fail_on_except: When False, if PEFile fails, extract from offset all the way to the end of the initial file.
Returns:
True if PE extracted.
"""
pe_extract = None
mm = None
try:
with open(temp_file, "rb") as f:
mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
pedata = mm[offset:]
# noinspection PyBroadException
try:
peinfo = pefile.PE(data=pedata)
lsize = 0
for section in peinfo.sections:
size = section.PointerToRawData + section.SizeOfRawData
if size > lsize:
lsize = size
if lsize > 0:
pe_extract = pedata[0:lsize]
else:
if not fail_on_except:
pe_extract = pedata
except Exception:
if not fail_on_except:
pe_extract = pedata
if pe_extract:
pe_file_name = f"{hashlib.sha256(pe_extract).hexdigest()[0:10]}_{file_string}"
self.extract_file(request, pe_extract, pe_file_name, msg)
except Exception:
            self.log.warning(f"Dumping PE file failed for {request.sha256}")
finally:
# noinspection PyBroadException
try:
if mm is not None:
mm.close()
except Exception:
pass
return bool(pe_extract)
# --- Results methods ------------------------------------------------------------------------------------------------
def ascii_results(self, request: ServiceRequest, patterns: PatternMatch,
max_length: int, st_max_size: int) -> Optional[ResultSection]:
"""
Finds and reports ASCII & Unicode IOC Strings.
Args:
request: AL request object with result section
patterns: PatternMatch object
Returns:
The created result section (with request.result as its parent)
"""
# Check the maximum length except for code files
chkl = not self.sample_type.startswith('code')
ascii_res = (ResultSection("The following IOC were found in plain text in the file:",
body_format=BODY_FORMAT.MEMORY_DUMP))
file_plainstr_iocs = self.ioc_to_tag(request.file_contents, patterns, ascii_res, taglist=True,
check_length=chkl, strs_max_size=st_max_size,
st_max_length=max_length)
if file_plainstr_iocs:
request.result.add_section(ascii_res)
for k, l in sorted(file_plainstr_iocs.items()):
for i in sorted(l):
ascii_res.add_line(f"Found {k.upper().replace('.', ' ')} string: {safe_str(i)}")
return ascii_res
return None
def embedded_pe_results(self, request: ServiceRequest) -> Optional[ResultSection]:
"""
Finds, extracts and reports embedded executables
Args:
request: AL request object with result section
Returns:
The result section (with request.result as its parent) if one is created
"""
# PE Strings
pat_exedos = rb'(?s)This program cannot be run in DOS mode'
pat_exeheader = rb'(?s)MZ.{32,1024}PE\000\000.+'
embedded_pe = False
for pos_exe in re.findall(pat_exeheader, request.file_contents[1:]):
if re.search(pat_exedos, pos_exe):
pe_sha256 = hashlib.sha256(pos_exe).hexdigest()
temp_file = os.path.join(self.working_directory, "EXE_TEMP_{}".format(pe_sha256))
with open(temp_file, 'wb') as pedata:
pedata.write(pos_exe)
embedded_pe = embedded_pe or self.pe_dump(request, temp_file, offset=0, file_string="embed_pe",
msg="PE header strings discovered in sample",
fail_on_except=True)
# Report embedded PEs if any are found
if embedded_pe:
return ResultSection("Embedded PE header discovered in sample. See extracted files.",
heuristic=Heuristic(3), parent=request.result)
return None
def base64_results(self, request: ServiceRequest, patterns: PatternMatch) -> Optional[ResultSection]:
"""
Finds and reports Base64 encoded text
Args:
request: AL request object with result section
patterns: PatternMatch object
Returns:
The result section (with request.result as its parent) if one is created
"""
b64_al_results = []
b64_matches = set()
# Base64 characters with possible space, newline characters and HTML line feeds (&#(XA|10);)
for b64_match in re.findall(b'([\x20]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}'
b'(?:&#[x1][A0];)?[\r]?[\n]?){2,})', request.file_contents):
            b64_string = b64_match.replace(b'\n', b'').replace(b'\r', b'').replace(b' ', b'')\
                .replace(b'&#xA;', b'').replace(b'&#10;', b'')
if b64_string in b64_matches:
continue
b64_matches.add(b64_string)
uniq_char = set(b64_string)
if len(uniq_char) > 6:
b64result, tags = self.b64(request, b64_string, patterns)
if len(b64result) > 0:
b64_al_results.append((b64result, tags))
# UTF-16 strings
for ust in strings.extract_unicode_strings(request.file_contents, n=self.st_min_length):
for b64_match in re.findall(b'([\x20]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}[\r]?[\n]?){2,})', ust.s):
b64_string = b64_match.replace(b'\n', b'').replace(b'\r', b'').replace(b' ', b'')
uniq_char = set(b64_string)
if len(uniq_char) > 6:
b64result, tags = self.b64(request, b64_string, patterns)
if len(b64result) > 0:
b64_al_results.append((b64result, tags))
# Report B64 Results
if len(b64_al_results) > 0:
b64_ascii_content: List[bytes] = []
b64_res = (ResultSection("Base64 Strings:", heuristic=Heuristic(1), parent=request.result))
b64index = 0
for b64dict, tags in b64_al_results:
for ttype, values in tags.items():
for v in values:
b64_res.add_tag(ttype, v)
for b64k, b64l in b64dict.items():
b64index += 1
sub_b64_res = (ResultSection(f"Result {b64index}", parent=b64_res))
sub_b64_res.add_line(f'BASE64 TEXT SIZE: {b64l[0]}')
sub_b64_res.add_line(f'BASE64 SAMPLE TEXT: {safe_str(b64l[1])}[........]')
sub_b64_res.add_line(f'DECODED SHA256: {b64k}')
subb_b64_res = (ResultSection("DECODED ASCII DUMP:",
body_format=BODY_FORMAT.MEMORY_DUMP, parent=sub_b64_res))
subb_b64_res.add_line(safe_str(b64l[2]))
if b64l[2] not in [b"[Possible file contents. See extracted files.]",
b"[IOCs discovered with other non-printable data. See extracted files.]"]:
b64_ascii_content.append(b64l[3])
# Write all non-extracted decoded b64 content to file
if len(b64_ascii_content) > 0:
all_b64 = b"\n".join(b64_ascii_content)
b64_all_sha256 = hashlib.sha256(all_b64).hexdigest()
self.extract_file(request, all_b64, f"all_b64_{b64_all_sha256[:7]}.txt",
"all misc decoded b64 from sample")
return b64_res
return None
def bbcrack_results(self, request: ServiceRequest) -> Optional[ResultSection]:
"""
        Uses Balbuzard's bbcrack to brute-force XOR'd strings and find embedded patterns/PE files of interest
Args:
request: AL request object with result section
Returns:
The result section (with request.result as its parent) if one is created
"""
x_res = (ResultSection("BBCrack XOR'd Strings:", body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=Heuristic(2)))
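        # bbcrack level 2 (deep scan only) tries a broader set of transforms than the
        # quicker level-1 pass, trading runtime for coverage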
if request.deep_scan:
xresult = bbcrack(request.file_contents, level=2)
else:
xresult = bbcrack(request.file_contents, level=1)
xformat_string = '%-20s %-7s %-7s %-50s'
xor_al_results = []
xindex = 0
for transform, regex, offset, score, smatch in xresult:
if regex == 'EXE_HEAD':
xindex += 1
xtemp_file = os.path.join(self.working_directory, f"EXE_HEAD_{xindex}_{offset}_{score}.unXORD")
with open(xtemp_file, 'wb') as xdata:
xdata.write(smatch)
                pe_extracted = self.pe_dump(request, xtemp_file, offset, file_string="xorpe_decoded",
                                            msg="Extracted XOR'd file during FrankenStrings analysis.")
if pe_extracted:
xor_al_results.append(xformat_string % (str(transform), offset, score,
"[PE Header Detected. "
"See Extracted files]"))
else:
if not regex.startswith("EXE_"):
x_res.add_tag(self.BBCRACK_TO_TAG.get(regex, regex), smatch)
xor_al_results.append(xformat_string
% (str(transform), offset, score, safe_str(smatch)))
# Result Graph:
if len(xor_al_results) > 0:
xcolumn_names = ('Transform', 'Offset', 'Score', 'Decoded String')
x_res.add_line(xformat_string % xcolumn_names)
x_res.add_line(xformat_string % tuple('-' * len(s) for s in xcolumn_names))
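            # The section body renders as a fixed-width table, e.g. (values illustrative):
            #   Transform            Offset  Score   Decoded String
            #   xor(0x20)            1024    10      http://example.com/...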
x_res.add_lines(xor_al_results)
request.result.add_section(x_res)
return x_res
return None
def unicode_results(self, request: ServiceRequest, patterns: PatternMatch) -> Optional[ResultSection]:
"""
        Finds and reports Unicode-encoded strings
Args:
request: AL request object with result section
patterns: PatternMatch object
Returns:
The result section (with request.result as its parent) if one is created
"""
unicode_al_results: Dict[str, Tuple[bytes, bytes]] = {}
dropped_unicode: List[Tuple[str, str]] = []
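        # HEXENC_STRINGS holds hex-escape markers (prefixes along the lines of \u, %u or
        # \x); a marker immediately followed by a hex byte pair hints at encoded data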
for hes in self.HEXENC_STRINGS:
if re.search(re.escape(hes) + b'[A-Fa-f0-9]{2}', request.file_contents):
dropped = self.decode_encoded_udata(request, hes, request.file_contents, unicode_al_results)
for uhash in dropped:
dropped_unicode.append((uhash, safe_str(hes)))
# Report Unicode Encoded Data:
unicode_heur = Heuristic(5, frequency=len(dropped_unicode)) if dropped_unicode else None
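        # Heuristic 5 fires once per dropped blob (frequency scales its score); the
        # section carries no heuristic when nothing was dropped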
unicode_emb_res = ResultSection("Found Unicode-Like Strings in Non-Executable:",
body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=unicode_heur)
for uhash, uenc in dropped_unicode:
unicode_emb_res.add_line(f"Extracted over 50 bytes of possible embedded unicode with "
f"{uenc} encoding. SHA256: {uhash}. See extracted files.")
for unires_index, (sha256, (decoded, encoded)) in enumerate(unicode_al_results.items()):
sub_uni_res = (ResultSection(f"Result {unires_index}",
parent=unicode_emb_res))
sub_uni_res.add_line(f'ENCODED TEXT SIZE: {len(decoded)}')
sub_uni_res.add_line(f'ENCODED SAMPLE TEXT: {safe_str(encoded)}[........]')
sub_uni_res.add_line(f'DECODED SHA256: {sha256}')
subb_uni_res = (ResultSection("DECODED ASCII DUMP:",
body_format=BODY_FORMAT.MEMORY_DUMP,
parent=sub_uni_res))
            subb_uni_res.add_line(safe_str(decoded))
# Look for IOCs of interest
hits = self.ioc_to_tag(decoded, patterns, sub_uni_res, st_max_length=1000, taglist=True)
if hits:
sub_uni_res.set_heuristic(6)
subb_uni_res.add_line("Suspicious string(s) found in decoded data.")
else:
sub_uni_res.set_heuristic(4)
if unicode_al_results or dropped_unicode:
request.result.add_section(unicode_emb_res)
return unicode_emb_res
return None
def hex_results(self, request: ServiceRequest, patterns: PatternMatch) -> None:
"""
        Finds and reports long ASCII-hex strings
Args:
request: AL request object with result section
patterns: PatternMatch object
"""
asciihex_file_found = False
asciihex_dict: Dict[str, Set[str]] = {}
asciihex_bb_dict: Dict[str, Set[Tuple[bytes, bytes, str]]] = {}
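        # Runs of 16 or more hex byte-pairs (32+ hex characters), optionally broken by
        # CR/LF between pairs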
hex_pat = re.compile(b'((?:[0-9a-fA-F]{2}[\r]?[\n]?){16,})')
for hex_match in re.findall(hex_pat, request.file_contents):
hex_string = hex_match.replace(b'\r', b'').replace(b'\n', b'')
afile_found, asciihex_results, xorhex_results = self.unhexlify_ascii(request, hex_string, request.file_type,
patterns)
if afile_found:
asciihex_file_found = True
for ascii_key, ascii_values in asciihex_results.items():
asciihex_dict.setdefault(ascii_key, set())
asciihex_dict[ascii_key].update(ascii_values)
for xor_key, xor_results in xorhex_results.items():
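                # A 'BB_' prefix appears to mark hits found by the XOR/bbcrack pass inside
                # unhexlify_ascii; strip it so the remainder can serve as the tag type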
if xor_key.startswith('BB_'):
xor_key = xor_key.split('_', 1)[1]
asciihex_bb_dict.setdefault(xor_key, set())
asciihex_bb_dict[xor_key].add(xor_results)
else:
asciihex_dict.setdefault(xor_key, set())
asciihex_dict[xor_key].add(safe_str(xor_results[1]))
# Report Ascii Hex Encoded Data:
if asciihex_file_found:
asciihex_emb_res = (ResultSection("Found Large Ascii Hex Strings in Non-Executable:",
body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=Heuristic(7),
parent=request.result))
asciihex_emb_res.add_line("Extracted possible ascii-hex object(s). See extracted files.")
if asciihex_dict:
# Different scores are used depending on whether the file is a document
asciihex_res = (ResultSection("ASCII HEX DECODED IOC Strings:",
body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=Heuristic(10 if request.file_type.startswith("document") else 8),
parent=request.result))
for key, hex_list in sorted(asciihex_dict.items()):
for h in hex_list:
asciihex_res.add_line(f"Found {key.replace('_', ' ')} decoded HEX string: {safe_str(h)}")
asciihex_res.add_tag(key, h)
if asciihex_bb_dict:
asciihex_bb_res = (ResultSection("ASCII HEX AND XOR DECODED IOC Strings:",
heuristic=Heuristic(9), parent=request.result))
for xindex, (xkey, xset) in enumerate(sorted(asciihex_bb_dict.items())):
for xresult in xset:
data, match, transform = xresult
asx_res = (ResultSection(f"Result {xindex}", parent=asciihex_bb_res))
asx_res.add_line(f"Found {xkey.replace('_', ' ')} decoded HEX string, masked with "
f"transform {safe_str(transform)}:")
asx_res.add_line("Decoded XOR string:")
asx_res.add_line(safe_str(match))
asx_res.add_line("Original ASCII HEX String:")
asx_res.add_line(safe_str(data))
asciihex_bb_res.add_tag(xkey, match)
# --- Execute ----------------------------------------------------------------------------------------------------------
def execute(self, request: ServiceRequest) -> None:
""" Main Module. See README for details."""
request.result = Result()
patterns = PatternMatch()
self.sample_type = request.file_type
self.excess_extracted = 0
# Filters for submission modes. Listed in order of use.
if request.deep_scan:
# Maximum size of submitted file to run this service:
max_size = 8000000
# String length maximum
# Used in basic ASCII and UNICODE modules:
max_length = 1000000
# String list maximum size
# List produced by basic ASCII and UNICODE module results and will determine
# if patterns.py will only evaluate network IOC patterns:
st_max_size = 1000000
# BBcrack maximum size of submitted file to run module:
bb_max_size = 200000
else:
max_size = self.config.get('max_size', 3000000)
max_length = self.config.get('max_length', 5000)
st_max_size = self.config.get('st_max_size', 0)
bb_max_size = self.config.get('bb_max_size', 85000)
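        # Deep scans hard-code the generous limits above; normal scans pull tunable
        # limits from the service config, falling back to the defaults shown here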
# Begin analysis
        if len(request.file_contents) >= max_size or self.sample_type.startswith("archive/"):
            # No analysis is done if the file is an archive or too large
            return
self.ascii_results(request, patterns, max_length, st_max_size)
self.embedded_pe_results(request)
# Possible encoded strings -- all sample types except code/* (code is handled by deobfuscripter service)
if not self.sample_type.startswith('code'):
self.base64_results(request, patterns)
            if len(request.file_contents) < bb_max_size:
self.bbcrack_results(request)
# Other possible encoded strings -- all sample types but code and executables
        if self.sample_type.split('/', 1)[0] not in ('executable', 'code'):
self.unicode_results(request, patterns)
# Go over again, looking for long ASCII-HEX character strings
if not self.sample_type.startswith('document/office'):
self.hex_results(request, patterns)
if self.excess_extracted:
self.log.warning(f"Too many files extracted from {request.sha256}, "
f"{self.excess_extracted} files were not extracted")
request.result.add_section(ResultSection(f"Over extraction limit: "
f"{self.excess_extracted} files were not extracted"))
| 1.882813 | 2 |
src/metadata/get_dependents.py | Yanivmd/maloss | 1 | 12770550 | <gh_stars>1-10
import pickle
import sys
results = []
def get_dependents_of_package(pkg_manager, pkg_name):
    # Every predecessor of pkg_name in the graph depends on it; recurse to
    # collect transitive dependents as well
    for pkg in G.predecessors(pkg_name):
        print(pkg)
        if pkg == pkg_manager:
            # Skip the package-manager root node itself
            continue
        if pkg not in results:
            # Record each dependent once; the membership check also prevents
            # infinite recursion when the graph contains cycles
            results.append(pkg)
            get_dependents_of_package(pkg_manager, pkg)
if __name__ == '__main__':
    if len(sys.argv) < 3:
        # Both the package manager and the package name are required
        print("Invalid number of arguments\nUsage: get_dependents.py package_manager pkg_name")
        sys.exit(1)

    # Load the pickled dependency graph built for this package manager
    pkg_manager = sys.argv[1]
    with open(pkg_manager + "_dependency_graph_object.txt", "rb") as graph_file:
        G = pickle.load(graph_file)

    get_dependents_of_package(pkg_manager, sys.argv[2])
    if len(results) == 0:
        print("package does not have any dependents")
    else:
        print(results)
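
# Example usage (illustrative -- assumes a pickled graph named
# "pypi_dependency_graph_object.txt" was produced beforehand from a
# networkx-style directed dependency graph):
#   $ python get_dependents.py pypi requests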
| 2.859375 | 3 |