Dataset columns (name, dtype, observed range or value counts):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
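Each row below is one source file described by this metadata plus its raw `content`. As a minimal sketch of how such a dataset could be consumed, assuming it is exposed as a Hugging Face `datasets` dataset; the identifier `org/python-source-files` is a placeholder, not taken from this dump:

```python
from itertools import islice

from datasets import load_dataset

# Placeholder identifier; substitute the actual source of this dump.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

# Inspect a handful of rows; each one mirrors the schema above.
for row in islice(ds, 5):
    stars = row["max_stars_count"] or 0   # nullable column (empty cells in the rows below)
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], stars)
    print(row["content"][:120])           # the raw Python source text
```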
3354c275c13c721af222bf79de13479550e220ff
| 706
|
py
|
Python
|
src/utils/get_model_and_data_checkpointed.py
|
Tim-blo/ACTOR
|
f10d7534a34fa557ab6b1739217649ae4f654505
|
[
"MIT"
] | null | null | null |
src/utils/get_model_and_data_checkpointed.py
|
Tim-blo/ACTOR
|
f10d7534a34fa557ab6b1739217649ae4f654505
|
[
"MIT"
] | null | null | null |
src/utils/get_model_and_data_checkpointed.py
|
Tim-blo/ACTOR
|
f10d7534a34fa557ab6b1739217649ae4f654505
|
[
"MIT"
] | null | null | null |
import os
import torch
from ..datasets.get_dataset import get_datasets
from ..recognition.get_model import get_model as get_rec_model
from ..models.get_model import get_model as get_gen_model
def get_model_and_data_checkpointed(parameters):
datasets = get_datasets(parameters)
model = get_gen_model(parameters)
print("Restore weights..")
checkpoint = parameters["checkpoint"]
checkpoint_path = os.path.join(parameters["folder"],
'checkpoint_{:04d}.pth.tar'.format(checkpoint))
state_dict = torch.load(checkpoint_path, map_location=parameters["device"])
model.load_state_dict(state_dict)
print("Checkpoint model loaded!")
return model, datasets
| 41.529412
| 94
| 0.720963
|
dde7d701b4016c3d5c9cee7a6344ff74d4236de1
| 7,455
|
py
|
Python
|
scripts/Qubit/TimeDomain/Dimon/chi11_cal.py
|
sourav-majumder/qtlab
|
96b2a127b1df7b45622c90229bd5ef8a4083614e
|
[
"MIT"
] | null | null | null |
scripts/Qubit/TimeDomain/Dimon/chi11_cal.py
|
sourav-majumder/qtlab
|
96b2a127b1df7b45622c90229bd5ef8a4083614e
|
[
"MIT"
] | null | null | null |
scripts/Qubit/TimeDomain/Dimon/chi11_cal.py
|
sourav-majumder/qtlab
|
96b2a127b1df7b45622c90229bd5ef8a4083614e
|
[
"MIT"
] | null | null | null |
from constants import *
from ZurichInstruments_UHFLI import ZurichInstruments_UHFLI
import sys
import os
import shutil
import qt
import progressbar
import numpy as np
import time
import awg_on_first as opt
##############################################
us = 1e-6
##############################################
uhf = ZurichInstruments_UHFLI('dev2232')
znb = qt.instruments.create('ZNB20', 'RhodeSchwartz_ZNB20', address='TCPIP0::192.168.1.3::INSTR')
rte = qt.instruments.create('RTE1104', 'RhodeSchwartz_RTE1104', address = 'TCPIP0::192.168.1.6::INSTR')
# sgs = qt.instruments.create('sgs', 'RS_SGS100A', address = SGS100A_ADDRESS)
# aps = qt.instruments.create('APSYN420', 'AnaPico_APSYN420', address = APSYN420_ADDRESS)
# fsv = qt.instruments.create('FSV', 'RhodeSchwartz_FSV', address = FSV_ADDRESS)
smf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = SMF100_ADDRESS)
# aps = qt.instruments.create('APSYN420', 'AnaPico_APSYN420', address = APSYN420_ADDRESS)
##############################################
def metagen(var_outer,var_outermost):
# Meta file creator
metafile=open('%s.meta.txt' % data.get_filepath()[:-4], 'w+')
metafile.write("#Inner"+"\n"+str(len_innerloop)+"\n"+str(inner_start)+"\n"+str(record_length)+"\n"+inner_meta_info)
metafile.write("#Outer"+"\n"+str(len_outerloop)+"\n"+str(outer_start)+"\n"+str(var_outer)+"\n"+outer_meta_info)
metafile.write("#OuterMost"+"\n"+str(len_outermostloop)+"\n"+str(outermost_start)+"\n"+str(var_outermost)+"\n"+outermost_meta_info)
metafile.write('#for each of the values\n')
values = data.get_values()
i=0
while i<len(values):
metafile.write('%d\n%s\n'% (i+3, values[i]['name']))
i+=1
metafile.close()
def generate_meta_file(outer_index,outer_value, outermost_index,outermost_value):
metafile = open('%s.meta.txt' % data.get_filepath()[:-4], 'w')
metafile.write('#inner loop\n%s\n%s\n%s\n%s\n'%
(record_length, start_time/us, stop_time/us, 'Time(us)'))
metafile.write('#outer loop\n%s\n%s\n%s\n%s\n'%
(outer_index, outer_value, outerloop[0], 'Pulse len (samples)'))
metafile.write('#outermost loop\n%s\n%s\n%s\n%s\n'%
(outermost_index, outermostloop[0], outermost_value, 'Frequency'))
metafile.write('#for each of the values\n')
values = data.get_values()
i=0
while i<len(values):
metafile.write('%d\n%s\n'% (i+4, values[i]['name']))
i+=1
metafile.close()
##################################################
def copy_script():
shutil.copy2('%s'%sys.argv[0],'%s/%s'%(data.get_filepath()[:-(len(data_file_name)+11)],os.path.basename(sys.argv[0])))
##################################################
#
# NOTE on the units used in AWG code
#
# delay = CLOCK CYCLES = time between the control and measure pulses; each cycle is 4.4 ns
# pulse_length = SAMPLE POINTS = length of the flat top of the Gaussian control pulse
# power_points = mV = lock-in output in mV, assuming 750 mV full scale
# sigma = SAMPLE POINTS = width of the Gaussian rise and fall
#
######################
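# A minimal helper sketch, not part of the original script: it only illustrates
# the unit convention described in the NOTE above (4.4 ns per AWG clock cycle);
# the function name is hypothetical.
def us_to_awg_cycles(t_us, cycle_s=4.4e-9):
    # Convert a duration given in microseconds to the nearest whole number of AWG clock cycles.
    return int(round(t_us * 1e-6 / cycle_s))
# Example: us_to_awg_cycles(10) == 2273, i.e. the 10 us measure pulse spans about 2273 cycles.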
def get_IQ():
with open('Qubit/optimize_data_update.txt', 'r') as f:
lines = f.read().splitlines()
print('**** Reading the last calibration ****')
last_line = lines[-1].split(' ')
ph = float(last_line[4])
return ph
ph = get_IQ()
def awg_program(ph = ph):
awg_program_string = """
const control_power = 1;
const cycle = 4.4e-9;
const us = 1e-6;
const ns = 1e-9;
const len = 50;
const phase = %f;
const measure_pulse_length = 10 ; // us
//Define the frequency.
wave w1 = sine(256, 1.0, phase, 24);
wave w2 = sine(256, 1.0, 0 , 24);
//Define the envelope.
wave w_gauss = gauss(256,control_power, 128, 32);
//Multiply both sin and envelope.
wave w_play1 = multiply(w1, w_gauss);
wave w_play2 = multiply(w2, w_gauss);
while (true) {
playWave(w_play1, w_play2);
waitWave();
wait(14);
setTrigger(0b0010);
wait(len);
setTrigger(0b0000);
wait(1);
setTrigger(0b0001);
wait(measure_pulse_length*us/cycle);
setTrigger(0b0000);
wait(150*us/cycle);
}
"""%(ph)
return awg_program_string
data_file_name = raw_input('Enter name of data file: ')
data=qt.Data(name=data_file_name)
# data.add_coordinate('Frequency', units = 'Hz')
data.add_coordinate('power', units = 'dBm')
data.add_coordinate('Time', units = 's')
data.add_value('X-Quadrature')
data.add_value('Y-Quadrature')
data.add_value('R-Quadrature')
# OuterMost loop is
outermost_start = 6.000660*GHz
outermost_stop = 6.000660*GHz # One more than the end point you
outermost_len = 1
outermost_meta_info = "Frequency (Hz)"
#Outerloop
outer_start = 7.426*GHz
outer_stop = 7.440*GHz
outer_len = 57
outer_meta_info = "pulse_len (samples)"
Mix_freq = 133*MHz
#####################
outermostloop = np.linspace(outermost_start,outermost_stop,outermost_len)
outerloop = np.linspace(outer_start,outer_stop,outer_len)
len_outermostloop=len(outermostloop)
len_outerloop=len(outerloop)
data_info=["X","Y","R"]
##### Initialization of Instruments
uhf.setup_awg(awg_program())
uhf.awg_on(single=False)
channels = [1,2]
# aps.set_frequency(control_array[0])
## Initialization of sgs
# sgs.set_power(-20)
############# RUN once to setup the innermost loop
start = time.time()
rte.wait_till_complete()
start_time, stop_time, record_length = rte.get_header()
assert raw_input('continue? [y/n]').upper() != 'N'
inner_start = start_time
inner_stop = stop_time+1
inner_meta_info = "Inner"
innerloop = np.linspace(inner_start,inner_stop,record_length)
len_innerloop = len(innerloop)
###########################################
delay_progress_bar = progressbar.ProgressBar(maxval=len_outerloop, \
widgets=['Total: ', progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' (', progressbar.ETA(), ') '])
delay_progress_bar.start()
first_time = True # to copy script and generate meta file once
for outermost_index, outermost_value in enumerate(outermostloop):
#Stuff can be added here to execute a 3D loop
outermost_value_array = np.linspace(outermost_value, outermost_value, record_length)
for outer_index, outer_value in enumerate(outerloop):
smf.set_frequency(outer_value)
znb.set_center_frequency(outer_value-Mix_freq)
znb.send_trigger()
# sgs.set_power(outer_value)
# uhf.setup_awg(awg_program(outer_value))
# uhf.awg_on(single=False)
# aps.set_frequency(control)
rte.reset_averages()
rte.run_nx_single(wait=True)
outer_value_array = np.linspace(outer_value, outer_value, record_length)
time_array, voltages = rte.get_data(channels)
voltages.append((voltages[0]**2+voltages[1]**2)**0.5)
data.add_data_point(outer_value_array, time_array, *voltages)
if first_time:
generate_meta_file(outer_len, outer_stop, outermost_len,outermost_stop)
copy_script()
first_time = False
delay_progress_bar.update(outer_index+1)
data.close_file()
print(time.time()-start)
| 35.165094
| 136
| 0.629913
|
3385087c39320d994a67b9457868d7264c006b2a
| 3,216
|
py
|
Python
|
src/kblue/nodes/managers.py
|
tulare/kblue
|
731aa3c4600f3b7c0e53efb51075335ca266b665
|
[
"MIT"
] | null | null | null |
src/kblue/nodes/managers.py
|
tulare/kblue
|
731aa3c4600f3b7c0e53efb51075335ca266b665
|
[
"MIT"
] | null | null | null |
src/kblue/nodes/managers.py
|
tulare/kblue
|
731aa3c4600f3b7c0e53efb51075335ca266b665
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
import dbus
from .const import *
from .core import Node
__all__ = [
'ObjectManager',
'AgentManager', 'ProfileManager', 'HealthManager',
'GattManager', 'LEAdvertisingManager'
]
# ------------------------------------------------------------------------------
# --- Class ObjectManager -----------------------------------------------------
# ------------------------------------------------------------------------------
class ObjectManager(Node) :
INTERFACE = DBUS_OM_INTERFACE
def __init__(self, dbus_node=None) :
if dbus_node is None :
bus = dbus.SystemBus()
path = '/'
obj = bus.get_object(SERVICE_NAME, path)
dbus_node = dbus.Interface(obj, self.INTERFACE)
super().__init__(dbus_node)
@property
def managed_objects(self) :
return self.GetManagedObjects()
@property
def paths(self) :
return sorted(str(path) for path in self.managed_objects)
# ------------------------------------------------------------------------------
# --- Class BluezManager -----------------------------------------------------
# ------------------------------------------------------------------------------
class BluezManager(Node) :
INTERFACE = BLUEZ_MANAGER_INTERFACE
def __init__(self, dbus_node=None) :
if dbus_node is None :
bus = dbus.SystemBus()
path = '/org/bluez'
obj = bus.get_object(SERVICE_NAME, path)
dbus_node = dbus.Interface(obj, self.INTERFACE)
super().__init__(dbus_node)
# ------------------------------------------------------------------------------
# --- Class AgentManager -----------------------------------------------------
# ------------------------------------------------------------------------------
class AgentManager(BluezManager) :
INTERFACE = AGENT_MANAGER_INTERFACE
# ------------------------------------------------------------------------------
# --- Class ProfileManager -----------------------------------------------------
# ------------------------------------------------------------------------------
class ProfileManager(BluezManager) :
INTERFACE = PROFILE_MANAGER_INTERFACE
# ------------------------------------------------------------------------------
# --- Class HealthManager -----------------------------------------------------
# ------------------------------------------------------------------------------
class HealthManager(BluezManager) :
INTERFACE = HEALTH_MANAGER_INTERFACE
# ------------------------------------------------------------------------------
# --- Class GattManager --------------------------------------------------------
# ------------------------------------------------------------------------------
class GattManager(Node) :
INTERFACE = GATT_MANAGER_INTERFACE
# ------------------------------------------------------------------------------
# --- Class LEAdvertisingManager -----------------------------------------------
# ------------------------------------------------------------------------------
class LEAdvertisingManager(Node) :
INTERFACE = LE_ADVERTISING_MANAGER_INTERFACE
| 33.5
| 80
| 0.35199
|
eefb8c3609f361ae6a8603c14977c9138905e55f
| 928
|
py
|
Python
|
src/sage/modular/modform/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/modular/modform/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/modular/modform/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
#########################################################################
# Copyright (C) 2004--2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#########################################################################
from .constructor import ModularForms, CuspForms, EisensteinForms, Newforms, Newform
from .eis_series import eisenstein_series_qexp, eisenstein_series_lseries
from .half_integral import half_integral_weight_modform_basis
from .theta import theta_qexp, theta2_qexp
from .j_invariant import j_invariant_qexp
from .vm_basis import victor_miller_basis, delta_qexp
from .hecke_operator_on_qexp import hecke_operator_on_qexp, hecke_operator_on_basis
from .numerical import NumericalEigenforms as numerical_eigenforms
from .element import delta_lseries
from .ring import ModularFormsRing
| 33.142857
| 84
| 0.682112
|
8055c4b2129a3647686dd1bc93cddb6f93d142b1
| 5,524
|
py
|
Python
|
Metrics/Find and Replace Kerning Groups.py
|
danielgamage/Mekkablue-Scripts
|
0b0b4468ec938f8c669b3552e2fa429080b65bf1
|
[
"Apache-2.0"
] | null | null | null |
Metrics/Find and Replace Kerning Groups.py
|
danielgamage/Mekkablue-Scripts
|
0b0b4468ec938f8c669b3552e2fa429080b65bf1
|
[
"Apache-2.0"
] | null | null | null |
Metrics/Find and Replace Kerning Groups.py
|
danielgamage/Mekkablue-Scripts
|
0b0b4468ec938f8c669b3552e2fa429080b65bf1
|
[
"Apache-2.0"
] | null | null | null |
#MenuTitle: Find and Replace in Kerning Groups
# -*- coding: utf-8 -*-
__doc__="""
Finds and replaces text in the metrics keys of selected glyphs. Leave the Find string blank to hang the replace string at the end of the metrics keys.
"""
import vanilla
import GlyphsApp
from Foundation import NSUserDefaults
class KerningGroupReplacer( object ):
def __init__( self ):
self.w = vanilla.FloatingWindow( (335, 125), "Find and Replace in Kerning Groups", autosaveName="com.mekkablue.KerningGroupReplacer.mainwindow" )
self.w.text_Find = vanilla.TextBox( (10, 30+3, 55, 20), "Find", sizeStyle='small' )
self.w.text_Replace = vanilla.TextBox( (10, 55+3, 55, 20), "Replace", sizeStyle='small' )
self.w.text_left = vanilla.TextBox( (70, 12, 120, 14), "Left Group", sizeStyle='small' )
self.w.leftSearchFor = vanilla.EditText( (70, 30, 120, 20), ".tf", callback=self.SavePreferences, sizeStyle='small', placeholder='(leave these blank ...' )
self.w.leftReplaceBy = vanilla.EditText( (70, 55, 120, 20), "", callback=self.SavePreferences, sizeStyle='small', placeholder='(empty)' )
self.w.text_right = vanilla.TextBox( (200, 12, 120, 14), "Right Group", sizeStyle='small' )
self.w.rightSearchFor = vanilla.EditText( (200, 30, 120, 20), ".tf", callback=self.SavePreferences, sizeStyle='small', placeholder='... to append)' )
self.w.rightReplaceBy = vanilla.EditText( (200, 55, 120, 20), "", callback=self.SavePreferences, sizeStyle='small', placeholder='(empty)' )
self.w.runButton = vanilla.Button((-110, -20-15, -15, -15), "Replace", sizeStyle='regular', callback=self.KerningGroupReplaceMain )
self.w.setDefaultButton( self.w.runButton )
if not self.LoadPreferences():
print "Note: Could not load preferences. Will resort to defaults"
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.leftSearchFor" ] = self.w.leftSearchFor.get()
Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.leftReplaceBy" ] = self.w.leftReplaceBy.get()
Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.rightSearchFor" ] = self.w.rightSearchFor.get()
Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.rightReplaceBy" ] = self.w.rightReplaceBy.get()
except:
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.KerningGroupReplacer.leftSearchFor":"",
"com.mekkablue.KerningGroupReplacer.leftReplaceBy":"",
"com.mekkablue.KerningGroupReplacer.rightSearchFor":"",
"com.mekkablue.KerningGroupReplacer.rightReplaceBy":""
}
)
self.w.leftSearchFor.set( Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.leftSearchFor" ] )
self.w.leftReplaceBy.set( Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.leftReplaceBy" ] )
self.w.rightSearchFor.set( Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.rightSearchFor" ] )
self.w.rightReplaceBy.set( Glyphs.defaults[ "com.mekkablue.KerningGroupReplacer.rightReplaceBy" ] )
except:
return False
return True
def replaceGroupName( self, groupName, searchString, replaceString ):
try:
if groupName:
if searchString == "":
# simply append replaceString if no search string is supplied:
return groupName + replaceString
else:
return groupName.replace( searchString, replaceString )
else:
return None
except Exception, e:
print e
def replaceInGroups( self, thisGlyph, LsearchFor, LreplaceBy, RsearchFor, RreplaceBy ):
thisGlyph.beginUndo()
thisGlyphName = thisGlyph.name
# Left Group:
oldLeftGroup = thisGlyph.leftKerningGroup
if not oldLeftGroup:
print "%s: no left group set. Left unchanged." % thisGlyphName
else:
newLeftGroup = self.replaceGroupName( oldLeftGroup, LsearchFor, LreplaceBy )
if oldLeftGroup == newLeftGroup:
print "%s: left group unchanged (%s)." % (thisGlyphName, newLeftGroup)
else:
thisGlyph.leftKerningGroup = newLeftGroup
print "%s: new left group: '%s'." % ( thisGlyphName, newLeftGroup )
# Right Group:
oldRightGroup = thisGlyph.rightKerningGroup
if not oldRightGroup:
print "%s: no right group set. Left unchanged." % thisGlyphName
else:
newRightGroup = self.replaceGroupName( oldRightGroup, RsearchFor, RreplaceBy )
if oldRightGroup == newRightGroup:
print "%s: right group unchanged (%s)." % ( thisGlyph.name, newRightGroup )
else:
thisGlyph.rightKerningGroup = newRightGroup
print "%s: new right group: '%s'." % ( thisGlyph.name, newRightGroup )
thisGlyph.endUndo()
def KerningGroupReplaceMain( self, sender ):
Glyphs.clearLog()
Glyphs.font.disableUpdateInterface()
try:
if not self.SavePreferences( self ):
print "Note: Could not write preferences."
Font = Glyphs.font
selectedLayers = Font.selectedLayers
currentLayers = [ l for l in selectedLayers if l.parent.name is not None ]
LsearchFor = self.w.leftSearchFor.get()
LreplaceBy = self.w.leftReplaceBy.get()
RsearchFor = self.w.rightSearchFor.get()
RreplaceBy = self.w.rightReplaceBy.get()
for thisLayer in currentLayers:
try:
thisGlyph = thisLayer.parent
self.replaceInGroups( thisGlyph, LsearchFor, LreplaceBy, RsearchFor, RreplaceBy )
except Exception, e:
print "Error while processing glyph %s:" % thisGlyph.name
print e
self.w.close()
except Exception, e:
raise e
Glyphs.font.enableUpdateInterface()
KerningGroupReplacer()
| 39.457143
| 157
| 0.71651
|
8cd9a8d638fd01e6961e9bf8fa3cef64d10cb6e8
| 585
|
py
|
Python
|
backend/apps/users/urls.py
|
adm-in/foodgram-project-react
|
c681297d9f1b34a6504350c4fb4306cbdb156c6a
|
[
"Apache-2.0"
] | null | null | null |
backend/apps/users/urls.py
|
adm-in/foodgram-project-react
|
c681297d9f1b34a6504350c4fb4306cbdb156c6a
|
[
"Apache-2.0"
] | null | null | null |
backend/apps/users/urls.py
|
adm-in/foodgram-project-react
|
c681297d9f1b34a6504350c4fb4306cbdb156c6a
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from users import views
app_name = 'users'
router_v1 = DefaultRouter()
router_v1.register(
'users/subscriptions',
views.SubscriptionsViewSet,
basename='subscriptions',
)
router_v1.register('users', views.UserViewSet, basename='users')
urlpatterns = [
path('', include(router_v1.urls)),
path('', include('djoser.urls')),
path('users/<int:pk>/subscribe/', views.subscribe),
path('auth/', include('djoser.urls.authtoken')),
path('auth/', include('djoser.urls')),
]
| 25.434783
| 64
| 0.707692
|
680a93ca57eb3545ee9c44b819d21a5f67b3fa65
| 568
|
py
|
Python
|
tests/test_bowa.py
|
augustogoulart/bowa
|
340581e21a02df7a80315a69f46275ee93e454c1
|
[
"MIT"
] | null | null | null |
tests/test_bowa.py
|
augustogoulart/bowa
|
340581e21a02df7a80315a69f46275ee93e454c1
|
[
"MIT"
] | 1
|
2018-10-29T00:51:27.000Z
|
2018-10-29T00:51:27.000Z
|
tests/test_bowa.py
|
augustogoulart/bowa
|
340581e21a02df7a80315a69f46275ee93e454c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `bowa` package."""
import pytest
from bowa import bowa
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
| 21.846154
| 78
| 0.690141
|
7f3b3ab664a2a85e369ddbf28f99e7cab16012df
| 4,652
|
py
|
Python
|
core/domain/learner_group_domain.py
|
nithinrdy/oppia
|
b8b33a4f3fbd44f3b969239e6ee1abb3a3ef9d5f
|
[
"Apache-2.0"
] | null | null | null |
core/domain/learner_group_domain.py
|
nithinrdy/oppia
|
b8b33a4f3fbd44f3b969239e6ee1abb3a3ef9d5f
|
[
"Apache-2.0"
] | null | null | null |
core/domain/learner_group_domain.py
|
nithinrdy/oppia
|
b8b33a4f3fbd44f3b969239e6ee1abb3a3ef9d5f
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for Learner Groups."""
from __future__ import annotations
from core import utils
from typing import List
from typing_extensions import TypedDict
class LearnerGroupDict(TypedDict):
"""Dictionary for LearnerGroup domain object."""
group_id: str
title: str
description: str
facilitator_user_ids: List[str]
student_user_ids: List[str]
invited_student_user_ids: List[str]
subtopic_page_ids: List[str]
story_ids: List[str]
class LearnerGroup:
"""Domain object for learner group."""
def __init__(
self,
group_id: str,
title: str,
description: str,
facilitator_user_ids: List[str],
student_user_ids: List[str],
invited_student_user_ids: List[str],
subtopic_page_ids: List[str],
story_ids: List[str]
) -> None:
"""Constructs a LearnerGroup domain object.
Attributes:
group_id: str. The unique ID of the learner group.
title: str. The title of the learner group.
description: str. The description of the learner group.
facilitator_user_ids: List[str]. The list of user ids of
facilitators of the learner group.
student_user_ids: List[str]. The list of user ids of students
of the learner group.
invited_student_user_ids: List[str]. The list of user ids of the
users invited to join the learner group as a student.
subtopic_page_ids: List[str]. The list of subtopic page ids that
are part of the learner group syllabus. A subtopic page id is
depicted as topicId:subtopicId string.
story_ids: List[str]. The list of story ids of the learner group.
"""
self.group_id = group_id
self.title = title
self.description = description
self.facilitator_user_ids = facilitator_user_ids
self.student_user_ids = student_user_ids
self.invited_student_user_ids = invited_student_user_ids
self.subtopic_page_ids = subtopic_page_ids
self.story_ids = story_ids
def to_dict(self) -> LearnerGroupDict:
"""Convert the LearnerGroup domain instance into a dictionary
form with its keys as the attributes of this class.
Returns:
dict. A dictionary containing the LearnerGroup class
information in a dictionary form.
"""
return {
'group_id': self.group_id,
'title': self.title,
'description': self.description,
'facilitator_user_ids': self.facilitator_user_ids,
'student_user_ids': self.student_user_ids,
'invited_student_user_ids': self.invited_student_user_ids,
'subtopic_page_ids': self.subtopic_page_ids,
'story_ids': self.story_ids
}
def validate(self) -> None:
"""Validates the LearnerGroup domain object.
Raises:
ValidationError. One or more attributes of the LearnerGroup
are invalid.
"""
if len(self.facilitator_user_ids) < 1:
raise utils.ValidationError(
'Expected learner group to have at least one facilitator.')
invited_student_set = set(self.invited_student_user_ids)
student_set = set(self.student_user_ids)
if len(invited_student_set.intersection(student_set)) > 0:
raise utils.ValidationError(
'Learner group student cannot be invited to join the group.')
facilitator_set = set(self.facilitator_user_ids)
if len(facilitator_set.intersection(student_set)) > 0:
raise utils.ValidationError(
'Learner group facilitator cannot be a student of the group.')
if len(facilitator_set.intersection(invited_student_set)) > 0:
raise utils.ValidationError(
'Learner group facilitator cannot be invited to '
'join the group.')
| 36.062016
| 78
| 0.656707
|
0082b5a981d5e8e64e4328ebe8f56f73e22f01f7
| 478
|
py
|
Python
|
IntroductoryProblems/Python/permutations.py
|
ashish-bisht/CSES-problem-set
|
ebf0bbb55c66398e4391d7fa25a8a2d1fa906146
|
[
"MIT"
] | null | null | null |
IntroductoryProblems/Python/permutations.py
|
ashish-bisht/CSES-problem-set
|
ebf0bbb55c66398e4391d7fa25a8a2d1fa906146
|
[
"MIT"
] | null | null | null |
IntroductoryProblems/Python/permutations.py
|
ashish-bisht/CSES-problem-set
|
ebf0bbb55c66398e4391d7fa25a8a2d1fa906146
|
[
"MIT"
] | 1
|
2020-12-07T15:50:52.000Z
|
2020-12-07T15:50:52.000Z
|
def permutations(num):
if num <= 3:
return "No Solution"
beautiful = [-1] * num
left = 0
right = num//2
cur_num = num
while left < right and right < num:
if cur_num % 2 == 0:
beautiful[left] = cur_num
left += 1
else:
beautiful[right] = cur_num
right += 1
cur_num -= 1
return beautiful
print(permutations(5)) # 4,2,5,3,1
print(permutations(6))
print(permutations(3))
| 20.782609
| 39
| 0.525105
|
cde540a9a055f4b1a392c90b411caa58a5194963
| 1,950
|
py
|
Python
|
firmware/CsvWriter.py
|
Elleo/aq-device
|
be2cdd5aeaad64fddfbac522c55a2320fce4b5de
|
[
"Apache-2.0"
] | 1
|
2022-03-28T10:00:24.000Z
|
2022-03-28T10:00:24.000Z
|
firmware/CsvWriter.py
|
Elleo/aq-device
|
be2cdd5aeaad64fddfbac522c55a2320fce4b5de
|
[
"Apache-2.0"
] | 13
|
2021-12-11T08:37:49.000Z
|
2022-03-31T12:03:06.000Z
|
firmware/CsvWriter.py
|
Elleo/aq-device
|
be2cdd5aeaad64fddfbac522c55a2320fce4b5de
|
[
"Apache-2.0"
] | 1
|
2022-03-27T12:58:10.000Z
|
2022-03-27T12:58:10.000Z
|
# Copyright (c) 2021 RS Components Ltd
# SPDX-License-Identifier: MIT License
'''
CSV writer helper class
'''
from DesignSpark.ESDK import AppLogger
import csv
import copy
from datetime import datetime
class CsvWriter:
def __init__(self, friendlyName='', debug=False, hwid=0, loggingLevel='full'):
self.logger = AppLogger.getLogger(__name__, debug, loggingLevel)
self.hardwareId = hwid
self.friendlyName = friendlyName
self.csvFilename = "/aq/data/{fn}_{hwid}_{ts}.csv".format(fn=self.friendlyName, \
hwid=self.hardwareId, \
ts=datetime.utcnow().strftime("%Y_%m_%d-%H_%M_%S"))
# Should more sensors be added, this should be updated to reflect available values
self.csvColumns = ['timestamp', 'temperature', 'humidity', 'vocIndex', 'co2', 'pm1.0', 'pm2.5', 'pm4.0', 'pm10']
with open(self.csvFilename, 'w') as fh:
csvWriter = csv.DictWriter(fh, fieldnames=self.csvColumns)
csvWriter.writeheader()
def addRow(self, sensorData):
sensorDataArray = copy.deepcopy(sensorData)
try:
# Strip other keys so all we're left with is sensor data to iterate over
location = sensorDataArray.pop("geohash", None)
hwid = sensorDataArray.pop("hardwareId", None)
csvSensorDataArray = {'timestamp': int(datetime.utcnow().timestamp())}
for sensorType, sd in sensorDataArray.items():
sd.pop("sensor", None) # Remove sensor type from data array
csvSensorDataArray.update(sd)
self.logger.debug("CSV data dict {}".format(csvSensorDataArray))
with open(self.csvFilename, 'a') as fh:
csvWriter = csv.DictWriter(fh, fieldnames=self.csvColumns)
csvWriter.writerow(csvSensorDataArray)
except Exception as e:
self.logger.error("Could not write CSV data, reason {}".format(e))
| 39.795918
| 120
| 0.641538
|
9450fe6a9f80c221b04a842a238b82702317550b
| 346
|
py
|
Python
|
sherlock/codelib/generator/temp_variable.py
|
Luavis/sherlock.py
|
6974244c5ddb2f5e41aea2fbc7f913917fd15532
|
[
"MIT"
] | 55
|
2017-02-12T08:22:44.000Z
|
2022-01-08T12:34:53.000Z
|
sherlock/codelib/generator/temp_variable.py
|
Ronlin1/sherlock.py
|
6974244c5ddb2f5e41aea2fbc7f913917fd15532
|
[
"MIT"
] | 3
|
2017-02-24T16:23:40.000Z
|
2022-02-05T19:54:26.000Z
|
sherlock/codelib/generator/temp_variable.py
|
Ronlin1/sherlock.py
|
6974244c5ddb2f5e41aea2fbc7f913917fd15532
|
[
"MIT"
] | 7
|
2017-01-17T17:34:59.000Z
|
2022-02-15T15:23:17.000Z
|
class TempVariableManager(object):
def __init__(self, prefix_name):
self.prefix_name = prefix_name
self.variable_id = 0
def get_new_name(self):
self.variable_id += 1
return self.get_last_variable_name()
def get_last_variable_name(self):
        return '%s_%d' % (self.prefix_name, self.variable_id)
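# Hypothetical usage sketch, not part of the original module, showing the
# naming scheme the class produces (the prefix below is invented):
#   manager = TempVariableManager('__sl_tmp')
#   manager.get_new_name()            # -> '__sl_tmp_1'
#   manager.get_new_name()            # -> '__sl_tmp_2'
#   manager.get_last_variable_name()  # -> '__sl_tmp_2'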
| 26.615385
| 61
| 0.67341
|
4602f1ea22fbd3d3e8bebee92993438aef7dc61e
| 2,606
|
py
|
Python
|
src/wechaty/user/url_link.py
|
jiaqianjing/python-wechaty
|
c2565b370de2142efb22be6dc0a5b998440dd85e
|
[
"Apache-2.0"
] | 1
|
2021-04-14T08:03:45.000Z
|
2021-04-14T08:03:45.000Z
|
src/wechaty/user/url_link.py
|
jiaqianjing/python-wechaty
|
c2565b370de2142efb22be6dc0a5b998440dd85e
|
[
"Apache-2.0"
] | 6
|
2020-10-09T20:04:33.000Z
|
2022-01-22T10:43:10.000Z
|
src/wechaty/user/url_link.py
|
jiaqianjing/python-wechaty
|
c2565b370de2142efb22be6dc0a5b998440dd85e
|
[
"Apache-2.0"
] | null | null | null |
"""
UrlLink for Contact Message
"""
from __future__ import annotations
from typing import (
Type,
Union
)
import requests
from lxml import etree # type: ignore
from wechaty_puppet import UrlLinkPayload, get_logger # type: ignore
log = get_logger('UrlLink')
class UrlLink:
"""
    url_link object which handles the url_link content
"""
def __init__(
self,
payload: UrlLinkPayload,
):
"""
initialization
:param payload:
"""
self.payload: UrlLinkPayload = payload
@classmethod
def create(
cls: Type[UrlLink],
url: str, title: Union[str, None], thumbnail_url: Union[str, None], description: Union[str, None]
) -> UrlLink:
"""
create urllink from url string
"""
log.info('create url_link for %s', url)
html = etree.HTML(requests.get(url).text)
if title is None:
title = html.xpath('//meta[@property="og:title"]/@content')
title = title[0] if len(title) else url
if thumbnail_url is None:
thumbnail_url = html.xpath('//meta[@property="og:image"]/@content')
            thumbnail_url = thumbnail_url[0] if len(thumbnail_url) else ""
if description is None:
description = html.xpath('//meta[@property="og:description"]/@content')
description = description[0] if len(description) else ""
payload = UrlLinkPayload(
title=title,
url=url,
thumbnailUrl=thumbnail_url,
description=description
)
return UrlLink(payload)
def __str__(self):
"""
UrlLink string format output
:return:
"""
return 'UrlLink<%s>' % self.payload.url
@property
def title(self) -> str:
"""
get UrlLink title
:return:
"""
if self.payload.title is None:
return ''
return self.payload.title
@property
def thumbnailUrl(self) -> str:
"""
get thumbnail url
:return:
"""
if self.payload.thumbnailUrl is None:
return ''
return self.payload.thumbnailUrl
@property
def description(self) -> str:
"""
get description
:return:
"""
if self.payload.description is None:
return ''
return self.payload.description
@property
def url(self) -> str:
"""
get url
:return:
"""
if self.payload.url is None:
return ''
return self.payload.url
| 24.819048
| 105
| 0.553722
|
49aa2e682ed635138021302e996260d06f870457
| 11,803
|
py
|
Python
|
ssseg/modules/datasets/coco.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | 1
|
2021-05-28T06:42:37.000Z
|
2021-05-28T06:42:37.000Z
|
ssseg/modules/datasets/coco.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
ssseg/modules/datasets/coco.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
'''
Function:
load the coco dataset
Author:
Zhenchao Jin
'''
import os
import cv2
import numpy as np
import pandas as pd
from .base import *
from tqdm import tqdm
'''coco dataset pretraining for VOC'''
class COCODataset(BaseDataset):
num_classes = 21
classnames = ['__background__', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorcycle', 'person',
'potted-plant', 'sheep', 'sofa', 'train', 'tv']
valid_clsids = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72]
assert num_classes == len(classnames)
def __init__(self, mode, logger_handle, dataset_cfg, **kwargs):
super(COCODataset, self).__init__(mode, logger_handle, dataset_cfg, **kwargs)
from pycocotools import mask
from pycocotools.coco import COCO
# obtain the dirs
rootdir = dataset_cfg['rootdir']
self.image_dir = os.path.join(rootdir, f"{dataset_cfg['set']}2017")
        # obtain imageids
self.annfilepath = os.path.join(rootdir, f"annotations/instances_{dataset_cfg['set']}2017.json")
self.coco_api = COCO(self.annfilepath)
self.cocomask_api = mask
self.imageids = []
imageids_bar = tqdm(list(self.coco_api.imgs.keys()))
for imageid in imageids_bar:
imageids_bar.set_description('Preprocess imageid %s' % imageid)
target = self.coco_api.loadAnns(self.coco_api.getAnnIds(imgIds=imageid))
image_meta = self.coco_api.loadImgs(imageid)[0]
segmentation = self.getsegmentation(target, image_meta['height'], image_meta['width'])
if (segmentation > 0).sum() > 1000:
self.imageids.append(imageid)
'''pull item'''
def __getitem__(self, index):
imageid = self.imageids[index]
image_meta = self.coco_api.loadImgs(imageid)[0]
imagepath = os.path.join(self.image_dir, image_meta['file_name'])
# read image
image = cv2.imread(imagepath)
# read annotation
if self.dataset_cfg.get('with_ann', True):
target = self.coco_api.loadAnns(self.coco_api.getAnnIds(imgIds=imageid))
segmentation = self.getsegmentation(target, image_meta['height'], image_meta['width'])
else:
segmentation = np.zeros((image.shape[0], image.shape[1]))
# construct sample
sample = {'image': image, 'segmentation': segmentation, 'width': image.shape[1], 'height': image.shape[0]}
if self.mode == 'TEST':
sample.update({'groundtruth': segmentation.copy()})
sample.update({'id': imageid})
# preprocess and return sample
if self.mode == 'TRAIN':
sample = self.synctransform(sample, 'without_totensor_normalize_pad')
sample['edge'] = self.generateedge(sample['segmentation'].copy())
sample = self.synctransform(sample, 'only_totensor_normalize_pad')
else:
sample = self.synctransform(sample, 'all')
return sample
'''length'''
def __len__(self):
return len(self.imageids)
'''get segmentation mask'''
def getsegmentation(self, target, height, width):
segmentation = np.zeros((height, width), dtype=np.uint8)
for instance in target:
rle = self.cocomask_api.frPyObjects(instance['segmentation'], height, width)
mask = self.cocomask_api.decode(rle)
clsid = instance['category_id']
if clsid not in self.valid_clsids: continue
label = self.valid_clsids.index(clsid)
if len(mask.shape) < 3: segmentation[:, :] += (segmentation == 0) * (mask * label)
else: segmentation[:, :] += (segmentation == 0) * ((np.sum(mask, axis=2) > 0) * label).astype(np.uint8)
return segmentation
'''coco stuff 10k dataset'''
class COCOStuff10kDataset(BaseDataset):
num_classes = 182
classnames = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'street sign', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella', 'shoe', 'eye glasses', 'handbag',
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk', 'toilet', 'door', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
'blender', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'hair brush', 'banner',
'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other',
'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', 'desk-stuff', 'dirt', 'door-stuff',
'fence', 'floor-marble', 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other',
'fruit', 'furniture-other', 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal',
'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', 'plant-other', 'plastic',
'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea',
'shelf', 'sky-other', 'skyscraper', 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table',
'tent', 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel',
'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', 'window-blind', 'window-other', 'wood']
clsid2label = {0: 255}
for i in range(1, num_classes+1): clsid2label[i] = i - 1
assert num_classes == len(classnames)
def __init__(self, mode, logger_handle, dataset_cfg, **kwargs):
super(COCOStuff10kDataset, self).__init__(mode, logger_handle, dataset_cfg, **kwargs)
# obtain the dirs
rootdir = dataset_cfg['rootdir']
self.image_dir = os.path.join(rootdir, 'images')
self.ann_dir = os.path.join(rootdir, 'annotations')
        # obtain imageids
df = pd.read_csv(os.path.join(rootdir, 'imageLists', dataset_cfg['set']+'.txt'), names=['imageids'])
self.imageids = df['imageids'].values
self.imageids = [str(_id) for _id in self.imageids]
'''pull item'''
def __getitem__(self, index):
imageid = self.imageids[index]
imagepath = os.path.join(self.image_dir, imageid+'.jpg')
annpath = os.path.join(self.ann_dir, imageid+'.mat')
sample = self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
sample.update({'id': imageid})
if self.mode == 'TRAIN':
sample = self.synctransform(sample, 'without_totensor_normalize_pad')
sample['edge'] = self.generateedge(sample['segmentation'].copy())
sample = self.synctransform(sample, 'only_totensor_normalize_pad')
else:
sample = self.synctransform(sample, 'all')
return sample
'''length'''
def __len__(self):
return len(self.imageids)
'''coco stuff dataset'''
class COCOStuffDataset(BaseDataset):
num_classes = 182
classnames = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'street sign', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella', 'shoe', 'eye glasses', 'handbag',
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk', 'toilet', 'door', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
'blender', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'hair brush', 'banner',
'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other',
'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', 'desk-stuff', 'dirt', 'door-stuff',
'fence', 'floor-marble', 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other',
'fruit', 'furniture-other', 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal',
'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', 'plant-other', 'plastic',
'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea',
'shelf', 'sky-other', 'skyscraper', 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table',
'tent', 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel',
'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', 'window-blind', 'window-other', 'wood']
clsid2label = {0: 255}
for i in range(1, num_classes+1): clsid2label[i] = i - 1
assert num_classes == len(classnames)
def __init__(self, mode, logger_handle, dataset_cfg, **kwargs):
super(COCOStuffDataset, self).__init__(mode, logger_handle, dataset_cfg, **kwargs)
from pycocotools import mask
from pycocotools.coco import COCO
# obtain the dirs
rootdir = dataset_cfg['rootdir']
self.image_dir = os.path.join(rootdir, f"{dataset_cfg['set']}2017")
        # obtain imageids
self.annfilepath = os.path.join(rootdir, f"annotations/stuff_{dataset_cfg['set']}2017.json")
self.coco_api = COCO(self.annfilepath)
self.imageids = list(self.coco_api.imgs.keys())
'''pull item'''
def __getitem__(self, index):
imageid = self.imageids[index]
image_meta = self.coco_api.loadImgs(imageid)[0]
imagepath = os.path.join(self.image_dir, image_meta['file_name'])
annpath = imagepath.replace('jpg', 'png')
sample = self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
sample.update({'id': imageid})
if self.mode == 'TRAIN':
sample = self.synctransform(sample, 'without_totensor_normalize_pad')
sample['edge'] = self.generateedge(sample['segmentation'].copy())
sample = self.synctransform(sample, 'only_totensor_normalize_pad')
else:
sample = self.synctransform(sample, 'all')
return sample
'''length'''
def __len__(self):
return len(self.imageids)
| 62.121053
| 134
| 0.592392
|
462c897450135b76a5d490afa1d482b498496611
| 3,430
|
py
|
Python
|
json_navigator.py
|
TimKozak/JsonNavigator
|
f0591a98e725454877eb37a0f5fa29b9269c88a8
|
[
"MIT"
] | null | null | null |
json_navigator.py
|
TimKozak/JsonNavigator
|
f0591a98e725454877eb37a0f5fa29b9269c88a8
|
[
"MIT"
] | null | null | null |
json_navigator.py
|
TimKozak/JsonNavigator
|
f0591a98e725454877eb37a0f5fa29b9269c88a8
|
[
"MIT"
] | null | null | null |
"""
Module for Twitter Data json navigation
"""
import json
from pprint import pprint
import requests
def twitter_api():
"""
Get json from twitter
"""
while True:
try:
your_token = input("Paste your token: ")
tweet_tag = input("Enter twitter tag: @")
base_url = "https://api.twitter.com/"
bearer_token = your_token
search_url = '{}1.1/friends/list.json'.format(base_url)
search_headers = {
'Authorization': 'Bearer {}'.format(bearer_token)
}
search_params = {
'screen_name': f'@{tweet_tag}',
'count': 15
}
response = requests.get(
search_url, headers=search_headers, params=search_params)
return response.json()['users']
except Exception:
print("Invalid token or tag")
endpoint = input("Do you want to end? (y/n) ")
if endpoint == "y":
return "Thanks for using the app"
def find_by_keys(twitter_dict: str):
"""
Extract values from json by keys.
"""
twitter = twitter_dict
print("---"*10)
pprint(twitter_dict)
print("---"*10)
while True:
if isinstance(twitter, dict):
try:
                key = input("Pick an existing key and receive info: ")
if key in twitter.keys():
print("---"*10)
pprint(twitter[key])
print("---"*10)
if isinstance(twitter[key], dict):
print("---"*10)
pprint(list(twitter[key]))
print("---"*10)
twitter = twitter[key]
elif isinstance(twitter[key], list):
print("---"*10)
pprint(twitter[key])
print("---"*10)
twitter = twitter[key]
else:
return twitter[key]
except KeyError:
print("Invalid key")
endpoint = input("Do you want to end? (y/n) ")
if endpoint == "y":
return "Thanks for using the app"
elif isinstance(twitter, list):
try:
index = int(
input("Pick an existing index and recieve info: "))
if index in range(len(twitter)):
print("---"*10)
pprint(twitter[index])
print("---"*10)
if isinstance(twitter[index], dict):
print("---"*10)
pprint(list(twitter[index]))
print("---"*10)
twitter = twitter[index]
elif isinstance(twitter[index], list):
print("---"*10)
pprint(twitter[index])
print("---"*10)
twitter = twitter[index]
else:
return twitter[index]
except IndexError:
print("Invalid index")
endpoint = input("Do you want to end? (y/n) ")
if endpoint == "y":
return "Thanks for using the app"
if __name__ == "__main__":
my_json = twitter_api()
if my_json != "Thanks for using the app":
find_by_keys(my_json)
| 29.067797
| 73
| 0.453061
|
cc6f910bd20b9fa986928b40b5317beb0c76f26e
| 963
|
py
|
Python
|
AllAgents/bob/coordinates/is_coordinates.py
|
42-AI/TCP-Unity-Client
|
b4dcce1776d0f0b1c7a85584e03e4e7d603532db
|
[
"MIT"
] | null | null | null |
AllAgents/bob/coordinates/is_coordinates.py
|
42-AI/TCP-Unity-Client
|
b4dcce1776d0f0b1c7a85584e03e4e7d603532db
|
[
"MIT"
] | null | null | null |
AllAgents/bob/coordinates/is_coordinates.py
|
42-AI/TCP-Unity-Client
|
b4dcce1776d0f0b1c7a85584e03e4e7d603532db
|
[
"MIT"
] | null | null | null |
from bomberman import defines
from bomberman.defines import t_action
from bomberman.states.StatePlayer import StatePlayer
from typing import List, Tuple, Union
Coordinates = Union[Tuple[int, int], Tuple[float, float]]
def is_valid_coordinates(board: List[List[str]], coordinates: Coordinates) -> bool:
x, z = coordinates
if not (0 <= x < 11):
return False
if not (0 <= z < 11):
return False
item = board[z][x]
if item in ["W"]:
return False
return True
def is_safe_coordinates(board: List[List[str]], coordinates: Coordinates) -> bool:
if not is_valid_coordinates(board, coordinates):
return False
x, z = coordinates
item = board[z][x]
if item in ["B", "E"]:
return False
return True
def is_walkable_coordinates(board: List[List[str]], coordinates: Coordinates) -> bool:
if not is_valid_coordinates(board, coordinates):
return False
x, z = coordinates
item = board[z][x]
if item in ["C", "W"]:
return False
return True
| 25.342105
| 86
| 0.71028
|
b2590143a9a43ad6b59c4fd8c1b3289464e29df6
| 11,386
|
py
|
Python
|
src/training_rnn.py
|
sylvainlapeyrade/LSTM_KDD99_Keras
|
e07c05803dfd4ad454cf5043531bb3e205ec022b
|
[
"MIT"
] | 17
|
2019-08-08T00:14:25.000Z
|
2022-03-04T14:55:17.000Z
|
src/training_rnn.py
|
sylvainlapeyrade/LSTM_KDD99_Keras
|
e07c05803dfd4ad454cf5043531bb3e205ec022b
|
[
"MIT"
] | 6
|
2019-12-17T09:59:02.000Z
|
2020-07-27T22:24:47.000Z
|
src/training_rnn.py
|
sylvainlapeyrade/RNN_Intrusion-Detection_Keras
|
e07c05803dfd4ad454cf5043531bb3e205ec022b
|
[
"MIT"
] | 6
|
2019-08-08T00:14:29.000Z
|
2021-12-11T22:41:25.000Z
|
import tensorflow
import pandas as pd
import numpy as np
import os
from time import time
from keras.layers import Dense, Dropout, CuDNNLSTM, CuDNNGRU, RNN, LSTM, GRU
from keras import Sequential
from keras.callbacks import TensorBoard, ModelCheckpoint
from kdd_processing import kdd_encoding
from unsw_processing import unsw_encoding
from results_visualisation import print_results
# Allows tensorflow to run multiple sessions (multiple trainings simultaneously)
# Comment the 3 following lines if causing issues
# config = tensorflow.ConfigProto()
# config.gpu_options.allow_growth = True
# sess = tensorflow.Session(config=config)
csv_values = ['epochs', 'acc', 'loss', 'val_acc', 'val_loss', "train_data",
"features_nb", 'loss_fct', 'optimizer', 'activation_fct',
'layer_nb', 'unit_nb', 'batch_size', 'dropout', 'cell_type',
'encoder']
csv_best_res = ['param', 'value', 'min_mean_val_loss']
# epochs: Number of iteration of the training dataset
# train_data: Number of rows in training dataset (see processing files)
# features_nb: Number of features kept as input (see processing files)
# loss fct: Loss function used in training
# optimizer: Optimizer used in training
# activation_fct: Activation function used in outputs layer
# layer_nb: Number of hidden layers in the network
# unit_nb: Number of cells for each layer
# batch_size: Number of elements observed before updating weights
# dropout: Fraction of inputs randomly discarded
# cell_type: Type of cell ['CuDNNLSTM', 'CuDNNGRU', 'RNN', 'LSTM', 'GRU']
# encoder: Encoding performed (see processing files)
# dataset: Processing file to be called ['kdd', 'unsw']
# training_nb: Number of model to be trained with the same params
# resultstocsv: Whether to save results to csv
# resultstologs: Whether to save models and tensorboard logs
# showresults: Whether to show detailed statistics about the trained model
# shuffle: Whether to shuffle the batch sequences during training
# ***** REFERENCES PARAMETERS *****
params = {'epochs': 3, 'train_data': 494021, 'features_nb': 4,
'loss_fct': 'mse', 'optimizer': 'rmsprop',
'activation_fct': 'sigmoid', 'layer_nb': 1, 'unit_nb': 128,
'batch_size': 1024, 'dropout': 0.2, 'cell_type': 'CuDNNLSTM',
'encoder': 'labelencoder', 'dataset': 'kdd', 'training_nb': 1,
'resultstocsv': False, 'resultstologs': False, 'showresults': True,
'shuffle': True}
# ***** VARIABLE PARAMETERS *****
params_var = {'encoder': ['standardscaler', 'labelencoder',
'minmaxscaler01', 'minmaxscaler11',
'ordinalencoder'],
'optimizer': ['adam', 'sgd', 'rmsprop', 'nadam', 'adamax',
'adadelta'],
'activation_fct': ['sigmoid', 'softmax', 'relu', 'tanh'],
'layer_nb': [1, 2, 3, 4],
'unit_nb': [4, 8, 32, 64, 128, 256],
'dropout': [0.1, 0.2, 0.3, 0.4],
'batch_size': [512, 1024, 2048],
# 'features_nb': [4, 8, 41],
# 'train_data': [494021, 4898431, 125973, 25191],
# 'cell_type': ['CuDNNLSTM', 'CuDNNGRU', 'RNN', 'LSTM', 'GRU'],
# 'dataset : ['kdd', 'unsw']
}
model_path = './models/'
logs_path = './logs/'
res_path = './results/' + 'testcsv/'
if params['resultstologs'] is True:
res_name = str(params['train_data']) + '_' + str(params['features_nb']) +\
'_' + params['loss_fct'] + '_' + params['optimizer'] + '_' +\
params['activation_fct'] + '_' + str(params['layer_nb']) + '_' +\
str(params['unit_nb']) + '_' + str(params['batch_size']) + '_' +\
str(params['dropout']) + '_' + params['cell_type'] + '_' +\
params['encoder'] + '_' + str(time())
# Encode the dataset and return: x_train, x_test, y_train, y_test
def load_data():
if params['dataset'] == 'kdd':
x_train, x_test, y_train, y_test = kdd_encoding(params)
elif params['dataset'] == 'unsw':
x_train, x_test, y_train, y_test = unsw_encoding(params)
# Reshape the inputs in the accepted model format
x_train = np.array(x_train).reshape([-1, x_train.shape[1], 1])
x_test = np.array(x_test).reshape([-1, x_test.shape[1], 1])
return x_train, x_test, y_train, y_test
# Create and train a model
def train_model(x_train, x_test, y_train, y_test):
if params['cell_type'] == 'CuDNNLSTM':
cell = CuDNNLSTM
elif params['cell_type'] == 'CuDNNGRU':
cell = CuDNNGRU
elif params['cell_type'] == 'RNN':
cell = RNN
elif params['cell_type'] == 'LSTM':
cell = LSTM
elif params['cell_type'] == 'GRU':
cell = GRU
# Create a Sequential layer, one layer after the other
model = Sequential()
# If there is more than 1 layer, the first must return sequences
for _ in range(params['layer_nb']-1):
model.add(cell(units=params['unit_nb'],
input_shape=(x_train.shape[1:]), return_sequences=True))
model.add(Dropout(rate=params['dropout']))
# If there is only 1 layer, it must not return sequences
if(params['layer_nb'] == 1):
model.add(cell(units=params['unit_nb'], input_shape=x_train.shape[1:]))
model.add(Dropout(rate=params['dropout']))
else: # If there is more than 1, the following must not return sequences
model.add(cell(units=params['unit_nb']))
model.add(Dropout(rate=params['dropout']))
# Outputs layer
model.add(Dense(units=y_train.shape[1],
activation=params['activation_fct']))
model.compile(loss=params['loss_fct'], optimizer=params['optimizer'],
metrics=['accuracy'])
    # Create the model and logs folders if they do not exist
if params['resultstologs'] is True:
if not os.path.exists(logs_path):
os.makedirs(logs_path)
if not os.path.exists(model_path):
os.makedirs(model_path)
save_model = ModelCheckpoint(filepath=model_path + res_name,
monitor='val_acc', save_best_only=True)
tensorboard = TensorBoard(logs_path+res_name)
callbacks = [save_model, tensorboard]
else:
callbacks = None
model.summary()
hist = model.fit(x_train, y_train, params['batch_size'], params['epochs'],
verbose=1, shuffle=params['shuffle'],
validation_data=(x_test, y_test), callbacks=callbacks)
if params['showresults'] is True:
print_results(params, model, x_train, x_test, y_train, y_test)
return hist
def res_to_csv():
ref_min_val_loss = 10 # Minimal reference loss value
nsmall = 5 # Number of val loss for the mean val loss
    # Create the results directory if it doesn't exist
if not os.path.exists(res_path):
os.makedirs(res_path)
full_res_path = res_path + 'full_results.csv'
best_res_path = res_path + 'best_result.csv'
# Initialize results and best_results dataframes
results_df = pd.DataFrame(columns=csv_values)
results_df.to_csv(full_res_path, index=False)
best_res_df = pd.DataFrame(columns=csv_best_res)
def fill_dataframe(df, history, epoch):
df = df.append({'epochs': epoch,
'acc': history.history['acc'][epoch],
'loss': history.history['loss'][epoch],
'val_acc': history.history['val_acc'][epoch],
'val_loss': history.history['val_loss'][epoch],
'train_data': params['train_data'],
'features_nb': params['features_nb'],
'loss_fct': params['loss_fct'],
'optimizer': params['optimizer'],
'activation_fct': params['activation_fct'],
'layer_nb': params['layer_nb'],
'unit_nb': params['unit_nb'],
'batch_size': params['batch_size'],
'dropout': params['dropout'],
'cell_type': params['cell_type'],
'encoder': params['encoder']},
ignore_index=True)
return df
    # Take the mean of the n smallest val_loss values for each feature value
def min_mean_val_loss(feature):
# Load the results previously saved as csv for the features
df = pd.read_csv(res_path+feature+".csv", index_col=False)
names = df[feature].unique().tolist()
df_loss = pd.DataFrame(columns=names)
# For each value of the feature, compare the n smallest val loss
for i in range(len(names)):
df_value_loss = df.loc[df[feature] == names[i]]
df_value_loss = df_value_loss.nsmallest(nsmall, 'val_loss')
df_loss[names[i]] = np.array(df_value_loss['val_loss'])
# Return the index and the value of the feature
# with the smallest mean val loss
return df_loss.mean().idxmin(), df_loss.mean().min()
for feature in params_var.keys():
results_df.to_csv(res_path + feature + ".csv", index=False)
save_feature_value = params[feature]
for feature_value in params_var[feature]:
df_value = pd.DataFrame(columns=csv_values)
params[feature] = feature_value
if feature == 'encoder' or feature == 'train_data':
                # The encoding will have to change, so the data are reloaded
x_train, x_test, y_train, y_test = load_data()
for _ in range(params['training_nb']):
history = train_model(x_train, x_test, y_train, y_test)
                # The dataframe is filled for each epoch
for epoch in range(params['epochs']):
df_value = fill_dataframe(df_value, history, epoch)
# At the end of the training, results are saved in csv
df_value.to_csv(full_res_path, header=False, index=False, mode='a')
df_value.to_csv(res_path + feature + ".csv", header=False,
index=False, mode='a')
        # Once every value of this feature has been tested, find the smallest mean val loss
feature_value_min_loss, min_mean_loss = min_mean_val_loss(feature)
        # Compare this feature's best mean val loss with the current reference.
        # If it is better, it becomes the new reference and the corresponding
        # feature value is kept for the rest of the search.
if min_mean_loss < ref_min_val_loss:
params[feature] = feature_value_min_loss
ref_min_val_loss = min_mean_loss
else:
params[feature] = save_feature_value
        # Record the best value found for this feature together with its mean val loss
best_res_df = best_res_df.append({'param': feature,
'value': params[feature],
'min_mean_val_loss': min_mean_loss},
ignore_index=True)
best_res_df.to_csv(best_res_path, index=False)
if __name__ == "__main__":
x_train, x_test, y_train, y_test = load_data()
for i in range(params['training_nb']):
if params['resultstocsv'] is False:
train_model(x_train, x_test, y_train, y_test)
else:
res_to_csv()
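# --- Illustrative sketch, not part of the original script ---
# The search above expects `params_var` to map each tunable key of `params`
# to the candidate values to try. A minimal (assumed) grid could look like:
#
#     params_var = {
#         'unit_nb': [32, 64, 128],
#         'dropout': [0.2, 0.5],
#         'optimizer': ['adam', 'rmsprop'],
#     }
#
# res_to_csv() then trains `training_nb` models per candidate value, averages
# the `nsmall` lowest validation losses, and keeps the best value of each
# feature before moving on to the next one (a greedy, one-feature-at-a-time
# search).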
avg_line_length: 43.292776 | max_line_length: 79 | alphanum_fraction: 0.61277
hexsha: ca23cc6c336a31497b9fcdc5ab504fcc228eb340 | size: 501 | ext: py | lang: Python
path: pyscript/fpaid.py | repo: polossk/Project-Mizuki-and-Hagane | head_hexsha: 36f9d57f0de27bd374f07d2f9aacc022c73e8d9d | licenses: ["MIT"]
stars: 1 (2017-07-28T06:35:09.000Z to 2017-07-28T06:35:09.000Z) | issues: null | forks: null
from __future__ import print_function
from pymongo import MongoClient
import sys, time
def main():
if len(sys.argv) < 2:
sys.exit()
numbers = sys.argv[1:]
c = MongoClient('mongodb://username:password@localhost:23333/test?authSource=admin')
posts = c.test.users
for uid in numbers:
posts.update({"usershowid": uid}, {"$set": {"paidconfirm": True, "paid": True}})
print("update uid = {0} with paidconfirm := True.".format(uid))
print(time.time(), '\n')
if __name__ == '__main__':
main()
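# Illustrative usage sketch (assumed, not from the original repository): the
# script takes one or more user show ids on the command line, e.g.
#
#     python fpaid.py 10001 10002
#
# and marks those users as paid/confirmed in the `test.users` collection.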
avg_line_length: 26.368421 | max_line_length: 85 | alphanum_fraction: 0.686627
hexsha: 179abf5be1018db6373c714b3c3d8d4b3024d038 | size: 1,928 | ext: py | lang: Python
path: bluebottle/payments_beyonic/migrations/0001_initial.py | repo: terrameijar/bluebottle | head_hexsha: b4f5ba9c4f03e678fdd36091b29240307ea69ffd | licenses: ["BSD-3-Clause"]
stars: 10 (2015-05-28T18:26:40.000Z to 2021-09-06T10:07:03.000Z) | issues: 762 (2015-01-15T10:00:59.000Z to 2022-03-31T15:35:14.000Z) | forks: 9 (2015-02-20T13:19:30.000Z to 2022-03-08T14:09:17.000Z)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-04 11:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields.json
class Migration(migrations.Migration):
initial = True
dependencies = [
('payments', '0005_auto_20170919_1621'),
]
operations = [
migrations.CreateModel(
name='BeyonicPayment',
fields=[
('payment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='payments.Payment')),
('amount', models.CharField(blank=True, help_text=b'Amount', max_length=200, null=True)),
('currency', models.CharField(blank=True, default=b'USD', help_text=b'Transaction currency', max_length=200, null=True)),
('mobile', models.CharField(blank=True, help_text=b'Mobile Number', max_length=200, null=True)),
('description', models.CharField(blank=True, help_text=b'Description', max_length=200, null=True)),
('metadata', django_extensions.db.fields.json.JSONField(blank=True, default=dict, help_text=b'Metadata', max_length=100, null=True)),
('transaction_reference', models.CharField(blank=True, help_text=b'Transaction ID', max_length=200, null=True)),
('response', models.TextField(blank=True, help_text='Response from Beyonic', null=True)),
('update_response', models.TextField(blank=True, help_text='Result from Beyonic (status update)', null=True)),
],
options={
'ordering': ('-created', '-updated'),
'verbose_name': 'Beyonic Payment',
'verbose_name_plural': 'Beyonic Payments',
},
bases=('payments.payment',),
),
]
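# Editorial note (standard Django workflow, not part of the generated file):
# assuming the app label matches the directory name, the table is created with
#
#     python manage.py migrate payments_beyonic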
avg_line_length: 48.2 | max_line_length: 194 | alphanum_fraction: 0.638485
hexsha: 6a8b72c8b7c986ad52fea9f675019af8c5e3cc58 | size: 6,387 | ext: py | lang: Python
path: movie_predict_modules/clean_data(edited).py | repo: andurilhuang/Movie_Income_Prediction | head_hexsha: 5c705fe60295a175e9dc54191df422fc49ceccd6 | licenses: ["MIT"]
stars: 1 (2017-11-02T06:06:39.000Z to 2017-11-02T06:06:39.000Z) | issues: null | forks: 4 (2017-11-01T19:24:31.000Z to 2018-09-13T00:05:41.000Z)
from sklearn import preprocessing
import json
import math
import numpy as np
import pandas as pd
import random
import requests
def get_act_pop_avg():
"""get average poplularity rating for actos by sampling"""
query_ave = "https://api.themoviedb.org/3/person/popular?" \
"api_key=60027f35df522f00e57a79b9d3568423&language=en-US&page=%d"
rd = random.sample(range(1,1000),20)
rd_pop=[]
for n in rd:
rq = requests.get(query_ave %n).json()
for item in rq['results']:
rd_pop.append(item['popularity'])
ave_pop = np.mean(rd_pop)
return ave_pop
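# Editorial note (assumption): the hard-coded ave_pop value in
# clean_director_actor() below appears to be a cached result of one run of
# get_act_pop_avg(); re-running the sampler would give a slightly different
# average.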
def clean_director_actor():
"""add director_actor popularity rating"""
ave_pop = 2.08979
TMDB_KEY = '60027f35df522f00e57a79b9d3568423'
df = pd.read_csv("FinalMerge.csv", encoding="latin1")
Actors_split = []
for item in df['Actors']:
item = str(item).split(",")
Actors_split.append(item)
Directors_split =[]
for item in df['Director']:
item = str(item).split(",")
Directors_split.append(item)
for item in Actors_split:
for i in range(len(item)):
item[i] = str(item[i]).strip()
for item in Directors_split:
for i in range(len(item)):
item[i] = str(item[i]).strip()
Actor_Popularity = []
count = 0
url = "https://api.themoviedb.org/3/search/person"
for item in Actors_split:
pop_sum = []
for i in item:
try:
payload = {'api_key':TMDB_KEY, 'query':i, 'language':'en-US'}
result = requests.get(url, data=payload).json()
pop_sum.append(result['results'][0]['popularity'])
except:
pop_sum.append(ave_pop)
Actor_Popularity.append(np.mean(pop_sum))
count = count+1
print(count)
df['actor_popularity'] = Actor_Popularity
Director_Popularity = []
dir_count = 0
for item in Directors_split:
pop = []
for i in item:
try:
payload = {'api_key':TMDB_KEY, 'query':i, 'language':'en-US'}
result = requests.get(url, data=payload).json()
pop.append(result['results'][0]['popularity'])
except:
pop.append(ave_pop)
Director_Popularity.append(np.mean(pop))
dir_count = dir_count+1
print (dir_count)
df['director_popularity'] = Director_Popularity
return df.to_csv("data_clean.csv")
def clean_data():
"""preparing data for regression"""
df = pd.read_csv("data_clean.csv", encoding = "latin1")
# drop unnecessary columns
df = df.drop(["Unnamed: 0", "imdb_id", "Title", "X.x", "X.y", "Country",\
"Actors", "Director", "Year", "Production"], axis=1)
    # drop missing values
mis_val_col = ["Genre", "IMDB.Votes", "Runtime", "IMDB.Rating", "Language"]
for col in mis_val_col:
df = df.drop(df[df[col].isnull()].index)
# budget
df["budget"] = df["budget"].map(lambda x:math.log10(x))
# revenue
df["revenue"] = df["revenue"].map(lambda x:math.log10(x))
# genre
df = pd.concat([df, df['Genre'].str.get_dummies(sep=', ')], axis=1)
df['Thriller'] = df[['Thriller', 'Horror']].sum(axis=1)
df['Fantasy'] = df[['Fantasy', 'Sci-Fi']].sum(axis=1)
df['Other_genre'] = df[['Music', 'History', 'Sport', 'War', 'Western',\
'Musical', 'Documentary', 'News']].sum(axis=1)
df.drop(['Music', 'History', 'Sport', 'War', 'Western', 'Musical',\
'Documentary', 'News', 'Horror', 'Sci-Fi'], axis=1, inplace=True)
genre_lst = list(df)[19:32]
for x in genre_lst:
df.loc[df['%s' % x] > 1, '%s' % x] = 1
df = df.drop("Genre",axis=1)
# IMDB.Votes
df['IMDB.Votes'] = df['IMDB.Votes'].replace(',', '',regex=True)
df['IMDB.Votes'] = df['IMDB.Votes'].astype(int)
df["IMDB.Votes"] = df["IMDB.Votes"].map(lambda x:math.log10(x))
# language
df['Language'] = df.Language.str.count(',')+1
# rated
df["Rated"] = df["Rated"].replace(np.nan, "UNRATED")\
.replace("NOT RATED", "UNRATED")
df = df.drop(df[(df["Rated"] == "TV-MA") | (df["Rated"] == "TV-PG") |\
(df["Rated"] == "TV-14")].index)
df = pd.concat([df, df['Rated'].str.get_dummies(sep = ', ')], axis = 1)
# released
# index of released date col
index = df.columns.get_loc("Released")
    # change the release date strings to timestamps
release_dates = pd.to_datetime(df["Released"])
    # whether the release date falls on a weekend
weekend_list = []
for each in release_dates:
day_ofweek = each.dayofweek
if day_ofweek >= 4 and day_ofweek <= 6:
tag = 1
else:
tag = 0
weekend_list.append(tag)
    # whether the release date falls outside the "dump" months (Dec, Jan, Feb, Aug, Sep)
undumpmonth_list = []
for each in release_dates:
month = each.month
if month == 12 or month == 1 or month == 2 or month == 8 or month == 9:
tag = 0
else:
tag = 1
undumpmonth_list.append(tag)
df.insert(loc = index + 1,column = "released_on_weekend", value = weekend_list)
df.insert(loc = index + 2,column = "released_not_on_dump_month",\
value = undumpmonth_list)
df.drop("Released", axis = 1)
# runtime
df["Runtime"] = df["Runtime"].map(lambda x:int(x.strip("min")))
# normalization
x1 = df[['IMDB.Rating', 'IMDB.Votes', 'Language','Runtime',\
'budget', 'actor_popularity', 'director_popularity']]
x2 = df[['released_on_weekend', 'released_not_on_dump_month','Action', 'Adventure',\
'Animation', 'Biography', 'Comedy', 'Crime', 'Drama', 'Family',\
'Fantasy', 'Mystery', 'Romance', 'Thriller', 'Other_genre', 'G',\
'NC-17', 'PG', 'PG-13', 'R', 'UNRATED']]
y = df['revenue'].reset_index().drop("index",axis = 1)
normalizer = preprocessing.MinMaxScaler()
x1 = normalizer.fit_transform(x1)
x1 = pd.DataFrame(x1, columns = ['IMDB.Rating', 'IMDB.Votes', 'Language', 'Runtime',\
'budget', 'actor_popularity','director_popularity'])
x2 = x2.reset_index().drop("index", axis = 1)
X = pd.concat([x1, x2], axis = 1)
df_for_model = pd.concat([X, y], axis = 1)
return df_for_model
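# Illustrative usage sketch (not part of the original module): the returned
# frame keeps `revenue` as the target column, so a simple downstream fit could
# look like the following (LinearRegression is only an example estimator):
#
#     from sklearn.linear_model import LinearRegression
#     df_model = clean_data()
#     X = df_model.drop("revenue", axis=1)
#     y = df_model["revenue"]
#     LinearRegression().fit(X, y)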
avg_line_length: 36.919075 | max_line_length: 90 | alphanum_fraction: 0.570534
hexsha: 093b7a26732e4735c0a937a4753071c5ef7143ec | size: 765 | ext: py | lang: Python
path: django_uuid_primary_key/urls.py | repo: vaibhavmule/django-uuid-primary-key | head_hexsha: 5d751a3c3c1b16556fae04efeea82a310e5c7ef5 | licenses: ["MIT"]
stars: 6 (2017-10-31T17:45:43.000Z to 2022-02-04T10:59:33.000Z) | issues: 9 (2017-11-01T07:14:50.000Z to 2022-02-10T07:17:54.000Z) | forks: 4 (2017-11-01T06:56:39.000Z to 2019-08-14T12:03:29.000Z)
"""django_uuid_primary_key URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
avg_line_length: 34.772727 | max_line_length: 77 | alphanum_fraction: 0.71634
hexsha: c605af63117521f9bcca6169a0029588892b4a7f | size: 1,176 | ext: py | lang: Python
path: 5-Reinforcement Learning/Thompson Sampling/Code.py | repo: HMSheharyar/MachineLearning | head_hexsha: 4f2fdf2a1cafb4b8c743dd3af3bd118a8a47ff68 | licenses: ["MIT"]
stars: 1 (2019-02-16T18:39:29.000Z to 2019-02-16T18:39:29.000Z) | issues: null | forks: null
#------------------------------------------------- Thompson Sampling -------------------------------------------------#
#---Libraries Import---#
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#---Importing the dataset---#
dataset = pd.read_csv('FileName.csv')
#---Implementing Thompson Sampling---#
import random
N = 10000
d = 10
ads_selected = []
numbers_of_rewards_1 = [0] * d
numbers_of_rewards_0 = [0] * d
total_reward = 0
for n in range(0, N):
ad = 0
max_random = 0
for i in range(0, d):
random_beta = random.betavariate(numbers_of_rewards_1[i] + 1, numbers_of_rewards_0[i] + 1)
if random_beta > max_random:
max_random = random_beta
ad = i
ads_selected.append(ad)
reward = dataset.values[n, ad]
if reward == 1:
numbers_of_rewards_1[ad] = numbers_of_rewards_1[ad] + 1
else:
numbers_of_rewards_0[ad] = numbers_of_rewards_0[ad] + 1
total_reward = total_reward + reward
#---Visualising the results - Histogram---#
plt.hist(ads_selected)
plt.title('Histogram of ads selections')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()
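# Illustrative follow-up (not part of the original script): the ad the sampler
# converged on can be read off the selection counts, e.g.
#
#     import collections
#     best_ad = collections.Counter(ads_selected).most_common(1)[0][0]
#     print("Ad selected most often:", best_ad)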
avg_line_length: 28 | max_line_length: 119 | alphanum_fraction: 0.618197
hexsha: 65b199bbf4cf22444067f4ad51b065616b339ce9 | size: 11,024 | ext: py | lang: Python
path: tests/conftest.py | repo: tyrasd/osmaxx | head_hexsha: da4454083d17b2ef8b0623cad62e39992b6bd52a | licenses: ["MIT"]
stars: null | issues: null | forks: null
# pylint: disable=C0111
import os
import tempfile
from collections import Mapping
from datetime import timedelta
import pytest
from osmaxx.utils.frozendict import frozendict
test_data_dir = os.path.join(os.path.dirname(__file__), 'test_data')
postgres_container_userland_port = 65432 # required for travis, so using it everywhere
def pytest_configure():
from django.conf import settings
import environ
settings.configure(
ROOT_DIR=environ.Path(__file__) - 1,
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'PORT': '54321',
'HOST': '127.0.0.1',
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
MEDIA_URL='/media/',
MEDIA_ROOT=tempfile.mkdtemp(),
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
],
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'osmaxx.job_progress.middleware.ExportUpdaterMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'rest_framework_gis',
'rest_framework.authtoken',
'crispy_forms',
'stored_messages',
'tests',
# version app
'osmaxx.version',
# conversion service apps
'osmaxx.clipping_area',
'osmaxx.conversion',
# web_frontend apps
'osmaxx.core',
'osmaxx.excerptexport',
'osmaxx.job_progress',
'osmaxx.profile',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
),
RQ_QUEUE_NAMES=['default'],
RQ_QUEUES={
'default': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
'PASSWORD': '',
'DEFAULT_TIMEOUT': 3600,
},
},
JWT_AUTH={
'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_RESPONSE_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_response_payload_handler',
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': timedelta(seconds=300),
'JWT_AUDIENCE': None,
'JWT_ISSUER': None,
'JWT_ALLOW_REFRESH': False,
'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
},
OSMAXX_CONVERSION_SERVICE={
'PBF_PLANET_FILE_PATH': os.path.join(test_data_dir, 'osm', 'monaco-latest.osm.pbf'),
},
_OSMAXX_POLYFILE_LOCATION=os.path.join(test_data_dir, 'polyfiles'),
OSMAXX_TEST_SETTINGS={
'CONVERSION_SERVICE_URL': 'http://localhost:8901/api/',
'CONVERSION_SERVICE_USERNAME': 'dev',
'CONVERSION_SERVICE_PASSWORD': 'dev',
},
OSMAXX={
'download_file_name': '%(excerpt_name)s-%(date)s.%(content_type)s.%(file_extension)s',
'EXTRACTION_PROCESSING_TIMEOUT_TIMEDELTA': timedelta(hours=48),
'CONVERSION_SERVICE_URL': 'http://localhost:8901/api/',
'CONVERSION_SERVICE_USERNAME': 'dev',
'CONVERSION_SERVICE_PASSWORD': 'dev',
'EXCLUSIVE_USER_GROUP': 'dev',
'ACCOUNT_MANAGER_EMAIL': 'accountmanager@example.com',
},
OSMAXX_FRONTEND_USER_GROUP='osmaxx_frontend_users',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'LOCATION': ''
}
},
MIGRATION_MODULES={
'sites': 'osmaxx.contrib.sites.migrations',
'auth': 'osmaxx.contrib.auth.migrations',
'stored_messages': 'osmaxx.third_party_apps.stored_messages.migrations',
}
)
# if any global fixtures are needed, add them below
@pytest.yield_fixture
def requests_mock():
import requests_mock
with requests_mock.mock() as m:
yield m
@pytest.fixture
def user(db, django_user_model, django_username_field):
"""A Django user.
This uses an existing user with username "user", or creates a new one with
password "password".
"""
# Adapted from pytest_django.fixtures.admin_user
UserModel = django_user_model # noqa
username_field = django_username_field
username = 'user'
password = 'password'
try:
user = UserModel._default_manager.get(**{username_field: 'user'})
except UserModel.DoesNotExist:
extra_fields = {}
if username_field != 'username':
extra_fields[username_field] = username
user = UserModel._default_manager.create_user(
username, '{}@example.com'.format(username), password, **extra_fields)
return user
@pytest.fixture
def authenticated_client(client, user):
"""
Client fixture using an authenticated user.
Since this fixture creates a database object, you must
mark your test with @pytest.mark.django_db()
Args:
client: Default client fixture from pytest-django
Returns:
Authenticated Client
"""
client.login(username='user', password='password')
client.user = user
return client
@pytest.fixture
def api_client():
from rest_framework.test import APIClient
return APIClient()
@pytest.fixture
def authenticated_api_client(api_client, user):
"""
API-Client fixture using an authenticated user.
Since this fixture creates a database object, you must
mark your test with @pytest.mark.django_db()
Args:
api_client: api-client fixture
Returns:
Authenticated Client
"""
return authenticated_client(api_client, user)
@pytest.fixture
def frontend_accessible_authenticated_api_client(api_client, user):
from osmaxx.profile.models import Profile
Profile.objects.create(associated_user=user, unverified_email=user.email)
return authenticated_client(api_client, user)
@pytest.fixture
def persisted_valid_clipping_area():
from django.contrib.gis.geos import Polygon, MultiPolygon
from osmaxx.clipping_area.models import ClippingArea
poly_1 = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
poly_2 = Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
multi_polygon = MultiPolygon(poly_1, poly_2)
persisted_valid_clipping_area = ClippingArea.objects.create(name='test', clipping_multi_polygon=multi_polygon)
assert persisted_valid_clipping_area.osmosis_polygon_file_string != ''
assert persisted_valid_clipping_area.osmosis_polygon_file_string is not None
assert str(persisted_valid_clipping_area) == "test ({})".format(persisted_valid_clipping_area.id)
return persisted_valid_clipping_area
@pytest.fixture
def authorized_client(authenticated_client):
from django.contrib.auth.models import Group
from django.conf import settings
authenticated_client.user.groups.add(Group.objects.get(name=settings.OSMAXX_FRONTEND_USER_GROUP))
return authenticated_client
@pytest.fixture
def geos_geometry_can_be_created_from_geojson_string():
"""
Just a sanity check asserting that GEOSGeometry instances can be created from GeoJSON strings.
If you get an error here, check your libraries, especially GDAL. (libgdal.so.1)
"""
from django.contrib.gis.geos import GEOSGeometry
import json
geojson_point = dict(type="Point", coordinates=[100.0, 0.0])
geojson_point_string = json.dumps(geojson_point)
GEOSGeometry(geojson_point_string)
@pytest.fixture
def area_polyfile_string():
return '''
none
polygon-1
7.495679855346679 43.75782881091782
7.38581657409668 43.75782881091782
7.38581657409668 43.70833803832912
7.495679855346679 43.75782881091782
END
END
'''.lstrip()
class TagCombination(Mapping):
def __init__(self, *args, **kwargs):
tags = dict(osm_id=id(self))
tags.update(*args, **kwargs)
self.__tags = frozendict(tags)
self.__hash = hash(frozenset(self.items()))
def __getitem__(self, item):
return self.__tags[item]
def __iter__(self):
return iter(self.__tags)
def __len__(self):
return len(self.__tags)
def __str__(self):
return ' '.join("{key}={value}".format(key=key, value=value) for key, value in self.items())
def __hash__(self):
return self.__hash
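# Illustrative sketch (not part of the original fixtures): TagCombination acts
# as an immutable, hashable mapping with an auto-generated osm_id, e.g.
#
#     tags = TagCombination(highway='residential')
#     assert tags['highway'] == 'residential'
#     assert 'osm_id' in tags
#     print(tags)  # "osm_id=<id> highway=residential"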
avg_line_length: 33.305136 | max_line_length: 114 | alphanum_fraction: 0.628901
hexsha: f76f9273e248f2a20d1d8fa88043f5da01fb8c0d | size: 4,509 | ext: py | lang: Python
path: intropyproject-classify-pet-images/classify_images.py | repo: djarrin/udacity-use-pre-trained-image-classifier | head_hexsha: afe4d2a273048d15b4b2fe37411fba725355d2b6 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/classify_images.py
#
# PROGRAMMER: David Jarrin
# DATE CREATED: 3/1/19
# REVISED DATE: 3/2/19
# PURPOSE: Create a function classify_images that uses the classifier function
# to create the classifier labels and then compares the classifier
# labels to the pet image labels. This function inputs:
# -The Image Folder as image_dir within classify_images and function
# and as in_arg.dir for function call within main.
# -The results dictionary as results_dic within classify_images
# function and results for the functin call within main.
# -The CNN model architecture as model within classify_images function
# and in_arg.arch for the function call within main.
# This function uses the extend function to add items to the list
# that's the 'value' of the results dictionary. You will be adding the
# classifier label as the item at index 1 of the list and the comparison
# of the pet and classifier labels as the item at index 2 of the list.
#
##
# Imports classifier function for using CNN to classify images
from classifier import classifier
# TODO 3: Define classify_images function below, specifically replace the None
# below by the function definition of the classify_images function.
# Notice that this function doesn't return anything because the
# results_dic dictionary that is passed into the function is a mutable
# data type so no return is needed.
#
def classify_images(images_dir, results_dic, model):
"""
Creates classifier labels with classifier function, compares pet labels to
the classifier labels, and adds the classifier label and the comparison of
the labels to the results dictionary using the extend function. Be sure to
format the classifier labels so that they will match your pet image labels.
The format will include putting the classifier labels in all lower case
letters and strip the leading and trailing whitespace characters from them.
For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese'
so the classifier label = 'maltese dog, maltese terrier, maltese'.
Recall that dog names from the classifier function can be a string of dog
names separated by commas when a particular breed of dog has multiple dog
names associated with that breed. For example, you will find pet images of
a 'dalmatian'(pet label) and it will match to the classifier label
'dalmatian, coach dog, carriage dog' if the classifier function correctly
classified the pet images of dalmatians.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
    in test_classifier.py. Please refer to this program prior to using the
    classifier() function to classify images within this function.
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by the classifier function (string)
results_dic - Results Dictionary with 'key' as image filename and 'value'
as a List. Where the list will contain the following items:
index 0 = pet image label (string)
--- where index 1 & index 2 are added by this function ---
NEW - index 1 = classifier label (string)
NEW - index 2 = 1/0 (int) where 1 = match between pet image
                    and classifier labels and 0 = no match between labels
model - Indicates which CNN model architecture will be used by the
classifier function to classify the pet images,
values must be either: resnet alexnet vgg (string)
Returns:
None - results_dic is mutable data type so no return needed.
"""
for key in results_dic:
        # get the classifier label, lowercase it and strip whitespace
model_label = classifier(images_dir + key, model).lower().strip()
#append the model label
results_dic[key].append(model_label)
        # get the pet image label (ground truth) from the results dict
trueValue = results_dic[key][0]
        # record whether the labels match (1) or not (0)
if trueValue in model_label:
results_dic[key].append(1)
else:
results_dic[key].append(0)
return None
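# Illustrative example of the side effect (hypothetical file name): before the
# call, results_dic might hold
#
#     {'Dalmatian_01.jpg': ['dalmatian']}
#
# and after classify_images(images_dir, results_dic, 'vgg') it becomes
#
#     {'Dalmatian_01.jpg': ['dalmatian',
#                           'dalmatian, coach dog, carriage dog', 1]}
#
# because the pet label is a substring of the classifier label.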
avg_line_length: 54.987805 | max_line_length: 90 | alphanum_fraction: 0.696385
hexsha: b3c24213f3474d01362b41eb661d7e4ea211c968 | size: 123,656 | ext: py | lang: Python
path: Lib/test/test_zipfile.py | repo: GreatBahram/cpython | head_hexsha: 3529718925f40d14ed48d281d809187bc7314a14 | licenses: ["0BSD"]
stars: 7 (2018-09-25T17:32:32.000Z to 2021-06-16T18:40:40.000Z) | issues: 17 (2018-01-26T13:23:58.000Z to 2022-03-01T13:44:36.000Z) | forks: 1 (2020-09-13T02:40:55.000Z to 2020-09-13T02:40:55.000Z)
import contextlib
import importlib.util
import io
import itertools
import os
import pathlib
import posixpath
import string
import struct
import subprocess
import sys
import time
import unittest
import unittest.mock as mock
import zipfile
import functools
from tempfile import TemporaryFile
from random import randint, random, randbytes
from test.support import script_helper
from test.support import (findfile, requires_zlib, requires_bz2,
requires_lzma, captured_stdout)
from test.support.os_helper import TESTFN, unlink, rmtree, temp_dir, temp_cwd
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
FIXEDTEST_SIZE = 1000
DATAFILES_DIR = 'zipfile_datafiles'
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
def get_files(test):
yield TESTFN2
with TemporaryFile() as f:
yield f
test.assertFalse(f.closed)
with io.BytesIO() as f:
yield f
test.assertFalse(f.closed)
class AbstractTestsWithSourceFile:
@classmethod
def setUpClass(cls):
cls.line_gen = [bytes("Zipfile test line %d. random float: %f\n" %
(i, random()), "ascii")
for i in range(FIXEDTEST_SIZE)]
cls.data = b''.join(cls.line_gen)
def setUp(self):
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def make_test_archive(self, f, compression, compresslevel=None):
kwargs = {'compression': compression, 'compresslevel': compresslevel}
# Create the ZIP archive
with zipfile.ZipFile(f, "w", **kwargs) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
with zipfp.open('written-open-w', mode='w') as f:
for line in self.line_gen:
f.write(line)
def zip_test(self, f, compression, compresslevel=None):
self.make_test_archive(f, compression, compresslevel)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = io.StringIO()
zipfp.printdir(file=fp)
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 5) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 4)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
self.assertIn("written-open-w", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 4)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
self.assertIn("written-open-w", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile", "written-open-w"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
self.assertEqual(b''.join(zipdata2), self.data)
def test_open(self):
for f in get_files(self):
self.zip_open_test(f, self.compression)
def test_open_with_pathlike(self):
path = pathlib.Path(TESTFN2)
self.zip_open_test(path, self.compression)
with zipfile.ZipFile(path, "r", self.compression) as zipfp:
self.assertIsInstance(zipfp.filename, str)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
def zip_read1_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(-1)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1(self):
for f in get_files(self):
self.zip_read1_test(f, self.compression)
def zip_read1_10_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(10)
self.assertLessEqual(len(read_data), 10)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1_10(self):
for f in get_files(self):
self.zip_read1_10_test(f, self.compression)
def zip_readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
data = b''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(100)
if not read:
break
data += read
self.assertEqual(data, self.data)
def test_readline_read(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in get_files(self):
self.zip_readline_read_test(f, self.compression)
def zip_readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line)
def test_readline(self):
for f in get_files(self):
self.zip_readline_test(f, self.compression)
def zip_readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
ziplines = zipopen.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line)
def test_readlines(self):
for f in get_files(self):
self.zip_readlines_test(f, self.compression)
def zip_iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line, zipline in zip(self.line_gen, zipopen):
self.assertEqual(zipline, line)
def test_iterlines(self):
for f in get_files(self):
self.zip_iterlines_test(f, self.compression)
def test_low_compression(self):
"""Check for cases where compressed data is larger than original."""
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipfp:
zipfp.writestr("strfile", '12')
# Get an open object for strfile
with zipfile.ZipFile(TESTFN2, "r", self.compression) as zipfp:
with zipfp.open("strfile") as openobj:
self.assertEqual(openobj.read(1), b'1')
self.assertEqual(openobj.read(1), b'2')
def test_writestr_compression(self):
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.writestr("b.txt", "hello world", compress_type=self.compression)
info = zipfp.getinfo('b.txt')
self.assertEqual(info.compress_type, self.compression)
def test_writestr_compresslevel(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", compresslevel=1)
zipfp.writestr("a.txt", "hello world", compress_type=self.compression)
zipfp.writestr("b.txt", "hello world", compress_type=self.compression,
compresslevel=2)
# Compression level follows the constructor.
a_info = zipfp.getinfo('a.txt')
self.assertEqual(a_info.compress_type, self.compression)
self.assertEqual(a_info._compresslevel, 1)
# Compression level is overridden.
b_info = zipfp.getinfo('b.txt')
self.assertEqual(b_info.compress_type, self.compression)
self.assertEqual(b_info._compresslevel, 2)
def test_read_return_size(self):
# Issue #9837: ZipExtFile.read() shouldn't return more bytes
# than requested.
for test_size in (1, 4095, 4096, 4097, 16384):
file_size = test_size + 1
junk = randbytes(file_size)
with zipfile.ZipFile(io.BytesIO(), "w", self.compression) as zipf:
zipf.writestr('foo', junk)
with zipf.open('foo', 'r') as fp:
buf = fp.read(test_size)
self.assertEqual(len(buf), test_size)
def test_truncated_zipfile(self):
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode='w') as zipf:
zipf.writestr('strfile', self.data, compress_type=self.compression)
end_offset = fp.tell()
zipfiledata = fp.getvalue()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
zipopen.read()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read(100):
pass
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read1(100):
pass
def test_repr(self):
fname = 'file.name'
for f in get_files(self):
with zipfile.ZipFile(f, 'w', self.compression) as zipfp:
zipfp.write(TESTFN, fname)
r = repr(zipfp)
self.assertIn("mode='w'", r)
with zipfile.ZipFile(f, 'r') as zipfp:
r = repr(zipfp)
if isinstance(f, str):
self.assertIn('filename=%r' % f, r)
else:
self.assertIn('file=%r' % f, r)
self.assertIn("mode='r'", r)
r = repr(zipfp.getinfo(fname))
self.assertIn('filename=%r' % fname, r)
self.assertIn('filemode=', r)
self.assertIn('file_size=', r)
if self.compression != zipfile.ZIP_STORED:
self.assertIn('compress_type=', r)
self.assertIn('compress_size=', r)
with zipfp.open(fname) as zipopen:
r = repr(zipopen)
self.assertIn('name=%r' % fname, r)
self.assertIn("mode='r'", r)
if self.compression != zipfile.ZIP_STORED:
self.assertIn('compress_type=', r)
self.assertIn('[closed]', repr(zipopen))
self.assertIn('[closed]', repr(zipfp))
def test_compresslevel_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression, compresslevel=9)
def test_per_file_compresslevel(self):
"""Check that files within a Zip archive can have different
compression levels."""
with zipfile.ZipFile(TESTFN2, "w", compresslevel=1) as zipfp:
zipfp.write(TESTFN, 'compress_1')
zipfp.write(TESTFN, 'compress_9', compresslevel=9)
one_info = zipfp.getinfo('compress_1')
nine_info = zipfp.getinfo('compress_9')
self.assertEqual(one_info._compresslevel, 1)
self.assertEqual(nine_info._compresslevel, 9)
def test_writing_errors(self):
class BrokenFile(io.BytesIO):
def write(self, data):
nonlocal count
if count is not None:
if count == stop:
raise OSError
count += 1
super().write(data)
stop = 0
while True:
testfile = BrokenFile()
count = None
with zipfile.ZipFile(testfile, 'w', self.compression) as zipfp:
with zipfp.open('file1', 'w') as f:
f.write(b'data1')
count = 0
try:
with zipfp.open('file2', 'w') as f:
f.write(b'data2')
except OSError:
stop += 1
else:
break
finally:
count = None
with zipfile.ZipFile(io.BytesIO(testfile.getvalue())) as zipfp:
self.assertEqual(zipfp.namelist(), ['file1'])
self.assertEqual(zipfp.read('file1'), b'data1')
with zipfile.ZipFile(io.BytesIO(testfile.getvalue())) as zipfp:
self.assertEqual(zipfp.namelist(), ['file1', 'file2'])
self.assertEqual(zipfp.read('file1'), b'data1')
self.assertEqual(zipfp.read('file2'), b'data2')
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_STORED
test_low_compression = None
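    # Overriding the inherited test with None removes it from this class:
    # ZIP_STORED never compresses, so the "compressed data larger than
    # original" scenario cannot occur here.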
def zip_test_writestr_permissions(self, f, compression):
# Make sure that writestr and open(... mode='w') create files with
# mode 0600, when they are passed a name rather than a ZipInfo
# instance.
self.make_test_archive(f, compression)
with zipfile.ZipFile(f, "r") as zipfp:
zinfo = zipfp.getinfo('strfile')
self.assertEqual(zinfo.external_attr, 0o600 << 16)
zinfo2 = zipfp.getinfo('written-open-w')
self.assertEqual(zinfo2.external_attr, 0o600 << 16)
def test_writestr_permissions(self):
for f in get_files(self):
self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
def test_append_to_zip_file(self):
"""Test appending to an existing zipfile."""
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("strfile", self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
def test_append_to_non_zip_file(self):
"""Test appending to an existing file that is not a zipfile."""
# NOTE: this test fails if len(d) < 22 because of the first
# line "fpin.seek(-22, 2)" in _EndRecData
data = b'I am not a ZipFile!'*10
with open(TESTFN2, 'wb') as f:
f.write(data)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'rb') as f:
f.seek(len(data))
with zipfile.ZipFile(f, "r") as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
self.assertEqual(zipfp.read(TESTFN), self.data)
with open(TESTFN2, 'rb') as f:
self.assertEqual(f.read(len(data)), data)
zipfiledata = f.read()
with io.BytesIO(zipfiledata) as bio, zipfile.ZipFile(bio) as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
self.assertEqual(zipfp.read(TESTFN), self.data)
def test_read_concatenated_zip_file(self):
with io.BytesIO() as bio:
with zipfile.ZipFile(bio, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
zipfiledata = bio.getvalue()
data = b'I am not a ZipFile!'*10
with open(TESTFN2, 'wb') as f:
f.write(data)
f.write(zipfiledata)
with zipfile.ZipFile(TESTFN2) as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
self.assertEqual(zipfp.read(TESTFN), self.data)
def test_append_to_concatenated_zip_file(self):
with io.BytesIO() as bio:
with zipfile.ZipFile(bio, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
zipfiledata = bio.getvalue()
data = b'I am not a ZipFile!'*1000000
with open(TESTFN2, 'wb') as f:
f.write(data)
f.write(zipfiledata)
with zipfile.ZipFile(TESTFN2, 'a') as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
zipfp.writestr('strfile', self.data)
with open(TESTFN2, 'rb') as f:
self.assertEqual(f.read(len(data)), data)
zipfiledata = f.read()
with io.BytesIO(zipfiledata) as bio, zipfile.ZipFile(bio) as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN, 'strfile'])
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read('strfile'), self.data)
def test_ignores_newline_at_end(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("\r\n\00\00\00")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
def test_ignores_stuff_appended_past_comments(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.comment = b"this is a comment"
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("abcdef\r\n")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
self.assertEqual(zipfp.comment, b"this is a comment")
def test_write_default_name(self):
"""Check that calling ZipFile.write without arcname specified
produces the expected result."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN)
with open(TESTFN, "rb") as f:
self.assertEqual(zipfp.read(TESTFN), f.read())
def test_io_on_closed_zipextfile(self):
fname = "somefile.txt"
with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
zipfp.writestr(fname, "bogus")
with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
with zipfp.open(fname) as fid:
fid.close()
self.assertRaises(ValueError, fid.read)
self.assertRaises(ValueError, fid.seek, 0)
self.assertRaises(ValueError, fid.tell)
self.assertRaises(ValueError, fid.readable)
self.assertRaises(ValueError, fid.seekable)
def test_write_to_readonly(self):
"""Check that trying to call write() on a readonly ZipFile object
raises a ValueError."""
with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
zipfp.writestr("somefile.txt", "bogus")
with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
self.assertRaises(ValueError, zipfp.write, TESTFN)
with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
with self.assertRaises(ValueError):
zipfp.open(TESTFN, mode='w')
def test_add_file_before_1980(self):
# Set atime and mtime to 1970-01-01
os.utime(TESTFN, (0, 0))
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
self.assertRaises(ValueError, zipfp.write, TESTFN)
with zipfile.ZipFile(TESTFN2, "w", strict_timestamps=False) as zipfp:
zipfp.write(TESTFN)
zinfo = zipfp.getinfo(TESTFN)
self.assertEqual(zinfo.date_time, (1980, 1, 1, 0, 0, 0))
def test_add_file_after_2107(self):
# Set atime and mtime to 2108-12-30
ts = 4386268800
try:
time.localtime(ts)
except OverflowError:
self.skipTest(f'time.localtime({ts}) raises OverflowError')
try:
os.utime(TESTFN, (ts, ts))
except OverflowError:
self.skipTest('Host fs cannot set timestamp to required value.')
mtime_ns = os.stat(TESTFN).st_mtime_ns
if mtime_ns != (4386268800 * 10**9):
# XFS filesystem is limited to 32-bit timestamp, but the syscall
# didn't fail. Moreover, there is a VFS bug which returns
# a cached timestamp which is different than the value on disk.
#
# Test st_mtime_ns rather than st_mtime to avoid rounding issues.
#
# https://bugzilla.redhat.com/show_bug.cgi?id=1795576
# https://bugs.python.org/issue39460#msg360952
self.skipTest(f"Linux VFS/XFS kernel bug detected: {mtime_ns=}")
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
self.assertRaises(struct.error, zipfp.write, TESTFN)
with zipfile.ZipFile(TESTFN2, "w", strict_timestamps=False) as zipfp:
zipfp.write(TESTFN)
zinfo = zipfp.getinfo(TESTFN)
self.assertEqual(zinfo.date_time, (2107, 12, 31, 23, 59, 59))
@requires_zlib()
class DeflateTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
def test_per_file_compression(self):
"""Check that files within a Zip archive can have different
compression options."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
@requires_bz2()
class Bzip2TestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma()
class LzmaTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
class AbstractTestZip64InSmallFiles:
# These tests test the ZIP64 functionality without using large files,
# see test_zipfile64 for proper tests.
@classmethod
def setUpClass(cls):
line_gen = (bytes("Test of zipfile line %d." % i, "ascii")
for i in range(0, FIXEDTEST_SIZE))
cls.data = b'\n'.join(line_gen)
def setUp(self):
self._limit = zipfile.ZIP64_LIMIT
self._filecount_limit = zipfile.ZIP_FILECOUNT_LIMIT
zipfile.ZIP64_LIMIT = 1000
zipfile.ZIP_FILECOUNT_LIMIT = 9
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def zip_test(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression, allowZip64=True) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = io.StringIO()
zipfp.printdir(fp)
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def test_too_many_files(self):
# This test checks that more than 64k files can be added to an archive,
# and that the resulting archive can be read properly by ZipFile
zipf = zipfile.ZipFile(TESTFN, "w", self.compression,
allowZip64=True)
zipf.debug = 100
numfiles = 15
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression)
self.assertEqual(len(zipf2.namelist()), numfiles)
for i in range(numfiles):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
zipf2.close()
def test_too_many_files_append(self):
zipf = zipfile.ZipFile(TESTFN, "w", self.compression,
allowZip64=False)
zipf.debug = 100
numfiles = 9
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf = zipfile.ZipFile(TESTFN, "a", self.compression,
allowZip64=False)
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf = zipfile.ZipFile(TESTFN, "a", self.compression,
allowZip64=True)
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
numfiles2 = 15
for i in range(numfiles, numfiles2):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles2)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression)
self.assertEqual(len(zipf2.namelist()), numfiles2)
for i in range(numfiles2):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
zipf2.close()
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_STORED
def large_file_exception_test(self, f, compression):
with zipfile.ZipFile(f, "w", compression, allowZip64=False) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.write, TESTFN, "another.name")
def large_file_exception_test2(self, f, compression):
with zipfile.ZipFile(f, "w", compression, allowZip64=False) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.writestr, "another.name", self.data)
def test_large_file_exception(self):
for f in get_files(self):
self.large_file_exception_test(f, zipfile.ZIP_STORED)
self.large_file_exception_test2(f, zipfile.ZIP_STORED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED,
allowZip64=True) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
def test_append(self):
# Test that appending to the Zip64 archive doesn't change
# extra fields of existing entries.
with zipfile.ZipFile(TESTFN2, "w", allowZip64=True) as zipfp:
zipfp.writestr("strfile", self.data)
with zipfile.ZipFile(TESTFN2, "r", allowZip64=True) as zipfp:
zinfo = zipfp.getinfo("strfile")
extra = zinfo.extra
with zipfile.ZipFile(TESTFN2, "a", allowZip64=True) as zipfp:
zipfp.writestr("strfile2", self.data)
with zipfile.ZipFile(TESTFN2, "r", allowZip64=True) as zipfp:
zinfo = zipfp.getinfo("strfile")
self.assertEqual(zinfo.extra, extra)
def make_zip64_file(
self, file_size_64_set=False, file_size_extra=False,
compress_size_64_set=False, compress_size_extra=False,
header_offset_64_set=False, header_offset_extra=False,
):
"""Generate bytes sequence for a zip with (incomplete) zip64 data.
The actual values (not the zip 64 0xffffffff values) stored in the file
are:
file_size: 8
compress_size: 8
header_offset: 0
"""
actual_size = 8
actual_header_offset = 0
local_zip64_fields = []
central_zip64_fields = []
file_size = actual_size
if file_size_64_set:
file_size = 0xffffffff
if file_size_extra:
local_zip64_fields.append(actual_size)
central_zip64_fields.append(actual_size)
file_size = struct.pack("<L", file_size)
compress_size = actual_size
if compress_size_64_set:
compress_size = 0xffffffff
if compress_size_extra:
local_zip64_fields.append(actual_size)
central_zip64_fields.append(actual_size)
compress_size = struct.pack("<L", compress_size)
header_offset = actual_header_offset
if header_offset_64_set:
header_offset = 0xffffffff
if header_offset_extra:
central_zip64_fields.append(actual_header_offset)
header_offset = struct.pack("<L", header_offset)
local_extra = struct.pack(
'<HH' + 'Q'*len(local_zip64_fields),
0x0001,
8*len(local_zip64_fields),
*local_zip64_fields
)
central_extra = struct.pack(
'<HH' + 'Q'*len(central_zip64_fields),
0x0001,
8*len(central_zip64_fields),
*central_zip64_fields
)
central_dir_size = struct.pack('<Q', 58 + 8 * len(central_zip64_fields))
offset_to_central_dir = struct.pack('<Q', 50 + 8 * len(local_zip64_fields))
local_extra_length = struct.pack("<H", 4 + 8 * len(local_zip64_fields))
central_extra_length = struct.pack("<H", 4 + 8 * len(central_zip64_fields))
filename = b"test.txt"
content = b"test1234"
filename_length = struct.pack("<H", len(filename))
zip64_contents = (
# Local file header
b"PK\x03\x04\x14\x00\x00\x00\x00\x00\x00\x00!\x00\x9e%\xf5\xaf"
+ compress_size
+ file_size
+ filename_length
+ local_extra_length
+ filename
+ local_extra
+ content
# Central directory:
+ b"PK\x01\x02-\x03-\x00\x00\x00\x00\x00\x00\x00!\x00\x9e%\xf5\xaf"
+ compress_size
+ file_size
+ filename_length
+ central_extra_length
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01"
+ header_offset
+ filename
+ central_extra
# Zip64 end of central directory
+ b"PK\x06\x06,\x00\x00\x00\x00\x00\x00\x00-\x00-"
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00"
+ b"\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00"
+ central_dir_size
+ offset_to_central_dir
# Zip64 end of central directory locator
+ b"PK\x06\x07\x00\x00\x00\x00l\x00\x00\x00\x00\x00\x00\x00\x01"
+ b"\x00\x00\x00"
# end of central directory
+ b"PK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00:\x00\x00\x002\x00"
+ b"\x00\x00\x00\x00"
)
return zip64_contents
def test_bad_zip64_extra(self):
"""Missing zip64 extra records raises an exception.
There are 4 fields that the zip64 format handles (the disk number is
not used in this module and so is ignored here). According to the zip
spec:
The order of the fields in the zip64 extended
information record is fixed, but the fields MUST
only appear if the corresponding Local or Central
directory record field is set to 0xFFFF or 0xFFFFFFFF.
If the zip64 extra content doesn't contain enough entries for the
number of fields marked with 0xFFFF or 0xFFFFFFFF, we raise an error.
This test mismatches the length of the zip64 extra field and the number
of fields set to indicate the presence of zip64 data.
"""
# zip64 file size present, no fields in extra, expecting one, equals
# missing file size.
missing_file_size_extra = self.make_zip64_file(
file_size_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_file_size_extra))
self.assertIn('file size', str(e.exception).lower())
# zip64 file size present, zip64 compress size present, one field in
# extra, expecting two, equals missing compress size.
missing_compress_size_extra = self.make_zip64_file(
file_size_64_set=True,
file_size_extra=True,
compress_size_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_compress_size_extra))
self.assertIn('compress size', str(e.exception).lower())
# zip64 compress size present, no fields in extra, expecting one,
# equals missing compress size.
missing_compress_size_extra = self.make_zip64_file(
compress_size_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_compress_size_extra))
self.assertIn('compress size', str(e.exception).lower())
# zip64 file size present, zip64 compress size present, zip64 header
# offset present, two fields in extra, expecting three, equals missing
# header offset
missing_header_offset_extra = self.make_zip64_file(
file_size_64_set=True,
file_size_extra=True,
compress_size_64_set=True,
compress_size_extra=True,
header_offset_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
self.assertIn('header offset', str(e.exception).lower())
# zip64 compress size present, zip64 header offset present, one field
# in extra, expecting two, equals missing header offset
missing_header_offset_extra = self.make_zip64_file(
file_size_64_set=False,
compress_size_64_set=True,
compress_size_extra=True,
header_offset_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
self.assertIn('header offset', str(e.exception).lower())
# zip64 file size present, zip64 header offset present, one field in
# extra, expecting two, equals missing header offset
missing_header_offset_extra = self.make_zip64_file(
file_size_64_set=True,
file_size_extra=True,
compress_size_64_set=False,
header_offset_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
self.assertIn('header offset', str(e.exception).lower())
# zip64 header offset present, no fields in extra, expecting one,
# equals missing header offset
missing_header_offset_extra = self.make_zip64_file(
file_size_64_set=False,
compress_size_64_set=False,
header_offset_64_set=True,
)
with self.assertRaises(zipfile.BadZipFile) as e:
zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
self.assertIn('header offset', str(e.exception).lower())
def test_generated_valid_zip64_extra(self):
# These values are what is set in the make_zip64_file method.
expected_file_size = 8
expected_compress_size = 8
expected_header_offset = 0
expected_content = b"test1234"
# Loop through the various valid combinations of zip64 masks
# present and extra fields present.
params = (
{"file_size_64_set": True, "file_size_extra": True},
{"compress_size_64_set": True, "compress_size_extra": True},
{"header_offset_64_set": True, "header_offset_extra": True},
)
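        # itertools.combinations below yields every non-empty subset of the
        # three mask/extra pairs (7 cases in total), so each valid mix of
        # zip64 fields is parsed back through ZipFile once.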
for r in range(1, len(params) + 1):
for combo in itertools.combinations(params, r):
kwargs = {}
for c in combo:
kwargs.update(c)
with zipfile.ZipFile(io.BytesIO(self.make_zip64_file(**kwargs))) as zf:
zinfo = zf.infolist()[0]
self.assertEqual(zinfo.file_size, expected_file_size)
self.assertEqual(zinfo.compress_size, expected_compress_size)
self.assertEqual(zinfo.header_offset, expected_header_offset)
self.assertEqual(zf.read(zinfo), expected_content)
@requires_zlib()
class DeflateTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2()
class Bzip2TestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma()
class LzmaTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
class AbstractWriterTests:
def tearDown(self):
unlink(TESTFN2)
def test_close_after_close(self):
data = b'content'
with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipf:
w = zipf.open('test', 'w')
w.write(data)
w.close()
self.assertTrue(w.closed)
w.close()
self.assertTrue(w.closed)
self.assertEqual(zipf.read('test'), data)
def test_write_after_close(self):
data = b'content'
with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipf:
w = zipf.open('test', 'w')
w.write(data)
w.close()
self.assertTrue(w.closed)
self.assertRaises(ValueError, w.write, b'')
self.assertEqual(zipf.read('test'), data)
class StoredWriterTests(AbstractWriterTests, unittest.TestCase):
compression = zipfile.ZIP_STORED
@requires_zlib()
class DeflateWriterTests(AbstractWriterTests, unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2()
class Bzip2WriterTests(AbstractWriterTests, unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma()
class LzmaWriterTests(AbstractWriterTests, unittest.TestCase):
compression = zipfile.ZIP_LZMA
class PyZipFileTests(unittest.TestCase):
def assertCompiledIn(self, name, namelist):
if name + 'o' not in namelist:
self.assertIn(name + 'c', namelist)
def requiresWriteAccess(self, path):
# effective_ids unavailable on windows
if not os.access(path, os.W_OK,
effective_ids=os.access in os.supports_effective_ids):
self.skipTest('requires write access to the installed location')
filename = os.path.join(path, 'test_zipfile.try')
try:
fd = os.open(filename, os.O_WRONLY | os.O_CREAT)
os.close(fd)
except Exception:
self.skipTest('requires write access to the installed location')
unlink(filename)
def test_write_pyfile(self):
self.requiresWriteAccess(os.path.dirname(__file__))
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
fn = __file__
if fn.endswith('.pyc'):
path_split = fn.split(os.sep)
if os.altsep is not None:
path_split.extend(fn.split(os.altsep))
if '__pycache__' in path_split:
fn = importlib.util.source_from_cache(fn)
else:
fn = fn[:-1]
zipfp.writepy(fn)
bn = os.path.basename(fn)
self.assertNotIn(bn, zipfp.namelist())
self.assertCompiledIn(bn, zipfp.namelist())
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
fn = __file__
if fn.endswith('.pyc'):
fn = fn[:-1]
zipfp.writepy(fn, "testpackage")
bn = "%s/%s" % ("testpackage", os.path.basename(fn))
self.assertNotIn(bn, zipfp.namelist())
self.assertCompiledIn(bn, zipfp.namelist())
def test_write_python_package(self):
import email
packagedir = os.path.dirname(email.__file__)
self.requiresWriteAccess(packagedir)
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(packagedir)
# Check for a couple of modules at different levels of the
# hierarchy
names = zipfp.namelist()
self.assertCompiledIn('email/__init__.py', names)
self.assertCompiledIn('email/mime/text.py', names)
def test_write_filtered_python_package(self):
import test
packagedir = os.path.dirname(test.__file__)
self.requiresWriteAccess(packagedir)
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
# first make sure that the test folder gives error messages
# (on the badsyntax_... files)
with captured_stdout() as reportSIO:
zipfp.writepy(packagedir)
reportStr = reportSIO.getvalue()
self.assertTrue('SyntaxError' in reportStr)
# then check that the filter works on the whole package
with captured_stdout() as reportSIO:
zipfp.writepy(packagedir, filterfunc=lambda whatever: False)
reportStr = reportSIO.getvalue()
self.assertTrue('SyntaxError' not in reportStr)
# then check that the filter works on individual files
def filter(path):
return not os.path.basename(path).startswith("bad")
with captured_stdout() as reportSIO, self.assertWarns(UserWarning):
zipfp.writepy(packagedir, filterfunc=filter)
reportStr = reportSIO.getvalue()
if reportStr:
print(reportStr)
self.assertTrue('SyntaxError' not in reportStr)
def test_write_with_optimization(self):
import email
packagedir = os.path.dirname(email.__file__)
self.requiresWriteAccess(packagedir)
optlevel = 1 if __debug__ else 0
ext = '.pyc'
with TemporaryFile() as t, \
zipfile.PyZipFile(t, "w", optimize=optlevel) as zipfp:
zipfp.writepy(packagedir)
names = zipfp.namelist()
self.assertIn('email/__init__' + ext, names)
self.assertIn('email/mime/text' + ext, names)
def test_write_python_directory(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with open(os.path.join(TESTFN2, "mod2.txt"), "w") as fp:
fp.write("bla bla bla\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(TESTFN2)
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
self.assertCompiledIn('mod2.py', names)
self.assertNotIn('mod2.txt', names)
finally:
rmtree(TESTFN2)
def test_write_python_directory_filtered(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(TESTFN2, filterfunc=lambda fn:
not fn.endswith('mod2.py'))
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
self.assertNotIn('mod2.py', names)
finally:
rmtree(TESTFN2)
def test_write_non_pyfile(self):
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
with open(TESTFN, 'w') as f:
f.write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
unlink(TESTFN)
def test_write_pyfile_bad_syntax(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("Bad syntax in python file\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
# syntax errors are printed to stdout
with captured_stdout() as s:
zipfp.writepy(os.path.join(TESTFN2, "mod1.py"))
self.assertIn("SyntaxError", s.getvalue())
                # since the file could not be compiled, the .py source is
                # included rather than a .pyc
names = zipfp.namelist()
self.assertIn('mod1.py', names)
self.assertNotIn('mod1.pyc', names)
finally:
rmtree(TESTFN2)
def test_write_pathlike(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(pathlib.Path(TESTFN2) / "mod1.py")
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
finally:
rmtree(TESTFN2)
class ExtractTests(unittest.TestCase):
def make_test_file(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
def test_extract(self):
with temp_cwd():
self.make_test_file()
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
with open(writtenfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(writtenfile)
def _test_extract_with_target(self, target):
self.make_test_file()
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath, target)
# make sure it was written to the right place
correctfile = os.path.join(target, fpath)
correctfile = os.path.normpath(correctfile)
self.assertTrue(os.path.samefile(writtenfile, correctfile), (writtenfile, target))
# make sure correct data is in correct file
with open(writtenfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(writtenfile)
unlink(TESTFN2)
def test_extract_with_target(self):
with temp_dir() as extdir:
self._test_extract_with_target(extdir)
def test_extract_with_target_pathlike(self):
with temp_dir() as extdir:
self._test_extract_with_target(pathlib.Path(extdir))
def test_extract_all(self):
with temp_cwd():
self.make_test_file()
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
outfile = os.path.join(os.getcwd(), fpath)
with open(outfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(outfile)
def _test_extract_all_with_target(self, target):
self.make_test_file()
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
zipfp.extractall(target)
for fpath, fdata in SMALL_TEST_DATA:
outfile = os.path.join(target, fpath)
with open(outfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(outfile)
unlink(TESTFN2)
def test_extract_all_with_target(self):
with temp_dir() as extdir:
self._test_extract_all_with_target(extdir)
def test_extract_all_with_target_pathlike(self):
with temp_dir() as extdir:
self._test_extract_all_with_target(pathlib.Path(extdir))
def check_file(self, filename, content):
self.assertTrue(os.path.isfile(filename))
with open(filename, 'rb') as f:
self.assertEqual(f.read(), content)
def test_sanitize_windows_name(self):
san = zipfile.ZipFile._sanitize_windows_name
# Passing pathsep in allows this test to work regardless of platform.
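        # _sanitize_windows_name maps the characters that are illegal in
        # Windows file names (: < > | " ? *) to underscores, strips trailing
        # dots from each path component, and drops empty components.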
self.assertEqual(san(r',,?,C:,foo,bar/z', ','), r'_,C_,foo,bar/z')
self.assertEqual(san(r'a\b,c<d>e|f"g?h*i', ','), r'a\b,c_d_e_f_g_h_i')
self.assertEqual(san('../../foo../../ba..r', '/'), r'foo/ba..r')
def test_extract_hackers_arcnames_common_cases(self):
common_hacknames = [
('../foo/bar', 'foo/bar'),
('foo/../bar', 'foo/bar'),
('foo/../../bar', 'foo/bar'),
('foo/bar/..', 'foo/bar'),
('./../foo/bar', 'foo/bar'),
('/foo/bar', 'foo/bar'),
('/foo/../bar', 'foo/bar'),
('/foo/../../bar', 'foo/bar'),
]
self._test_extract_hackers_arcnames(common_hacknames)
@unittest.skipIf(os.path.sep != '\\', 'Requires \\ as path separator.')
def test_extract_hackers_arcnames_windows_only(self):
"""Test combination of path fixing and windows name sanitization."""
windows_hacknames = [
(r'..\foo\bar', 'foo/bar'),
(r'..\/foo\/bar', 'foo/bar'),
(r'foo/\..\/bar', 'foo/bar'),
(r'foo\/../\bar', 'foo/bar'),
(r'C:foo/bar', 'foo/bar'),
(r'C:/foo/bar', 'foo/bar'),
(r'C://foo/bar', 'foo/bar'),
(r'C:\foo\bar', 'foo/bar'),
(r'//conky/mountpoint/foo/bar', 'foo/bar'),
(r'\\conky\mountpoint\foo\bar', 'foo/bar'),
(r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//?/C:/foo/bar', 'foo/bar'),
(r'\\?\C:\foo\bar', 'foo/bar'),
(r'C:/../C:/foo/bar', 'C_/foo/bar'),
(r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
('../../foo../../ba..r', 'foo/ba..r'),
]
self._test_extract_hackers_arcnames(windows_hacknames)
@unittest.skipIf(os.path.sep != '/', r'Requires / as path separator.')
def test_extract_hackers_arcnames_posix_only(self):
posix_hacknames = [
('//foo/bar', 'foo/bar'),
('../../foo../../ba..r', 'foo../ba..r'),
(r'foo/..\bar', r'foo/..\bar'),
]
self._test_extract_hackers_arcnames(posix_hacknames)
def _test_extract_hackers_arcnames(self, hacknames):
for arcname, fixedname in hacknames:
content = b'foobar' + arcname.encode()
with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
zinfo = zipfile.ZipInfo()
# preserve backslashes
zinfo.filename = arcname
zinfo.external_attr = 0o600 << 16
zipfp.writestr(zinfo, content)
arcname = arcname.replace(os.sep, "/")
targetpath = os.path.join('target', 'subdir', 'subsub')
correctfile = os.path.join(targetpath, *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname, targetpath)
self.assertEqual(writtenfile, correctfile,
msg='extract %r: %r != %r' %
(arcname, writtenfile, correctfile))
self.check_file(correctfile, content)
rmtree('target')
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall(targetpath)
self.check_file(correctfile, content)
rmtree('target')
correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname)
self.assertEqual(writtenfile, correctfile,
msg="extract %r" % arcname)
self.check_file(correctfile, content)
rmtree(fixedname.split('/')[0])
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall()
self.check_file(correctfile, content)
rmtree(fixedname.split('/')[0])
unlink(TESTFN2)
class OtherTests(unittest.TestCase):
def test_open_via_zip_info(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("name", "foo")
with self.assertWarns(UserWarning):
zipfp.writestr("name", "bar")
self.assertEqual(zipfp.namelist(), ["name"] * 2)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
infos = zipfp.infolist()
data = b""
for info in infos:
with zipfp.open(info) as zipopen:
data += zipopen.read()
self.assertIn(data, {b"foobar", b"barfoo"})
data = b""
for info in infos:
data += zipfp.read(info)
self.assertIn(data, {b"foobar", b"barfoo"})
def test_writestr_extended_local_header_issue1202(self):
with zipfile.ZipFile(TESTFN2, 'w') as orig_zip:
for data in 'abcdefghijklmnop':
zinfo = zipfile.ZipInfo(data)
zinfo.flag_bits |= 0x08 # Include an extended local header.
orig_zip.writestr(zinfo, data)
def test_close(self):
"""Check that the zipfile is closed after the 'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
def test_close_on_exception(self):
"""Check that the zipfile is closed if an exception is raised in the
'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, "r") as zipfp2:
raise zipfile.BadZipFile()
except zipfile.BadZipFile:
self.assertIsNone(zipfp2.fp, 'zipfp is not closed')
def test_unsupported_version(self):
# File has an extract_version of 120
data = (b'PK\x03\x04x\x00\x00\x00\x00\x00!p\xa1@\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00xPK\x01\x02x\x03x\x00\x00\x00\x00'
b'\x00!p\xa1@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00xPK\x05\x06'
b'\x00\x00\x00\x00\x01\x00\x01\x00/\x00\x00\x00\x1f\x00\x00\x00\x00\x00')
self.assertRaises(NotImplementedError, zipfile.ZipFile,
io.BytesIO(data), 'r')
@requires_zlib()
def test_read_unicode_filenames(self):
# bug #10801
fname = findfile('zip_cp437_header.zip')
with zipfile.ZipFile(fname) as zipfp:
for name in zipfp.namelist():
zipfp.open(name).close()
def test_write_unicode_filenames(self):
with zipfile.ZipFile(TESTFN, "w") as zf:
zf.writestr("foo.txt", "Test for unicode filename")
zf.writestr("\xf6.txt", "Test for unicode filename")
self.assertIsInstance(zf.infolist()[0].filename, str)
with zipfile.ZipFile(TESTFN, "r") as zf:
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, "\xf6.txt")
def test_read_after_write_unicode_filenames(self):
with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
zipfp.writestr('приклад', b'sample')
self.assertEqual(zipfp.read('приклад'), b'sample')
def test_exclusive_create_zip_file(self):
"""Test exclusive creating a new zipfile."""
unlink(TESTFN2)
filename = 'testfile.txt'
content = b'hello, world. this is some content.'
with zipfile.ZipFile(TESTFN2, "x", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr(filename, content)
with self.assertRaises(FileExistsError):
zipfile.ZipFile(TESTFN2, "x", zipfile.ZIP_STORED)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertEqual(zipfp.namelist(), [filename])
self.assertEqual(zipfp.read(filename), content)
def test_create_non_existent_file_for_append(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = b'hello, world. this is some content.'
try:
with zipfile.ZipFile(TESTFN, 'a') as zf:
zf.writestr(filename, content)
except OSError:
self.fail('Could not append data to a non-existent zip file.')
self.assertTrue(os.path.exists(TESTFN))
with zipfile.ZipFile(TESTFN, 'r') as zf:
self.assertEqual(zf.read(filename), content)
def test_close_erroneous_file(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the
# traceback holds a reference to the ZipFile object and, indirectly,
# the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipFile:
pass
def test_is_zip_erroneous_file(self):
"""Check that is_zipfile() correctly identifies non-zip files."""
# - passing a filename
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(TESTFN))
# - passing a path-like object
self.assertFalse(zipfile.is_zipfile(pathlib.Path(TESTFN)))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertFalse(zipfile.is_zipfile(fp))
# - passing a file-like object
fp = io.BytesIO()
fp.write(b"this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertFalse(zipfile.is_zipfile(fp))
def test_damaged_zipfile(self):
"""Check that zipfiles with missing bytes at the end raise BadZipFile."""
# - Create a valid zip file
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
zipfiledata = fp.getvalue()
# - Now create copies of it missing the last N bytes and make sure
# a BadZipFile exception is raised when we try to open it
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, fp)
def test_is_zip_valid_file(self):
"""Check that is_zipfile() correctly identifies zip files."""
# - passing a filename
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
self.assertTrue(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
zip_contents = fp.read()
# - passing a file-like object
fp = io.BytesIO()
fp.write(zip_contents)
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertTrue(zipfile.is_zipfile(fp))
def test_non_existent_file_raises_OSError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(OSError, zipfile.ZipFile, TESTFN)
def test_empty_file_raises_BadZipFile(self):
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
with open(TESTFN, 'w') as fp:
fp.write("short file")
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
def test_closed_zip_raises_ValueError(self):
"""Verify that testzip() doesn't swallow inappropriate exceptions."""
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# This is correct; calling .read on a closed ZipFile should raise
# a ValueError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(ValueError, zipf.read, "foo.txt")
self.assertRaises(ValueError, zipf.open, "foo.txt")
self.assertRaises(ValueError, zipf.testzip)
self.assertRaises(ValueError, zipf.writestr, "bogus.txt", "bogus")
with open(TESTFN, 'w') as f:
f.write('zipfile test data')
self.assertRaises(ValueError, zipf.write, TESTFN)
def test_bad_constructor_mode(self):
"""Check that bad modes passed to ZipFile constructor are caught."""
self.assertRaises(ValueError, zipfile.ZipFile, TESTFN, "q")
def test_bad_open_mode(self):
"""Check that bad modes passed to ZipFile.open are caught."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(ValueError, zipf.open, "foo.txt", "q")
# universal newlines support is removed
self.assertRaises(ValueError, zipf.open, "foo.txt", "U")
self.assertRaises(ValueError, zipf.open, "foo.txt", "rU")
def test_read0(self):
"""Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn't advance file pointer."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
with zipf.open("foo.txt") as f:
for i in range(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), b'')
self.assertEqual(f.read(), b"O, for a Muse of Fire!")
def test_open_non_existent_item(self):
"""Check that attempting to call open() for an item that doesn't
exist in the archive raises a RuntimeError."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_bad_compression_mode(self):
"""Check that bad compression methods passed to ZipFile.open are
caught."""
self.assertRaises(NotImplementedError, zipfile.ZipFile, TESTFN, "w", -1)
def test_unsupported_compression(self):
# data is declared as shrunk, but actually deflated
data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
self.assertRaises(NotImplementedError, zipf.open, 'x')
def test_null_byte_in_filename(self):
"""Check that a filename containing a null byte is properly
terminated."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt\x00qqq", b"O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_struct_sizes(self):
"""Check that ZIP internal structure sizes are calculated correctly."""
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def test_comments(self):
"""Check that comments on the archive are handled properly."""
# check default comment is empty
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertEqual(zipf.comment, b'')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, b'')
# check a simple short comment
comment = b'Bravely taking to his feet, he beat a very brave retreat.'
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
            self.assertEqual(zipfr.comment, comment)
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in range((1 << 16)-1)])
comment2 = comment2.encode("ascii")
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check a comment that is too long is truncated
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
with self.assertWarns(UserWarning):
zipf.comment = comment2 + b'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check that comments are correctly modified in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"an updated comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"an updated comment")
# check that comments are correctly shortened in append mode
# and the file is indeed truncated
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment that's longer"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
original_zip_size = os.path.getsize(TESTFN)
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"shorter comment"
self.assertTrue(original_zip_size > os.path.getsize(TESTFN))
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"shorter comment")
def test_unicode_comment(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with self.assertRaises(TypeError):
zipf.comment = "this is an error"
def test_change_comment_in_empty_archive(self):
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertFalse(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_change_comment_in_nonempty_archive(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertTrue(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_empty_zipfile(self):
        # Check that creating a file in 'w' or 'a' mode and closing without
        # adding any files to the archive creates a valid empty ZIP file
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.close()
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
except zipfile.BadZipFile:
self.fail("Unable to create empty ZIP file in 'w' mode")
zipf = zipfile.ZipFile(TESTFN, mode="a")
zipf.close()
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
        except zipfile.BadZipFile:
self.fail("Unable to create empty ZIP file in 'a' mode")
def test_open_empty_file(self):
# Issue 1710703: Check that opening a file with less than 22 bytes
# raises a BadZipFile exception (rather than the previously unhelpful
# OSError)
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN, 'r')
def test_create_zipinfo_before_1980(self):
self.assertRaises(ValueError,
zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
def test_create_empty_zipinfo_repr(self):
"""Before bpo-26185, repr() on empty ZipInfo object was failing."""
zi = zipfile.ZipInfo(filename="empty")
self.assertEqual(repr(zi), "<ZipInfo filename='empty' file_size=0>")
def test_create_empty_zipinfo_default_attributes(self):
"""Ensure all required attributes are set."""
zi = zipfile.ZipInfo()
self.assertEqual(zi.orig_filename, "NoName")
self.assertEqual(zi.filename, "NoName")
self.assertEqual(zi.date_time, (1980, 1, 1, 0, 0, 0))
self.assertEqual(zi.compress_type, zipfile.ZIP_STORED)
self.assertEqual(zi.comment, b"")
self.assertEqual(zi.extra, b"")
self.assertIn(zi.create_system, (0, 3))
self.assertEqual(zi.create_version, zipfile.DEFAULT_VERSION)
self.assertEqual(zi.extract_version, zipfile.DEFAULT_VERSION)
self.assertEqual(zi.reserved, 0)
self.assertEqual(zi.flag_bits, 0)
self.assertEqual(zi.volume, 0)
self.assertEqual(zi.internal_attr, 0)
self.assertEqual(zi.external_attr, 0)
# Before bpo-26185, both were missing
self.assertEqual(zi.file_size, 0)
self.assertEqual(zi.compress_size, 0)
def test_zipfile_with_short_extra_field(self):
"""If an extra field in the header is less than 4 bytes, skip it."""
zipdata = (
b'PK\x03\x04\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e'
b'\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x03\x00ab'
b'c\x00\x00\x00APK\x01\x02\x14\x03\x14\x00\x00\x00\x00'
b'\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00'
b'\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00'
b'\x00\x00\x00abc\x00\x00PK\x05\x06\x00\x00\x00\x00'
b'\x01\x00\x01\x003\x00\x00\x00%\x00\x00\x00\x00\x00'
)
with zipfile.ZipFile(io.BytesIO(zipdata), 'r') as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertIsNone(zipf.testzip())
def test_open_conflicting_handles(self):
# It's only possible to open one writable file handle at a time
msg1 = b"It's fun to charter an accountant!"
msg2 = b"And sail the wide accountant sea"
msg3 = b"To find, explore the funds offshore"
with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipf:
with zipf.open('foo', mode='w') as w2:
w2.write(msg1)
with zipf.open('bar', mode='w') as w1:
with self.assertRaises(ValueError):
zipf.open('handle', mode='w')
with self.assertRaises(ValueError):
zipf.open('foo', mode='r')
with self.assertRaises(ValueError):
zipf.writestr('str', 'abcde')
with self.assertRaises(ValueError):
zipf.write(__file__, 'file')
with self.assertRaises(ValueError):
zipf.close()
w1.write(msg2)
with zipf.open('baz', mode='w') as w2:
w2.write(msg3)
with zipfile.ZipFile(TESTFN2, 'r') as zipf:
self.assertEqual(zipf.read('foo'), msg1)
self.assertEqual(zipf.read('bar'), msg2)
self.assertEqual(zipf.read('baz'), msg3)
self.assertEqual(zipf.namelist(), ['foo', 'bar', 'baz'])
def test_seek_tell(self):
# Test seek functionality
txt = b"Where's Bruce?"
bloc = txt.find(b"Bruce")
# Check seek on a file
with zipfile.ZipFile(TESTFN, "w") as zipf:
zipf.writestr("foo.txt", txt)
with zipfile.ZipFile(TESTFN, "r") as zipf:
with zipf.open("foo.txt", "r") as fp:
fp.seek(bloc, os.SEEK_SET)
self.assertEqual(fp.tell(), bloc)
fp.seek(-bloc, os.SEEK_CUR)
self.assertEqual(fp.tell(), 0)
fp.seek(bloc, os.SEEK_CUR)
self.assertEqual(fp.tell(), bloc)
self.assertEqual(fp.read(5), txt[bloc:bloc+5])
fp.seek(0, os.SEEK_END)
self.assertEqual(fp.tell(), len(txt))
fp.seek(0, os.SEEK_SET)
self.assertEqual(fp.tell(), 0)
# Check seek on memory file
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", txt)
with zipfile.ZipFile(data, mode="r") as zipf:
with zipf.open("foo.txt", "r") as fp:
fp.seek(bloc, os.SEEK_SET)
self.assertEqual(fp.tell(), bloc)
fp.seek(-bloc, os.SEEK_CUR)
self.assertEqual(fp.tell(), 0)
fp.seek(bloc, os.SEEK_CUR)
self.assertEqual(fp.tell(), bloc)
self.assertEqual(fp.read(5), txt[bloc:bloc+5])
fp.seek(0, os.SEEK_END)
self.assertEqual(fp.tell(), len(txt))
fp.seek(0, os.SEEK_SET)
self.assertEqual(fp.tell(), 0)
@requires_bz2()
def test_decompress_without_3rd_party_library(self):
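        # Writing works while bz2 is importable; patching zipfile.bz2 to None
        # below simulates a missing module, so extraction must then fail with
        # a RuntimeError instead of producing data.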
data = b'PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
zip_file = io.BytesIO(data)
with zipfile.ZipFile(zip_file, 'w', compression=zipfile.ZIP_BZIP2) as zf:
zf.writestr('a.txt', b'a')
with mock.patch('zipfile.bz2', None):
with zipfile.ZipFile(zip_file) as zf:
self.assertRaises(RuntimeError, zf.extract, 'a.txt')
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class AbstractBadCrcTests:
def test_testzip_with_bad_crc(self):
"""Tests that files with bad CRCs return their name from testzip."""
zipdata = self.zip_with_bad_crc
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertEqual('afile', zipf.testzip())
def test_read_with_bad_crc(self):
"""Tests that files with bad CRCs raise a BadZipFile exception when read."""
zipdata = self.zip_with_bad_crc
# Using ZipFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
self.assertRaises(zipfile.BadZipFile, zipf.read, 'afile')
# Using ZipExtFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
self.assertRaises(zipfile.BadZipFile, corrupt_file.read)
# Same with small reads (in order to exercise the buffering logic)
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
corrupt_file.MIN_READ_SIZE = 2
with self.assertRaises(zipfile.BadZipFile):
while corrupt_file.read(2):
pass
class StoredBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_STORED
zip_with_bad_crc = (
b'PK\003\004\024\0\0\0\0\0 \213\212;:r'
b'\253\377\f\0\0\0\f\0\0\0\005\0\0\000af'
b'ilehello,AworldP'
b'K\001\002\024\003\024\0\0\0\0\0 \213\212;:'
b'r\253\377\f\0\0\0\f\0\0\0\005\0\0\0\0'
b'\0\0\0\0\0\0\0\200\001\0\0\0\000afi'
b'lePK\005\006\0\0\0\0\001\0\001\0003\000'
b'\0\0/\0\0\0\0\0')
@requires_zlib()
class DeflateBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
zip_with_bad_crc = (
b'PK\x03\x04\x14\x00\x00\x00\x08\x00n}\x0c=FA'
b'KE\x10\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\xc9\xa0'
b'=\x13\x00PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00n'
b'}\x0c=FAKE\x10\x00\x00\x00n\x00\x00\x00\x05'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00'
b'\x00afilePK\x05\x06\x00\x00\x00\x00\x01\x00'
b'\x01\x003\x00\x00\x003\x00\x00\x00\x00\x00')
@requires_bz2()
class Bzip2BadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_BZIP2
zip_with_bad_crc = (
b'PK\x03\x04\x14\x03\x00\x00\x0c\x00nu\x0c=FA'
b'KE8\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ileBZh91AY&SY\xd4\xa8\xca'
b'\x7f\x00\x00\x0f\x11\x80@\x00\x06D\x90\x80 \x00 \xa5'
b'P\xd9!\x03\x03\x13\x13\x13\x89\xa9\xa9\xc2u5:\x9f'
b'\x8b\xb9"\x9c(HjTe?\x80PK\x01\x02\x14'
b'\x03\x14\x03\x00\x00\x0c\x00nu\x0c=FAKE8'
b'\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00 \x80\x80\x81\x00\x00\x00\x00afilePK'
b'\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00\x00[\x00'
b'\x00\x00\x00\x00')
@requires_lzma()
class LzmaBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_LZMA
zip_with_bad_crc = (
b'PK\x03\x04\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\t\x04\x05\x00]\x00\x00\x00\x04\x004\x19I'
b'\xee\x8d\xe9\x17\x89:3`\tq!.8\x00PK'
b'\x01\x02\x14\x03\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00 \x80\x80\x81\x00\x00\x00\x00afil'
b'ePK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00'
b'\x00>\x00\x00\x00\x00\x00')
class DecryptionTests(unittest.TestCase):
"""Check that ZIP decryption works. Since the library does not
support encryption at the moment, we use a pre-generated encrypted
ZIP file."""
data = (
b'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
b'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
b'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
b'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
b'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
b'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
b'\x00\x00L\x00\x00\x00\x00\x00' )
data2 = (
b'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
b'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
b'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
b'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
b'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
b'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
b'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
b'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )
plain = b'zipfile.py encryption test'
plain2 = b'\x00'*512
def setUp(self):
with open(TESTFN, "wb") as fp:
fp.write(self.data)
self.zip = zipfile.ZipFile(TESTFN, "r")
with open(TESTFN2, "wb") as fp:
fp.write(self.data2)
self.zip2 = zipfile.ZipFile(TESTFN2, "r")
def tearDown(self):
self.zip.close()
os.unlink(TESTFN)
self.zip2.close()
os.unlink(TESTFN2)
def test_no_password(self):
        # Reading the encrypted file without a password
        # must raise a RuntimeError
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
def test_bad_password(self):
self.zip.setpassword(b"perl")
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.zip2.setpassword(b"perl")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
@requires_zlib()
def test_good_password(self):
self.zip.setpassword(b"python")
self.assertEqual(self.zip.read("test.txt"), self.plain)
self.zip2.setpassword(b"12345")
self.assertEqual(self.zip2.read("zero"), self.plain2)
def test_unicode_password(self):
self.assertRaises(TypeError, self.zip.setpassword, "unicode")
self.assertRaises(TypeError, self.zip.read, "test.txt", "python")
self.assertRaises(TypeError, self.zip.open, "test.txt", pwd="python")
self.assertRaises(TypeError, self.zip.extract, "test.txt", pwd="python")
def test_seek_tell(self):
self.zip.setpassword(b"python")
txt = self.plain
test_word = b'encryption'
bloc = txt.find(test_word)
bloc_len = len(test_word)
with self.zip.open("test.txt", "r") as fp:
fp.seek(bloc, os.SEEK_SET)
self.assertEqual(fp.tell(), bloc)
fp.seek(-bloc, os.SEEK_CUR)
self.assertEqual(fp.tell(), 0)
fp.seek(bloc, os.SEEK_CUR)
self.assertEqual(fp.tell(), bloc)
self.assertEqual(fp.read(bloc_len), txt[bloc:bloc+bloc_len])
# Make sure that the second read after seeking back beyond
# _readbuffer returns the same content (ie. rewind to the start of
# the file to read forward to the required position).
old_read_size = fp.MIN_READ_SIZE
fp.MIN_READ_SIZE = 1
fp._readbuffer = b''
fp._offset = 0
fp.seek(0, os.SEEK_SET)
self.assertEqual(fp.tell(), 0)
fp.seek(bloc, os.SEEK_CUR)
self.assertEqual(fp.read(bloc_len), txt[bloc:bloc+bloc_len])
fp.MIN_READ_SIZE = old_read_size
fp.seek(0, os.SEEK_END)
self.assertEqual(fp.tell(), len(txt))
fp.seek(0, os.SEEK_SET)
self.assertEqual(fp.tell(), 0)
# Read the file completely to definitely call any eof integrity
# checks (crc) and make sure they still pass.
fp.read()
class AbstractTestsWithRandomBinaryFiles:
@classmethod
def setUpClass(cls):
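        # Build a blob of random binary data (packed little-endian floats,
        # roughly 64 KiB to 260 KiB, varying between runs) so that reads are
        # likely to cross internal buffer boundaries in unpredictable places.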
datacount = randint(16, 64)*1024 + randint(1, 1024)
cls.data = b''.join(struct.pack('<f', random()*randint(-1000, 1000))
for i in range(datacount))
def setUp(self):
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
testdata = zipfp.read(TESTFN)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
def test_read(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
testdata1 = b''.join(zipdata1)
self.assertEqual(len(testdata1), len(self.data))
self.assertEqual(testdata1, self.data)
testdata2 = b''.join(zipdata2)
self.assertEqual(len(testdata2), len(self.data))
self.assertEqual(testdata2, self.data)
def test_open(self):
for f in get_files(self):
self.zip_open_test(f, self.compression)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
testdata = b''.join(zipdata1)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
class StoredTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_STORED
@requires_zlib()
class DeflateTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2()
class Bzip2TestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma()
class LzmaTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
# Provide the tell() method but not seek()
class Tellable:
def __init__(self, fp):
self.fp = fp
self.offset = 0
def write(self, data):
n = self.fp.write(data)
self.offset += n
return n
def tell(self):
return self.offset
def flush(self):
self.fp.flush()
class Unseekable:
def __init__(self, fp):
self.fp = fp
def write(self, data):
return self.fp.write(data)
def flush(self):
self.fp.flush()
class UnseekableTests(unittest.TestCase):
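    # Each test runs three times: with the buffered stream passed through
    # unchanged (seekable), wrapped in Tellable (tell() but no seek()), and
    # wrapped in Unseekable (neither).  The initial b'abc' write lets the
    # assertions check that the archive is appended at the current stream
    # position rather than at offset 0.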
def test_writestr(self):
for wrapper in (lambda f: f), Tellable, Unseekable:
with self.subTest(wrapper=wrapper):
f = io.BytesIO()
f.write(b'abc')
bf = io.BufferedWriter(f)
with zipfile.ZipFile(wrapper(bf), 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr('ones', b'111')
zipfp.writestr('twos', b'222')
self.assertEqual(f.getvalue()[:5], b'abcPK')
with zipfile.ZipFile(f, mode='r') as zipf:
with zipf.open('ones') as zopen:
self.assertEqual(zopen.read(), b'111')
with zipf.open('twos') as zopen:
self.assertEqual(zopen.read(), b'222')
def test_write(self):
for wrapper in (lambda f: f), Tellable, Unseekable:
with self.subTest(wrapper=wrapper):
f = io.BytesIO()
f.write(b'abc')
bf = io.BufferedWriter(f)
with zipfile.ZipFile(wrapper(bf), 'w', zipfile.ZIP_STORED) as zipfp:
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'wb') as f2:
f2.write(b'111')
zipfp.write(TESTFN, 'ones')
with open(TESTFN, 'wb') as f2:
f2.write(b'222')
zipfp.write(TESTFN, 'twos')
self.assertEqual(f.getvalue()[:5], b'abcPK')
with zipfile.ZipFile(f, mode='r') as zipf:
with zipf.open('ones') as zopen:
self.assertEqual(zopen.read(), b'111')
with zipf.open('twos') as zopen:
self.assertEqual(zopen.read(), b'222')
def test_open_write(self):
for wrapper in (lambda f: f), Tellable, Unseekable:
with self.subTest(wrapper=wrapper):
f = io.BytesIO()
f.write(b'abc')
bf = io.BufferedWriter(f)
with zipfile.ZipFile(wrapper(bf), 'w', zipfile.ZIP_STORED) as zipf:
with zipf.open('ones', 'w') as zopen:
zopen.write(b'111')
with zipf.open('twos', 'w') as zopen:
zopen.write(b'222')
self.assertEqual(f.getvalue()[:5], b'abcPK')
with zipfile.ZipFile(f) as zipf:
self.assertEqual(zipf.read('ones'), b'111')
self.assertEqual(zipf.read('twos'), b'222')
@requires_zlib()
class TestsWithMultipleOpens(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = b'111' + randbytes(10000)
cls.data2 = b'222' + randbytes(10000)
def make_test_archive(self, f):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr('ones', self.data1)
zipfp.writestr('twos', self.data2)
def test_same_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
for f in get_files(self):
self.make_test_archive(f)
with zipfile.ZipFile(f, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, data2)
self.assertEqual(data1, self.data1)
def test_different_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
for f in get_files(self):
self.make_test_archive(f)
with zipfile.ZipFile(f, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_interleaved(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
for f in get_files(self):
self.make_test_archive(f)
with zipfile.ZipFile(f, mode="r") as zipf:
with zipf.open('ones') as zopen1:
data1 = zopen1.read(500)
with zipf.open('twos') as zopen2:
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_read_after_close(self):
for f in get_files(self):
self.make_test_archive(f)
with contextlib.ExitStack() as stack:
with zipfile.ZipFile(f, 'r') as zipf:
zopen1 = stack.enter_context(zipf.open('ones'))
zopen2 = stack.enter_context(zipf.open('twos'))
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_read_after_write(self):
for f in get_files(self):
with zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('ones', self.data1)
zipf.writestr('twos', self.data2)
with zipf.open('ones') as zopen1:
data1 = zopen1.read(500)
self.assertEqual(data1, self.data1[:500])
with zipfile.ZipFile(f, 'r') as zipf:
data1 = zipf.read('ones')
data2 = zipf.read('twos')
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_write_after_read(self):
for f in get_files(self):
with zipfile.ZipFile(f, "w", zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('ones', self.data1)
with zipf.open('ones') as zopen1:
zopen1.read(500)
zipf.writestr('twos', self.data2)
with zipfile.ZipFile(f, 'r') as zipf:
data1 = zipf.read('ones')
data2 = zipf.read('twos')
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_many_opens(self):
# Verify that read() and open() promptly close the file descriptor,
# and don't rely on the garbage collector to free resources.
self.make_test_archive(TESTFN2)
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
for x in range(100):
zipf.read('ones')
with zipf.open('ones') as zopen1:
pass
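        # If any of the 100 reads above leaked a descriptor, the next fd
        # handed out here would be well above 100.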
with open(os.devnull) as f:
self.assertLess(f.fileno(), 100)
def test_write_while_reading(self):
with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('ones', self.data1)
with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_DEFLATED) as zipf:
with zipf.open('ones', 'r') as r1:
data1 = r1.read(500)
with zipf.open('twos', 'w') as w1:
w1.write(self.data2)
data1 += r1.read()
self.assertEqual(data1, self.data1)
with zipfile.ZipFile(TESTFN2) as zipf:
self.assertEqual(zipf.read('twos'), self.data2)
def tearDown(self):
unlink(TESTFN2)
class TestWithDirectory(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN2)
def test_extract_dir(self):
with zipfile.ZipFile(findfile("zipdir.zip")) as zipf:
zipf.extractall(TESTFN2)
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))
def test_bug_6050(self):
# Extraction should succeed if directories already exist
os.mkdir(os.path.join(TESTFN2, "a"))
self.test_extract_dir()
def test_write_dir(self):
dirpath = os.path.join(TESTFN2, "x")
os.mkdir(dirpath)
mode = os.stat(dirpath).st_mode & 0xFFFF
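        # external_attr stores the Unix mode in its upper 16 bits; 0x10 is
        # the MS-DOS directory attribute, so directory entries must carry
        # (mode << 16) | 0x10.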
with zipfile.ZipFile(TESTFN, "w") as zipf:
zipf.write(dirpath)
zinfo = zipf.filelist[0]
self.assertTrue(zinfo.filename.endswith("/x/"))
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
zipf.write(dirpath, "y")
zinfo = zipf.filelist[1]
            self.assertEqual(zinfo.filename, "y/")
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
with zipfile.ZipFile(TESTFN, "r") as zipf:
zinfo = zipf.filelist[0]
self.assertTrue(zinfo.filename.endswith("/x/"))
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
zinfo = zipf.filelist[1]
            self.assertEqual(zinfo.filename, "y/")
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
target = os.path.join(TESTFN2, "target")
os.mkdir(target)
zipf.extractall(target)
self.assertTrue(os.path.isdir(os.path.join(target, "y")))
self.assertEqual(len(os.listdir(target)), 2)
def test_writestr_dir(self):
os.mkdir(os.path.join(TESTFN2, "x"))
with zipfile.ZipFile(TESTFN, "w") as zipf:
zipf.writestr("x/", b'')
zinfo = zipf.filelist[0]
self.assertEqual(zinfo.filename, "x/")
self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10)
with zipfile.ZipFile(TESTFN, "r") as zipf:
zinfo = zipf.filelist[0]
self.assertTrue(zinfo.filename.endswith("x/"))
self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10)
target = os.path.join(TESTFN2, "target")
os.mkdir(target)
zipf.extractall(target)
self.assertTrue(os.path.isdir(os.path.join(target, "x")))
self.assertEqual(os.listdir(target), ["x"])
def tearDown(self):
rmtree(TESTFN2)
if os.path.exists(TESTFN):
unlink(TESTFN)
class ZipInfoTests(unittest.TestCase):
def test_from_file(self):
zi = zipfile.ZipInfo.from_file(__file__)
self.assertEqual(posixpath.basename(zi.filename), 'test_zipfile.py')
self.assertFalse(zi.is_dir())
self.assertEqual(zi.file_size, os.path.getsize(__file__))
def test_from_file_pathlike(self):
zi = zipfile.ZipInfo.from_file(pathlib.Path(__file__))
self.assertEqual(posixpath.basename(zi.filename), 'test_zipfile.py')
self.assertFalse(zi.is_dir())
self.assertEqual(zi.file_size, os.path.getsize(__file__))
def test_from_file_bytes(self):
zi = zipfile.ZipInfo.from_file(os.fsencode(__file__), 'test')
self.assertEqual(posixpath.basename(zi.filename), 'test')
self.assertFalse(zi.is_dir())
self.assertEqual(zi.file_size, os.path.getsize(__file__))
def test_from_file_fileno(self):
with open(__file__, 'rb') as f:
zi = zipfile.ZipInfo.from_file(f.fileno(), 'test')
self.assertEqual(posixpath.basename(zi.filename), 'test')
self.assertFalse(zi.is_dir())
self.assertEqual(zi.file_size, os.path.getsize(__file__))
def test_from_dir(self):
dirpath = os.path.dirname(os.path.abspath(__file__))
zi = zipfile.ZipInfo.from_file(dirpath, 'stdlib_tests')
self.assertEqual(zi.filename, 'stdlib_tests/')
self.assertTrue(zi.is_dir())
self.assertEqual(zi.compress_type, zipfile.ZIP_STORED)
self.assertEqual(zi.file_size, 0)
class CommandLineTest(unittest.TestCase):
def zipfilecmd(self, *args, **kwargs):
rc, out, err = script_helper.assert_python_ok('-m', 'zipfile', *args,
**kwargs)
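        # Normalize platform-specific line endings so the tests can compare
        # against b'\n'-delimited output.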
return out.replace(os.linesep.encode(), b'\n')
def zipfilecmd_failure(self, *args):
return script_helper.assert_python_failure('-m', 'zipfile', *args)
def test_bad_use(self):
rc, out, err = self.zipfilecmd_failure()
self.assertEqual(out, b'')
self.assertIn(b'usage', err.lower())
self.assertIn(b'error', err.lower())
self.assertIn(b'required', err.lower())
rc, out, err = self.zipfilecmd_failure('-l', '')
self.assertEqual(out, b'')
self.assertNotEqual(err.strip(), b'')
def test_test_command(self):
zip_name = findfile('zipdir.zip')
for opt in '-t', '--test':
out = self.zipfilecmd(opt, zip_name)
self.assertEqual(out.rstrip(), b'Done testing')
zip_name = findfile('testtar.tar')
rc, out, err = self.zipfilecmd_failure('-t', zip_name)
self.assertEqual(out, b'')
def test_list_command(self):
zip_name = findfile('zipdir.zip')
t = io.StringIO()
with zipfile.ZipFile(zip_name, 'r') as tf:
tf.printdir(t)
expected = t.getvalue().encode('ascii', 'backslashreplace')
for opt in '-l', '--list':
out = self.zipfilecmd(opt, zip_name,
PYTHONIOENCODING='ascii:backslashreplace')
self.assertEqual(out, expected)
@requires_zlib()
def test_create_command(self):
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as f:
f.write('test 1')
os.mkdir(TESTFNDIR)
self.addCleanup(rmtree, TESTFNDIR)
with open(os.path.join(TESTFNDIR, 'file.txt'), 'w') as f:
f.write('test 2')
files = [TESTFN, TESTFNDIR]
namelist = [TESTFN, TESTFNDIR + '/', TESTFNDIR + '/file.txt']
for opt in '-c', '--create':
try:
out = self.zipfilecmd(opt, TESTFN2, *files)
self.assertEqual(out, b'')
with zipfile.ZipFile(TESTFN2) as zf:
self.assertEqual(zf.namelist(), namelist)
self.assertEqual(zf.read(namelist[0]), b'test 1')
self.assertEqual(zf.read(namelist[2]), b'test 2')
finally:
unlink(TESTFN2)
def test_extract_command(self):
zip_name = findfile('zipdir.zip')
for opt in '-e', '--extract':
with temp_dir() as extdir:
out = self.zipfilecmd(opt, zip_name, extdir)
self.assertEqual(out, b'')
with zipfile.ZipFile(zip_name) as zf:
for zi in zf.infolist():
path = os.path.join(extdir,
zi.filename.replace('/', os.sep))
if zi.is_dir():
self.assertTrue(os.path.isdir(path))
else:
self.assertTrue(os.path.isfile(path))
with open(path, 'rb') as f:
self.assertEqual(f.read(), zf.read(zi))
class TestExecutablePrependedZip(unittest.TestCase):
"""Test our ability to open zip files with an executable prepended."""
def setUp(self):
self.exe_zip = findfile('exe_with_zip', subdir='ziptestdata')
self.exe_zip64 = findfile('exe_with_z64', subdir='ziptestdata')
def _test_zip_works(self, name):
# bpo28494 sanity check: ensure is_zipfile works on these.
self.assertTrue(zipfile.is_zipfile(name),
f'is_zipfile failed on {name}')
# Ensure we can operate on these via ZipFile.
with zipfile.ZipFile(name) as zipfp:
for n in zipfp.namelist():
data = zipfp.read(n)
self.assertIn(b'FAVORITE_NUMBER', data)
def test_read_zip_with_exe_prepended(self):
self._test_zip_works(self.exe_zip)
def test_read_zip64_with_exe_prepended(self):
self._test_zip_works(self.exe_zip64)
@unittest.skipUnless(sys.executable, 'sys.executable required.')
@unittest.skipUnless(os.access('/bin/bash', os.X_OK),
'Test relies on #!/bin/bash working.')
def test_execute_zip2(self):
output = subprocess.check_output([self.exe_zip, sys.executable])
self.assertIn(b'number in executable: 5', output)
@unittest.skipUnless(sys.executable, 'sys.executable required.')
@unittest.skipUnless(os.access('/bin/bash', os.X_OK),
'Test relies on #!/bin/bash working.')
def test_execute_zip64(self):
output = subprocess.check_output([self.exe_zip64, sys.executable])
self.assertIn(b'number in executable: 5', output)
# Poor man's technique to consume a (smallish) iterable.
consume = tuple
# from jaraco.itertools 5.0
class jaraco:
class itertools:
class Counter:
def __init__(self, i):
self.count = 0
self._orig_iter = iter(i)
def __iter__(self):
return self
def __next__(self):
result = next(self._orig_iter)
self.count += 1
return result
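# A usage sketch (not part of the test suite) for the Counter wrapper above: it
# forwards iteration unchanged while recording how many items were pulled.
#
#     counted = jaraco.itertools.Counter(iter(range(3)))
#     consume(counted)            # drain via tuple(), defined above
#     assert counted.count == 3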
def add_dirs(zf):
"""
Given a writable zip file zf, inject directory entries for
any directories implied by the presence of children.
"""
for name in zipfile.CompleteDirs._implied_dirs(zf.namelist()):
zf.writestr(name, b"")
return zf
def build_alpharep_fixture():
"""
Create a zip file with this structure:
.
├── a.txt
├── b
│ ├── c.txt
│ ├── d
│ │ └── e.txt
│ └── f.txt
└── g
└── h
└── i.txt
This fixture has the following key characteristics:
- a file at the root (a)
- a file two levels deep (b/d/e)
- multiple files in a directory (b/c, b/f)
- a directory containing only a directory (g/h)
"alpha" because it uses alphabet
"rep" because it's a representative example
"""
data = io.BytesIO()
zf = zipfile.ZipFile(data, "w")
zf.writestr("a.txt", b"content of a")
zf.writestr("b/c.txt", b"content of c")
zf.writestr("b/d/e.txt", b"content of e")
zf.writestr("b/f.txt", b"content of f")
zf.writestr("g/h/i.txt", b"content of i")
zf.filename = "alpharep.zip"
return zf
def pass_alpharep(meth):
"""
Given a method, wrap it in a for loop that invokes method
with each subtest.
"""
@functools.wraps(meth)
def wrapper(self):
for alpharep in self.zipfile_alpharep():
meth(self, alpharep=alpharep)
return wrapper
class TestPath(unittest.TestCase):
def setUp(self):
self.fixtures = contextlib.ExitStack()
self.addCleanup(self.fixtures.close)
def zipfile_alpharep(self):
with self.subTest():
yield build_alpharep_fixture()
with self.subTest():
yield add_dirs(build_alpharep_fixture())
def zipfile_ondisk(self, alpharep):
tmpdir = pathlib.Path(self.fixtures.enter_context(temp_dir()))
buffer = alpharep.fp
alpharep.close()
path = tmpdir / alpharep.filename
with path.open("wb") as strm:
strm.write(buffer.getvalue())
return path
@pass_alpharep
def test_iterdir_and_types(self, alpharep):
root = zipfile.Path(alpharep)
assert root.is_dir()
a, b, g = root.iterdir()
assert a.is_file()
assert b.is_dir()
assert g.is_dir()
c, f, d = b.iterdir()
assert c.is_file() and f.is_file()
(e,) = d.iterdir()
assert e.is_file()
(h,) = g.iterdir()
(i,) = h.iterdir()
assert i.is_file()
@pass_alpharep
def test_is_file_missing(self, alpharep):
root = zipfile.Path(alpharep)
assert not root.joinpath('missing.txt').is_file()
@pass_alpharep
def test_iterdir_on_file(self, alpharep):
root = zipfile.Path(alpharep)
a, b, g = root.iterdir()
with self.assertRaises(ValueError):
a.iterdir()
@pass_alpharep
def test_subdir_is_dir(self, alpharep):
root = zipfile.Path(alpharep)
assert (root / 'b').is_dir()
assert (root / 'b/').is_dir()
assert (root / 'g').is_dir()
assert (root / 'g/').is_dir()
@pass_alpharep
def test_open(self, alpharep):
root = zipfile.Path(alpharep)
a, b, g = root.iterdir()
with a.open() as strm:
data = strm.read()
assert data == "content of a"
def test_open_write(self):
"""
If the zipfile is open for write, it should be possible to
write bytes or text to it.
"""
zf = zipfile.Path(zipfile.ZipFile(io.BytesIO(), mode='w'))
with zf.joinpath('file.bin').open('wb') as strm:
strm.write(b'binary contents')
with zf.joinpath('file.txt').open('w') as strm:
strm.write('text file')
def test_open_extant_directory(self):
"""
Attempting to open a directory raises IsADirectoryError.
"""
zf = zipfile.Path(add_dirs(build_alpharep_fixture()))
with self.assertRaises(IsADirectoryError):
zf.joinpath('b').open()
@pass_alpharep
def test_open_binary_invalid_args(self, alpharep):
root = zipfile.Path(alpharep)
with self.assertRaises(ValueError):
root.joinpath('a.txt').open('rb', encoding='utf-8')
with self.assertRaises(ValueError):
root.joinpath('a.txt').open('rb', 'utf-8')
def test_open_missing_directory(self):
"""
Attempting to open a missing directory raises FileNotFoundError.
"""
zf = zipfile.Path(add_dirs(build_alpharep_fixture()))
with self.assertRaises(FileNotFoundError):
zf.joinpath('z').open()
@pass_alpharep
def test_read(self, alpharep):
root = zipfile.Path(alpharep)
a, b, g = root.iterdir()
assert a.read_text() == "content of a"
assert a.read_bytes() == b"content of a"
@pass_alpharep
def test_joinpath(self, alpharep):
root = zipfile.Path(alpharep)
a = root.joinpath("a.txt")
assert a.is_file()
e = root.joinpath("b").joinpath("d").joinpath("e.txt")
assert e.read_text() == "content of e"
@pass_alpharep
def test_traverse_truediv(self, alpharep):
root = zipfile.Path(alpharep)
a = root / "a.txt"
assert a.is_file()
e = root / "b" / "d" / "e.txt"
assert e.read_text() == "content of e"
@pass_alpharep
def test_traverse_simplediv(self, alpharep):
"""
Disable the __future__.division when testing traversal.
"""
code = compile(
source="zipfile.Path(alpharep) / 'a'",
filename="(test)",
mode="eval",
dont_inherit=True,
)
eval(code)
@pass_alpharep
def test_pathlike_construction(self, alpharep):
"""
zipfile.Path should be constructable from a path-like object
"""
zipfile_ondisk = self.zipfile_ondisk(alpharep)
pathlike = pathlib.Path(str(zipfile_ondisk))
zipfile.Path(pathlike)
@pass_alpharep
def test_traverse_pathlike(self, alpharep):
root = zipfile.Path(alpharep)
root / pathlib.Path("a")
@pass_alpharep
def test_parent(self, alpharep):
root = zipfile.Path(alpharep)
assert (root / 'a').parent.at == ''
assert (root / 'a' / 'b').parent.at == 'a/'
@pass_alpharep
def test_dir_parent(self, alpharep):
root = zipfile.Path(alpharep)
assert (root / 'b').parent.at == ''
assert (root / 'b/').parent.at == ''
@pass_alpharep
def test_missing_dir_parent(self, alpharep):
root = zipfile.Path(alpharep)
assert (root / 'missing dir/').parent.at == ''
@pass_alpharep
def test_mutability(self, alpharep):
"""
If the underlying zipfile is changed, the Path object should
reflect that change.
"""
root = zipfile.Path(alpharep)
a, b, g = root.iterdir()
alpharep.writestr('foo.txt', 'foo')
alpharep.writestr('bar/baz.txt', 'baz')
assert any(child.name == 'foo.txt' for child in root.iterdir())
assert (root / 'foo.txt').read_text() == 'foo'
(baz,) = (root / 'bar').iterdir()
assert baz.read_text() == 'baz'
HUGE_ZIPFILE_NUM_ENTRIES = 2 ** 13
def huge_zipfile(self):
"""Create a read-only zipfile with a huge number of entries entries."""
strm = io.BytesIO()
zf = zipfile.ZipFile(strm, "w")
for entry in map(str, range(self.HUGE_ZIPFILE_NUM_ENTRIES)):
zf.writestr(entry, entry)
zf.mode = 'r'
return zf
def test_joinpath_constant_time(self):
"""
Ensure joinpath on items in zipfile is linear time.
"""
root = zipfile.Path(self.huge_zipfile())
entries = jaraco.itertools.Counter(root.iterdir())
for entry in entries:
entry.joinpath('suffix')
# Check the file iterated all items
assert entries.count == self.HUGE_ZIPFILE_NUM_ENTRIES
# @func_timeout.func_set_timeout(3)
def test_implied_dirs_performance(self):
data = ['/'.join(string.ascii_lowercase + str(n)) for n in range(10000)]
zipfile.CompleteDirs._implied_dirs(data)
@pass_alpharep
def test_read_does_not_close(self, alpharep):
alpharep = self.zipfile_ondisk(alpharep)
with zipfile.ZipFile(alpharep) as file:
for rep in range(2):
zipfile.Path(file, 'a.txt').read_text()
@pass_alpharep
def test_subclass(self, alpharep):
class Subclass(zipfile.Path):
pass
root = Subclass(alpharep)
assert isinstance(root / 'b', Subclass)
@pass_alpharep
def test_filename(self, alpharep):
root = zipfile.Path(alpharep)
assert root.filename == pathlib.Path('alpharep.zip')
@pass_alpharep
def test_root_name(self, alpharep):
"""
The name of the root should be the name of the zipfile
"""
root = zipfile.Path(alpharep)
assert root.name == 'alpharep.zip' == root.filename.name
@pass_alpharep
def test_root_parent(self, alpharep):
root = zipfile.Path(alpharep)
assert root.parent == pathlib.Path('.')
root.root.filename = 'foo/bar.zip'
assert root.parent == pathlib.Path('foo')
@pass_alpharep
def test_root_unnamed(self, alpharep):
"""
It is an error to attempt to get the name
or parent of an unnamed zipfile.
"""
alpharep.filename = None
root = zipfile.Path(alpharep)
with self.assertRaises(TypeError):
root.name
with self.assertRaises(TypeError):
root.parent
# .name and .parent should still work on subs
sub = root / "b"
assert sub.name == "b"
assert sub.parent
@pass_alpharep
def test_inheritance(self, alpharep):
cls = type('PathChild', (zipfile.Path,), {})
for alpharep in self.zipfile_alpharep():
file = cls(alpharep).joinpath('some dir').parent
assert isinstance(file, cls)
if __name__ == "__main__":
unittest.main()
| 39.56992
| 100
| 0.588188
|
461748b63cde489f06de19d4422b9dac5e9f4253
| 1,195
|
py
|
Python
|
module_test.py
|
UTokyoMDcenter/matching
|
3d19ddb8d7a4cb95ce63c0477d1ba47bbe141973
|
[
"MIT"
] | 4
|
2021-05-24T06:49:19.000Z
|
2021-12-07T04:37:04.000Z
|
module_test.py
|
UTokyoMDcenter/matching
|
3d19ddb8d7a4cb95ce63c0477d1ba47bbe141973
|
[
"MIT"
] | null | null | null |
module_test.py
|
UTokyoMDcenter/matching
|
3d19ddb8d7a4cb95ce63c0477d1ba47bbe141973
|
[
"MIT"
] | null | null | null |
"""
For import test (temporary)
"""
import numpy as np
#import matching
if __name__ == "__main__":
d_prefs = [
[0, 2, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1],
]
h_prefs = [
[0, 2, 1, 3],
[1, 0, 2, 3],
[2, 0, 3, 1],
]
caps = np.array([1, 1, 1])
m = matching.ManyToOneMarket(d_prefs, h_prefs, caps)
print("DA result:", m.deferred_acceptance())
num_doctors = 10
num_hospitals = 2
d_prefs = np.array([
[0, 2, 1] for i in range(3)
] + [
[1, 2, 0] for i in range(num_doctors-3)
])
h_prefs = np.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for i in range(num_hospitals)
])
print("d_prefs:", d_prefs)
print("h_prefs:", h_prefs)
caps = [10, 10]
regions = [0, 0]
regional_caps = [10]
target_caps = [5, 5]
hospital_order = {
0: [0, 1]
}
m = matching.ManyToOneMarketWithRegionalQuotas(
d_prefs, h_prefs, caps, regions, regional_caps)
print("JRMP mechanism result:", m.JRMP_mechanism(target_caps))
print("flexible DA result:", m.flexible_deferred_acceptance(target_caps, hospital_order))
| 20.964912
| 93
| 0.529707
|
1dbed8e70537954c50e8306c419c866c6d7fe376
| 24,304
|
py
|
Python
|
asdl/lang/alive_lang/alive/tests/lit/lit/llvm/config.py
|
AlexShypula/tranX
|
bd3c77c5fc1ae4fd217c8293ee07644641398ff7
|
[
"Apache-2.0"
] | 203
|
2015-01-06T19:19:05.000Z
|
2022-03-14T18:30:52.000Z
|
asdl/lang/alive_lang/alive/tests/lit/lit/llvm/config.py
|
AlexShypula/tranX
|
bd3c77c5fc1ae4fd217c8293ee07644641398ff7
|
[
"Apache-2.0"
] | 34
|
2015-01-28T08:42:36.000Z
|
2020-10-01T06:08:59.000Z
|
asdl/lang/alive_lang/alive/tests/lit/lit/llvm/config.py
|
AlexShypula/tranX
|
bd3c77c5fc1ae4fd217c8293ee07644641398ff7
|
[
"Apache-2.0"
] | 19
|
2015-06-27T01:14:31.000Z
|
2020-09-30T10:03:58.000Z
|
import itertools
import os
import platform
import re
import subprocess
import sys
import lit.util
from lit.llvm.subst import FindTool
from lit.llvm.subst import ToolSubst
class LLVMConfig(object):
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
features = config.available_features
self.use_lit_shell = False
# Tweak PATH for Win32 to decide to use bash.exe or not.
if sys.platform == 'win32':
# Seek necessary tools in directories and set to $PATH.
path = None
lit_tools_dir = getattr(config, 'lit_tools_dir', None)
required_tools = ['cmp.exe', 'grep.exe', 'sed.exe', 'diff.exe', 'echo.exe']
if lit_tools_dir:
path = self.lit_config.getToolsPath(lit_tools_dir,
config.environment['PATH'],
required_tools)
if path is None:
path = self._find_git_windows_unix_tools(required_tools)
if path is not None:
self.with_environment('PATH', path, append_path=True)
# Many tools behave strangely if these environment variables aren't set.
self.with_system_environment(['SystemDrive', 'SystemRoot', 'TEMP', 'TMP'])
self.use_lit_shell = True
# Choose between lit's internal shell pipeline runner and a real shell. If
# LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override.
lit_shell_env = os.environ.get('LIT_USE_INTERNAL_SHELL')
if lit_shell_env:
self.use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
if not self.use_lit_shell:
features.add('shell')
# Running on Darwin OS
if platform.system() == 'Darwin':
# FIXME: lld uses the first, other projects use the second.
# We should standardize on the former.
features.add('system-linker-mach-o')
features.add('system-darwin')
elif platform.system() == 'Windows':
# For tests that require Windows to run.
features.add('system-windows')
elif platform.system() == 'Linux':
features.add('system-linux')
elif platform.system() in ['FreeBSD']:
features.add('system-freebsd')
elif platform.system() == 'NetBSD':
features.add('system-netbsd')
elif platform.system() == 'AIX':
features.add('system-aix')
elif platform.system() == 'SunOS':
features.add('system-solaris')
# Native compilation: host arch == default triple arch
# Both of these values should probably be in every site config (e.g. as
# part of the standard header. But currently they aren't)
host_triple = getattr(config, 'host_triple', None)
target_triple = getattr(config, 'target_triple', None)
if host_triple and host_triple == target_triple:
features.add('native')
# Sanitizers.
sanitizers = getattr(config, 'llvm_use_sanitizer', '')
sanitizers = frozenset(x.lower() for x in sanitizers.split(';'))
if 'address' in sanitizers:
features.add('asan')
if 'memory' in sanitizers or 'memorywithorigins' in sanitizers:
features.add('msan')
if 'undefined' in sanitizers:
features.add('ubsan')
have_zlib = getattr(config, 'have_zlib', None)
if have_zlib:
features.add('zlib')
# Check if we should run long running tests.
long_tests = lit_config.params.get('run_long_tests', None)
if lit.util.pythonize_bool(long_tests):
features.add('long_tests')
if target_triple:
if re.match(r'^x86_64.*-apple', target_triple):
features.add('x86_64-apple')
host_cxx = getattr(config, 'host_cxx', None)
if 'address' in sanitizers and self.get_clang_has_lsan(host_cxx, target_triple):
self.with_environment(
'ASAN_OPTIONS', 'detect_leaks=1', append_path=True)
if re.match(r'^x86_64.*-linux', target_triple):
features.add('x86_64-linux')
if re.match(r'^i.86.*', target_triple):
features.add('target-x86')
elif re.match(r'^x86_64.*', target_triple):
features.add('target-x86_64')
elif re.match(r'^aarch64.*', target_triple):
features.add('target-aarch64')
elif re.match(r'^arm.*', target_triple):
features.add('target-arm')
use_gmalloc = lit_config.params.get('use_gmalloc', None)
if lit.util.pythonize_bool(use_gmalloc):
# Allow use of an explicit path for gmalloc library.
# Will default to '/usr/lib/libgmalloc.dylib' if not set.
gmalloc_path_str = lit_config.params.get('gmalloc_path',
'/usr/lib/libgmalloc.dylib')
if gmalloc_path_str is not None:
self.with_environment(
'DYLD_INSERT_LIBRARIES', gmalloc_path_str)
def _find_git_windows_unix_tools(self, tools_needed):
assert(sys.platform == 'win32')
if sys.version_info.major >= 3:
import winreg
else:
import _winreg as winreg
# Search both the 64 and 32-bit hives, as well as HKLM + HKCU
masks = [0, winreg.KEY_WOW64_64KEY]
hives = [winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CURRENT_USER]
for mask, hive in itertools.product(masks, hives):
try:
with winreg.OpenKey(hive, r"SOFTWARE\GitForWindows", access=winreg.KEY_READ | mask) as key:
install_root, _ = winreg.QueryValueEx(key, 'InstallPath')
if not install_root:
continue
candidate_path = os.path.join(install_root, 'usr', 'bin')
if not lit.util.checkToolsPath(candidate_path, tools_needed):
continue
# We found it, stop enumerating.
return candidate_path
except:
continue
return None
def with_environment(self, variable, value, append_path=False):
if append_path:
# For paths, we should be able to take a list of them and process all
# of them.
paths_to_add = value
if lit.util.is_string(paths_to_add):
paths_to_add = [paths_to_add]
def norm(x):
return os.path.normcase(os.path.normpath(x))
current_paths = self.config.environment.get(variable, None)
if current_paths:
current_paths = current_paths.split(os.path.pathsep)
paths = [norm(p) for p in current_paths]
else:
paths = []
# If we are passed a list [a b c], then iterating this list forwards
# and adding each to the beginning would result in b c a. So we
# need to iterate in reverse to end up with the original ordering.
for p in reversed(paths_to_add):
# Move it to the front if it already exists, otherwise insert it at the
# beginning.
p = norm(p)
try:
paths.remove(p)
except ValueError:
pass
paths = [p] + paths
value = os.pathsep.join(paths)
self.config.environment[variable] = value
def with_system_environment(self, variables, append_path=False):
if lit.util.is_string(variables):
variables = [variables]
for v in variables:
value = os.environ.get(v)
if value:
self.with_environment(v, value, append_path)
def clear_environment(self, variables):
for name in variables:
if name in self.config.environment:
del self.config.environment[name]
def get_process_output(self, command):
try:
cmd = subprocess.Popen(
command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=self.config.environment)
stdout, stderr = cmd.communicate()
stdout = lit.util.to_string(stdout)
stderr = lit.util.to_string(stderr)
return (stdout, stderr)
except OSError:
self.lit_config.fatal('Could not run process %s' % command)
def feature_config(self, features):
# Ask llvm-config about the specified feature.
arguments = [x for (x, _) in features]
config_path = os.path.join(self.config.llvm_tools_dir, 'llvm-config')
output, _ = self.get_process_output([config_path] + arguments)
lines = output.split('\n')
for (feature_line, (_, patterns)) in zip(lines, features):
# We should have either a callable or a dictionary. If it's a
# dictionary, grep each key against the output and use the value if
# it matches. If it's a callable, it does the entire translation.
if callable(patterns):
features_to_add = patterns(feature_line)
self.config.available_features.update(features_to_add)
else:
for (re_pattern, feature) in patterns.items():
if re.search(re_pattern, feature_line):
self.config.available_features.add(feature)
# Note that when substituting %clang_cc1 also fill in the include directory of
# the builtin headers. Those are part of even a freestanding environment, but
# Clang relies on the driver to locate them.
def get_clang_builtin_include_dir(self, clang):
# FIXME: Rather than just getting the version, we should have clang print
# out its resource dir here in an easy to scrape form.
clang_dir, _ = self.get_process_output(
[clang, '-print-file-name=include'])
if not clang_dir:
self.lit_config.fatal(
"Couldn't find the include dir for Clang ('%s')" % clang)
clang_dir = clang_dir.strip()
if sys.platform in ['win32'] and not self.use_lit_shell:
# Don't pass dosish path separator to msys bash.exe.
clang_dir = clang_dir.replace('\\', '/')
# Ensure the result is an ascii string, across Python2.5+ - Python3.
return clang_dir
# On macOS, LSan is only supported on clang versions 5 and higher
def get_clang_has_lsan(self, clang, triple):
if not clang:
self.lit_config.warning(
'config.host_cxx is unset but test suite is configured to use sanitizers.')
return False
clang_binary = clang.split()[0]
version_string, _ = self.get_process_output(
[clang_binary, '--version'])
if not 'clang' in version_string:
self.lit_config.warning(
"compiler '%s' does not appear to be clang, " % clang_binary +
'but test suite is configured to use sanitizers.')
return False
if re.match(r'.*-linux', triple):
return True
if re.match(r'^x86_64.*-apple', triple):
version_regex = re.search(r'version ([0-9]+)\.([0-9]+).([0-9]+)', version_string)
major_version_number = int(version_regex.group(1))
minor_version_number = int(version_regex.group(2))
patch_version_number = int(version_regex.group(3))
if ('Apple LLVM' in version_string) or ('Apple clang' in version_string):
# Apple clang doesn't yet support LSan
return False
else:
return major_version_number >= 5
return False
def make_itanium_abi_triple(self, triple):
m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
if not m:
self.lit_config.fatal(
"Could not turn '%s' into Itanium ABI triple" % triple)
if m.group(3).lower() != 'windows':
# All non-windows triples use the Itanium ABI.
return triple
return m.group(1) + '-' + m.group(2) + '-' + m.group(3) + '-gnu'
def make_msabi_triple(self, triple):
m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
if not m:
self.lit_config.fatal(
"Could not turn '%s' into MS ABI triple" % triple)
isa = m.group(1).lower()
vendor = m.group(2).lower()
os = m.group(3).lower()
if os == 'windows' and re.match(r'.*-msvc$', triple):
# If the OS is windows and environment is msvc, we're done.
return triple
if isa.startswith('x86') or isa == 'amd64' or re.match(r'i\d86', isa):
# For x86 ISAs, adjust the OS.
return isa + '-' + vendor + '-windows-msvc'
# -msvc is not supported for non-x86 targets; use a default.
return 'i686-pc-windows-msvc'
def add_tool_substitutions(self, tools, search_dirs=None):
if not search_dirs:
search_dirs = [self.config.llvm_tools_dir]
if lit.util.is_string(search_dirs):
search_dirs = [search_dirs]
tools = [x if isinstance(x, ToolSubst) else ToolSubst(x)
for x in tools]
search_dirs = os.pathsep.join(search_dirs)
substitutions = []
for tool in tools:
match = tool.resolve(self, search_dirs)
# Either no match occurred, or there was an unresolved match that
# is ignored.
if not match:
continue
subst_key, tool_pipe, command = match
# An unresolved match occurred that can't be ignored. Fail without
# adding any of the previously-discovered substitutions.
if not command:
return False
substitutions.append((subst_key, tool_pipe + command))
self.config.substitutions.extend(substitutions)
return True
def use_default_substitutions(self):
tool_patterns = [
ToolSubst('FileCheck', unresolved='fatal'),
# Handle these specially as they are strings searched for during testing.
ToolSubst(r'\| \bcount\b', command=FindTool(
'count'), verbatim=True, unresolved='fatal'),
ToolSubst(r'\| \bnot\b', command=FindTool('not'), verbatim=True, unresolved='fatal')]
self.config.substitutions.append(('%python', '"%s"' % (sys.executable)))
self.add_tool_substitutions(
tool_patterns, [self.config.llvm_tools_dir])
def use_llvm_tool(self, name, search_env=None, required=False, quiet=False):
"""Find the executable program 'name', optionally using the specified
environment variable as an override before searching the
configuration's PATH."""
# If the override is specified in the environment, use it without
# validation.
if search_env:
tool = self.config.environment.get(search_env)
if tool:
return tool
# Otherwise look in the path.
tool = lit.util.which(name, self.config.environment['PATH'])
if required and not tool:
message = "couldn't find '{}' program".format(name)
if search_env:
message = message + \
', try setting {} in your environment'.format(search_env)
self.lit_config.fatal(message)
if tool:
tool = os.path.normpath(tool)
if not self.lit_config.quiet and not quiet:
self.lit_config.note('using {}: {}'.format(name, tool))
return tool
def use_clang(self, additional_tool_dirs=[], additional_flags=[], required=True):
"""Configure the test suite to be able to invoke clang.
Sets up some environment variables important to clang, locates a
just-built or installed clang, and add a set of standard
substitutions useful to any test suite that makes use of clang.
"""
# Clear some environment variables that might affect Clang.
#
# This first set of vars are read by Clang, but shouldn't affect tests
# that aren't specifically looking for these features, or are required
# simply to run the tests at all.
#
# FIXME: Should we have a tool that enforces this?
# safe_env_vars = ('TMPDIR', 'TEMP', 'TMP', 'USERPROFILE', 'PWD',
# 'MACOSX_DEPLOYMENT_TARGET', 'IPHONEOS_DEPLOYMENT_TARGET',
# 'VCINSTALLDIR', 'VC100COMNTOOLS', 'VC90COMNTOOLS',
# 'VC80COMNTOOLS')
possibly_dangerous_env_vars = ['COMPILER_PATH', 'RC_DEBUG_OPTIONS',
'CINDEXTEST_PREAMBLE_FILE', 'LIBRARY_PATH',
'CPATH', 'C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH',
'OBJC_INCLUDE_PATH', 'OBJCPLUS_INCLUDE_PATH',
'LIBCLANG_TIMING', 'LIBCLANG_OBJTRACKING',
'LIBCLANG_LOGGING', 'LIBCLANG_BGPRIO_INDEX',
'LIBCLANG_BGPRIO_EDIT', 'LIBCLANG_NOTHREADS',
'LIBCLANG_RESOURCE_USAGE',
'LIBCLANG_CODE_COMPLETION_LOGGING']
# Clang/Win32 may refer to %INCLUDE%. vsvarsall.bat sets it.
if platform.system() != 'Windows':
possibly_dangerous_env_vars.append('INCLUDE')
self.clear_environment(possibly_dangerous_env_vars)
# Tweak the PATH to include the tools dir and the scripts dir.
# Put Clang first to avoid LLVM from overriding out-of-tree clang builds.
exe_dir_props = [self.config.name.lower() + '_tools_dir', 'clang_tools_dir', 'llvm_tools_dir']
paths = [getattr(self.config, pp) for pp in exe_dir_props
if getattr(self.config, pp, None)]
paths = additional_tool_dirs + paths
self.with_environment('PATH', paths, append_path=True)
lib_dir_props = [self.config.name.lower() + '_libs_dir', 'clang_libs_dir', 'llvm_shlib_dir', 'llvm_libs_dir']
paths = [getattr(self.config, pp) for pp in lib_dir_props
if getattr(self.config, pp, None)]
self.with_environment('LD_LIBRARY_PATH', paths, append_path=True)
# Discover the 'clang' and 'clangcc' to use.
self.config.clang = self.use_llvm_tool(
'clang', search_env='CLANG', required=required)
shl = getattr(self.config, 'llvm_shlib_dir', None)
pext = getattr(self.config, 'llvm_plugin_ext', None)
if shl:
self.config.substitutions.append(('%llvmshlibdir', shl))
if pext:
self.config.substitutions.append(('%pluginext', pext))
builtin_include_dir = self.get_clang_builtin_include_dir(self.config.clang)
tool_substitutions = [
ToolSubst('%clang', command=self.config.clang, extra_args=additional_flags),
ToolSubst('%clang_analyze_cc1', command='%clang_cc1', extra_args=['-analyze', '%analyze', '-setup-static-analyzer']+additional_flags),
ToolSubst('%clang_cc1', command=self.config.clang, extra_args=['-cc1', '-internal-isystem', builtin_include_dir, '-nostdsysteminc']+additional_flags),
ToolSubst('%clang_cpp', command=self.config.clang, extra_args=['--driver-mode=cpp']+additional_flags),
ToolSubst('%clang_cl', command=self.config.clang, extra_args=['--driver-mode=cl']+additional_flags),
ToolSubst('%clangxx', command=self.config.clang, extra_args=['--driver-mode=g++']+additional_flags),
]
self.add_tool_substitutions(tool_substitutions)
self.config.substitutions.append(('%itanium_abi_triple',
self.make_itanium_abi_triple(self.config.target_triple)))
self.config.substitutions.append(('%ms_abi_triple',
self.make_msabi_triple(self.config.target_triple)))
self.config.substitutions.append(
('%resource_dir', builtin_include_dir))
# The host triple might not be set, at least if we're compiling clang from
# an already installed llvm.
if self.config.host_triple and self.config.host_triple != '@LLVM_HOST_TRIPLE@':
self.config.substitutions.append(('%target_itanium_abi_host_triple',
'--target=%s' % self.make_itanium_abi_triple(self.config.host_triple)))
else:
self.config.substitutions.append(
('%target_itanium_abi_host_triple', ''))
# FIXME: Find nicer way to prohibit this.
self.config.substitutions.append(
(' clang ', """\"*** Do not use 'clang' in tests, use '%clang'. ***\""""))
self.config.substitutions.append(
            (r' clang\+\+ ', """\"*** Do not use 'clang++' in tests, use '%clangxx'. ***\""""))
self.config.substitutions.append(
(' clang-cc ',
"""\"*** Do not use 'clang-cc' in tests, use '%clang_cc1'. ***\""""))
self.config.substitutions.append(
(' clang-cl ',
"""\"*** Do not use 'clang-cl' in tests, use '%clang_cl'. ***\""""))
self.config.substitutions.append(
(' clang -cc1 -analyze ',
"""\"*** Do not use 'clang -cc1 -analyze' in tests, use '%clang_analyze_cc1'. ***\""""))
self.config.substitutions.append(
(' clang -cc1 ',
"""\"*** Do not use 'clang -cc1' in tests, use '%clang_cc1'. ***\""""))
self.config.substitutions.append(
(' %clang-cc1 ',
"""\"*** invalid substitution, use '%clang_cc1'. ***\""""))
self.config.substitutions.append(
(' %clang-cpp ',
"""\"*** invalid substitution, use '%clang_cpp'. ***\""""))
self.config.substitutions.append(
(' %clang-cl ',
"""\"*** invalid substitution, use '%clang_cl'. ***\""""))
def use_lld(self, additional_tool_dirs=[], required=True):
"""Configure the test suite to be able to invoke lld.
Sets up some environment variables important to lld, locates a
just-built or installed lld, and add a set of standard
substitutions useful to any test suite that makes use of lld.
"""
# Tweak the PATH to include the tools dir and the scripts dir.
exe_dir_props = [self.config.name.lower() + '_tools_dir', 'lld_tools_dir', 'llvm_tools_dir']
paths = [getattr(self.config, pp) for pp in exe_dir_props
if getattr(self.config, pp, None)]
paths = additional_tool_dirs + paths
self.with_environment('PATH', paths, append_path=True)
lib_dir_props = [self.config.name.lower() + '_libs_dir', 'lld_libs_dir', 'llvm_libs_dir']
paths = [getattr(self.config, pp) for pp in lib_dir_props
if getattr(self.config, pp, None)]
self.with_environment('LD_LIBRARY_PATH', paths, append_path=True)
# Discover the 'clang' and 'clangcc' to use.
ld_lld = self.use_llvm_tool('ld.lld', required=required)
lld_link = self.use_llvm_tool('lld-link', required=required)
ld64_lld = self.use_llvm_tool('ld64.lld', required=required)
wasm_ld = self.use_llvm_tool('wasm-ld', required=required)
was_found = ld_lld and lld_link and ld64_lld and wasm_ld
tool_substitutions = []
if ld_lld:
            tool_substitutions.append(ToolSubst(r'ld\.lld', command=ld_lld))
if lld_link:
tool_substitutions.append(ToolSubst('lld-link', command=lld_link))
if ld64_lld:
            tool_substitutions.append(ToolSubst(r'ld64\.lld', command=ld64_lld))
if wasm_ld:
tool_substitutions.append(ToolSubst('wasm-ld', command=wasm_ld))
self.add_tool_substitutions(tool_substitutions)
return was_found
| 45.174721
| 162
| 0.589574
|
dc7edaa77302e84d83b68331c39ac15f529503ed
| 118
|
py
|
Python
|
prova logica.py
|
noudas/treinopython1
|
88379a2f1a564de518e33ea29df14f7ce65583d8
|
[
"0BSD"
] | null | null | null |
prova logica.py
|
noudas/treinopython1
|
88379a2f1a564de518e33ea29df14f7ce65583d8
|
[
"0BSD"
] | null | null | null |
prova logica.py
|
noudas/treinopython1
|
88379a2f1a564de518e33ea29df14f7ce65583d8
|
[
"0BSD"
] | null | null | null |
def somar(n1,n2):
soma = n1+n2
return soma
num1 = 3
num2 = int(input())
x = 2**3-somar(num1,num2)
| 11.8
| 26
| 0.533898
|
fcf2055612f6f0c3fda9cde33a9acf8047cd5887
| 331
|
py
|
Python
|
config.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 94
|
2017-05-13T05:39:06.000Z
|
2022-01-11T18:14:54.000Z
|
config.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 5
|
2020-06-11T19:09:43.000Z
|
2021-05-01T05:01:55.000Z
|
config.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 25
|
2017-05-13T18:15:23.000Z
|
2022-02-03T22:32:41.000Z
|
from __future__ import print_function
import json
import os
try:
# TODO: We can look in $HOME/.config or $HOME/.revsync or something
path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(path, "config.json"), "r") as f:
config = json.loads(f.read())
except Exception:
raise ImportError
| 25.461538
| 71
| 0.691843
|
ed732718194e34aa16a51882be9da105f8bb89ac
| 8,598
|
py
|
Python
|
train/validate_errpatch.py
|
thisisiron/khaiii-keras
|
c739c6f93198ecd9ac959a9b75c9d28d30ecb88b
|
[
"Apache-2.0"
] | 1
|
2019-02-21T05:24:43.000Z
|
2019-02-21T05:24:43.000Z
|
train/validate_errpatch.py
|
thisisiron/khaiii-keras
|
c739c6f93198ecd9ac959a9b75c9d28d30ecb88b
|
[
"Apache-2.0"
] | 1
|
2019-02-20T09:25:50.000Z
|
2019-02-20T09:25:50.000Z
|
train/validate_errpatch.py
|
thisisiron/khaiii-keras
|
c739c6f93198ecd9ac959a9b75c9d28d30ecb88b
|
[
"Apache-2.0"
] | 1
|
2019-02-20T09:23:26.000Z
|
2019-02-20T09:23:26.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script for validating extracted misanalysis patch candidates
__author__ = 'Jamie (jamie.lim@kakaocorp.com)'
__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from argparse import ArgumentParser, Namespace
from collections import defaultdict
import io
import logging
import os
import sys
from typing import Dict, Iterator, List, Tuple
from khaiii.khaiii import KhaiiiApi
from khaiii.munjong.sejong_corpus import Sentence, sents
from khaiii.resource.char_align import Aligner, AlignError, align_patch, align_to_tag
from khaiii.resource.morphs import mix_char_tag, WORD_DELIM_NUM, SENT_DELIM_NUM
from khaiii.resource.resource import load_restore_dic, load_vocab_out
#########
# types #
#########
class StrFile(io.StringIO):
"""
    Adds a name member to a StringIO object so that it behaves like a file object
"""
def __init__(self, name, buf):
"""
Args:
name: file name
buf: buffer
"""
super().__init__(buf)
self.name = name
#############
# variables #
#############
_CORPUS = [] # corpus cache
##########
# mapper #
##########
def _sent_iter(args: Namespace) -> Iterator[Sentence]:
"""
sentence generator
Args:
args: arguments
Yields:
sentence
"""
if not _CORPUS:
for name in sorted(os.listdir(args.corpus_dir)):
if not name.endswith('.txt'):
continue
path = '{}/{}'.format(args.corpus_dir, name)
_CORPUS.append((name, open(path, 'r', encoding='UTF-8').read()))
_CORPUS.sort()
for name, lines in _CORPUS:
logging.info(name)
for sent in sents(StrFile(name, lines)):
yield sent
def _find_list(haystack: list, needle: list) -> int:
"""
Return the index at which the sequence needle appears in the sequence haystack,
or -1 if it is not found, using the Boyer-Moore-Horspool algorithm.
The elements of needle and haystack must be hashable.
>>> _find_list([10, 10, 20], [10, 20])
1
Args:
haystack: list to find
needle: pattern list
Returns:
start index. -1 if not found
"""
h_len = len(haystack)
n_len = len(needle)
skip = {needle[i]: n_len - i - 1 for i in range(n_len - 1)}
idx = n_len - 1
while idx < h_len:
for jdx in range(n_len):
if haystack[idx - jdx] != needle[-jdx - 1]:
idx += skip.get(haystack[idx], n_len)
break
else:
return idx - n_len + 1
return -1
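# Usage sketch for _find_list beyond the doctest above (not part of the original
# script): _cnt_pos_neg below calls it on sequences built by mix_char_tag(), e.g.
#
#     found = _find_list(sent_haystack, left_needle)
#     # -> start index of the patch's left-hand sequence in the analyzed
#     #    sentence, or -1 when the pattern does not occur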
def _align_sent(rsc_src: Tuple[Aligner, dict, Dict[str, str]], sent: Sentence) -> List[int]:
"""
    Align a Sejong sentence object and represent it as a vector of per-syllable output tags.
Args:
rsc_src: (Aligner, restore dic, vocab out) resource triple
        sent: sejong_corpus.Sentence object
Returns:
list of output tag numbers. empty list for alignment error
"""
aligner, restore_dic, vocab_out = rsc_src
tag_nums = []
restore_new = defaultdict(dict)
vocab_new = defaultdict(list)
for word in sent.words:
try:
word_align = aligner.align(word)
_, word_tag_nums = align_to_tag(word.raw, word_align, (restore_dic, restore_new),
(vocab_out, vocab_new))
except AlignError as algn_err:
logging.debug('alignment error: %s', word)
logging.debug(str(algn_err))
return []
if tag_nums:
tag_nums.append(WORD_DELIM_NUM)
tag_nums.extend(word_tag_nums)
tag_nums.insert(0, SENT_DELIM_NUM)
tag_nums.append(SENT_DELIM_NUM)
return tag_nums
def _analyze_sent(khaiii_api: KhaiiiApi, raw_sent: str) -> List[int]:
"""
    Get the per-syllable tagging result for a raw sentence without applying the error patch.
    Args:
        khaiii_api: khaiii API object
        raw_sent: raw sentence
Returns:
list of output tag numbers
"""
tag_nums = khaiii_api.analyze_bfr_errpatch(raw_sent, '')
logging.debug(tag_nums)
return tag_nums
def _cnt_pos_neg(khaiii_api: KhaiiiApi, patch_raw: str, alignment: Tuple[list, list],
rsc_src: Tuple[Aligner, dict, Dict[str, str]], sent: Sentence) -> Tuple[int, int]:
"""
    Count how many times a misanalysis is turned into a correct analysis, and how many times it is turned into another misanalysis.
Args:
khaiii_api: khaiii API object
patch_raw: raw part of patch
alignment: (left, right) alignment pair
rsc_src: (Aligner, restore dic, vocab out) resource triple
sent: Sentence object
Returns:
        number of misanalysis -> correct analysis conversions
        number of misanalysis -> misanalysis conversions
"""
raw_sent = sent.raw_str()
if patch_raw not in raw_sent:
        # skip if the patch's raw text is not found in the sentence
return 0, 0
aligner, restore_dic, vocab_out = rsc_src
sent_align = _align_sent((aligner, restore_dic, vocab_out), sent)
if not sent_align:
        # skip if the corpus gold answer fails to align with the raw text
return 0, 0
left_align, right_align = alignment
left_needle = mix_char_tag(patch_raw, left_align)
sent_anal = khaiii_api.analyze_bfr_errpatch(raw_sent, '')
sent_haystack = mix_char_tag(raw_sent, sent_anal)
pos_cnt = 0
neg_cnt = 0
found = _find_list(sent_haystack, left_needle)
while found >= 0:
        # when the patch's left-hand (misanalysis) sequence appears in the analysis result, compare its right-hand (correct) sequence against the corpus
right_corpus = sent_align[found:found + len(left_needle)]
if right_align == right_corpus:
pos_cnt += 1
else:
neg_cnt += 1
del sent_haystack[:found + len(left_needle)]
found = _find_list(sent_haystack, left_needle)
return pos_cnt, neg_cnt
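# Input format consumed by run() below, inferred from its parsing code (not an
# official spec): one patch candidate per line on stdin, empty lines and lines
# starting with '#' are skipped, and each remaining line holds three
# tab-separated fields:
#
#     <raw surface text>\t<left-hand (misanalysis) pattern>\t<right-hand (correct) pattern>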
def run(args: Namespace):
"""
actual function which is doing some task
Args:
args: program arguments
"""
aligner = Aligner(args.rsc_src)
restore_dic = load_restore_dic('{}/restore.dic'.format(args.rsc_src))
if not restore_dic:
sys.exit(1)
vocab_out = load_vocab_out(args.rsc_src)
khaiii_api = KhaiiiApi(args.lib_path, args.rsc_dir, '{"errpatch": false}')
for line_num, line in enumerate(sys.stdin, start=1):
line = line.rstrip('\r\n')
if not line or line[0] == '#':
continue
raw, left, right = line.split('\t')
left_align = align_patch((aligner, restore_dic, vocab_out), raw, left)
if not left_align:
logging.info('invalid %d-th line: left align: %s', line_num, line)
continue
right_align = align_patch((aligner, restore_dic, vocab_out), raw, right)
if not right_align:
logging.info('invalid %d-th line: right align: %s', line_num, line)
continue
if len(left_align) != len(right_align):
logging.info('invalid %d-th line: left/right diff: %s', line_num, line)
continue
pos_cnt = 0
neg_cnt = 0
for sent in _sent_iter(args):
pos_cnt_sent, neg_cnt_sent = _cnt_pos_neg(khaiii_api, raw, (left_align, right_align),
(aligner, restore_dic, vocab_out), sent)
pos_cnt += pos_cnt_sent
neg_cnt += neg_cnt_sent
if neg_cnt > 0:
break
if neg_cnt > 0 or pos_cnt == 0:
logging.info('invalid %d-th line: +%d, -%d: %s', line_num, pos_cnt, neg_cnt, line)
continue
print('{}\t{}\t{}'.format(raw, left, right))
########
# main #
########
def main():
"""
main function processes only argument parsing
"""
    parser = ArgumentParser(description='validate extracted misanalysis patch candidates')
parser.add_argument('-c', '--corpus-dir', help='corpus dir', metavar='DIR', required=True)
parser.add_argument('--rsc-src', help='resource source dir <default: ../rsc/src>',
metavar='DIR', default='../rsc/src')
parser.add_argument('--lib-path', help='khaiii shared library path', metavar='FILE', default='')
parser.add_argument('--rsc-dir', help='resource dir', metavar='DIR', default='')
parser.add_argument('--input', help='input file <default: stdin>', metavar='FILE')
parser.add_argument('--output', help='output file <default: stdout>', metavar='FILE')
parser.add_argument('--debug', help='enable debug', action='store_true')
args = parser.parse_args()
if args.input:
sys.stdin = open(args.input, 'r', encoding='UTF-8')
if args.output:
sys.stdout = open(args.output, 'w', encoding='UTF-8')
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
run(args)
if __name__ == '__main__':
main()
| 31.726937
| 100
| 0.604559
|
f6b4c1f315fc1e41e3be0345d46a3db9d2aed9c9
| 1,012
|
py
|
Python
|
app/core/models.py
|
astenstrasser/udemy-api-drf
|
b72d448aa8e9c9d1ee46a10e049550bbdbf94ef5
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
astenstrasser/udemy-api-drf
|
b72d448aa8e9c9d1ee46a10e049550bbdbf94ef5
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
astenstrasser/udemy-api-drf
|
b72d448aa8e9c9d1ee46a10e049550bbdbf94ef5
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
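# Usage sketch (illustrative only; assumes AUTH_USER_MODEL = 'core.User' is set
# in the project settings, which is not shown in this module):
#
#     from django.contrib.auth import get_user_model
#     user = get_user_model().objects.create_user(
#         email='user@example.com', password='changeme', name='Test User')
#     admin = get_user_model().objects.create_superuser(
#         email='admin@example.com', password='changeme')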
| 28.914286
| 76
| 0.69664
|
29f13dd935060c372c9502d508ec49b13e365ac2
| 718
|
py
|
Python
|
.ci/appveyor/pypi_upload.py
|
elbeejay/landlab
|
76ba4ab92f2a1e0586072f0f08f7b28392df719f
|
[
"MIT"
] | 4
|
2019-04-05T16:41:40.000Z
|
2021-06-11T20:33:14.000Z
|
.ci/appveyor/pypi_upload.py
|
elbeejay/landlab
|
76ba4ab92f2a1e0586072f0f08f7b28392df719f
|
[
"MIT"
] | 7
|
2020-04-03T21:40:07.000Z
|
2020-12-02T22:57:12.000Z
|
.ci/appveyor/pypi_upload.py
|
elbeejay/landlab
|
76ba4ab92f2a1e0586072f0f08f7b28392df719f
|
[
"MIT"
] | 4
|
2018-05-21T17:40:58.000Z
|
2020-08-21T05:44:41.000Z
|
import glob
import os
import subprocess
import sys
import traceback
print('Using python: {prefix}'.format(prefix=sys.prefix))
repo_tag = os.environ.get('APPVEYOR_REPO_TAG', 'false')
tag_name = os.environ.get('APPVEYOR_REPO_TAG_NAME', '')
token = os.environ.get('PYPI_PASS', 'NOT_A_TOKEN')
if repo_tag == 'true' and tag_name.startswith('v'):
print('Uploading to PyPI')
try:
cmd = ' '.join(['twine', 'upload', '-u', 'mcflugen', '-p', token,
'dist/*'])
resp = subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError:
traceback.print_exc()
else:
print('OK')
else:
print('Not a tagged release. Not deploying to PyPI.')
| 25.642857
| 73
| 0.644847
|
7f9428946fbb3f90f787482d999568014d658a68
| 4,407
|
py
|
Python
|
exercise/venv/lib/python3.7/site-packages/sqreen/events.py
|
assuzzanne/my-sqreen
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | null | null | null |
exercise/venv/lib/python3.7/site-packages/sqreen/events.py
|
assuzzanne/my-sqreen
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | 1
|
2021-06-02T00:27:34.000Z
|
2021-06-02T00:27:34.000Z
|
exercise/venv/lib/python3.7/site-packages/sqreen/events.py
|
assuzzanne/notifications-dispatcher-api
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, 2017, 2018, 2019 Sqreen. All rights reserved.
# Please refer to our terms for more information:
#
# https://www.sqreen.io/terms.html
#
""" Sqreen attack event helpers and placeholder
"""
import traceback
from logging import getLogger
from .config import CONFIG
from .remote_exception import traceback_formatter
from .sanitizer import strip_sensitive_data
LOGGER = getLogger(__name__)
def get_context_payload():
""" Return attack payload dependent on the context, right now stacktrace.
"""
return {
"context": {
"backtrace": list(traceback_formatter(traceback.extract_stack()))
}
}
class Attack(object):
def __init__(self, payload, rule_name):
self.payload = payload
self.rule_name = rule_name
def to_dict(self):
result = {}
rule_payload = self.payload.get("rule", {})
request_payload = self.payload.get("request", {})
local_payload = self.payload.get("local", {})
if "name" in rule_payload:
result["rule_name"] = rule_payload["name"]
if "rulespack_id" in rule_payload:
result["rulespack_id"] = rule_payload["rulespack_id"]
if "test" in rule_payload:
result["test"] = rule_payload["test"]
if "infos" in self.payload:
result["infos"] = self.payload["infos"]
if "time" in local_payload:
result["time"] = local_payload["time"]
if "remote_ip" in request_payload:
result["client_ip"] = request_payload["remote_ip"]
if "request" in self.payload:
result["request"] = self.payload["request"]
if "params" in self.payload:
result["params"] = self.payload["params"]
if "context" in self.payload:
result["context"] = self.payload["context"]
if "headers" in self.payload:
result["headers"] = self.payload["headers"]
return result
class RequestRecord(object):
"""Request record objects."""
VERSION = "20171208"
def __init__(self, payload):
self.payload = payload
def to_dict(self):
"""Export the record as a dict object."""
result = {"version": self.VERSION}
if "observed" in self.payload:
observed_dict = self.payload["observed"]
result["observed"] = observed_dict
rulespack = None
for attack_dict in observed_dict.get("attacks", []):
rulespack = attack_dict.pop("rulespack_id", None) or rulespack
for exc_dict in observed_dict.get("sqreen_exceptions", []):
payload_dict = exc_dict.pop("exception", None)
if payload_dict:
exc_dict["message"] = payload_dict["message"]
exc_dict["klass"] = payload_dict["klass"]
rulespack = exc_dict.pop("rulespack_id", None) or rulespack
if rulespack:
result["rulespack_id"] = rulespack
if "observations" in observed_dict:
result["observed"]["observations"] = [
{"category": cat, "key": key, "value": value, "time": time}
for (cat, time, key, value) in observed_dict[
"observations"
]
]
if "sdk" in observed_dict:
result["observed"]["sdk"] = [
{"name": entry[0], "time": entry[1], "args": entry[2:]}
for entry in observed_dict["sdk"]
]
if "local" in self.payload:
result["local"] = self.payload["local"]
if "request" in self.payload:
request_dict = self.payload["request"]
result["request"] = request_dict
if "client_ip" in request_dict:
result["client_ip"] = request_dict.pop("client_ip")
else:
result["request"] = {}
if "params" in self.payload:
result["request"]["parameters"] = self.payload["params"]
if "headers" in self.payload:
result["request"]["headers"] = self.payload["headers"]
if CONFIG["STRIP_SENSITIVE_DATA"]:
result["request"] = strip_sensitive_data(result["request"])
if "response" in self.payload:
result["response"] = self.payload["response"]
return result
| 36.725
| 79
| 0.572725
|
dc63dbc54c89784d661f85a69077905be80108cc
| 7,161
|
py
|
Python
|
src/models/descatter_gan_model.py
|
mjiUST/VasNet
|
6d39d0915cf6d77ed580de41800982b31ae2aa47
|
[
"MIT"
] | 12
|
2020-04-23T04:50:22.000Z
|
2021-11-29T02:28:21.000Z
|
src/models/descatter_gan_model.py
|
mjiUST/VasNet
|
6d39d0915cf6d77ed580de41800982b31ae2aa47
|
[
"MIT"
] | 1
|
2020-07-21T23:06:16.000Z
|
2020-07-23T07:10:28.000Z
|
src/models/descatter_gan_model.py
|
mjiUST/VasNet
|
6d39d0915cf6d77ed580de41800982b31ae2aa47
|
[
"MIT"
] | 1
|
2021-01-05T15:42:17.000Z
|
2021-01-05T15:42:17.000Z
|
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class DescatterGANModel(BaseModel):
def name(self):
return 'DescatterGANModel'
@staticmethod
def modify_commandline_options(parser, is_train=True):
# default CycleGAN did not use dropout
parser.set_defaults(no_dropout=True)
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0,
help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def initialize(self, opt):
BaseModel.initialize(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
if self.isTrain and self.opt.lambda_identity > 0.0:
visual_names_A.append('idt_A')
visual_names_B.append('idt_B')
self.visual_names = visual_names_A + visual_names_B
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# load/define networks
# The naming conversion is different from those used in the paper
# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
self.fake_A_pool = ImagePool(opt.pool_size)
self.fake_B_pool = ImagePool(opt.pool_size)
# define loss functions
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = []
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
self.fake_B = self.netG_A(self.real_A)
self.rec_A = self.netG_B(self.fake_B)
self.fake_A = self.netG_B(self.real_B)
self.rec_B = self.netG_A(self.fake_A)
def backward_D_basic(self, netD, real, fake):
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss
loss_D = (loss_D_real + loss_D_fake) * 0.5
# backward
loss_D.backward()
return loss_D
def backward_D_A(self):
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed.
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed.
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss
# self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G = self.loss_G_A + self.loss_cycle_A + self.loss_idt_A # remove B2A2B branch
self.loss_G.backward()
def optimize_parameters(self):
# forward
self.forward()
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False)
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad()
self.backward_D_A()
# self.backward_D_B()
self.optimizer_D.step()
| 47.423841
| 362
| 0.62617
|
8038e06aa085f6ddbcab9303f4ee23fc7bddc13f
| 1,386
|
py
|
Python
|
src/models/tuned_xgboost.py
|
DS-IFT6750-project-collaboration/Hockey-all-star-analytics
|
1fc6a1c55d8d3c5028691c6cf9de31256bf30155
|
[
"MIT"
] | null | null | null |
src/models/tuned_xgboost.py
|
DS-IFT6750-project-collaboration/Hockey-all-star-analytics
|
1fc6a1c55d8d3c5028691c6cf9de31256bf30155
|
[
"MIT"
] | null | null | null |
src/models/tuned_xgboost.py
|
DS-IFT6750-project-collaboration/Hockey-all-star-analytics
|
1fc6a1c55d8d3c5028691c6cf9de31256bf30155
|
[
"MIT"
] | null | null | null |
from os import environ
import numpy as np
import pandas as pd
from comet_ml import Experiment
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
np.random.seed(0)
def preprocess_data():
    # The dedicated split_dataset helper is not defined in this module, so a
    # plain random train/test split is used here to keep the function runnable.
    season_plays_df = pd.read_csv("./data/processed/plays_2015-2020.csv", index_col=False)
    train_df, test_df = train_test_split(season_plays_df, test_size=0.2)
    return train_df, test_df
def load_data(train=True):
if train:
X = np.load("./data/processed/x_train_2021-11-20.npy")
y = np.load("./data/processed/y_train_2021-11-20.npy")
if not train:
X = np.load("./data/processed/x_test_2021-11-20.npy")
y = np.load("./data/processed/y_test_2021-11-20.npy")
return X, y
def load_params():
params={
"n_estimators": 100,
"max_depth": 4,
"learning_rate": 0.1,
}
return params
def base_xgb(X, y, params):
model = XGBClassifier(objective="binary:logistic", **params)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
model.fit(x_train, y_train,
eval_set=[(x_test, y_test)],
eval_metric=["logloss", "error", "auc"]
)
return model
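# Illustrative evaluation helper (an assumption, not part of the original
# pipeline): one way to score the classifier returned by base_xgb. Note the
# split below is drawn independently of the split inside base_xgb, so it can
# overlap with the data the model was trained on.
def evaluate_model(model, X, y):
    from sklearn.metrics import roc_auc_score
    _, x_test, _, y_test = train_test_split(X, y, test_size=0.3)
    probas = model.predict_proba(x_test)[:, 1]  # probability of the positive class
    return roc_auc_score(y_test, probas)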
if __name__ == "__main__":
experiment = Experiment(project_name="hockey-all-star-analytics")
X, y = load_data()
params = load_params()
model = base_xgb(X, y, params)
experiment.end()
| 26.150943
| 90
| 0.652958
|
34c2f33e64da5a2937b9fcb949a753daedc32d12
| 7,187
|
py
|
Python
|
openprocurement/bot/risk_indicators/bridge.py
|
ProzorroUKR/openprocurement.bot.risk_indicators
|
55ba12812c1494e4b4331a85f5bc79caf42d6bca
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/bot/risk_indicators/bridge.py
|
ProzorroUKR/openprocurement.bot.risk_indicators
|
55ba12812c1494e4b4331a85f5bc79caf42d6bca
|
[
"Apache-2.0"
] | 13
|
2018-07-17T08:29:56.000Z
|
2018-12-21T15:53:07.000Z
|
openprocurement/bot/risk_indicators/bridge.py
|
ProzorroUKR/openprocurement.bot.risk_indicators
|
55ba12812c1494e4b4331a85f5bc79caf42d6bca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from collections import defaultdict
from time import sleep
from urllib import quote_plus
import requests
import logging
logging.basicConfig()
logger = logging.getLogger("RiskIndicatorBridge")
class RiskIndicatorBridge(object):
def __init__(self, config):
config = config["main"]
self.indicators_host = config["indicators_host"]
self.indicators_proxy = config.get("indicators_proxy")
self.queue_limit = config.get("queue_limit", 100)
self.monitors_host = config["monitors_host"]
self.monitors_token = config["monitors_token"]
self.skip_monitoring_statuses = config.get("skip_monitoring_statuses", ("active", "draft"))
self.run_interval = timedelta(seconds=config.get("run_interval", 24 * 3600))
self.queue_error_interval = config.get("queue_error_interval", 30 * 60)
self.request_retries = config.get("request_retries", 5)
self.request_timeout = config.get("request_timeout", 10)
self.process_stats = defaultdict(int)
def run(self):
while True:
start = datetime.now()
try:
self.process_risks()
except Exception as e:
logger.exception(e)
sleep_seconds = self.queue_error_interval
else:
run_time = datetime.now() - start
sleep_seconds = (self.run_interval - run_time).seconds
if sleep_seconds > 0:
logger.info("Sleep for {} seconds".format(sleep_seconds))
sleep(sleep_seconds)
def process_risks(self):
self.process_stats = defaultdict(int)
for risk in self.queue:
try:
self.process_risk(risk)
except Exception as e:
logger.exception(e)
self.process_stats["failed"] += 1
logger.info("Risk processing finished: {}".format(dict(self.process_stats)))
def process_risk(self, risk):
self.process_stats["processed"] += 1
if risk["topRisk"]:
self.process_stats["processed_top"] += 1
monitorings = self.get_tender_monitoring_list(risk["tenderOuterId"])
has_live_monitoring = any(m["status"] in self.skip_monitoring_statuses for m in monitorings)
if not has_live_monitoring:
self.process_stats["processed_to_start"] += 1
details = self.get_item_details(risk["tenderId"])
self.start_monitoring(risk, details)
# Access APIs methods #
@property
def queue(self):
regions = self.request("{}region-indicators-queue/regions/".format(self.indicators_host))
for region in regions:
page, total_pages = 0, 1
while page < total_pages:
url = "{}region-indicators-queue/?region={}&limit={}&page={}".format(
self.indicators_host,
quote_plus(region.encode('utf-8')),
self.queue_limit,
page
)
response = self.request(url)
data = response.get("data", [])
for risk in data:
yield risk
total_pages = response.get("pagination", {}).get("totalPages", 1)
page += 1
def get_item_details(self, item_id):
url = "{}tenders/{}".format(self.indicators_host, item_id)
return self.request(url)
def get_tender_monitoring_list(self, tender_id):
url = "{}tenders/{}/monitorings?mode=draft".format(self.monitors_host, tender_id)
response = self.request(
url,
headers={
"Authorization": "Bearer {}".format(self.monitors_token)
}
)
return response["data"]
def start_monitoring(self, risk_info, details):
indicators_info = {i["indicatorId"]: i for i in details["indicatorsInfo"]}
indicators = [(i["indicatorCode"], i["value"])
for key in ("tenderIndicators", "lotIndicators")
for i in details["indicators"].get(key)]
# first with value==True, then sort by id
indicators = list(sorted(indicators, key=lambda e: (not e[1], e[0])))
status_to_stages = {
'active.enquiries': 'planning',
'active.tendering': 'planning',
'active': 'planning',
'active.pre-qualification': 'awarding',
'active.pre-qualification.stand-still': 'awarding',
'active.auction': 'awarding',
'active.qualification': 'awarding',
'active.awarded': 'awarding',
'award:status:active': 'awarding',
'unsuccessful': 'contracting',
'cancelled': 'contracting',
'complete': 'contracting',
}
try:
stages = [status_to_stages[details['status']]]
except KeyError:
            logger.warning('Unable to match risk status "%s" to procuringStages' % details['status'])
stages = []
self.request(
"{}monitorings".format(self.monitors_host),
method="post",
json={
"data": {
"tender_id": details["id"],
"reasons": ["indicator"],
"procuringStages": list(stages),
"riskIndicators": [uid for uid, value in indicators if value == 1],
"riskIndicatorsTotalImpact": risk_info.get("tenderScore"),
"riskIndicatorsRegion": risk_info.get("region"),
}
},
headers={
"Authorization": "Bearer {}".format(self.monitors_token)
}
)
self.process_stats["created"] += 1
# Helper methods #
class TerminateExecutionException(Exception):
pass
def request(self, url, method="get", **kwargs):
if url.startswith(self.indicators_host) and self.indicators_proxy:
kwargs.update(proxies={
"http": self.indicators_proxy,
"https": self.indicators_proxy,
})
func = getattr(requests, method)
timeout = kwargs.pop("timeout", self.request_timeout)
tries = self.request_retries
while tries:
try:
response = func(url, timeout=timeout, **kwargs)
except Exception as e:
logger.exception(e)
else:
status_ok = 201 if method == "post" else 200
if response.status_code == status_ok:
try:
json_res = response.json()
except Exception as e:
logger.exception(e)
else:
return json_res
else:
logger.error("Unsuccessful response code: {}".format(response.status_code))
sleep(self.request_retries - tries)
tries -= 1
raise self.TerminateExecutionException("Access problems with {} {}".format(method, url))
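# --- Illustrative usage (sketch, not part of the original module) ---
# A minimal config with only the required keys read in __init__ above;
# the hosts and token below are hypothetical placeholders.
if __name__ == "__main__":
    example_config = {
        "main": {
            "indicators_host": "https://indicators.example.org/api/",
            "monitors_host": "https://audit.example.org/api/2.4/",
            "monitors_token": "monitoring-api-token",
        }
    }
    bridge = RiskIndicatorBridge(example_config)
    bridge.run()  # loops forever, doing one queue pass per run_interval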
| 35.579208
| 105
| 0.558787
|
d8756bce58b2f68b84f58893c616299a922d9c1a
| 5,234
|
py
|
Python
|
result.py
|
AlexLito666/my_project
|
16e92d8e8b5b78a3a6b6ef399539c0876a6839d3
|
[
"CC0-1.0"
] | null | null | null |
result.py
|
AlexLito666/my_project
|
16e92d8e8b5b78a3a6b6ef399539c0876a6839d3
|
[
"CC0-1.0"
] | null | null | null |
result.py
|
AlexLito666/my_project
|
16e92d8e8b5b78a3a6b6ef399539c0876a6839d3
|
[
"CC0-1.0"
] | null | null | null |
from pygame import *
from random import randint
# load the font functions separately
font.init()
font1 = font.SysFont("verdana", 80)
win = font1.render('YOU WIN!', True, (255, 255, 255))
lose = font1.render('YOU LOSE!', True, (180, 0, 0))
font2 = font.Font(None, 36)
# background music
mixer.init()
mixer.music.load('space.ogg')
mixer.music.play()
fire_sound = mixer.Sound('fire.ogg')
# we need these images:
img_back = "galaxy.jpg" # game background
img_bullet = "bullet.png" # bullet
img_hero = "rocket.png" # hero
img_enemy = "ufo.png" # enemy
score = 0 # ships shot down
goal = 10 # this many ships must be shot down to win
lost = 0 # ships missed
max_lost = 3 # we lose if this many are missed
# parent class for the other sprites
class GameSprite(sprite.Sprite):
    # class constructor
def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):
        # call the parent class (Sprite) constructor:
sprite.Sprite.__init__(self)
        # every sprite must store an image property - the picture
self.image = transform.scale(image.load(player_image), (size_x, size_y))
self.speed = player_speed
        # every sprite must store a rect property - the rectangle it is inscribed in
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
    # method that draws the hero on the window
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
# main player class
class Player(GameSprite):
    # method for controlling the sprite with the keyboard arrow keys
def update(self):
keys = key.get_pressed()
if keys[K_LEFT] and self.rect.x > 5:
self.rect.x -= self.speed
if keys[K_RIGHT] and self.rect.x < win_width - 80:
self.rect.x += self.speed
# метод "выстрел" (используем место игрока, чтобы создать там пулю)
def fire(self):
bullet = Bullet(img_bullet, self.rect.centerx, self.rect.top, 15, 20, -15)
bullets.add(bullet)
# enemy sprite class
class Enemy(GameSprite):
    # enemy movement
def update(self):
self.rect.y += self.speed
global lost
        # resets to the top (and counts as missed) when it reaches the edge of the screen
if self.rect.y > win_height:
self.rect.x = randint(80, win_width - 80)
self.rect.y = 0
lost = lost + 1
# bullet sprite class
class Bullet(GameSprite):
    # bullet movement
def update(self):
self.rect.y += self.speed
        # disappears when it reaches the edge of the screen
if self.rect.y < 0:
self.kill()
# create the window
win_width = 700
win_height = 500
display.set_caption("Shooter")
window = display.set_mode((win_width, win_height))
background = transform.scale(image.load(img_back), (win_width, win_height))
# create the sprites
ship = Player(img_hero, 5, win_height - 100, 80, 100, 10)
# create the group of enemy sprites
monsters = sprite.Group()
for i in range(1, 6):
monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(1, 5))
monsters.add(monster)
bullets = sprite.Group()
# переменная "игра закончилась": как только там True, в основном цикле перестают работать спрайты
finish = False
# Main game loop:
run = True # the flag is cleared by the window close button
while run:
    # event: the Close button was pressed
for e in event.get():
if e.type == QUIT:
run = False
        # event: the space bar was pressed - the sprite fires
elif e.type == KEYDOWN:
if e.key == K_SPACE:
fire_sound.play()
ship.fire()
    # the game itself: sprite actions, game-rule checks, redrawing
if not finish:
        # redraw the background
window.blit(background,(0,0))
        # draw the text on the screen
text = font2.render("Счет: " + str(score), 1, (255, 255, 255))
window.blit(text, (10, 20))
text_lose = font2.render("Пропущено: " + str(lost), 1, (255, 255, 255))
window.blit(text_lose, (10, 50))
        # move the sprites
ship.update()
monsters.update()
bullets.update()
        # redraw them at their new positions on every iteration of the loop
ship.reset()
monsters.draw(window)
bullets.draw(window)
        # check bullet-monster collisions (both the monster and the bullet disappear on contact)
collides = sprite.groupcollide(monsters, bullets, True, True)
for c in collides:
            # this loop repeats once for every monster that was hit
score = score + 1
monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(1, 5))
monsters.add(monster)
        # possible loss: too many ships missed or the hero collided with an enemy
if sprite.spritecollide(ship, monsters, False) or lost >= max_lost:
finish = True # проиграли, ставим фон и больше не управляем спрайтами.
window.blit(lose, (200, 200))
        # win check: how many points have been scored?
if score >= goal:
finish = True
window.blit(win, (200, 200))
display.update()
    # the loop runs every 0.05 seconds
time.delay(50)
| 32.918239
| 97
| 0.641765
|
05c35c0d36705f2747deecd223796a5941c0cd25
| 6,350
|
py
|
Python
|
src/backends/cgpm_alter/parse.py
|
almartin82/bayeslite
|
a27f243b5f16cc6a01e84336a829e5b65d665b7b
|
[
"Apache-2.0"
] | 964
|
2015-09-24T15:02:05.000Z
|
2022-03-29T21:41:21.000Z
|
src/backends/cgpm_alter/parse.py
|
almartin82/bayeslite
|
a27f243b5f16cc6a01e84336a829e5b65d665b7b
|
[
"Apache-2.0"
] | 435
|
2015-09-23T16:46:58.000Z
|
2020-04-19T12:32:03.000Z
|
src/backends/cgpm_alter/parse.py
|
almartin82/bayeslite
|
a27f243b5f16cc6a01e84336a829e5b65d665b7b
|
[
"Apache-2.0"
] | 86
|
2015-10-24T20:08:30.000Z
|
2021-08-09T13:53:00.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from bayeslite.exception import BQLParseError
from bayeslite.util import casefold
from bayeslite.backends.cgpm_schema.parse import flatten
from bayeslite.backends.cgpm_schema.parse import intersperse
import grammar
'''
grep -o 'K_[A-Z][A-Z0-9_]*' < grammar.y | sort -u | awk '
{
sub("^K_", "", $1);
printf(" '\''%s'\'': grammar.K_%s,\n", tolower($1), $1);
}'
'''
KEYWORDS = {
'cluster': grammar.K_CLUSTER,
'context': grammar.K_CONTEXT,
'concentration': grammar.K_CONCENTRATION,
'dependent': grammar.K_DEPENDENT,
'ensure': grammar.K_ENSURE,
'in': grammar.K_IN,
'independent': grammar.K_INDEPENDENT,
'of': grammar.K_OF,
'parameter': grammar.K_PARAMETER,
'row': grammar.K_ROW,
'rows': grammar.K_ROWS,
'set': grammar.K_SET,
'singleton': grammar.K_SINGLETON,
'to': grammar.K_TO,
'variable': grammar.K_VARIABLE,
'variables': grammar.K_VARIABLES,
'view': grammar.K_VIEW,
'within': grammar.K_WITHIN,
}
PUNCTUATION = {
'(': grammar.T_LROUND,
')': grammar.T_RROUND,
'*': grammar.T_STAR,
',': grammar.T_COMMA,
}
def parse(tokens):
semantics = CGpmAlterSemantics()
parser = grammar.Parser(semantics)
for token in tokenize(tokens):
semantics.context.append(token)
if len(semantics.context) > 10:
semantics.context.pop(0)
parser.feed(token)
if semantics.errors:
raise BQLParseError(semantics.errors)
if semantics.failed:
raise BQLParseError(['parse failed mysteriously'])
assert semantics.phrases is not None
return semantics.phrases
def tokenize(tokenses):
for token in intersperse(',', [flatten(tokens) for tokens in tokenses]):
if isinstance(token, str):
if casefold(token) in KEYWORDS:
yield KEYWORDS[casefold(token)], token
elif token in PUNCTUATION:
yield PUNCTUATION[token], token
else: # XXX check for alphanumeric/_
yield grammar.L_NAME, token
elif isinstance(token, (int, float)):
yield grammar.L_NUMBER, token
else:
raise IOError('Invalid token: %r' % (token,))
yield 0, '' # EOF
class CGpmAlterSemantics(object):
def __init__(self):
self.context = []
self.errors = []
self.failed = False
self.phrases = None
def accept(self):
pass
def parse_failed(self):
self.failed = True
def syntax_error(self, (token, text)):
if token == -1: # error
self.errors.append('Bad token: %r' % (text,))
else:
self.errors.append("Syntax error near [%s] after [%s]" % (
text, ' '.join([str(t) for (_t, t) in self.context[:-1]])))
def p_alter_start(self, ps): self.phrases = ps
def p_phrases_one(self, p): return [p] if p else []
def p_phrases_many(self, ps, p):
if p: ps.append(p)
return ps
def p_phrase_none(self,): return None
def p_phrase_set_var_dependency(self, cols, dep):
return SetVarDependency(cols, dep)
def p_phrase_set_var_cluster(self, cols0, col1):
return SetVarCluster(cols0, col1)
def p_phrase_set_var_cluster_singleton(self, cols):
return SetVarCluster(cols, SingletonCluster)
def p_phrase_set_var_cluster_conc(self, conc):
return SetVarClusterConc(conc)
def p_phrase_set_row_cluster(self, rows0, row1, col):
return SetRowCluster(rows0, row1, col)
def p_phrase_set_row_cluster_singleton(self, rows0, col):
return SetRowCluster(rows0, SingletonCluster, col)
def p_phrase_set_row_cluster_conc(self, col, conc):
return SetRowClusterConc(col, conc)
def p_dependency_independent(self): return EnsureIndependent
def p_dependency_dependent(self): return EnsureDependent
def p_columns_one(self, col): return [col]
def p_columns_all(self): return SqlAll
def p_columns_many(self, cols): return cols
def p_column_list_one(self, col): return [col]
def p_column_list_many(self, cols, col): cols.append(col); return cols
def p_column_name_n(self, n): return n
def p_rows_one(self, row): return [row]
def p_rows_all(self): return SqlAll
def p_rows_many(self, rows): return rows
def p_row_list_one(self, row): return [row]
def p_row_list_many(self, rows, row): rows.append(row); return rows
def p_row_index_n(self, n): return n
def p_concentration_c(self, n): return n
SetVarDependency = namedtuple('SetVarDependency', [
'columns', # columns to modify
'dependency' # INDEPENDENT or DEPENDENT
])
SetVarCluster = namedtuple('SetVarCluster', [
'columns0', # columns to modify
'column1' # context column
])
SetVarClusterConc = namedtuple('SetVarClusterConc', [
'concentration' # real valued concentration parameter
])
SetRowCluster = namedtuple('SetRowCluster', [
'rows0', # rows to modify
'row1', # row whose cluster to move rows0 to
'column' # context column
])
SetRowClusterConc = namedtuple('SetRowClusterConc', [
'column', # context column
'concentration' # real valued concentration parameter
])
SqlAll = 'SqlAll'
EnsureDependent = 'EnsureDependent'
EnsureIndependent = 'EnsureIndependent'
SingletonCluster = 'SingletonCluster'
| 32.397959
| 77
| 0.631024
|
4096f4a75ea65f1c882e5e2ce31855fff150377d
| 1,422
|
py
|
Python
|
plugins/setunset.py
|
VenujaBots/Google-Translator-Bot
|
65e4290879c5ff16fb2b796995e1ac6a3bccf9cd
|
[
"Apache-2.0"
] | 4
|
2021-11-23T15:44:23.000Z
|
2021-12-17T14:46:37.000Z
|
plugins/setunset.py
|
really650a/Google-Translator-Bot
|
6d125e0527cfe08e91c3f836b6b9c1a0c3e03a07
|
[
"Apache-2.0"
] | null | null | null |
plugins/setunset.py
|
really650a/Google-Translator-Bot
|
6d125e0527cfe08e91c3f836b6b9c1a0c3e03a07
|
[
"Apache-2.0"
] | 2
|
2022-01-16T03:07:21.000Z
|
2022-03-04T10:54:13.000Z
|
from pyrogram import Client, filters
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup
)
from helper.database import set, unset, insert
from helper.langlist import langlist
@Client.on_message(filters.private &filters.command(['unset']))
async def unsetlg(client,message):
unset(int(message.chat.id))
await message.reply_text("Successfully removed custom default language")
@Client.on_message(filters.private & filters.command(['set']))
async def setlg(client, message):
    user_id = int(message.chat.id)
    insert(user_id)
    text = message.text
    textspit = text.split('/set')
    lg_code = textspit[1]
    if lg_code:
        cd = lg_code.lower().replace(" ", "")
        try:
            # look up the requested language in the imported langlist mapping
            lgcd = langlist[cd]
        except KeyError:
            await message.reply_text("❗️ This language is not available in my list.\nPlease check your spelling 😉", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Check List 🔥", url="https://github.com/VenujaBots/Google-Translator-Bot")]]))
            return
        set(user_id, lgcd)
        await message.reply_text(f"Successfully set custom default language **{cd}**")
    else:
        await message.reply_text("Please use this command with an argument.\n**For example: /set English**", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Support 🙋♀️", url="https://t.me/VndBotSupport")]]))
| 44.4375
| 244
| 0.674402
|
6c96f8aa51f474f2d8e2f6361e865440b2696316
| 171
|
py
|
Python
|
src/rifa/views.py
|
papeclem/Rifa-public
|
7cd32656b73a6bcc7f5b309fbe025fe7d76c12b8
|
[
"MIT"
] | 1
|
2021-09-06T12:53:37.000Z
|
2021-09-06T12:53:37.000Z
|
src/rifa/views.py
|
papeclem/Rifa-public
|
7cd32656b73a6bcc7f5b309fbe025fe7d76c12b8
|
[
"MIT"
] | null | null | null |
src/rifa/views.py
|
papeclem/Rifa-public
|
7cd32656b73a6bcc7f5b309fbe025fe7d76c12b8
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.shortcuts import render
def index(request):
return render(request, "rifa/index.html", context={'date': datetime.today()})
| 21.375
| 81
| 0.748538
|
2c496595962e20c1bcc87c8574de5a28cbf94ffe
| 1,039
|
py
|
Python
|
tensorpack/utils/image.py
|
Arturus/tensorpack
|
74aed2aace7a0ded197a432fac23b5da1fb27ede
|
[
"Apache-2.0"
] | null | null | null |
tensorpack/utils/image.py
|
Arturus/tensorpack
|
74aed2aace7a0ded197a432fac23b5da1fb27ede
|
[
"Apache-2.0"
] | null | null | null |
tensorpack/utils/image.py
|
Arturus/tensorpack
|
74aed2aace7a0ded197a432fac23b5da1fb27ede
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from . import logger
import cv2
__all__ = ['read_image']
try:
import jpeg4py as jpeg
except ImportError:
logger.warning('jpeg4py not installed, fallback to opencv for jpeg reading')
jpeg = None
def read_image(file_name, bgr=True):
"""
Reads jpeg by jpeg4py library. If jpeg4py is not installed or can't read the file, falls back to opencv imread()
:param file_name: Image file
:param bgr: Return BGR image instead of RGB
:return: 3-channel image in RGB or BGR order
"""
def read_opencv():
image = cv2.imread(file_name, cv2.IMREAD_COLOR)
if not bgr:
image = image[..., ::-1]
return image
if jpeg:
try:
im = jpeg.JPEG(file_name).decode()
if len(im.shape) == 2:
im = np.stack((im,)*3, axis=-1)
if bgr:
im = im[..., ::-1]
return im
except jpeg.JPEGRuntimeError:
# Fallback to read_opencv
pass
return read_opencv()
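# --- Illustrative usage (sketch, not part of the original module) ---
# "photo.jpg" is a hypothetical local file; read_image() transparently uses
# jpeg4py when available and falls back to cv2.imread() otherwise.
if __name__ == "__main__":
    img = read_image("photo.jpg", bgr=False)  # RGB channel order
    print(img.shape)  # (height, width, 3)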
| 25.975
| 116
| 0.583253
|
02f819fe49aeff2bb8464c94ec362e4166e181f1
| 5,492
|
py
|
Python
|
python/MDSplus/__init__.py
|
dgarnier/mdsplus
|
0643029addf583151e2795d4c014679aeaf587c0
|
[
"BSD-2-Clause"
] | null | null | null |
python/MDSplus/__init__.py
|
dgarnier/mdsplus
|
0643029addf583151e2795d4c014679aeaf587c0
|
[
"BSD-2-Clause"
] | null | null | null |
python/MDSplus/__init__.py
|
dgarnier/mdsplus
|
0643029addf583151e2795d4c014679aeaf587c0
|
[
"BSD-2-Clause"
] | null | null | null |
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
MDSplus
==========
Provides a object oriented interface to the MDSplus data system.
Information about the B{I{MDSplus Data System}} can be found at U{the MDSplus Homepage<http://www.mdsplus.org>}
@authors: Tom Fredian(MIT/USA), Gabriele Manduchi(CNR/IT), Josh Stillerman(MIT/USA)
@copyright: 2008
@license: GNU GPL
"""
def _mimport(name, level=1):
try:
if not __package__:
return __import__(name, globals())
return __import__(name, globals(), level=level)
except:
return __import__(name, globals())
# importing required packages
import os
import sys
import ctypes
import ctypes.util
import hashlib
import numpy
if sys.version_info < (2, 6):
raise Exception(
"Python version 2.6 or higher is now required to use the MDSplus python package.")
# importing libs for convenience and to early check if we have what we need in place
_ver = _mimport("version")
class libs:
MdsShr = _ver.load_library('MdsShr')
TreeShr = _ver.load_library('TreeShr')
TdiShr = _ver.load_library('TdiShr')
try:
Mdsdcl = _ver.load_library('Mdsdcl')
except:
Mdsdcl = None
try:
MdsIpShr = _ver.load_library('MdsIpShr')
except:
MdsIpShr = None
# check version
if __name__ == "MDSplus":
version_check = os.getenv("MDSPLUS_VERSION_CHECK", "on").lower()
version_check = not (
version_check == "0" or version_check == "off" or version_check == "no")
else:
version_check = False
try:
_version = _mimport('_version')
__version__ = _version.release_tag
__doc__ = """%s
Version: %s
Release Date: %s
""" % (__doc__, __version__, _version.release_date)
except:
if version_check and 'PYTHONPATH' in os.environ:
sys.stderr.write(
"PYTHONPATH was set to: %s and unable to import version information\n" % os.environ['PYTHONPATH'])
__version__ = "unknown"
if version_check:
def version_check():
try:
libs.MdsShr.MdsRelease.restype = ctypes.c_char_p
verchk = _ver.tostr(libs.MdsShr.MdsRelease())
except:
verchk = "unknown"
if verchk != __version__ or verchk == "unknown":
sys.stderr.write('''Warning:
The MDSplus python module version (%s) does not match
the version of the installed MDSplus libraries (%s).
Upgrade the module using the mdsplus/python/MDSplus directory of the
MDSplus installation or set PYTHONPATH=/usr/local/mdsplus/python/MDSplus.
''' % (__version__, verchk))
version_check()
del version_check
def load_package(gbls={}, version_check=False):
def loadmod_full(name, gbls):
mod = _mimport(name)
for key in mod.__dict__:
if not key.startswith('_'):
gbls[key] = mod.__dict__[key]
for name in ('os', 'sys', 'numpy', 'ctypes', 'libs', '__version__'):
gbls[name] = globals()[name]
loadmod_full('version', gbls)
loadmod_full('mdsdata', gbls)
loadmod_full('mdsscalar', gbls)
loadmod_full('mdsarray', gbls)
loadmod_full('compound', gbls)
loadmod_full('descriptor', gbls)
loadmod_full('apd', gbls)
loadmod_full('event', gbls)
loadmod_full('tree', gbls)
loadmod_full('scope', gbls)
loadmod_full('_mdsshr', gbls)
loadmod_full('mdsExceptions', gbls)
loadmod_full('mdsdcl', gbls)
if libs.MdsIpShr is not None:
loadmod_full('connection', gbls)
gbls["PyLib"] = os.getenv("PyLib")
return gbls
if __name__ == __package__:
def PyLib():
name = ('python%d%d' if sys.platform.startswith('win')
else 'python%d.%d') % sys.version_info[0:2]
try:
lib = ctypes.util.find_library(name)
except:
lib = None
if lib is None:
lib = os.getenv("PyLib", None)
if lib is not None:
return lib
lib = name
libs.MdsShr.MdsPutEnv(_ver.tobytes("%s=%s" % ("PyLib", lib)))
return lib
PyLib = PyLib()
load_package(globals(), True)
try:
_mimport("magic") # load ipython magic
except:
pass
del load_package
del _ver
| 33.084337
| 111
| 0.676438
|
13e093e9d3228e9e9c10ae22ce7bab05e4d658ce
| 608
|
py
|
Python
|
script/train.py
|
mcaniot/simclr-pytorch
|
3d4c1533869c6c54e1004d452eaf8dca3f506cc0
|
[
"MIT"
] | null | null | null |
script/train.py
|
mcaniot/simclr-pytorch
|
3d4c1533869c6c54e1004d452eaf8dca3f506cc0
|
[
"MIT"
] | null | null | null |
script/train.py
|
mcaniot/simclr-pytorch
|
3d4c1533869c6c54e1004d452eaf8dca3f506cc0
|
[
"MIT"
] | null | null | null |
# Basic libraries
import torch
from torchvision import datasets
# Custom libraries
import data_augmentation
from simclr_framework import SimCLRFramework
import tools
trainset = datasets.STL10(
'../data/', download=False, split="unlabeled",
transform=data_augmentation.CreatePosPair(tools.OUTPUT_SIZE))
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=tools.BATCH_SIZE, shuffle=True, drop_last=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
framework = SimCLRFramework(trainloader, tools.BATCH_SIZE, device)
framework.train()
framework.save_model()
| 28.952381
| 72
| 0.794408
|
ba01de9cdb1f56ee379570288aca006c33ce0a31
| 359
|
py
|
Python
|
el/commands/el_assetLauncher.py
|
deepakxyz/Edna
|
825393c6ca88bb31d35cc4c369ea7042b9048fcb
|
[
"MIT"
] | 2
|
2021-08-31T23:23:11.000Z
|
2022-03-07T03:18:01.000Z
|
el/commands/el_assetLauncher.py
|
deepakxyz/Elna
|
825393c6ca88bb31d35cc4c369ea7042b9048fcb
|
[
"MIT"
] | null | null | null |
el/commands/el_assetLauncher.py
|
deepakxyz/Elna
|
825393c6ca88bb31d35cc4c369ea7042b9048fcb
|
[
"MIT"
] | null | null | null |
import click
import os
from el.utils.el import Path, current_show, el
from el.core.levels import Level
path = '/mnt/y/pipeline/Shows/Mayday/asset_build'
@click.command()
def cli():
'''Asset Launcher'''
if Level.check('show'):
os.system('cmd.exe /c python Z:/Elna/el/app/assetLauncher.py')
else:
os.system('el goshow --current')
| 23.933333
| 71
| 0.671309
|
e7ae6d9ea2a84ec4fbdd92c64d9512e116dc3315
| 4,184
|
py
|
Python
|
tests/test_utils.py
|
Comcast/pipeline-deploy
|
e3f923d3683a091f4c5ee9e039672c3edcee0eb1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
Comcast/pipeline-deploy
|
e3f923d3683a091f4c5ee9e039672c3edcee0eb1
|
[
"Apache-2.0"
] | 1
|
2022-02-23T21:49:33.000Z
|
2022-02-23T21:49:33.000Z
|
tests/test_utils.py
|
Comcast/pipeline-deploy
|
e3f923d3683a091f4c5ee9e039672c3edcee0eb1
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2022 Comcast Cable Communications Management, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from pipeline_deploy import utils
from pytest_mock import MockerFixture
from requests import Response
from requests.exceptions import ConnectionError, HTTPError
class TestEatExceptions:
def test_when_no_exceptions_happen(self):
@utils.eat_exceptions(exit_on_error=False)
def test_function(x):
return x
assert test_function(1) == 1
def test_with_a_401_exception(self, mocker: MockerFixture):
mock_error_and_quit = mocker.patch('pipeline_deploy.utils.error_and_quit')
@utils.eat_exceptions(exit_on_error=False)
def test_function():
response = mocker.MagicMock()
type(response).status_code = mocker.PropertyMock(return_value=401)
raise HTTPError(response=response)
test_function()
assert mock_error_and_quit.call_count == 1
assert 'Your authentication information' in mock_error_and_quit.call_args[0][1]
def test_with_a_404_exception(self, mocker: MockerFixture):
mock_error_and_quit = mocker.patch('pipeline_deploy.utils.error_and_quit')
@utils.eat_exceptions(exit_on_error=False)
def test_function():
response = mocker.MagicMock()
type(response).status_code = mocker.PropertyMock(return_value=404)
raise HTTPError(response=response)
test_function()
assert mock_error_and_quit.call_count == 1
assert 'The requested remote resource could not be located.' == mock_error_and_quit.call_args[0][1]
def test_with_a_not_found_error(self, mocker: MockerFixture):
mock_error_and_quit = mocker.patch('pipeline_deploy.utils.error_and_quit')
@utils.eat_exceptions(exit_on_error=False)
def test_function():
response = mocker.MagicMock()
response.json = mocker.MagicMock(return_value={'error_code': 'RESOURCE_DOES_NOT_EXIST'})
raise HTTPError(response=response)
test_function()
assert mock_error_and_quit.call_count == 1
assert 'The requested remote resource could not be located.' == mock_error_and_quit.call_args[0][1]
def test_with_any_other_http_error(self, mocker: MockerFixture):
mock_error_and_quit = mocker.patch('pipeline_deploy.utils.error_and_quit')
@utils.eat_exceptions(exit_on_error=False)
def test_function():
response = mocker.MagicMock()
type(response).content = mocker.PropertyMock(return_value='Error Content')
raise HTTPError(response=response)
test_function()
assert mock_error_and_quit.call_count == 1
assert 'Error Content' == mock_error_and_quit.call_args[0][1]
def test_with_a_connection_error(self, mocker: MockerFixture):
mock_error_and_quit = mocker.patch('pipeline_deploy.utils.error_and_quit')
@utils.eat_exceptions(exit_on_error=False)
def test_function():
raise ConnectionError()
test_function()
assert mock_error_and_quit.call_count == 1
assert 'A connection could not be established' in mock_error_and_quit.call_args[0][1]
def test_with_an_unknown_error(self, mocker: MockerFixture):
mock_error_and_quit = mocker.patch('pipeline_deploy.utils.error_and_quit')
@utils.eat_exceptions(exit_on_error=False)
def test_function():
raise AttributeError()
test_function()
assert mock_error_and_quit.call_count == 1
assert 'An unexpected error has occurred.' == mock_error_and_quit.call_args[0][1]
| 35.760684
| 107
| 0.716061
|
30e9d9725f37facf0846afa897c9d2a479429185
| 2,640
|
py
|
Python
|
observatory-platform/observatory/platform/utils/airflow_utils.py
|
bmkramer/observatory-platform
|
0f1e439df386d255b521730eea9dc396831842cd
|
[
"Apache-2.0"
] | null | null | null |
observatory-platform/observatory/platform/utils/airflow_utils.py
|
bmkramer/observatory-platform
|
0f1e439df386d255b521730eea9dc396831842cd
|
[
"Apache-2.0"
] | null | null | null |
observatory-platform/observatory/platform/utils/airflow_utils.py
|
bmkramer/observatory-platform
|
0f1e439df386d255b521730eea9dc396831842cd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Aniek Roelofs
import json
import logging
from typing import Any, List, Optional, Union
import airflow.secrets
from airflow.models import Variable
from google.api_core.exceptions import PermissionDenied
def get_variable(key: str) -> Optional[str]:
""" Get Airflow Variable by iterating over all Secret Backends.
:param key: Variable Key
:return: Variable Value
"""
for secrets_backend in airflow.secrets.ensure_secrets_loaded():
# Added try/except statement.
try:
var_val = secrets_backend.get_variable(key=key)
except PermissionDenied as err:
print(f'Secret does not exist or cannot be accessed: {err}')
var_val = None
if var_val is not None:
return var_val
return None
class AirflowVariable(Variable):
__NO_DEFAULT_SENTINEL = object()
@classmethod
def get(cls, key: str, default_var: Any = __NO_DEFAULT_SENTINEL, deserialize_json: bool = False, session=None):
var_val = get_variable(key=key)
if var_val is None:
if default_var is not cls.__NO_DEFAULT_SENTINEL:
return default_var
else:
raise KeyError('Variable {} does not exist'.format(key))
else:
if deserialize_json:
return json.loads(var_val)
else:
return var_val
def change_task_log_level(new_levels: Union[List, int]) -> list:
""" Change the logging levels of all handlers for an airflow task.
:param new_levels: New logging levels that all handlers will be set to
:return: List of the old logging levels, can be used to restore logging levels.
"""
logger = logging.getLogger("airflow.task")
# stores logging levels
old_levels = []
for count, handler in enumerate(logger.handlers):
old_levels.append(handler.level)
if isinstance(new_levels, int):
handler.setLevel(new_levels)
else:
handler.setLevel(new_levels[count])
return old_levels
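# --- Illustrative usage (sketch, not part of the original module) ---
# Temporarily silence the airflow.task logger around a noisy call and then
# restore the handlers to the levels returned by change_task_log_level().
if __name__ == "__main__":
    old_levels = change_task_log_level(logging.ERROR)
    try:
        logging.getLogger("airflow.task").debug("suppressed while the level is ERROR")
    finally:
        change_task_log_level(old_levels)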
| 33
| 115
| 0.682576
|
9c19753ea1f0c2bdd24556ebeaf91781ce83f382
| 508
|
py
|
Python
|
scripts/tf_pt_conv2d_experiment.py
|
Conzel/zipnet
|
7a5a3d629f26cd5debacff3eb768f1dd86c1185e
|
[
"MIT"
] | 6
|
2021-09-16T06:38:47.000Z
|
2022-02-28T18:44:57.000Z
|
scripts/tf_pt_conv2d_experiment.py
|
Conzel/zipnet
|
7a5a3d629f26cd5debacff3eb768f1dd86c1185e
|
[
"MIT"
] | null | null | null |
scripts/tf_pt_conv2d_experiment.py
|
Conzel/zipnet
|
7a5a3d629f26cd5debacff3eb768f1dd86c1185e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import torch
import numpy as np
import torch.nn.functional as f
import tensorflow as tf
# random kernel (out_ch, in_ch, kH, kW) and input (N, C, H, W) in PyTorch layout
k = np.random.rand(3, 2, 5, 5).astype(np.float32)
x = np.random.rand(1, 2, 10, 10).astype(np.float32)
# reorder to the TensorFlow layouts: NHWC for the input, HWIO for the kernel
x_ = np.moveaxis(x, 1, 3)
k_ = np.moveaxis(k, [0, 1], [3, 2])
tf.nn.conv2d(x_, k_, strides=1, padding="VALID")
o_ = tf.nn.conv2d(x_, k_, strides=1, padding="VALID")
o = f.conv2d(torch.FloatTensor(x), torch.FloatTensor(k), stride=1)
o = o.numpy()
# move the PyTorch result to NHWC so the two outputs can be compared directly
o = np.moveaxis(o, 1, 3)
o_ = o_.numpy()
# the summed elementwise difference should be ~0
(o - o_).sum()
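# Added sanity check (illustrative): once both outputs are in NHWC order they
# should agree up to float32 rounding error.
np.testing.assert_allclose(o, o_, rtol=1e-4, atol=1e-5)
print("max abs difference between TF and PyTorch conv2d:", np.abs(o - o_).max())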
| 28.222222
| 66
| 0.659449
|
0085255405d2ac0bd3c198eb210b20ea0a9617e6
| 6,000
|
py
|
Python
|
code/tensor2tensor/tensor2tensor/utils/devices.py
|
cake-lab/transient_deep_learning
|
87c6717e4026801623cf0327e78ad57f51cb1461
|
[
"Apache-2.0"
] | 11
|
2019-12-12T17:34:04.000Z
|
2022-03-03T11:21:13.000Z
|
tensor2tensor/utils/devices.py
|
smit-hinsu/tensor2tensor
|
8d3d175d649680c8e5b98a1b1c1c5e782ff492ac
|
[
"Apache-2.0"
] | 63
|
2017-12-19T20:29:10.000Z
|
2021-08-04T21:49:36.000Z
|
tensor2tensor/utils/devices.py
|
smit-hinsu/tensor2tensor
|
8d3d175d649680c8e5b98a1b1c1c5e782ff492ac
|
[
"Apache-2.0"
] | 3
|
2019-09-16T19:35:42.000Z
|
2020-09-04T02:23:13.000Z
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device placement and data parallelism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensor2tensor.utils import expert_utils as eu
import tensorflow as tf
def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False):
"""Over which devices do we split each training batch.
In old-fashioned async mode, we split the batch over all GPUs on the
current worker.
In sync mode, we split the batch over all the parameter server GPUs.
This function returns an expert_utils.Parallelism object, which can be used
to build the model. It is configured in a way that any variables created
by `tf.get_variable` will be assigned to the parameter servers and shared
between datashards.
Args:
daisy_chain_variables: whether to copy variables in a daisy chain on GPUs.
all_workers: whether the devices are all async workers or just this one.
Returns:
a expert_utils.Parallelism.
"""
dp_arg_names = inspect.getargspec(data_parallelism).args
blacklist = ["daisy_chain_variables", "all_workers"]
kwargs = {}
for arg in dp_arg_names:
if arg in blacklist:
continue
kwargs[arg] = getattr(tf.flags.FLAGS, arg)
return data_parallelism(
daisy_chain_variables=daisy_chain_variables,
all_workers=all_workers,
**kwargs)
def data_parallelism(daisy_chain_variables=True,
all_workers=False,
ps_replicas=0,
ps_job="/job:ps",
ps_gpu=0,
schedule="continuous_train_and_eval",
sync=False,
worker_gpu=1,
worker_replicas=1,
worker_id=0,
gpu_order="",
locally_shard_to_cpu=False,
worker_job="/job:localhost",
no_data_parallelism=False):
"""See data_parallelism_from_flags."""
tf.logging.info("schedule=%s" % schedule)
tf.logging.info("worker_gpu=%s" % worker_gpu)
tf.logging.info("sync=%s" % sync)
def _ps_replicas(all_workers=False):
if all_workers:
return list(range(ps_replicas))
# Worker K will be using replicas {0,...n-1} + K*n if we have n replicas.
num_replicas = ps_replicas // worker_replicas
return [d + worker_id * num_replicas for d in range(num_replicas)]
def _gpu_order(num_gpus):
if gpu_order:
ret = [int(s) for s in gpu_order.split(" ")]
if len(ret) == num_gpus:
return ret
return list(range(num_gpus))
def _ps_gpus(all_workers=False):
ps_gpus = []
for d in _ps_replicas(all_workers=all_workers):
ps_gpus.extend([(d, gpu) for gpu in _gpu_order(ps_gpu)])
return ps_gpus
def ps_devices(all_workers=False):
"""List of ps devices (where to put the experts).
Args:
all_workers: whether the list is for all async workers or just this one.
Returns:
a list of device names
"""
if ps_replicas > 0:
if ps_gpu > 0:
return [
ps_job + "/task:%d/GPU:%d" % (d, gpu)
for (d, gpu) in _ps_gpus(all_workers=all_workers)
]
else:
return [
ps_job + "/task:%d" % d
for d in _ps_replicas(all_workers=all_workers)
]
else:
if worker_gpu > 0:
return ["gpu:%d" % d for d in _gpu_order(worker_gpu)]
else:
return [""]
def _replica_device_setter(worker_device):
if ps_replicas == 0:
return worker_device
return tf.train.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_replicas,
ps_device=ps_job + "/GPU:0" if ps_gpu > 0 else ps_job)
is_single_machine = ps_replicas == 0 and worker_replicas == 1
if no_data_parallelism:
datashard_devices = [""]
caching_devices = None
elif is_single_machine:
tf.logging.warn(
"Schedule=%s. Assuming that training is running on a single machine.",
schedule)
datashard_devices = ["gpu:%d" % d for d in _gpu_order(worker_gpu)]
if locally_shard_to_cpu or worker_gpu < 1:
datashard_devices += ["cpu:0"]
caching_devices = None
elif sync and ps_replicas > 0:
# compute on ps
datashard_devices = [
_replica_device_setter(d) for d in ps_devices(all_workers=all_workers)
]
if ps_gpu > 0 and ps_replicas > 1:
caching_devices = [
ps_job + "/task:%d/cpu:0" % d
for (d, _) in _ps_gpus(all_workers=all_workers)
]
else:
caching_devices = None
else:
# compute on worker - this is either a single-worker setup or asynchronous
# with parameter servers.
if worker_gpu > 1:
datashard_devices = [
_replica_device_setter(worker_job + "/GPU:%d" % d)
for d in _gpu_order(worker_gpu)
]
caching_devices = None
else:
datashard_devices = [_replica_device_setter(worker_job)]
caching_devices = None
tf.logging.info("datashard_devices: %s", datashard_devices)
tf.logging.info("caching_devices: %s", caching_devices)
tf.logging.info("ps_devices: %s", ps_devices(all_workers=all_workers))
return eu.Parallelism(
datashard_devices,
caching_devices=caching_devices,
daisy_chain_variables=daisy_chain_variables,
ps_devices=ps_devices(all_workers=all_workers))
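# --- Illustrative usage (sketch, not part of the original module) ---
# Assuming a single machine with two visible GPUs, the returned Parallelism
# object calls the given function once per datashard device.
if __name__ == "__main__":
    dp = data_parallelism(worker_gpu=2, daisy_chain_variables=False)
    sharded_sums = dp(tf.add,
                      [tf.constant(1.0), tf.constant(2.0)],
                      [tf.constant(10.0), tf.constant(20.0)])
    print(sharded_sums)  # one output tensor per datashard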
| 33.519553
| 79
| 0.666
|
6fd9ea4ef5445d5c6aba24ced013617ccc50a903
| 13,503
|
py
|
Python
|
tests/test_data/test_pipelines/test_indoor_pipeline.py
|
konyul/monovideo
|
f50db55c324c2ab4a66d414b47f0558b72a3a009
|
[
"Apache-2.0"
] | 5
|
2022-01-26T13:03:12.000Z
|
2022-01-27T03:59:09.000Z
|
tests/test_data/test_pipelines/test_indoor_pipeline.py
|
konyul/monovideo
|
f50db55c324c2ab4a66d414b47f0558b72a3a009
|
[
"Apache-2.0"
] | 1
|
2022-03-31T08:33:12.000Z
|
2022-03-31T08:35:55.000Z
|
tests/test_data/test_pipelines/test_indoor_pipeline.py
|
konyul/monovideo
|
f50db55c324c2ab4a66d414b47f0558b72a3a009
|
[
"Apache-2.0"
] | 1
|
2022-03-30T04:08:39.000Z
|
2022-03-30T04:08:39.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from os import path as osp
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import Compose
def test_scannet_pipeline():
class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')
np.random.seed(0)
pipelines = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=True,
load_dim=6,
use_dim=[0, 1, 2]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
with_mask_3d=True,
with_seg_3d=True),
dict(type='GlobalAlignment', rotation_axis=2),
dict(
type='PointSegClassMapping',
valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33,
34, 36, 39)),
dict(type='PointSample', num_points=5),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=1.0,
flip_ratio_bev_vertical=1.0),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.087266, 0.087266],
scale_ratio_range=[1.0, 1.0],
shift_height=True),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(
type='Collect3D',
keys=[
'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
'pts_instance_mask'
]),
]
pipeline = Compose(pipelines)
info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
results = dict()
data_path = './tests/data/scannet'
results['pts_filename'] = osp.join(data_path, info['pts_path'])
if info['annos']['gt_num'] != 0:
scannet_gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32)
scannet_gt_labels_3d = info['annos']['class'].astype(np.long)
else:
scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
scannet_gt_labels_3d = np.zeros((1, ), dtype=np.long)
results['ann_info'] = dict()
results['ann_info']['pts_instance_mask_path'] = osp.join(
data_path, info['pts_instance_mask_path'])
results['ann_info']['pts_semantic_mask_path'] = osp.join(
data_path, info['pts_semantic_mask_path'])
results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(
scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)
results['ann_info']['gt_labels_3d'] = scannet_gt_labels_3d
results['ann_info']['axis_align_matrix'] = \
info['annos']['axis_align_matrix']
results['img_fields'] = []
results['bbox3d_fields'] = []
results['pts_mask_fields'] = []
results['pts_seg_fields'] = []
results = pipeline(results)
points = results['points']._data
gt_bboxes_3d = results['gt_bboxes_3d']._data
gt_labels_3d = results['gt_labels_3d']._data
pts_semantic_mask = results['pts_semantic_mask']._data
pts_instance_mask = results['pts_instance_mask']._data
expected_points = torch.tensor(
[[1.8339e+00, 2.1093e+00, 2.2900e+00, 2.3895e+00],
[3.6079e+00, 1.4592e-01, 2.0687e+00, 2.1682e+00],
[4.1886e+00, 5.0614e+00, -1.0841e-01, -8.8736e-03],
[6.8790e+00, 1.5086e+00, -9.3154e-02, 6.3816e-03],
[4.8253e+00, 2.6668e-01, 1.4917e+00, 1.5912e+00]])
expected_gt_bboxes_3d = torch.tensor(
[[-1.1835, -3.6317, 1.8565, 1.7577, 0.3761, 0.5724, 0.0000],
[-3.1832, 3.2269, 1.5268, 0.6727, 0.2251, 0.6715, 0.0000],
[-0.9598, -2.2864, 0.6165, 0.7506, 2.5709, 1.2145, 0.0000],
[-2.6988, -2.7354, 0.9722, 0.7680, 1.8877, 0.2870, 0.0000],
[3.2989, 0.2885, 1.0712, 0.7600, 3.8814, 2.1603, 0.0000]])
expected_gt_labels_3d = np.array([
6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0,
0, 0, 0, 5, 5, 5
])
expected_pts_semantic_mask = np.array([0, 18, 18, 18, 18])
expected_pts_instance_mask = np.array([44, 22, 10, 10, 57])
assert torch.allclose(points, expected_points, 1e-2)
assert torch.allclose(gt_bboxes_3d.tensor[:5, :], expected_gt_bboxes_3d,
1e-2)
assert np.all(gt_labels_3d.numpy() == expected_gt_labels_3d)
assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask)
def test_scannet_seg_pipeline():
class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'otherfurniture')
np.random.seed(0)
pipelines = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24,
28, 33, 34, 36, 39),
max_cat_id=40),
dict(
type='IndoorPatchPointSample',
num_points=5,
block_size=1.5,
ignore_index=len(class_names),
use_normalized_coord=True,
enlarge_size=0.2,
min_unique_num=None),
dict(type='NormalizePointsColor', color_mean=None),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
pipeline = Compose(pipelines)
info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
results = dict()
data_path = './tests/data/scannet'
results['pts_filename'] = osp.join(data_path, info['pts_path'])
results['ann_info'] = dict()
results['ann_info']['pts_semantic_mask_path'] = osp.join(
data_path, info['pts_semantic_mask_path'])
results['pts_seg_fields'] = []
results = pipeline(results)
points = results['points']._data
pts_semantic_mask = results['pts_semantic_mask']._data
# build sampled points
scannet_points = np.fromfile(
osp.join(data_path, info['pts_path']), dtype=np.float32).reshape(
(-1, 6))
scannet_choices = np.array([87, 34, 58, 9, 18])
scannet_center = np.array([-2.1772466, -3.4789145, 1.242711])
scannet_center[2] = 0.0
scannet_coord_max = np.amax(scannet_points[:, :3], axis=0)
expected_points = np.concatenate([
scannet_points[scannet_choices, :3] - scannet_center,
scannet_points[scannet_choices, 3:] / 255.,
scannet_points[scannet_choices, :3] / scannet_coord_max
],
axis=1)
expected_pts_semantic_mask = np.array([13, 13, 12, 2, 0])
assert np.allclose(points.numpy(), expected_points, atol=1e-6)
assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
def test_s3dis_seg_pipeline():
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
'clutter')
np.random.seed(0)
pipelines = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=tuple(range(len(class_names))),
max_cat_id=13),
dict(
type='IndoorPatchPointSample',
num_points=5,
block_size=1.0,
ignore_index=len(class_names),
use_normalized_coord=True,
enlarge_size=0.2,
min_unique_num=None),
dict(type='NormalizePointsColor', color_mean=None),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
pipeline = Compose(pipelines)
info = mmcv.load('./tests/data/s3dis/s3dis_infos.pkl')[0]
results = dict()
data_path = './tests/data/s3dis'
results['pts_filename'] = osp.join(data_path, info['pts_path'])
results['ann_info'] = dict()
results['ann_info']['pts_semantic_mask_path'] = osp.join(
data_path, info['pts_semantic_mask_path'])
results['pts_seg_fields'] = []
results = pipeline(results)
points = results['points']._data
pts_semantic_mask = results['pts_semantic_mask']._data
# build sampled points
s3dis_points = np.fromfile(
osp.join(data_path, info['pts_path']), dtype=np.float32).reshape(
(-1, 6))
s3dis_choices = np.array([87, 37, 60, 18, 31])
s3dis_center = np.array([2.691, 2.231, 3.172])
s3dis_center[2] = 0.0
s3dis_coord_max = np.amax(s3dis_points[:, :3], axis=0)
expected_points = np.concatenate([
s3dis_points[s3dis_choices, :3] - s3dis_center,
s3dis_points[s3dis_choices, 3:] / 255.,
s3dis_points[s3dis_choices, :3] / s3dis_coord_max
],
axis=1)
expected_pts_semantic_mask = np.array([0, 1, 0, 8, 0])
assert np.allclose(points.numpy(), expected_points, atol=1e-6)
assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
def test_sunrgbd_pipeline():
class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
'dresser', 'night_stand', 'bookshelf', 'bathtub')
np.random.seed(0)
pipelines = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=True,
load_dim=6,
use_dim=[0, 1, 2]),
dict(type='LoadAnnotations3D'),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=1.0,
),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.523599, 0.523599],
scale_ratio_range=[0.85, 1.15],
shift_height=True),
dict(type='PointSample', num_points=5),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(
type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
]
pipeline = Compose(pipelines)
results = dict()
info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')[0]
data_path = './tests/data/sunrgbd'
results['pts_filename'] = osp.join(data_path, info['pts_path'])
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32)
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((1, 7), dtype=np.float32)
gt_labels_3d = np.zeros((1, ), dtype=np.long)
# prepare input of pipeline
results['ann_info'] = dict()
results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(gt_bboxes_3d)
results['ann_info']['gt_labels_3d'] = gt_labels_3d
results['img_fields'] = []
results['bbox3d_fields'] = []
results['pts_mask_fields'] = []
results['pts_seg_fields'] = []
results = pipeline(results)
points = results['points']._data
gt_bboxes_3d = results['gt_bboxes_3d']._data
gt_labels_3d = results['gt_labels_3d']._data
expected_points = torch.tensor([[0.8678, 1.3470, 0.1105, 0.0905],
[0.8707, 1.3635, 0.0437, 0.0238],
[0.8636, 1.3511, 0.0504, 0.0304],
[0.8690, 1.3461, 0.1265, 0.1065],
[0.8668, 1.3434, 0.1216, 0.1017]])
# Depth coordinate system update: only yaw changes since rotation in depth
# is counter-clockwise and yaw angle is clockwise originally
# But heading angles in sunrgbd data also reverses the sign
# and after horizontal flip the sign reverse again
rotation_angle = info['annos']['rotation_y']
expected_gt_bboxes_3d = torch.tensor(
[[
-1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245,
1.3989 + 0.047001579467984445 * 2 - 2 * rotation_angle[0]
],
[
-2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510,
1.4446 + 0.047001579467984445 * 2 - 2 * rotation_angle[1]
],
[
0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563,
2.9924 + 0.047001579467984445 * 2 - 2 * rotation_angle[2]
]]).float()
expected_gt_labels_3d = np.array([0, 7, 6])
assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
assert torch.allclose(points, expected_points, 1e-2)
| 39.59824
| 79
| 0.586981
|
bdeb7f486ccd9f1453b9d20c5687c9ebdc70853b
| 3,058
|
py
|
Python
|
yolo_dataset.py
|
marcin-sielski/keras-yolo3
|
cbde1c8d7daf0c7f710d06d55535ed8fe6bded20
|
[
"MIT"
] | null | null | null |
yolo_dataset.py
|
marcin-sielski/keras-yolo3
|
cbde1c8d7daf0c7f710d06d55535ed8fe6bded20
|
[
"MIT"
] | null | null | null |
yolo_dataset.py
|
marcin-sielski/keras-yolo3
|
cbde1c8d7daf0c7f710d06d55535ed8fe6bded20
|
[
"MIT"
] | null | null | null |
import sys
import os
import argparse
from yolo import YOLO
from PIL import Image
from uuid import uuid4
def parse_args():
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument(
'--model_path', type=str,
help='path to model weight file, default ' +
YOLO.get_defaults("model_path")
)
parser.add_argument(
'--anchors_path', type=str,
help='path to anchor definitions, default ' +
YOLO.get_defaults("anchors_path")
)
parser.add_argument(
'--classes_path', type=str,
help='path to class definitions, default ' +
YOLO.get_defaults("classes_path")
)
parser.add_argument(
'--gpu_num', type=int,
help='Number of GPU to use, default ' +
str(YOLO.get_defaults("gpu_num"))
)
parser.add_argument(
'--score', type=float,
help='Score to use, default ' +
str(YOLO.get_defaults("score"))
)
parser.add_argument(
'--input_path', nargs='?', type=str, required=True, default='',
help = 'Input dataset path'
)
parser.add_argument(
'--output_path', nargs='?', type=str, required=True, default='',
help = 'Output dataset path'
)
parser.add_argument(
'--class_name', nargs='?', type=str, required=True, default='',
help = 'Name of the class used to create dataset'
)
parser.add_argument(
'--width', nargs='?', type=int, required=True,
help = 'With of the output images'
)
parser.add_argument(
'--height', nargs='?', type=int, required=True,
help = 'Height of the output images'
)
return parser.parse_args()
def create_dataset(**kwargs):
yolo = YOLO(**dict(kwargs))
input_path = os.path.expanduser(kwargs.get('input_path', ''))
output_path = os.path.expanduser(kwargs.get('output_path', ''))
class_name = kwargs.get('class_name', '')
class_names = yolo._get_class()
if class_name not in class_names:
yolo.close_session()
return
class_name = class_name.replace(' ', '_')
output_path = output_path + '/' + class_name + '_dataset/'
try:
os.makedirs(output_path)
except OSError:
pass
for root, _, files in os.walk(input_path):
label = os.path.basename(root).lower()
if len(files) > 0:
try:
os.makedirs(output_path + label)
except:
pass
for file in files:
input_file = root + '/' + file
try:
image = Image.open(input_file)
except:
continue
else:
_, images = yolo.detect_image(image)
for image in images:
output_file = output_path + label + '/' + str(uuid4()) + \
'.png'
image.save(output_file)
yolo.close_session()
if __name__ == '__main__':
create_dataset(**vars(parse_args()))
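# Example invocation (illustrative paths and class name only):
#   python yolo_dataset.py --input_path ./raw_images --output_path ./datasets \
#       --class_name "person" --width 416 --height 416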
| 25.697479
| 78
| 0.567037
|
4171b657bef5b27e648b940e3d6d1ff1d323c211
| 1,385
|
py
|
Python
|
opencart_autogen.py
|
AndrewJanuary/playwright_demo
|
27501f2d6fa5be3912a6062faacf27430be4d747
|
[
"MIT"
] | null | null | null |
opencart_autogen.py
|
AndrewJanuary/playwright_demo
|
27501f2d6fa5be3912a6062faacf27430be4d747
|
[
"MIT"
] | null | null | null |
opencart_autogen.py
|
AndrewJanuary/playwright_demo
|
27501f2d6fa5be3912a6062faacf27430be4d747
|
[
"MIT"
] | null | null | null |
from playwright.sync_api import sync_playwright
def run(playwright):
browser = playwright.chromium.launch(headless=False)
context = browser.new_context()
# Open new page
page = context.new_page()
# Go to http://opencart.abstracta.us/
page.goto("http://opencart.abstracta.us/")
# Click [placeholder="Search"]
page.click("[placeholder=\"Search\"]")
# Fill [placeholder="Search"]
page.fill("[placeholder=\"Search\"]", "iphone")
# Click text=Your Store 0 item(s) - $0.00 Your shopping cart is empty! >> button
page.click("text=Your Store 0 item(s) - $0.00 Your shopping cart is empty! >> button")
# assert page.url == "http://opencart.abstracta.us/index.php?route=product/search&search=iphone"
# Click a:has-text("iPhone")
page.click("a:has-text(\"iPhone\")")
# assert page.url == "http://opencart.abstracta.us/index.php?route=product/product&product_id=40&search=iphone"
# Click text=Add to Cart
page.click("text=Add to Cart")
# Click button:has-text("1 item(s) - $123.20")
page.click("button:has-text(\"1 item(s) - $123.20\")")
# Click text=View Cart
page.click("text=View Cart")
# assert page.url == "http://opencart.abstracta.us/index.php?route=checkout/cart"
# ---------------------
context.close()
browser.close()
with sync_playwright() as playwright:
run(playwright)
| 40.735294
| 115
| 0.652708
|
4e9db236f08b466b5c3406a87bf149350a085824
| 3,143
|
py
|
Python
|
tests/test_annotated_regions.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | 16
|
2021-05-22T07:39:53.000Z
|
2022-02-23T14:50:38.000Z
|
tests/test_annotated_regions.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | null | null | null |
tests/test_annotated_regions.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | 3
|
2021-05-28T07:12:14.000Z
|
2021-11-28T02:10:48.000Z
|
import tvm
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
def check_region(region_set, target, args, nodes, rets):
region = region_set.get_region(args[0])
assert region
assert target == region.target
assert set(args) == set(region.args)
assert set(nodes) == set(region.nodes)
assert set(rets) == set(region.rets)
def test_region_set_creator_diamond():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test_target")
O_1 = relay.abs(cb_1)
ce_1 = compiler_end(O_1, "test_target")
ce_2 = compiler_end(O_1, "test_target")
cb_2 = compiler_begin(ce_1, "test_target")
O_2 = relay.nn.relu(cb_2)
ce_3 = compiler_end(O_2, "test_target")
cb_d = compiler_begin(ce_2, "default")
X = relay.tanh(cb_d)
ce_d = compiler_end(X, "default")
cb_3 = compiler_begin(ce_3, "test_target")
cb_4 = compiler_begin(ce_d, "test_target")
O_3 = relay.add(cb_3, cb_4)
ce_4 = compiler_end(O_3, "test_target")
diamond = relay.Function([data], ce_4)
region_set = relay.analysis.AnnotatedRegionSet(
diamond, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
)
assert len(region_set) == 4
check_region(
region_set,
"test_target",
[cb_1],
[cb_1, O_1, ce_1, ce_2],
[ce_1, ce_2],
)
check_region(
region_set,
"test_target",
[cb_2],
[cb_2, O_2, ce_3],
[ce_3],
)
check_region(
region_set,
"default",
[cb_d],
[cb_d, X, ce_d],
[ce_d],
)
check_region(
region_set,
"test_target",
[cb_3, cb_4],
[cb_3, cb_4, O_3, ce_4],
[ce_4],
)
def test_region_set_creator_merged():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test_target")
O_1 = relay.abs(cb_1)
ce_2 = compiler_end(O_1, "test_target")
O_2 = relay.nn.relu(O_1)
ce_3 = compiler_end(O_2, "test_target")
cb_d = compiler_begin(ce_2, "default")
X = relay.tanh(cb_d)
ce_d = compiler_end(X, "default")
cb_3 = compiler_begin(ce_3, "test_target")
cb_4 = compiler_begin(ce_d, "test_target")
O_3 = relay.add(cb_3, cb_4)
O_4 = relay.add(cb_3, cb_4)
O_5 = relay.Tuple([O_3, O_4])
ce_4 = compiler_end(O_5, "test_target")
merged = relay.Function([data], ce_4)
region_set = relay.analysis.AnnotatedRegionSet(
merged, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
)
assert len(region_set) == 3
check_region(
region_set,
"test_target",
[cb_1],
[cb_1, O_1, O_2, ce_2, ce_3],
[ce_2, ce_3],
)
check_region(
region_set,
"default",
[cb_d],
[cb_d, X, ce_d],
[ce_d],
)
check_region(
region_set,
"test_target",
[cb_3, cb_4],
[cb_3, cb_4, O_3, O_4, O_5, ce_4],
[ce_4],
)
if __name__ == "__main__":
test_region_set_creator_diamond()
test_region_set_creator_merged()
| 27.330435
| 99
| 0.604836
|
801629454fa5a1a3b006a268aa7b2f0475723251
| 5,673
|
py
|
Python
|
events/worldcon75/management/commands/setup_worldcon75.py
|
Siikakala/kompassi
|
14cdcd966ab689d762cc885e28b6d15465c216f0
|
[
"CC-BY-3.0"
] | null | null | null |
events/worldcon75/management/commands/setup_worldcon75.py
|
Siikakala/kompassi
|
14cdcd966ab689d762cc885e28b6d15465c216f0
|
[
"CC-BY-3.0"
] | null | null | null |
events/worldcon75/management/commands/setup_worldcon75.py
|
Siikakala/kompassi
|
14cdcd966ab689d762cc885e28b6d15465c216f0
|
[
"CC-BY-3.0"
] | null | null | null |
import os
from datetime import datetime, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from dateutil.tz import tzlocal
from core.utils import slugify
def mkpath(*parts):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', *parts))
class Setup(object):
def __init__(self):
self._ordering = 0
def get_ordering_number(self):
self._ordering += 10
return self._ordering
def setup(self, test=False):
self.test = test
self.tz = tzlocal()
self.setup_core()
self.setup_labour()
def setup_core(self):
from core.models import Venue, Event
self.venue, unused = Venue.objects.get_or_create(name='Messukeskus', defaults=dict(
name_inessive='Messukeskuksessa',
))
self.event, unused = Event.objects.get_or_create(slug='worldcon75', defaults=dict(
name='Worldcon 75',
name_genitive='Worldcon 75 -tapahtuman',
name_illative='Worldcon 75 -tapahtumaan',
name_inessive='Worldcon 75 -tapahtumassa',
homepage_url='http://www.worldcon.fi',
organization_name='Maa ja ilma ry',
organization_url='http://www.worldcon.fi',
start_time=datetime(2017, 8, 9, 9, 0, tzinfo=self.tz),
end_time=datetime(2017, 8, 13, 18, 0, tzinfo=self.tz),
venue=self.venue,
))
def setup_labour(self):
from core.models import Person
from labour.models import (
AlternativeSignupForm,
Job,
JobCategory,
LabourEventMeta,
PersonnelClass,
Qualification,
)
from ...models import SignupExtra
from django.contrib.contenttypes.models import ContentType
labour_admin_group, = LabourEventMeta.get_or_create_groups(self.event, ['admins'])
if self.test:
person, unused = Person.get_or_create_dummy()
labour_admin_group.user_set.add(person.user)
content_type = ContentType.objects.get_for_model(SignupExtra)
labour_event_meta_defaults = dict(
signup_extra_content_type=content_type,
work_begins=self.event.start_time - timedelta(days=1),
work_ends=self.event.end_time + timedelta(hours=4),
admin_group=labour_admin_group,
contact_email='Popcult Helsingin työvoimavastaava <virve.honkala@popcult.fi>',
)
if self.test:
t = now()
labour_event_meta_defaults.update(
registration_opens=t - timedelta(days=60),
registration_closes=t + timedelta(days=60),
)
labour_event_meta, unused = LabourEventMeta.objects.get_or_create(
event=self.event,
defaults=labour_event_meta_defaults,
)
for pc_name, pc_slug, pc_app_label in [
('Järjestyksenvalvoja', 'jv', 'labour'),
]:
personnel_class, created = PersonnelClass.objects.get_or_create(
event=self.event,
slug=pc_slug,
defaults=dict(
name=pc_name,
app_label=pc_app_label,
priority=self.get_ordering_number(),
),
)
jv = PersonnelClass.objects.get(event=self.event, slug='jv')
for jc_data in [
(
'Järjestyksenvalvoja',
(
'Worldcon 75 hakee Kompassin kautta ainoastaan järjestyksenvalvojia. '
'Mikäli kortillisia järjestyksenvalvojia ei saada tarpeeksi, heitä voidaan myös kouluttaa. '
'Mikäli sinulla on JV-kortti, muistathan täyttää sen numeron '
'<a href="/profile/qualifications" target="_blank">profiiliisi</a>. '
'Ilmoita myös, jos sinulla on ensiapukortti.'
),
[jv]
),
]:
if len(jc_data) == 3:
name, description, pcs = jc_data
job_names = []
elif len(jc_data) == 4:
name, description, pcs, job_names = jc_data
else:
raise ValueError("Length of jc_data must be 3 or 4")
job_category, created = JobCategory.objects.get_or_create(
event=self.event,
slug=slugify(name),
defaults=dict(
name=name,
description=description,
)
)
if created:
job_category.personnel_classes = pcs
job_category.save()
for job_name in job_names:
job, created = Job.objects.get_or_create(
job_category=job_category,
slug=slugify(job_name),
defaults=dict(
title=job_name,
)
)
labour_event_meta.create_groups()
for jc_name, qualification_name in [
# ('Järjestyksenvalvoja', 'JV-kortti'), # no!
]:
jc = JobCategory.objects.get(event=self.event, name=jc_name)
qual = Qualification.objects.get(name=qualification_name)
if not jc.required_qualifications.exists():
jc.required_qualifications = [qual]
jc.save()
class Command(BaseCommand):
args = ''
help = 'Setup worldcon75 specific stuff'
def handle(self, *args, **opts):
Setup().setup(test=settings.DEBUG)
| 33.97006
| 112
| 0.568835
|
f97b64a4c719fd0aaaf48dc919d089eadbb1626d
| 1,620
|
py
|
Python
|
game.py
|
dijkmeneer/Guess-the-number
|
7ac619288294237c9d19a9ffa1fcf7baff4dc057
|
[
"MIT"
] | 1
|
2020-08-27T20:22:23.000Z
|
2020-08-27T20:22:23.000Z
|
game.py
|
dijkmeneer/Guess-the-number
|
7ac619288294237c9d19a9ffa1fcf7baff4dc057
|
[
"MIT"
] | null | null | null |
game.py
|
dijkmeneer/Guess-the-number
|
7ac619288294237c9d19a9ffa1fcf7baff4dc057
|
[
"MIT"
] | null | null | null |
import random
import time
import os
import sys
input("Welcome to guess the number! press enter to start.")
print("How many tries do you want? (5 is standard.)")
tries = int(input())
number = random.randint(0, 10)
while tries > 0:
print("choose a number. (0-10)")
guess = int(input())
if guess == number:
print("correct")
time.sleep(1)
answer = None
while answer not in ("yes", "no"):
answer = input("Do you want to play again? (yes/no) ")
if answer == "yes":
print("Okay")
time.sleep(1)
os.execl(sys.executable, sys.executable, * sys.argv)
time.sleep(1)
elif answer == "no":
print("Bye!")
time.sleep(1)
sys.exit()
else:
print("Please type yes or no.")
else:
tries -= 1
if tries > 1:
print("Wrong! you still have " +str(tries)+ " tries.")
elif tries == 0:
print("You lost!")
else:
print("Wrong! you still have " +str(tries)+ " try.")
answer = None
while answer not in ("yes", "no"):
answer = input("Do you want to play again? (yes/no) ")
if answer == "yes":
print("Okay")
time.sleep(1)
os.execl(sys.executable, sys.executable, * sys.argv)
time.sleep(1)
elif answer == "no":
print("Bye!")
time.sleep(1)
sys.exit()
else:
print("Please type yes or no.")
| 25.714286
| 69
| 0.474074
|
eb11ad6fea3356f2e37d5ab3c74f624b6709bbe0
| 1,815
|
py
|
Python
|
SciKitLearn/TreeClassifier/SkLearn_TreeClassifier_Modified.py
|
EmbeddedKen/MachineLearning
|
6998e44b64023941b70b12c9526efd6b99fa82e1
|
[
"MIT"
] | null | null | null |
SciKitLearn/TreeClassifier/SkLearn_TreeClassifier_Modified.py
|
EmbeddedKen/MachineLearning
|
6998e44b64023941b70b12c9526efd6b99fa82e1
|
[
"MIT"
] | null | null | null |
SciKitLearn/TreeClassifier/SkLearn_TreeClassifier_Modified.py
|
EmbeddedKen/MachineLearning
|
6998e44b64023941b70b12c9526efd6b99fa82e1
|
[
"MIT"
] | null | null | null |
#SciKit (SK) Learn Initial Test Program
#Modified from Siraj Raval's source code...
#"Introduction- Learn Python for Data Science"
#By Kenneth Vorseth, 2/4/18
#<Import Tree from Sci-Kit Learn>
from sklearn import tree
#<Initial Training Data>
#(Height, Weight, Shoe Size)
X = [[181, 80, 44], #Data 0x00
[177, 70, 43], #Data 0x01
[160, 60, 38], #Data 0x02
[154, 54, 37], #Data 0x03
[166, 65, 40], #Data 0x04
[190, 90, 47], #Data 0x05
[175, 64, 39], #Data 0x06
[177, 70, 40], #Data 0x07
[159, 55, 37], #Data 0x08
[171, 75, 42], #Data 0x09
[181, 85, 43]] #Data 0x0A
#<Labels for Associated Data>
Y = ['male', #Data 0x00
'female', #Data 0x01
'female', #Data 0x02
'female', #Data 0x03
'male', #Data 0x04
'male', #Data 0x05
'male', #Data 0x06
'female', #Data 0x07
'male', #Data 0x08
'female', #Data 0x09
'male'] #Data 0x0A
#<Choose Model for Machine Learning (Tree Classifier)>
clf = tree.DecisionTreeClassifier()
#<Adjust the Tree to Learn from Labeled Data>
clf = clf.fit(X, Y)
#<Get Input From User to Make Predictions>
pState = 'y' #Initial Program State (For Repeating)
while (pState == 'y'):
#> Grab Input Data from User for Prediction
print("Predict Gender from Data...") #Program Header
    bHeight = int(input("Height (cm): ")) #Body Height
    bWeight = int(input("Weight (kg): ")) #Body Weight
sSize = int(input("Shoe Size: ")) #Shoe Size
#> Bind DataSet to New List
dataSet = [bHeight, bWeight, sSize]
#> Make a Prediction Based on Our Fitted Tree
prediction = clf.predict([dataSet])
#> Display the Prediction to the User
print("Predicted Gender:", prediction)
#> Ask User Whether to Repeat Program
pState = input("Run Again? (y/n): ")
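#<Note on the prediction format - a sketch, not part of the original lesson>
# clf.predict returns a NumPy array of labels, e.g. array(['male'], dtype='<U6'),
# so printing prediction[0] would show just the bare string:
#
#   print("Predicted Gender:", prediction[0])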
| 33
| 57
| 0.618733
|
60d87e6d8efa76939330e2e9008c651b0ebed03d
| 314
|
py
|
Python
|
law/api/search_api.py
|
B-Stefan/law-crawler
|
1be853ff21bd97786c3758dd914a1f8323569805
|
[
"Apache-2.0"
] | 1
|
2019-03-10T21:38:36.000Z
|
2019-03-10T21:38:36.000Z
|
law/api/search_api.py
|
B-Stefan/law-crawler
|
1be853ff21bd97786c3758dd914a1f8323569805
|
[
"Apache-2.0"
] | 40
|
2019-01-31T04:38:31.000Z
|
2021-08-03T04:13:29.000Z
|
law/api/search_api.py
|
B-Stefan/law-service
|
1be853ff21bd97786c3758dd914a1f8323569805
|
[
"Apache-2.0"
] | null | null | null |
from flask_restful import reqparse
from law.api.law_resource import LawResource
class SearchAPI(LawResource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('q', type=str)
args = parser.parse_args()
return self.service.get_fulltext_result(args['q'])
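# Wiring sketch (assumption: the real app factory lives elsewhere in this package;
# the '/search' route below is illustrative only):
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   Api(app).add_resource(SearchAPI, '/search')   # handles GET /search?q=<term>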
| 20.933333
| 58
| 0.697452
|
7fc9055afb2de4f1636fae08239b753c87ab7a50
| 445
|
py
|
Python
|
Lib/site-packages/plotly/validators/isosurface/_x.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/isosurface/_x.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2021-03-18T22:27:08.000Z
|
2022-03-11T23:40:50.000Z
|
plotly/validators/isosurface/_x.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name='x', parent_name='isosurface', **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc+clearAxisTypes'),
role=kwargs.pop('role', 'data'),
**kwargs
)
| 31.785714
| 76
| 0.651685
|
5c73409afae1d0f5682f64afb8104516525cec3d
| 154
|
py
|
Python
|
django_pymorphy2/morph.py
|
ITrex/django-pymorphy2
|
d3557da0f845c177367df948afddc106d48862f4
|
[
"MIT"
] | 4
|
2017-07-25T16:28:37.000Z
|
2021-07-08T06:31:23.000Z
|
django_pymorphy2/morph.py
|
ITrex/django-pymorphy2
|
d3557da0f845c177367df948afddc106d48862f4
|
[
"MIT"
] | null | null | null |
django_pymorphy2/morph.py
|
ITrex/django-pymorphy2
|
d3557da0f845c177367df948afddc106d48862f4
|
[
"MIT"
] | 7
|
2015-05-07T08:47:51.000Z
|
2020-07-06T12:40:58.000Z
|
#coding: utf-8
from __future__ import unicode_literals, absolute_import
from pymorphy2 import MorphAnalyzer
__all__ = ['morph']
morph = MorphAnalyzer()
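# Usage sketch (pymorphy2's public API; the sample word is illustrative):
#
#   from django_pymorphy2.morph import morph
#   parses = morph.parse('стали')        # list of Parse objects
#   print(parses[0].normal_form)         # lemma of the most probable parse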
| 19.25
| 56
| 0.798701
|
0584bb3a02089d3c346d982ad863d9196d6f88fc
| 10,262
|
py
|
Python
|
docs/conf.py
|
Solanar/django-q
|
5e5676292b1df83c78fe224a67d212850dfaf8a9
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Solanar/django-q
|
5e5676292b1df83c78fe224a67d212850dfaf8a9
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Solanar/django-q
|
5e5676292b1df83c78fe224a67d212850dfaf8a9
|
[
"MIT"
] | 1
|
2020-08-11T21:18:10.000Z
|
2020-08-11T21:18:10.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Django Q documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 26 22:18:36 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import alabaster
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_q.tests.settings'
nitpick_ignore = [('py:class', 'datetime')]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'alabaster',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
# 'sphinx.ext.autodoc'
]
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None),
'django': ('https://docs.djangoproject.com/en/2.2/',
'https://docs.djangoproject.com/en/2.2/_objects/')}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Django Q'
copyright = '2015-2018, Ilan Steemers'
author = 'Ilan Steemers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'description': "A multiprocessing task queue for Django",
'logo': 'logo.png',
'github_user': 'Koed00',
'github_repo': 'django-q',
'github_banner': True,
'travis_button': True,
'analytics_id': 'UA-64807059-1'
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoQdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoQ.tex', 'Django Q Documentation',
'Ilan Steemers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/logo_large.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangoq', 'Django Q Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoQ', 'Django Q Documentation',
author, 'DjangoQ', 'A multiprocessing distributed task queue for Django.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 32.27044
| 85
| 0.700741
|
5976dc4df02e279d131c9569b6ec761d7dcac3cd
| 590
|
py
|
Python
|
pyzoo/zoo/util/__init__.py
|
abdolence/analytics-zoo
|
364856abcbe9aff7f7b6cf9b9f8648d51e07ca64
|
[
"Apache-2.0"
] | 2,970
|
2017-06-08T00:24:43.000Z
|
2022-03-30T12:14:55.000Z
|
pyzoo/zoo/util/__init__.py
|
abdolence/analytics-zoo
|
364856abcbe9aff7f7b6cf9b9f8648d51e07ca64
|
[
"Apache-2.0"
] | 3,530
|
2017-05-09T08:29:10.000Z
|
2022-03-21T02:11:45.000Z
|
pyzoo/zoo/util/__init__.py
|
abdolence/analytics-zoo
|
364856abcbe9aff7f7b6cf9b9f8648d51e07ca64
|
[
"Apache-2.0"
] | 972
|
2017-05-09T07:03:50.000Z
|
2022-03-23T07:48:48.000Z
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 36.875
| 74
| 0.759322
|
c603cb122ab11370f89306bfb6e53d326f5f583e
| 6,049
|
py
|
Python
|
catkin_ws/src/o2as_new_parts_description/scripts/generate_urdf_from_meshes.py
|
DevwratJoshi/ur-o2as
|
265249c27908a79a301014168394db0c0dc2204c
|
[
"MIT"
] | null | null | null |
catkin_ws/src/o2as_new_parts_description/scripts/generate_urdf_from_meshes.py
|
DevwratJoshi/ur-o2as
|
265249c27908a79a301014168394db0c0dc2204c
|
[
"MIT"
] | null | null | null |
catkin_ws/src/o2as_new_parts_description/scripts/generate_urdf_from_meshes.py
|
DevwratJoshi/ur-o2as
|
265249c27908a79a301014168394db0c0dc2204c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import csv
import os
import rospy
import rospkg
rp = rospkg.RosPack()
print("D1")
# Read in the files
filenames = os.listdir(os.path.join(rp.get_path("o2as_new_parts_description"), "meshes"))
filenames_strip1 = []
filenames_no_ext = []
for name in filenames:
filenames_strip1.append(os.path.splitext(name)[0])
for name in filenames_strip1:
filenames_no_ext.append(os.path.splitext(name)[0]) # This removes the .vhacd from ".vhacd.dae" files
partnames = list(sorted(set(filenames_no_ext))) # Removes duplicates and sorts (because sets do not allow duplicate entries)
print("D1")
out_dir = os.path.join(rp.get_path("o2as_new_parts_description"), "urdf/generated")
# Read in the templates
extra_joint_filename = os.path.join(rp.get_path("o2as_new_parts_description"), "urdf/templates", "extra_frames.csv")
macro_template_filename = os.path.join(rp.get_path("o2as_new_parts_description"), "urdf/templates", "macro_template.urdf.xacro")
f = open(macro_template_filename,'r')
macro_template = f.read()
f.close()
macro_frames_only_template_filename = os.path.join(rp.get_path("o2as_new_parts_description"), "urdf/templates", "macro_frames_only_template.urdf.xacro")
f = open(macro_frames_only_template_filename,'r')
macro_frames_only_template = f.read()
f.close()
non_macro_template_filename = os.path.join(rp.get_path("o2as_new_parts_description"), "urdf/templates", "non_macro_template.urdf.xacro")
f = open(non_macro_template_filename,'r')
non_macro_template = f.read()
f.close()
spawn_template_filename = os.path.join(rp.get_path("o2as_new_parts_description"), "urdf/templates", "spawn_template.urdf")
f = open(spawn_template_filename,'r')
spawn_template = f.read()
f.close()
print("Reading")
extra_frames = []
with open(extra_joint_filename, 'r') as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
row_stripped = []
for el in row:
row_stripped.append(el.strip()) # Removes whitespaces
extra_frames.append(row_stripped)
# Write the spawn files
# --- This is ignored for now
# Write the macros
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for part_num, partname in enumerate(partnames):
    print("Writing " + partname)
macrofile = open(os.path.join(out_dir, partname+"_macro.urdf.xacro"),'w+')
macro_frames_only_file = open(os.path.join(out_dir, partname+"_frames_only_macro.urdf.xacro"),'w+')
mname_int = "assy_part_" + partname[0:2] # = "assy_part_01" for part 01.
mname_ext = mname_int
mname_fo_int = mname_int
mname_fo_ext = "assy_part_frames_only_" + partname[0:2] # This only changes the name of the macro, not the joint/link names
macrofile_content = macro_template.replace("PARTNAME", partname)
macrofile_content = macrofile_content.replace("MACRONAME_INTERNAL", mname_int)
macrofile_content = macrofile_content.replace("MACRONAME_EXTERNAL", mname_ext)
macro_frames_only_filecontent = macro_frames_only_template.replace("MACRONAME_INTERNAL", mname_fo_int)
macro_frames_only_filecontent = macro_frames_only_filecontent.replace("MACRONAME_EXTERNAL", mname_fo_ext)
if int(partname[0:2]) in [1,2,3,4]:
macrofile_content = macrofile_content.replace("vhacd.dae", "stl")
macrofile_content = macrofile_content.replace("dae", "stl")
# if int(partname[0:2]) == 4:
# macrofile_content = macrofile_content.replace("0.001", "0.000001") # To correct for that mesh's scale
extra_frames_urdf = ""
for entry in extra_frames:
if int(entry[0]) == int(partname[0:2]):
new_joint = ""
new_joint += " <joint name=\"${prefix}" + mname_int + "_LINK_NAME_joint\" type=\"fixed\"> \n"
new_joint += " <parent link=\"${prefix}" + mname_int + "\"/> \n"
new_joint += " <child link=\"${prefix}" + mname_int + "_LINK_NAME\"/> \n"
new_joint += " <origin rpy=\"${" + \
entry[2] + "} ${" + \
entry[3] + "} ${" + \
entry[4] + "}\" xyz=\"${" + \
entry[5] + "} ${" + \
entry[6] + "} ${" + \
entry[7] + "}\"/> \n"
new_joint += " </joint> \n"
new_joint += " <link name=\"${prefix}" + mname_int + "_LINK_NAME\"/> \n"
new_joint += " \n"
new_joint = new_joint.replace("LINK_NAME", entry[1])
extra_frames_urdf += new_joint
if extra_frames_urdf:
macrofile_content = macrofile_content.replace("<!-- #EXTRAFRAMES -->", extra_frames_urdf)
macro_frames_only_filecontent = macro_frames_only_filecontent.replace("<!-- #EXTRAFRAMES -->", extra_frames_urdf)
macrofile.write(macrofile_content)
macro_frames_only_file.write(macro_frames_only_filecontent)
print("Wrote " + os.path.join(out_dir, partname+"_macro.urdf.xacro"))
print("Wrote " + os.path.join(out_dir, partname+"_frames_only_macro.urdf.xacro"))
for part_num, partname in enumerate(partnames):
non_macrofile = open(os.path.join(out_dir, partname+"_non_macro.urdf.xacro"),'w+')
mname_ext = "assy_part_" + partname[0:2]
non_macrofile_content = non_macro_template.replace("MACRONAME_EXTERNAL", mname_ext)
non_macrofile_content = non_macrofile_content.replace("PARTNAME", partname)
non_macrofile.write(non_macrofile_content)
print("Wrote " + os.path.join(out_dir, partname+"_macro.urdf.xacro"))
# Convert xacro files to urdf (necessary for the URDF-to-msg converter)
import subprocess
for part_num, partname in enumerate(partnames):
print("Convert xacro to urdf: {}".format(partname))
non_macrofilepath = os.path.join( out_dir, partname+"_non_macro.urdf.xacro")
out_urdf_filepath = os.path.join( os.path.join(out_dir, "collision_object_urdfs"), partname+"_non_macro.urdf")
cmd = 'xacro --inorder ' + non_macrofilepath + " -o " + out_urdf_filepath
subprocess.check_call(cmd, shell=True)
| 47.257813
| 152
| 0.67879
|
68687238f860da0d031fc14ae3a2dee35fc9e9d3
| 3,649
|
py
|
Python
|
SmartTransportSystem/settings.py
|
shahriarshafin/smart-transport-system
|
9b22ea5b7aeac2d4cad5edadfda90ba374056336
|
[
"MIT"
] | 1
|
2022-03-20T21:16:34.000Z
|
2022-03-20T21:16:34.000Z
|
SmartTransportSystem/settings.py
|
shahriarshafin/SmartTransportSystem
|
9b22ea5b7aeac2d4cad5edadfda90ba374056336
|
[
"MIT"
] | 7
|
2021-09-08T03:36:20.000Z
|
2022-01-13T20:56:44.000Z
|
SmartTransportSystem/settings.py
|
shahriarshafin/smart-transport-system
|
9b22ea5b7aeac2d4cad5edadfda90ba374056336
|
[
"MIT"
] | null | null | null |
"""
Django settings for SmartTransportSystem project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('KEY1')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
# custom installed apps
'smartTracking',
'accounts',
'home_page',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SmartTransportSystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# templates folder directory
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SmartTransportSystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# data base linkup
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static') # to dynamic all static files
]
LOGIN_URL = 'login'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('Email_user')
EMAIL_HOST_PASSWORD = os.environ.get('Email_password')
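# python-decouple resolves KEY1 and DEBUG from a .env file (or the environment).
# A matching .env would look like the sketch below; the values are placeholders:
#
#   KEY1=django-insecure-change-me
#   DEBUG=True
#
# Email_user / Email_password are read straight from the environment instead.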
| 27.231343
| 91
| 0.705125
|
78f1c9a594d6a4b9895cd3f4b12005f721541364
| 5,413
|
py
|
Python
|
mergify_engine/web/simulator.py
|
v1v/mergify-engine
|
21f63be9987740e1466459f966b186392a235051
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/web/simulator.py
|
v1v/mergify-engine
|
21f63be9987740e1466459f966b186392a235051
|
[
"Apache-2.0"
] | 261
|
2020-10-15T15:56:15.000Z
|
2022-03-31T07:08:30.000Z
|
mergify_engine/web/simulator.py
|
v1v/mergify-engine
|
21f63be9987740e1466459f966b186392a235051
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from urllib.parse import urlsplit
import fastapi
from starlette import requests
from starlette import responses
from starlette.middleware import cors
import voluptuous
from mergify_engine import context
from mergify_engine import exceptions
from mergify_engine import rules
from mergify_engine import subscription
from mergify_engine import utils
from mergify_engine.clients import github
from mergify_engine.clients import http
from mergify_engine.engine import actions_runner
from mergify_engine.web import auth
from mergify_engine.web import redis
app = fastapi.FastAPI()
app.add_middleware(
cors.CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class PullRequestUrlInvalid(voluptuous.Invalid): # type: ignore[misc]
pass
@voluptuous.message("expected a Pull Request URL", cls=PullRequestUrlInvalid)
def PullRequestUrl(v):
_, owner, repo, _, pull_number = urlsplit(v).path.split("/")
pull_number = int(pull_number)
return owner, repo, pull_number
SimulatorSchema = voluptuous.Schema(
{
voluptuous.Required("pull_request"): voluptuous.Any(None, PullRequestUrl()),
voluptuous.Required("mergify.yml"): voluptuous.And(
voluptuous.Coerce(rules.YAML),
rules.UserConfigurationSchema,
),
}
)
def voluptuous_error(error):
if error.path:
if error.path[0] == "mergify.yml":
error.path.pop(0)
return str(rules.InvalidRules(error, ""))
@app.exception_handler(voluptuous.Invalid)
async def voluptuous_errors(
request: requests.Request, exc: voluptuous.Invalid
) -> responses.JSONResponse:
# Replace payload by our own
if isinstance(exc, voluptuous.MultipleInvalid):
payload = {"errors": list(map(voluptuous_error, sorted(exc.errors, key=str)))}
else:
payload = {"errors": [voluptuous_error(exc)]}
return responses.JSONResponse(status_code=400, content=payload)
async def _simulator(redis_cache, pull_request_rules, owner, repo, pull_number, token):
try:
if token:
auth = github.GithubTokenAuth(token)
else:
auth = github.get_auth(owner)
async with github.aget_client(auth=auth) as client:
try:
data = await client.item(f"/repos/{owner}/{repo}/pulls/{pull_number}")
except http.HTTPNotFound:
raise PullRequestUrlInvalid(
message=f"Pull request {owner}/{repo}/pulls/{pull_number} not found"
)
sub = await subscription.Subscription.get_subscription(
redis_cache, client.auth.owner_id
)
installation = context.Installation(
client.auth.owner_id,
owner,
sub,
client,
redis_cache,
)
repository = context.Repository(installation, repo)
ctxt = await repository.get_pull_request_context(data["number"], data)
ctxt.sources = [{"event_type": "mergify-simulator", "data": []}]
match = await pull_request_rules.get_pull_request_rule(ctxt)
return await actions_runner.gen_summary(ctxt, match)
except exceptions.MergifyNotInstalled:
raise PullRequestUrlInvalid(
message=f"Mergify not installed on repository '{owner}/{repo}'"
)
@app.post("/", dependencies=[fastapi.Depends(auth.signature_or_token)])
async def simulator(
request: requests.Request,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
) -> responses.JSONResponse:
token = request.headers.get("Authorization")
if token:
token = token[6:] # Drop 'token '
try:
raw_json = await request.json()
except json.JSONDecodeError:
return responses.JSONResponse(status_code=400, content="invalid json")
data = SimulatorSchema(raw_json)
if data["pull_request"]:
title, summary = await _simulator(
redis_cache,
data["mergify.yml"]["pull_request_rules"],
owner=data["pull_request"][0],
repo=data["pull_request"][1],
pull_number=data["pull_request"][2],
token=token,
)
else:
title, summary = ("The configuration is valid", None)
pull_request_rules_conditions = [
[cond.tree for cond in rule.conditions]
for rule in data["mergify.yml"]["pull_request_rules"]
]
return responses.JSONResponse(
status_code=200,
content={
"title": title,
"summary": summary,
"conditions": {
"pull_request_rules": pull_request_rules_conditions,
},
},
)
| 31.841176
| 88
| 0.659708
|
521d5b5213adce96a8f2cee773aac2a92830908d
| 1,377
|
py
|
Python
|
figure_concept_meg_demo.py
|
dengemann/engemann-2020-multimodal-brain-age
|
ceffb1e01658e31d19dfc4dc0be7aff1d6d21af5
|
[
"BSD-3-Clause"
] | 6
|
2020-11-11T21:26:20.000Z
|
2022-01-18T17:18:45.000Z
|
figure_concept_meg_demo.py
|
dengemann/engemann-2020-multimodal-brain-age
|
ceffb1e01658e31d19dfc4dc0be7aff1d6d21af5
|
[
"BSD-3-Clause"
] | 1
|
2022-03-14T07:56:17.000Z
|
2022-03-14T07:56:17.000Z
|
figure_concept_meg_demo.py
|
dengemann/engemann-2020-multimodal-brain-age
|
ceffb1e01658e31d19dfc4dc0be7aff1d6d21af5
|
[
"BSD-3-Clause"
] | 3
|
2020-06-10T08:34:04.000Z
|
2022-03-14T01:37:08.000Z
|
import mne
import numpy as np
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_welch
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 240  # use the first 240s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
raw.apply_proj()
raw.filter(1, 80)
fmin, fmax = 1, 50  # look at frequencies between 1 and 50Hz
n_fft = 8192  # the FFT size (n_fft). Ideally a power of 2
picks = mne.pick_types(raw.info, meg='mag')
psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_fft=n_fft, n_jobs=1)
psds = 10 * np.log10(psds)
import pandas as pd
dfs = list()
for ii, psd in enumerate(psds):
data = pd.DataFrame(
dict(psd=psd,
freqs=freqs)
)
data['channel'] = raw.ch_names[picks[ii]]
dfs.append(data)
dfs = pd.concat(dfs, axis=0)
dfs.to_csv("./outputs/demo_meg_psd.csv")
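# Optional follow-up (a sketch; matplotlib is not imported above, so this is only
# an illustration of how the dumped PSDs could be visualised):
#
#   import matplotlib.pyplot as plt
#   for ch, grp in dfs.groupby('channel'):
#       plt.plot(grp['freqs'], grp['psd'], color='k', alpha=0.1)
#   plt.xlabel('Frequency (Hz)')
#   plt.ylabel('PSD (dB)')
#   plt.show()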
| 29.297872
| 70
| 0.689179
|
6b2a079d2428cf9c1df49debda403587ae5b0779
| 278
|
py
|
Python
|
feed/api/permissions.py
|
Nicholas-David-K/profiles-api
|
8758b1d2eb1cfce9e59e2b0ac310f4cc02d14a73
|
[
"MIT"
] | null | null | null |
feed/api/permissions.py
|
Nicholas-David-K/profiles-api
|
8758b1d2eb1cfce9e59e2b0ac310f4cc02d14a73
|
[
"MIT"
] | null | null | null |
feed/api/permissions.py
|
Nicholas-David-K/profiles-api
|
8758b1d2eb1cfce9e59e2b0ac310f4cc02d14a73
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class IFeedAuthorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.user == request.user
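# Attachment sketch (the view name and base class below are placeholders, not part
# of this app): a DRF view opts into this check via `permission_classes`.
#
#   from rest_framework import generics
#
#   class FeedDetailView(generics.RetrieveUpdateDestroyAPIView):
#       permission_classes = [IFeedAuthorOrReadOnly]
#       # queryset / serializer_class omitted here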
| 30.888889
| 56
| 0.726619
|
cb3551f5c033116440f89f43769191cdb2042db4
| 11,565
|
py
|
Python
|
mergify_engine/tasks/engine/__init__.py
|
bowlofeggs/mergify-engine
|
463811a15835c1439fe75e3168113aa497892c77
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tasks/engine/__init__.py
|
bowlofeggs/mergify-engine
|
463811a15835c1439fe75e3168113aa497892c77
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tasks/engine/__init__.py
|
bowlofeggs/mergify-engine
|
463811a15835c1439fe75e3168113aa497892c77
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daiquiri
import github
from mergify_engine import check_api
from mergify_engine import config
from mergify_engine import rules
from mergify_engine.tasks.engine import v1
from mergify_engine.tasks.engine import v2
from mergify_engine.worker import app
LOG = daiquiri.getLogger(__name__)
def get_github_pull_from_sha(g, repo, installation_id, sha):
# TODO(sileht): Replace this optimisation when we drop engine v1
pull = v1.Caching(repository=repo,
installation_id=installation_id
).get_pr_for_sha(sha)
if pull:
return pull
issues = list(g.search_issues("repo:%s is:pr is:open %s" %
(repo.full_name, sha)))
if not issues:
return
if len(issues) > 1: # pragma: no cover
        # NOTE(sileht): It's technically possible, but really?
LOG.warning("sha attached to multiple pull requests", sha=sha)
for i in issues:
try:
pull = repo.get_pull(i.number)
except github.GithubException as e: # pragma: no cover
if e.status != 404:
raise
if pull and not pull.merged:
return pull
def get_github_pull_from_event(g, repo, installation_id,
event_type, data):
if "pull_request" in data:
return github.PullRequest.PullRequest(
repo._requester, {}, data["pull_request"], completed=True
)
elif event_type == "status":
return get_github_pull_from_sha(g, repo, installation_id, data["sha"])
elif event_type in ["check_suite", "check_run"]:
if event_type == "check_run":
pulls = data["check_run"]["check_suite"]["pull_requests"]
sha = data["check_run"]["head_sha"]
else:
pulls = data["check_suite"]["pull_requests"]
sha = data["check_suite"]["head_sha"]
if not pulls:
return get_github_pull_from_sha(g, repo, installation_id, sha)
if len(pulls) > 1: # pragma: no cover
            # NOTE(sileht): It's technically possible, but really?
LOG.warning("check_suite/check_run attached on multiple pulls")
for p in pulls:
pull = v1.Caching(repository=repo,
installation_id=installation_id
).get_pr_for_pull_number(
p["base"]["ref"],
p["number"])
if not pull:
try:
pull = repo.get_pull(p["number"])
except github.UnknownObjectException: # pragma: no cover
pass
if pull and not pull.merged:
return pull
@app.task
def pull_request_opened():
"""Dumb method to record number of new PR for stats."""
@app.task
def pull_request_merged():
"""Dumb method to record number of closed PR for stats."""
@app.task
def pull_request_merged_by_mergify():
"""Dumb method to record number of closed PR for stats."""
def create_metrics(event_type, data):
# prometheus_client is a mess with multiprocessing, so we generate tasks
# that will be recorded by celery and exported with celery exporter
if event_type == "pull_request" and data["action"] == "opened":
pull_request_opened.apply_async()
elif (event_type == "pull_request" and data["action"] == "closed" and
data["pull_request"]["merged"]):
pull_request_merged.apply_async()
if data["pull_request"]["merged_by"]["login"] in ["mergify[bot]",
"mergify-test[bot]"]:
pull_request_merged_by_mergify.apply_async()
def check_configuration_changes(event_type, data, event_pull):
if event_pull.base.repo.default_branch == event_pull.base.ref:
ref = None
for f in event_pull.get_files():
if f.filename == ".mergify.yml":
ref = f.contents_url.split("?ref=")[1]
if ref is not None:
try:
mergify_config = rules.get_mergify_config(
event_pull.base.repo, ref=ref)
if "rules" in mergify_config:
rules.get_branch_rule(mergify_config['rules'],
event_pull.base.ref)
except rules.InvalidRules as e: # pragma: no cover
# Not configured, post status check with the error message
# TODO(sileht): we can annotate the .mergify.yml file in Github
# UI with that API
check_api.set_check_run(
event_pull, "Mergify — future config checker", "completed",
"failure", output={
"title": "The new Mergify configuration is invalid",
"summary": str(e)
})
else:
check_api.set_check_run(
event_pull, "Mergify — future config checker", "completed",
"success", output={
"title": "The new Mergify configuration is valid",
"summary": "No action required",
})
check_api.set_check_run(
event_pull, "Mergify — disabled due to configuration change",
"completed", "success", output={
"title": "Mergify configuration has been modified",
"summary": "The pull request needs to be merged manually",
})
return True
return False
@app.task
def run(event_type, data, subscription):
"""Everything starts here."""
integration = github.GithubIntegration(config.INTEGRATION_ID,
config.PRIVATE_KEY)
installation_id = data["installation"]["id"]
try:
installation_token = integration.get_access_token(
installation_id).token
except github.UnknownObjectException: # pragma: no cover
        LOG.error("token for install %d does not exist anymore (%s)",
installation_id, data["repository"]["full_name"])
return
g = github.Github(installation_token)
try:
if config.LOG_RATELIMIT: # pragma: no cover
rate = g.get_rate_limit().rate
LOG.info("ratelimit: %s/%s, reset at %s",
rate.remaining, rate.limit, rate.reset,
repository=data["repository"]["name"])
repo = g.get_repo(data["repository"]["owner"]["login"] + "/" +
data["repository"]["name"])
event_pull = get_github_pull_from_event(g, repo, installation_id,
event_type, data)
if not event_pull: # pragma: no cover
LOG.info("No pull request found in the event %s, "
"ignoring", event_type)
return
LOG.info("Pull request found in the event %s", event_type,
repo=repo.full_name,
pull_request=event_pull)
if ("base" not in event_pull.raw_data or
"repo" not in event_pull.raw_data["base"] or
len(list(event_pull.raw_data["base"]["repo"].keys())) < 70):
LOG.warning("the pull request payload looks suspicious",
event_type=event_type,
data=data,
pull_request=event_pull.raw_data,
                        repo=repo.full_name)
if (event_type == "status" and
event_pull.head.sha != data["sha"]): # pragma: no cover
LOG.info("No need to proceed queue (got status of an old commit)",
repo=repo.full_name,
pull_request=event_pull)
return
elif (event_type in ["status", "check_suite", "check_run"] and
event_pull.merged): # pragma: no cover
LOG.info("No need to proceed queue (got status of a merged "
"pull request)",
repo=repo.full_name,
pull_request=event_pull)
return
elif (event_type in ["check_suite", "check_run"] and
event_pull.head.sha != data[event_type]["head_sha"]
): # pragma: no cover
LOG.info("No need to proceed queue (got %s of an old "
"commit)", event_type,
repo=repo.full_name,
pull_request=event_pull)
return
if check_configuration_changes(event_type, data, event_pull):
LOG.info("Configuration changed, ignoring",
repo=repo.full_name,
pull_request=event_pull)
return
# BRANCH CONFIGURATION CHECKING
try:
mergify_config = rules.get_mergify_config(repo)
except rules.NoRules: # pragma: no cover
LOG.info("No need to proceed queue (.mergify.yml is missing)",
repo=repo.full_name,
pull_request=event_pull)
return
except rules.InvalidRules as e: # pragma: no cover
# Not configured, post status check with the error message
if (event_type == "pull_request" and
data["action"] in ["opened", "synchronize"]):
check_api.set_check_run(
event_pull, "current-config-checker", "completed",
"failure", output={
"title": "The Mergify configuration is invalid",
"summary": str(e)
})
return
create_metrics(event_type, data)
# NOTE(sileht): At some point we may need to reget the
# installation_token within each next tasks, in case we reach the
# expiration
if "rules" in mergify_config:
v1.handle.s(installation_id, installation_token, subscription,
mergify_config["rules"], event_type, data,
event_pull.raw_data).apply_async()
elif "pull_request_rules" in mergify_config:
v2.handle.s(
installation_id, installation_token, subscription,
mergify_config["pull_request_rules"].as_dict(),
event_type, data, event_pull.raw_data
).apply_async()
else: # pragma: no cover
raise RuntimeError("Unexpected configuration version")
except github.BadCredentialsException: # pragma: no cover
        LOG.error("token for install %d is no longer valid (%s)",
data["installation"]["id"],
data["repository"]["full_name"])
except github.RateLimitExceededException: # pragma: no cover
LOG.error("rate limit reached for install %d (%s)",
data["installation"]["id"],
data["repository"]["full_name"])
| 40.017301
| 79
| 0.56671
|
f39fe1574333e7a59047b87a8e9659a378dc3809
| 933
|
py
|
Python
|
quick_post/posts/models.py
|
GhostWolfRider/graphql-django-demo
|
850a7a606e6579bfdf34c57815e687d354c6af3f
|
[
"MIT"
] | 1
|
2021-04-12T06:18:25.000Z
|
2021-04-12T06:18:25.000Z
|
quick_post/posts/models.py
|
GhostWolfRider/graphql-django-demo
|
850a7a606e6579bfdf34c57815e687d354c6af3f
|
[
"MIT"
] | null | null | null |
quick_post/posts/models.py
|
GhostWolfRider/graphql-django-demo
|
850a7a606e6579bfdf34c57815e687d354c6af3f
|
[
"MIT"
] | null | null | null |
import uuid
from django.db import models
class Person(models.Model):
"""
Person model is for storing all the author details.
"""
# UUID of person
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# title of the post
name = models.CharField(max_length=100)
# person age
age = models.CharField(max_length=10)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class Post(models.Model):
"""
    Post model is for storing all the post details.
"""
# UUID of post
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# title of the post
title = models.CharField(max_length=100)
# post created by person
author = models.ForeignKey(Person, on_delete=models.CASCADE)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
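# ORM sketch (standard Django queries against the two models above; the sample
# values are illustrative, not fixtures shipped with the project):
#
#   author = Person.objects.create(name='Ada', age='36')
#   Post.objects.create(title='Hello GraphQL', author=author)
#   Post.objects.filter(author__name='Ada')    # all posts by that author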
| 19.851064
| 79
| 0.648446
|
ce6f6f114b2a2cee69a8a6a651e15429248de814
| 13,573
|
py
|
Python
|
src/covid19sim/run.py
|
mila-iqia/COVI-AgentSim
|
7e4dea42ad9c5dd251aa8d7546c647ad4f173d28
|
[
"Apache-2.0"
] | 13
|
2020-10-25T20:15:25.000Z
|
2022-03-14T06:34:32.000Z
|
src/covid19sim/run.py
|
mila-iqia/COVI-AgentSim
|
7e4dea42ad9c5dd251aa8d7546c647ad4f173d28
|
[
"Apache-2.0"
] | 6
|
2020-10-30T02:09:48.000Z
|
2022-03-09T12:48:22.000Z
|
src/covid19sim/run.py
|
mila-iqia/COVI-AgentSim
|
7e4dea42ad9c5dd251aa8d7546c647ad4f173d28
|
[
"Apache-2.0"
] | 6
|
2020-10-29T15:36:40.000Z
|
2021-12-05T18:06:45.000Z
|
"""
Main entrypoint for the execution of simulations.
The experimental settings of the simulations are managed via [Hydra](https://github.com/facebookresearch/hydra).
The root configuration file is located at `src/covid19sim/configs/simulation/config.yaml`.
All settings provided via commandline will override the ones loaded through the configuration files.
"""
import datetime
import logging
import os
import time
import typing
from pathlib import Path
import hydra
import numpy as np
from omegaconf import DictConfig
from covid19sim.locations.city import City
from covid19sim.utils.env import Env
from covid19sim.utils.constants import SECONDS_PER_DAY, SECONDS_PER_HOUR
from covid19sim.log.console_logger import ConsoleLogger
from covid19sim.inference.server_utils import DataCollectionServer
from covid19sim.utils.utils import dump_conf, dump_tracker_data, extract_tracker_data, parse_configuration, log
def _get_intervention_string(conf):
"""
Consolidates all the parameters to one single string.
Args:
conf (dict): yaml configuration of the experiment
Returns:
(str): a string to identify type of intervention being run
Raises:
(ValueError): if RISK_MODEL is unknown
"""
if conf['RISK_MODEL'] == "":
type_of_run = "UNMITIGATED"
if conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']:
type_of_run = "LOCKDOWN"
if conf['N_BEHAVIOR_LEVELS'] > 2:
type_of_run = "POST-LOCKDOWN NO TRACING"
return type_of_run
risk_model = conf['RISK_MODEL']
n_behavior_levels = conf['N_BEHAVIOR_LEVELS']
hhld_behavior = conf['MAKE_HOUSEHOLD_BEHAVE_SAME_AS_MAX_RISK_RESIDENT']
type_of_run = f"{risk_model} | HHLD_BEHAVIOR_SAME_AS_MAX_RISK_RESIDENT: {hhld_behavior} | N_BEHAVIOR_LEVELS:{n_behavior_levels} |"
if risk_model == "digital":
type_of_run += f" N_LEVELS_USED: 2 (1st and last) |"
type_of_run += f" TRACING_ORDER:{conf['TRACING_ORDER']} |"
type_of_run += f" TRACE_SYMPTOMS: {conf['TRACE_SYMPTOMS']} |"
type_of_run += f" INTERPOLATE_USING_LOCKDOWN_CONTACTS:{conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']} |"
type_of_run += f" MODIFY_BEHAVIOR: {conf['SHOULD_MODIFY_BEHAVIOR']}"
return type_of_run
if risk_model == "transformer":
type_of_run += f" USE_ORACLE: {conf['USE_ORACLE']}"
type_of_run += f" N_LEVELS_USED: {n_behavior_levels} |"
type_of_run += f" INTERPOLATE_USING_LOCKDOWN_CONTACTS:{conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']} |"
type_of_run += f" REC_LEVEL_THRESHOLDS: {conf['REC_LEVEL_THRESHOLDS']} |"
type_of_run += f" MAX_RISK_LEVEL: {conf['MAX_RISK_LEVEL']} |"
type_of_run += f" MODIFY_BEHAVIOR: {conf['SHOULD_MODIFY_BEHAVIOR']} "
type_of_run += f"\n RISK_MAPPING: {conf['RISK_MAPPING']}"
return type_of_run
if risk_model in ['heuristicv1', 'heuristicv2', 'heuristicv3', 'heuristicv4']:
type_of_run += f" N_LEVELS_USED: {n_behavior_levels} |"
type_of_run += f" INTERPOLATE_USING_LOCKDOWN_CONTACTS:{conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']} |"
type_of_run += f" MAX_RISK_LEVEL: {conf['MAX_RISK_LEVEL']} |"
type_of_run += f" MODIFY_BEHAVIOR: {conf['SHOULD_MODIFY_BEHAVIOR']}"
return type_of_run
raise ValueError(f"Unknown risk model:{risk_model}")
@hydra.main(config_path="configs/simulation/config.yaml")
def main(conf: DictConfig):
"""
Enables command line execution of the simulator.
Args:
conf (DictConfig): yaml configuration file
"""
# -------------------------------------------------
# ----- Load the experimental configuration -----
# -------------------------------------------------
conf = parse_configuration(conf)
# -------------------------------------
# ----- Create Output Directory -----
# -------------------------------------
if conf["outdir"] is None:
conf["outdir"] = str(Path(__file__) / "output")
timenow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
conf[
"outdir"
] = "{}/sim_v2_people-{}_days-{}_init-{}_uptake-{}_seed-{}_{}_{}".format(
conf["outdir"],
conf["n_people"],
conf["simulation_days"],
conf["init_fraction_sick"],
conf["APP_UPTAKE"],
conf["seed"],
timenow,
str(time.time_ns())[-6:],
)
if Path(conf["outdir"]).exists():
out_path = Path(conf["outdir"])
out_idx = 1
while (out_path.parent / (out_path.name + f"_{out_idx}")).exists():
out_idx += 1
conf["outdir"] = str(out_path.parent / (out_path.name + f"_{out_idx}"))
os.makedirs(conf["outdir"])
logfile = f"{conf['outdir']}/log_{timenow}.txt"
outfile = os.path.join(conf["outdir"], "data")
# ---------------------------------
# ----- Filter-Out Warnings -----
# ---------------------------------
import warnings
# warnings.filterwarnings("ignore")
# ----------------------------
# ----- Run Simulation -----
# ----------------------------
# correctness of configuration file
    assert not conf['RISK_MODEL'] != "" or conf['INTERVENTION_DAY'] >= 0, "risk model is given, but no intervention day specified"
assert conf['N_BEHAVIOR_LEVELS'] >= 2, "At least 2 behavior levels are required to model behavior changes"
if conf['TRACE_SYMPTOMS']:
warnings.warn("TRACE_SYMPTOMS: True hasn't been implemented. It will have no affect.")
log(f"RISK_MODEL = {conf['RISK_MODEL']}", logfile)
log(f"INTERVENTION_DAY = {conf['INTERVENTION_DAY']}", logfile)
log(f"seed: {conf['seed']}", logfile)
    # complete description of the intervention
type_of_run = _get_intervention_string(conf)
conf['INTERVENTION'] = type_of_run
log(f"Type of run: {type_of_run}", logfile)
if conf['COLLECT_TRAINING_DATA']:
data_output_path = os.path.join(conf["outdir"], "train.zarr")
collection_server = DataCollectionServer(
data_output_path=data_output_path,
config_backup=conf,
human_count=conf['n_people'],
simulation_days=conf['simulation_days'],
)
collection_server.start()
else:
collection_server = None
conf["outfile"] = outfile
city = simulate(
n_people=conf["n_people"],
init_fraction_sick=conf["init_fraction_sick"],
start_time=conf["start_time"],
simulation_days=conf["simulation_days"],
outfile=conf["outfile"],
out_chunk_size=conf["out_chunk_size"],
seed=conf["seed"],
conf=conf,
logfile=logfile
)
# write the full configuration file along with git commit hash
dump_conf(city.conf, "{}/full_configuration.yaml".format(city.conf["outdir"]))
# log the simulation statistics
city.tracker.write_metrics()
# (baseball-cards) write full simulation data
if hasattr(city, "tracker") and \
hasattr(city.tracker, "collection_server") and \
isinstance(city.tracker.collection_server, DataCollectionServer) and \
city.tracker.collection_server is not None:
city.tracker.collection_server.stop_gracefully()
city.tracker.collection_server.join()
# if COLLECT_TRAINING_DATA is true
if not conf["tune"]:
# ----------------------------------------------
# ----- Not Tune: Collect Training Data -----
# ----------------------------------------------
# write values to train with
train_priors = os.path.join(f"{conf['outdir']}/train_priors.pkl")
city.tracker.write_for_training(city.humans, train_priors, conf)
timenow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log("Dumping Tracker Data in {}".format(conf["outdir"]), logfile)
Path(conf["outdir"]).mkdir(parents=True, exist_ok=True)
filename = f"tracker_data_n_{conf['n_people']}_seed_{conf['seed']}_{timenow}.pkl"
data = extract_tracker_data(city.tracker, conf)
dump_tracker_data(data, conf["outdir"], filename)
else:
# ------------------------------------------------------
        # ----- Tune: Write Logs and Tracker Data -----
# ------------------------------------------------------
timenow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log("Dumping Tracker Data in {}".format(conf["outdir"]), logfile)
Path(conf["outdir"]).mkdir(parents=True, exist_ok=True)
filename = f"tracker_data_n_{conf['n_people']}_seed_{conf['seed']}_{timenow}.pkl"
data = extract_tracker_data(city.tracker, conf)
dump_tracker_data(data, conf["outdir"], filename)
# Shutdown the data collection server if one's running
if collection_server is not None:
collection_server.stop_gracefully()
collection_server.join()
# Remove the IPCs if they were stored somewhere custom
if os.environ.get("COVID19SIM_IPC_PATH", None) is not None:
print("<<<<<<<< Cleaning Up >>>>>>>>")
for file in Path(os.environ.get("COVID19SIM_IPC_PATH")).iterdir():
if file.name.endswith(".ipc"):
print(f"Removing {str(file)}...")
os.remove(str(file))
return conf
def simulate(
n_people: int = 1000,
init_fraction_sick: float = 0.01,
start_time: datetime.datetime = datetime.datetime(2020, 2, 28, 0, 0),
simulation_days: int = 30,
outfile: typing.Optional[typing.AnyStr] = None,
out_chunk_size: typing.Optional[int] = None,
seed: int = 0,
conf: typing.Optional[typing.Dict] = None,
logfile: str = None,
):
"""
Runs a simulation.
Args:
n_people (int, optional): population size in simulation. Defaults to 1000.
init_fraction_sick (float, optional): population fraction initialized with Covid-19. Defaults to 0.01.
start_time (datetime, optional): Initial calendar date. Defaults to February 28, 2020.
        simulation_days (int, optional): Number of days to run the simulation. Defaults to 30.
outfile (str, optional): Location to write logs. Defaults to None.
out_chunk_size (int, optional): size of chunks to write in logs. Defaults to None.
seed (int, optional): [description]. Defaults to 0.
conf (dict): yaml configuration of the experiment.
logfile (str): filepath where the console output and final tracked metrics will be logged. Prints to the console only if None.
Returns:
city (covid19sim.locations.city.City): The city object referencing people, locations, and the tracker post-simulation.
"""
if conf is None:
conf = {}
conf["n_people"] = n_people
conf["init_fraction_sick"] = init_fraction_sick
conf["start_time"] = start_time
conf["simulation_days"] = simulation_days
conf["outfile"] = outfile
conf["out_chunk_size"] = out_chunk_size
conf["seed"] = seed
conf['logfile'] = logfile
# set days and mixing constants
conf['_MEAN_DAILY_UNKNOWN_CONTACTS'] = conf['MEAN_DAILY_UNKNOWN_CONTACTS']
conf['_ENVIRONMENTAL_INFECTION_KNOB'] = conf['ENVIRONMENTAL_INFECTION_KNOB']
conf['_CURRENT_PREFERENTIAL_ATTACHMENT_FACTOR'] = conf['BEGIN_PREFERENTIAL_ATTACHMENT_FACTOR']
start_time_offset_days = conf['COVID_START_DAY']
intervention_start_days = conf['INTERVENTION_DAY']
# start of COVID spread
conf['COVID_SPREAD_START_TIME'] = start_time
# start of intervention
conf['INTERVENTION_START_TIME'] = None
if intervention_start_days >= 0:
conf['INTERVENTION_START_TIME'] = start_time + datetime.timedelta(days=intervention_start_days)
# start of simulation without COVID
start_time -= datetime.timedelta(days=start_time_offset_days)
conf['SIMULATION_START_TIME'] = str(start_time)
# adjust the simulation days
conf['simulation_days'] += conf['COVID_START_DAY']
simulation_days = conf['simulation_days']
console_logger = ConsoleLogger(frequency=SECONDS_PER_DAY, logfile=logfile, conf=conf)
logging.root.setLevel(getattr(logging, conf["LOGGING_LEVEL"].upper()))
rng = np.random.RandomState(seed)
env = Env(start_time)
city_x_range = (0, 1000)
city_y_range = (0, 1000)
city = City(
env, n_people, init_fraction_sick, rng, city_x_range, city_y_range, conf, logfile
)
# we might need to reset the state of the clusters held in shared memory (server or not)
if conf.get("RESET_INFERENCE_SERVER", False):
if conf.get("USE_INFERENCE_SERVER"):
inference_frontend_address = conf.get("INFERENCE_SERVER_ADDRESS", None)
print("requesting cluster reset from inference server...")
from covid19sim.inference.server_utils import InferenceClient
temporary_client = InferenceClient(
server_address=inference_frontend_address
)
temporary_client.request_reset()
else:
from covid19sim.inference.heavy_jobs import DummyMemManager
DummyMemManager.global_cluster_map = {}
# Initiate city process, which runs every hour
env.process(city.run(SECONDS_PER_HOUR, outfile))
# initiate humans
for human in city.humans:
env.process(human.run())
env.process(console_logger.run(env, city=city))
# Run simulation until termination
env.run(until=env.ts_initial + simulation_days * SECONDS_PER_DAY)
return city
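# A direct call could look like the sketch below (illustrative only: in this module the
# conf dict is normally built by parse_configuration() inside main(), and it must already
# contain every key accessed in simulate()):
#
#   city = simulate(n_people=500, init_fraction_sick=0.02, simulation_days=20,
#                   seed=42, conf=parsed_conf, logfile="run.log")
#   city.tracker.write_metrics()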
if __name__ == "__main__":
main()
| 40.516418
| 134
| 0.644957
|
1106f3dc35563fcdbf81a23ba878820bbba3bd2c
| 1,858
|
py
|
Python
|
2019/19.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
2019/19.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
2019/19.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
import itertools
import functools
import intcode
DATA = '19.txt'
def code1():
with open(DATA) as f:
strcode = f.readline().strip()
code = intcode.parse_data(strcode)
computer = intcode.Intcode(code)
computer.verbose_output = False
zone = [['.'] * 50 for _ in range(50)]
nbeams = 0
for x in range(50):
for y in range(50):
computer.reset()
computer.run([x, y], return_output=True)
if computer.outvalues[0] == 1:
zone[y][x] = '#'
nbeams += 1
for _ in zone:
print(''.join(_))
print('1>', nbeams)
def code2():
"""
    hypotheses: xmin and xmax are increasing and there is no gap between xmin and xmax
"""
with open(DATA) as f:
strcode = f.readline().strip()
code = intcode.parse_data(strcode)
computer = intcode.Intcode(code)
computer.verbose_output = False
@functools.lru_cache(maxsize=None)
def beam_bounds(xmin, y):
for x in itertools.count(xmin):
computer.reset()
computer.run([x, y], return_output=True)
if computer.outvalues[0] == 1:
xmin = x
break
for x in itertools.count(x + 1):
computer.reset()
computer.run([x, y], return_output=True)
if computer.outvalues[0] == 0:
xmax = x - 1
break
return xmin, xmax
y0 = 100
xmin, xmax = beam_bounds(0, y0)
for y in itertools.count(y0 + 1):
xmin, xmax = beam_bounds(xmin, y)
if xmax - xmin + 1 >= 100:
computer.reset()
computer.run([xmax - 99, y + 99], return_output=True)
if computer.outvalues[0] == 1:
print('2>', xmax - 99, y, (xmax - 99) * 10000 + y)
break
code2()
| 25.452055
| 85
| 0.527449
|
b898b60fe47126b6d157b0f4fb35806a35b7844d
| 13,234
|
py
|
Python
|
python/paddle/tensor/__init__.py
|
jurafish/Paddle
|
15724e745409cf6af3df99ae3eec90511e482cbc
|
[
"Apache-2.0"
] | 17,085
|
2016-11-18T06:40:52.000Z
|
2022-03-31T22:52:32.000Z
|
python/paddle/tensor/__init__.py
|
jurafish/Paddle
|
15724e745409cf6af3df99ae3eec90511e482cbc
|
[
"Apache-2.0"
] | 29,769
|
2016-11-18T06:35:22.000Z
|
2022-03-31T16:46:15.000Z
|
python/paddle/tensor/__init__.py
|
jurafish/Paddle
|
15724e745409cf6af3df99ae3eec90511e482cbc
|
[
"Apache-2.0"
] | 4,641
|
2016-11-18T07:43:33.000Z
|
2022-03-31T15:15:02.000Z
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .attribute import rank # noqa: F401
from .attribute import shape # noqa: F401
from .attribute import real # noqa: F401
from .attribute import imag # noqa: F401
from .creation import to_tensor # noqa: F401
from .creation import diag # noqa: F401
from .creation import diagflat # noqa: F401
from .creation import eye # noqa: F401
from .creation import linspace # noqa: F401
from .creation import ones # noqa: F401
from .creation import ones_like # noqa: F401
from .creation import zeros # noqa: F401
from .creation import zeros_like # noqa: F401
from .creation import arange # noqa: F401
from .creation import full # noqa: F401
from .creation import full_like # noqa: F401
from .creation import triu # noqa: F401
from .creation import tril # noqa: F401
from .creation import meshgrid # noqa: F401
from .creation import empty # noqa: F401
from .creation import empty_like # noqa: F401
from .linalg import matmul # noqa: F401
from .linalg import dot # noqa: F401
from .linalg import norm # noqa: F401
from .linalg import cond # noqa: F401
from .linalg import transpose # noqa: F401
from .linalg import dist # noqa: F401
from .linalg import t # noqa: F401
from .linalg import cross # noqa: F401
from .linalg import cholesky # noqa: F401
from .linalg import bmm # noqa: F401
from .linalg import histogram # noqa: F401
from .linalg import mv # noqa: F401
from .linalg import eig # noqa: F401
from .linalg import matrix_power # noqa: F401
from .linalg import qr # noqa: F401
from .linalg import eigvals # noqa: F401
from .linalg import multi_dot # noqa: F401
from .linalg import svd # noqa: F401
from .linalg import eigh # noqa: F401
from .linalg import pinv # noqa: F401
from .linalg import solve # noqa: F401
from .logic import equal # noqa: F401
from .logic import greater_equal # noqa: F401
from .logic import greater_than # noqa: F401
from .logic import is_empty # noqa: F401
from .logic import less_equal # noqa: F401
from .logic import less_than # noqa: F401
from .logic import logical_and # noqa: F401
from .logic import logical_not # noqa: F401
from .logic import logical_or # noqa: F401
from .logic import logical_xor # noqa: F401
from .logic import bitwise_and # noqa: F401
from .logic import bitwise_or # noqa: F401
from .logic import bitwise_xor # noqa: F401
from .logic import bitwise_not # noqa: F401
from .logic import not_equal # noqa: F401
from .logic import allclose # noqa: F401
from .logic import equal_all # noqa: F401
from .logic import is_tensor # noqa: F401
from .manipulation import cast # noqa: F401
from .manipulation import concat # noqa: F401
from .manipulation import expand # noqa: F401
from .manipulation import broadcast_to # noqa: F401
from .manipulation import broadcast_tensors # noqa: F401
from .manipulation import expand_as # noqa: F401
from .manipulation import tile # noqa: F401
from .manipulation import flatten # noqa: F401
from .manipulation import flatten_ # noqa: F401
from .manipulation import gather # noqa: F401
from .manipulation import gather_nd # noqa: F401
from .manipulation import reshape # noqa: F401
from .manipulation import reshape_ # noqa: F401
from .manipulation import flip as reverse # noqa: F401
from .manipulation import scatter # noqa: F401
from .manipulation import scatter_ # noqa: F401
from .manipulation import scatter_nd_add # noqa: F401
from .manipulation import scatter_nd # noqa: F401
from .manipulation import shard_index # noqa: F401
from .manipulation import slice # noqa: F401
from .manipulation import split # noqa: F401
from .manipulation import squeeze # noqa: F401
from .manipulation import squeeze_ # noqa: F401
from .manipulation import stack # noqa: F401
from .manipulation import strided_slice # noqa: F401
from .manipulation import unique # noqa: F401
from .manipulation import unique_consecutive # noqa: F401
from .manipulation import unsqueeze # noqa: F401
from .manipulation import unsqueeze_ # noqa: F401
from .manipulation import unstack # noqa: F401
from .manipulation import flip # noqa: F401
from .manipulation import unbind # noqa: F401
from .manipulation import roll # noqa: F401
from .manipulation import chunk # noqa: F401
from .manipulation import tensordot # noqa: F401
from .math import abs # noqa: F401
from .math import acos # noqa: F401
from .math import asin # noqa: F401
from .math import atan # noqa: F401
from .math import ceil # noqa: F401
from .math import ceil_ # noqa: F401
from .math import cos # noqa: F401
from .math import tan # noqa: F401
from .math import cosh # noqa: F401
from .math import cumsum # noqa: F401
from .math import cumprod # noqa: F401
from .math import exp # noqa: F401
from .math import exp_ # noqa: F401
from .math import expm1 # noqa: F401
from .math import floor # noqa: F401
from .math import floor_ # noqa: F401
from .math import increment # noqa: F401
from .math import log # noqa: F401
from .math import multiplex # noqa: F401
from .math import pow # noqa: F401
from .math import reciprocal # noqa: F401
from .math import reciprocal_ # noqa: F401
from .math import round # noqa: F401
from .math import round_ # noqa: F401
from .math import rsqrt # noqa: F401
from .math import rsqrt_ # noqa: F401
from .math import scale # noqa: F401
from .math import scale_ # noqa: F401
from .math import sign # noqa: F401
from .math import sin # noqa: F401
from .math import sinh # noqa: F401
from .math import sqrt # noqa: F401
from .math import sqrt_ # noqa: F401
from .math import square # noqa: F401
from .math import stanh # noqa: F401
from .math import sum # noqa: F401
from .math import tanh # noqa: F401
from .math import tanh_ # noqa: F401
from .math import add_n # noqa: F401
from .math import max # noqa: F401
from .math import maximum # noqa: F401
from .math import min # noqa: F401
from .math import minimum # noqa: F401
from .math import mm # noqa: F401
from .math import divide # noqa: F401
from .math import floor_divide # noqa: F401
from .math import remainder # noqa: F401
from .math import mod # noqa: F401
from .math import floor_mod # noqa: F401
from .math import multiply # noqa: F401
from .math import add # noqa: F401
from .math import add_ # noqa: F401
from .math import subtract # noqa: F401
from .math import subtract_ # noqa: F401
from .math import atan2 # noqa: F401
from .math import logsumexp # noqa: F401
from .math import inverse # noqa: F401
from .math import log2 # noqa: F401
from .math import log10 # noqa: F401
from .math import log1p # noqa: F401
from .math import erf # noqa: F401
from .math import addmm # noqa: F401
from .math import clip # noqa: F401
from .math import clip_ # noqa: F401
from .math import trace # noqa: F401
from .math import kron # noqa: F401
from .math import isfinite # noqa: F401
from .math import isinf # noqa: F401
from .math import isnan # noqa: F401
from .math import prod # noqa: F401
from .math import all # noqa: F401
from .math import any # noqa: F401
from .math import broadcast_shape # noqa: F401
from .math import conj # noqa: F401
from .math import trunc # noqa: F401
from .math import digamma # noqa: F401
from .math import neg # noqa: F401
from .math import lgamma # noqa: F401
from .math import diagonal # noqa: F401
from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
from .random import normal # noqa: F401
from .random import uniform # noqa: F401
from .random import uniform_ # noqa: F401
from .random import randn # noqa: F401
from .random import rand # noqa: F401
from .random import randint # noqa: F401
from .random import randperm # noqa: F401
from .search import argmax # noqa: F401
from .search import argmin # noqa: F401
from .search import argsort # noqa: F401
from .search import searchsorted # noqa: F401
from .search import topk # noqa: F401
from .search import where # noqa: F401
from .search import index_select # noqa: F401
from .search import nonzero # noqa: F401
from .search import sort # noqa: F401
from .search import index_sample # noqa: F401
from .search import masked_select # noqa: F401
from .stat import mean # noqa: F401
from .stat import std # noqa: F401
from .stat import var # noqa: F401
from .stat import numel # noqa: F401
from .stat import median # noqa: F401
from .to_string import set_printoptions # noqa: F401
from .array import array_length # noqa: F401
from .array import array_read # noqa: F401
from .array import array_write # noqa: F401
from .array import create_array # noqa: F401
from .einsum import einsum # noqa: F401
from . import fft
from . import signal
#this list used in math_op_patch.py for _binary_creator_
tensor_method_func = [ #noqa
'matmul',
'dot',
'norm',
'cond',
'transpose',
'dist',
't',
'cross',
'cholesky',
'bmm',
'histogram',
'mv',
'matrix_power',
'qr',
'eigvals',
'abs',
'acos',
'all',
'any',
'asin',
'atan',
'ceil',
'ceil_',
'cos',
'cosh',
'cumsum',
'cumprod',
'exp',
'exp_',
'floor',
'floor_',
'increment',
'log',
'log2',
'log10',
'logsumexp',
'multiplex',
'pow',
'prod',
'reciprocal',
'reciprocal_',
'round',
'round_',
'rsqrt',
'rsqrt_',
'scale',
'scale_',
'sign',
'sin',
'sinh',
'sqrt',
'sqrt_',
'square',
'stanh',
'sum',
'tanh',
'tanh_',
'add_n',
'max',
'maximum',
'min',
'minimum',
'mm',
'divide',
'floor_divide',
'remainder',
'mod',
'floor_mod',
'multiply',
'add',
'add_',
'subtract',
'subtract_',
'atan',
'logsumexp',
'inverse',
'log1p',
'erf',
'addmm',
'clip',
'clip_',
'trace',
'kron',
'isfinite',
'isinf',
'isnan',
'broadcast_shape',
'conj',
'neg',
'lgamma',
'equal',
'equal_all',
'greater_equal',
'greater_than',
'is_empty',
'less_equal',
'less_than',
'logical_and',
'logical_not',
'logical_or',
'logical_xor',
'not_equal',
'allclose',
'is_tensor',
'cast',
'concat',
'expand',
'broadcast_to',
'expand_as',
'flatten',
'flatten_',
'gather',
'gather_nd',
'reshape',
'reshape_',
'reverse',
'scatter',
'scatter_',
'scatter_nd_add',
'scatter_nd',
'shard_index',
'slice',
'split',
'chunk',
'tensordot',
'squeeze',
'squeeze_',
'stack',
'strided_slice',
'transpose',
'unique',
'unique_consecutive',
'unsqueeze',
'unsqueeze_',
'unstack',
'flip',
'unbind',
'roll',
'tile',
'argmax',
'argmin',
'argsort',
'masked_select',
'topk',
'where',
'index_select',
'nonzero',
'sort',
'index_sample',
'mean',
'std',
'var',
'numel',
'median',
'rank',
'shape',
'real',
'imag',
'digamma',
'diagonal',
'trunc',
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'bitwise_not',
'broadcast_tensors',
'eig',
'uniform_',
'multi_dot',
'solve',
]
#this list used in math_op_patch.py for magic_method bind
magic_method_func = [
('__and__', 'bitwise_and'),
('__or__', 'bitwise_or'),
('__xor__', 'bitwise_xor'),
('__invert__', 'bitwise_not'),
]
| 32.515971
| 74
| 0.617803
|
7ba48f47c944357c4338a0bdf160afbe96b02afa
| 32
|
py
|
Python
|
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/gumtree/diff/data/update/src_8_logic_operators.py
|
JetBrains-Research/ast-transformations
|
0ab408af3275b520cc87a473f418c4b4dfcb0284
|
[
"MIT"
] | 8
|
2021-01-19T21:15:54.000Z
|
2022-02-23T19:16:25.000Z
|
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/gumtree/diff/data/update/src_8_logic_operators.py
|
JetBrains-Research/ast-transformations
|
0ab408af3275b520cc87a473f418c4b4dfcb0284
|
[
"MIT"
] | 4
|
2020-11-17T14:28:25.000Z
|
2022-02-24T07:54:28.000Z
|
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/gumtree/diff/data/update/src_8_logic_operators.py
|
nbirillo/ast-transformations
|
717706765a2da29087a0de768fc851698886dd65
|
[
"MIT"
] | 1
|
2022-02-23T19:16:30.000Z
|
2022-02-23T19:16:30.000Z
|
a = True
b = False
res = a and b
| 10.666667
| 13
| 0.59375
|
eeaa920c3cc48053ab864c88759164610aac5de1
| 613
|
py
|
Python
|
credential.py
|
imekenye/Password-Locker
|
d5e75a593c313cd160261d5cb059118b87737973
|
[
"MIT"
] | null | null | null |
credential.py
|
imekenye/Password-Locker
|
d5e75a593c313cd160261d5cb059118b87737973
|
[
"MIT"
] | null | null | null |
credential.py
|
imekenye/Password-Locker
|
d5e75a593c313cd160261d5cb059118b87737973
|
[
"MIT"
] | null | null | null |
import random
class Credential:
"""
    Class that creates new instances of credentials
"""
credential_list = []
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
def save_credential(self):
"""
        save_credential method saves credential objects into credential_list
"""
Credential.credential_list.append(self)
@classmethod
def display_credential(cls):
"""
method that returns the credentials list
"""
return cls.credential_list
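# Example usage (sketch, values are placeholders):
#
#   cred = Credential("jane_doe", "s3cret-pass", "jane@example.com")
#   cred.save_credential()
#   Credential.display_credential()   # -> [cred]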
| 22.703704
| 78
| 0.624796
|
2faba0d545aed6ad243a032dbad3ed6c11c9c952
| 1,867
|
py
|
Python
|
count_carbonyls.py
|
choderalab/nano-drugbank
|
32f735c18c0d61f5bd128fc045ff5978d13a31f8
|
[
"MIT"
] | 5
|
2017-07-21T22:30:08.000Z
|
2021-12-03T10:24:12.000Z
|
count_carbonyls.py
|
choderalab/nano-drugbank
|
32f735c18c0d61f5bd128fc045ff5978d13a31f8
|
[
"MIT"
] | null | null | null |
count_carbonyls.py
|
choderalab/nano-drugbank
|
32f735c18c0d61f5bd128fc045ff5978d13a31f8
|
[
"MIT"
] | 6
|
2017-03-31T18:32:12.000Z
|
2021-04-13T03:55:06.000Z
|
from __future__ import print_function
__author__ = 'isikm'
import pandas as pd
from openeye import oechem, oedepict
df_drugbank_smiles=pd.DataFrame.from_csv('df_drugbank_smiles.csv', encoding='utf-8')
#Count number of carbonyls
for i, row in enumerate(df_drugbank_smiles.iterrows()):
# Count number of non-aromatic carbonyls in each row using SMARTS matching
smiles = df_drugbank_smiles.loc[i,"smiles"]
#del mol
mol = oechem.OEGraphMol()
oechem.OESmilesToMol(mol, str(smiles))
#create a substructure search object - non-aromatic carbonyl
queried_substructure="[CX3]=[OX1]"
ss = oechem.OESubSearch(queried_substructure)
oechem.OEPrepareSearch(mol, ss)
# loop over matches to count
matched_ss_list=[]
count=0
for index, match in enumerate(ss.Match(mol)):
if ss.SingleMatch(mol) == True:
matched_ss_list.append((index, match))
count = len(matched_ss_list)
non_aromatic_carbonyl_count=count
# Count number of aromatic carbonyls in each row using SMARTS matching
#create a substructure search object - aromatic carbonyl
queried_substructure="[cX3]=[OX1]"
ss2 = oechem.OESubSearch(queried_substructure)
oechem.OEPrepareSearch(mol, ss2)
# loop over matches to count
matched_ss2_list=[]
count=0
for index, match in enumerate(ss2.Match(mol)):
if ss2.SingleMatch(mol) == True:
matched_ss2_list.append((index, match))
count = len(matched_ss2_list)
aromatic_carbonyl_count=count
# Total number of carbonyl groups
total_carbonyl_count = non_aromatic_carbonyl_count + aromatic_carbonyl_count
# add number of matches to dataframe
df_drugbank_smiles.loc[i,"carbonyl"] = total_carbonyl_count
#write to csv
df_drugbank_smiles.to_csv("df_drugbank_smiles.csv", encoding='utf-8')
print("Done.")
| 30.112903
| 84
| 0.720407
|
34281474bcee7f244673d335ae38e85274d859ce
| 70,152
|
py
|
Python
|
Gym/env_wrapper.py
|
Alee08/MultiUAV-RL-RB
|
96a0b350b3176ff6011abe97491de9707416c289
|
[
"ImageMagick"
] | 4
|
2021-07-22T17:16:30.000Z
|
2022-03-03T11:14:12.000Z
|
Gym/env_wrapper.py
|
Alee08/MultiUAV-RL-RB
|
96a0b350b3176ff6011abe97491de9707416c289
|
[
"ImageMagick"
] | null | null | null |
Gym/env_wrapper.py
|
Alee08/MultiUAV-RL-RB
|
96a0b350b3176ff6011abe97491de9707416c289
|
[
"ImageMagick"
] | null | null | null |
import numpy as np
import sys
import pickle
from os import mkdir
from os.path import join, isdir
from numpy import linalg as LA
from math import sqrt, inf
from decimal import Decimal
import time
import gym
import envs
from gym import spaces, logger
from scenario_objects import Point, Cell, User, Environment
import plotting
from configuration import Config
from configuration import j
from settings.dir_gen import *
import agent
from Astar import *
from settings.hosp_scenario import hosp_features
conf = Config()
if (conf.HOSP_SCENARIO == True):
from restraining_bolt import RestrainingBolt
from temp_wrapper import UAVsTemporalWrapper
from temprl.temprl.wrapper import *
#from iter import j
hosp_f = hosp_features()
# _________ Main training parameters:_________
if conf.HOSP_SCENARIO == True:
SHOW_EVERY = 50
LEARNING_RATE = 0.01 #alpha
#DISCOUNT = 0.95 originale
DISCOUNT = 0.99 #gamma
EPSILON = 0.01
EPSILON_DECR = False
EPSILON_DECREMENT = 0.999
#EPSILON_MIN = 0.01
#EPSILON_MIN2 = 0.4 originale
#EPSILON_MIN2 = 0.1
EPSILON_MIN = 0.01
EPSILON_MIN2 = 0.1
max_value_for_Rmax = 100
#ITERATIONS_PER_EPISODE = 160
else:
SHOW_EVERY = 30
LEARNING_RATE = 1.0
DISCOUNT = 0.95
EPSILON = 1.0
EPSILON_DECREMENT = 0.998
EPSILON_MIN = 0.01
EPSILON_MIN2 = 0.4
max_value_for_Rmax = 100
conf.ITERATIONS_PER_EPISODE = 30
#--------------------------------------------
'''#parametri marco
SHOW_EVERY = 50
LEARNING_RATE = 0.1 #alpha
DISCOUNT = 0.99 #gamma
EPSILON = 0.1
EPSILON_DECREMENT = 0.99
EPSILON_MIN = 1
EPSILON_MIN2 = 0.01
max_value_for_Rmax = 1
ITERATIONS_PER_EPISODE = 30'''
# _____________________________________________
policy_n = []
#with open("iter.py", "w") as text_file:
# text_file.write('j =' + '{}'.format(j))
'''os.system('python3 scenario_objects.py')
os.system('python3 plotting.py')
os.system('python3 my_utils.py')
#os.system('python3 load_and_save_data.py')
os.system('python3 restraining_bolt.py')
os.system('python3 temp_wrapper.py')
os.system('python3 ./envs/custom_env_dir/custom_uav_env.py')
os.system('python3 ./temprl/temprl/automata.py')
os.system('python3 ./temprl/temprl/wrapper.py')
os.system('python3 agent.py')'''
#ITERATIONS_PER_EPISODE = iteration_rest()
#Colors, UAVS_POS = UAV_i(UAVS_POS, hosp_pos, Colors, j)
#UAVS_POS = UAVS_POS1[j]
#hosp_pos = [(5, 9), (3, 2), (7,7), (6,6)]
#Colors = [['None', 'gold', 'purple'], ['None', 'orange', 'brown']]
#nb_colors = 2
#UAVS_POS = env.UAVS_POS
'''def changePos_idx(newIdx):
import my_utils
my_utils.o = newIdx
changePos_idx(j)'''
# __________________ Main loadings: __________________
env = gym.make('UAVEnv-v0')
MAX_UAV_HEIGHT = env.max_uav_height
n_actions = env.nb_actions
actions_indeces = range(n_actions)
cs_cells = env.cs_cells
cs_cells_coords_for_UAVs = [(cell._x_coord, cell._y_coord) for cell in cs_cells] if conf.DIMENSION_2D==True else [(cell._x_coord, cell._y_coord, cell._z_coord) for cell in cs_cells]
#cells_matrix = env.cells_matrix
action_set_min = env.action_set_min
if (conf.UNLIMITED_BATTERY==False):
q_table_action_set = env.q_table_action_set
charging_set = env.charging_set
come_home_set = env.come_home_set
reset_uavs = env.reset_uavs
reset_priority = env.reset_priority
print(env.max_uav_height, env.nb_actions, range(n_actions), env.cs_cells,
[(cell._x_coord, cell._y_coord) for cell in cs_cells], action_set_min)
#breakpoint()
plot = plotting.Plot()
centroids = env.cluster_centroids
# Scale centroids according to the selected resolution:
env_centroids = [(centroid[0]/conf.CELL_RESOLUTION_PER_COL, centroid[1]/conf.CELL_RESOLUTION_PER_ROW) for centroid in centroids]
# _____________________________________________________
def show_and_save_info(q_table_init, q_table, dimension_space, battery_type, users_request, reward_func,
case_directory):
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Save the main settings in the 'env_and_train_info.txt' file and show them before training starts. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
info = []
info1 = "\n\n_______________________________________ENVIRONMENT AND TRAINING INFO: _______________________________________\n"
info.append(info1)
info2 = "\nTraining:\n"
info.append(info2)
info3 = "\nconf.EPISODES: " + str(conf.EPISODES)
info.append(info3)
info4 = "\nITERATIONS PER EPISODE: " + str(conf.ITERATIONS_PER_EPISODE)
info.append(info4)
info5 = "\nINITIAL EPSILON: " + str(EPSILON)
info.append(info5)
info6 = "\nMINIMUM EPSILON: " + str(EPSILON_MIN)
info.append(info6)
info31 = "\nEPSILON DECREMENT: " + str(EPSILON_DECREMENT)
info.append(info31)
info7 = "\nLEARNING RATE: " + str(LEARNING_RATE)
info.append(info7)
info8 = "\nDISCOUNT RATE: " + str(DISCOUNT)
info.append(info8)
if (q_table_init == "Max Reward"):
info9 = "\nQ-TABLE INITIALIZATION: " + q_table_init + " with a Rmax value equal to: " + str(max_value_for_Rmax)
else:
info9 = "\nQ-TABLE INITIALIZATION: " + q_table_init
info.append(info9)
info28 = "\nQ-TABLE DIMENSION PER UAV: " + str(len(q_table))
info.append(info28)
info29 = "\nREWARD FUNCTION USED: " + str(reward_func) + "\n\n"
info.append(info29)
info10 = "\nEnvironment:\n"
info.append(info10)
if dimension_space == "2D":
info11 = "\nMAP DIMENSION AT MINIMUM RESOLUTION: " + str(conf.AREA_WIDTH) + "X" + str(conf.AREA_HEIGHT)
info12 = "\nMAP DIMENSION AT DESIRED RESOLUTION: " + str(conf.CELLS_ROWS) + "X" + str(conf.CELLS_COLS)
info.append(info11)
info.append(info12)
else:
Z_DIM = MAX_UAV_HEIGHT - conf.MIN_UAV_HEIGHT
info11 = "\nMAP DIMENSION AT MINIMUM RESOLUTION: " + str(conf.AREA_WIDTH) + "X" + str(
conf.AREA_HEIGHT) + "X" + str(Z_DIM)
info12 = "\nMAP DIMENSION AT DESIRED RESOLUTION: " + str(conf.CELLS_ROWS) + "X" + str(
conf.CELLS_COLS) + "X" + str(Z_DIM)
info13 = "\nMINIMUM UAVs FLIGHT HEIGHT: " + str(conf.MIN_UAV_HEIGHT)
info14 = "\nMAXIMUM UAVs FLIGHT HEIGHT: " + str(MAX_UAV_HEIGHT)
info32 = "\nMINIMUM COVERAGE PERCENTAGE OF OBSTACLES: " + str(conf.MIN_OBS_PER_AREA * 100) + " %"
info33 = "\nMAXIMUM COVERAGE PERCENTAGE OF OBSTACELS: " + str(conf.MAX_OBS_PER_AREA * 100) + " %"
info34 = "\nMAXIMUM FLIGHT HEIGHT OF A UAV: " + str(
MAX_UAV_HEIGHT) + ", equal to the height of the highest obstacle"
info35 = "\nMINIMUM FLIGHT HEIGHT OF A UAV: " + str(
conf.MIN_UAV_HEIGHT) + ", equal to the height of the Charging Stations"
info36 = "\nUAV MOTION STEP ALONG Z-AXIS: " + str(conf.UAV_Z_STEP)
info.append(info36)
info.append(info11)
info.append(info12)
info.append(info13)
info.append(info14)
info.append(info32)
info.append(info33)
info.append(info34)
info.append(info35)
info.append(info36)
info15 = "\nUAVs NUMBER: " + str(conf.N_UAVS)
info.append(info15)
if (dimension_space == "2D"):
uavs_coords = [
"UAV " + str(uav_idx + 1) + ": " + str((env.agents[uav_idx]._x_coord, env.agents[uav_idx]._y_coord)) for
uav_idx in range(conf.N_UAVS)]
info16 = "\nUAVs INITIAL COORDINATES: " + str(uavs_coords)
else:
uavs_coords = ["UAV " + str(uav_idx + 1) + ": " + str(
(env.agents[uav_idx]._x_coord, env.agents[uav_idx]._y_coord, env.agents[uav_idx]._z_coord)) for uav_idx in
range(conf.N_UAVS)]
info16 = "\nUAVs INITIAL COORDINATES: " + str(uavs_coords)
info40 = "\n UAVs FOOTPRINT DIMENSION: " + str(conf.ACTUAL_UAV_FOOTPRINT)
info.append(info40)
info.append(info16)
info17 = "\nUSERS CLUSTERS NUMBER: " + str(len(env.cluster_centroids))
info30 = "\nUSERS INITIAL NUMBER: " + str(env.n_users)
info.append(info17)
info.append(info30)
centroids_coords = ["CENTROIDS: " + str(centroid_idx + 1) + ": " + str(
(env.cluster_centroids[centroid_idx][0], env.cluster_centroids[centroid_idx][1])) for centroid_idx in
range(len(env.cluster_centroids))]
info18 = "\nUSERS CLUSTERS PLANE-COORDINATES: " + str(centroids_coords)
info37 = "\nCLUSTERS RADIUSES: " + str(env.clusters_radiuses)
info.append(info37)
info.append(info18)
info19 = "\nDIMENION SPACE: " + str(dimension_space)
info.append(info19)
info20 = "\nBATTERY: " + str(battery_type)
info.append(info20)
info21 = "\nUSERS SERVICE TIME REQUEST: " + str(users_request)
info.append(info21)
if (conf.STATIC_REQUEST == True):
info22 = "\nUSERS REQUEST: Static"
else:
info22 = "\nUSERS REQUEST: Dynamic"
info.append(info22)
if (conf.USERS_PRIORITY == False):
info23 = "\nUSERS ACCOUNTS: all the same"
else:
info23 = "\nUSERS ACCOUNTS: " + str(USERS_ACCOUNTS)
info.append(info23)
if (conf.INF_REQUEST == True):
        # If the users service request is infinite, then we assume that the UAVs are providing only one service.
info24 = "\nNUMBER SERVICES PROVIDED BY UAVs: 1"
else:
info24 = "\nNUMBER SERVICES PROVIDED BY UAVs: 3"
info.append(info24)
if (conf.UNLIMITED_BATTERY == True):
info25 = "\nCHARGING STATIONS NUMBER: N.D."
else:
info25 = "\nCHARGING STATIONS NUMBER: " + str(conf.N_CS)
info_37 = "\nCHARGING STATIONS COORDINATES: " + str(
[(cell._x_coord, cell._y_coord, cell._z_coord) for cell in env.cs_cells])
info.append(info_37)
info38 = "\nTHRESHOLD BATTERY LEVEL PERCENTAGE CONSIDERED CRITICAL: " + str(conf.PERC_CRITICAL_BATTERY_LEVEL)
info.append(info38)
info39 = "\nBATTERY LEVELS WHEN CHARGING SHOWED EVERY " + str(
conf.SHOW_BATTERY_LEVEL_FOR_CHARGING_INSTANT) + " CHARGES"
info.append(info39)
info.append(info25)
if (conf.CREATE_ENODEB == True):
info26 = "\nENODEB: Yes"
else:
info26 = "\nENODEB: No"
info.append(info26)
info27 = "\n__________________________________________________________________________________________________________________\n\n"
info.append(info27)
file = open(join(saving_directory, "env_and_train_info.txt"), "w")
for i in info:
print(i)
file.write(i)
file.close()
# time.sleep(5)
def compute_subareas(area_width, area_height, x_split, y_split):
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Return 'xy_limits' and middle points for some subareas (of the whole considered map) which can be used for a Q-table initialization based on 'a priori knowledge'. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
subW_min = subH_min = 0
subW_max = area_width / x_split
subH_max = area_height / y_split
subareas_xy_limits = []
subareas_middle_points = []
for x_subarea in range(1, x_split + 1):
W_max = subW_max * x_subarea
for y_subarea in range(1, y_split + 1):
H_max = subH_max * y_subarea
x_limits = (subW_min, W_max)
y_limits = (subH_min, H_max)
subareas_xy_limits.append([x_limits, y_limits])
# Compute the middle point of each subarea:
subareas_middle_points.append((subW_min + (W_max - subW_min) / 2, subH_min + (H_max - subH_min) / 2))
subH_min = H_max
subW_min = W_max
subH_min = 0
return subareas_xy_limits, subareas_middle_points
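# Worked example (values follow directly from the loops above):
#   compute_subareas(40, 40, 2, 2) splits a 40x40 map into four 20x20 subareas and returns
#     limits  = [[(0, 20.0), (0, 20.0)], [(0, 20.0), (20.0, 40.0)],
#                [(20.0, 40.0), (0, 20.0)], [(20.0, 40.0), (20.0, 40.0)]]
#     middles = [(10.0, 10.0), (10.0, 30.0), (30.0, 10.0), (30.0, 30.0)]
#   i.e. one (x_limits, y_limits) pair and one middle point per subarea.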
def compute_prior_rewards(agent_pos_xy, best_prior_knowledge_points):
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Compute the values used to initialize the Q-table based on 'a priori initialization'. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
actions = env.q_table_action_set
# Initialize a random agent just to use easily the 'move' methods for 2D and 3D cases:
agent_test = agent.Agent((agent_pos_xy[0], agent_pos_xy[1], 0), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# Compute all the possible positions according to the available actions
if (conf.DIMENSION_2D == True):
        # Prior knowledge obviously does not take into account the battery level:
new_agent_pos_per_action = [agent_test.move_2D_unlimited_battery((agent_pos_xy[0], agent_pos_xy[1]), action) for
action in actions]
else:
new_agent_pos_per_action = [
agent_test.move_3D_unlimited_battery((agent_pos_xy[0], agent_pos_xy[1], agent_pos_xy[2]), action) for action
in actions]
prior_rewards = []
for pos in new_agent_pos_per_action:
current_distances_from_best_points = [LA.norm(np.array([pos[0], pos[1]]) - np.array(best_point)) for best_point
in best_prior_knowledge_points]
        # The reference distance for the reward in the current state is based on the distance between the agent and the closest 'best point':
current_reference_distance = min(current_distances_from_best_points)
current_normalized_ref_dist = current_reference_distance / diagonal_area_value
prior_rewards.append(1 - current_normalized_ref_dist)
return prior_rewards
# _________________________________ Check if Q-table initialization is based on 'a priori knowledge' _________________________________
if (conf.PRIOR_KNOWLEDGE == True):
subareas_limits, subareas_middle_points = compute_subareas(conf.CELLS_COLS, conf.CELLS_ROWS, conf.X_SPLIT,
conf.Y_SPLIT)
best_prior_knowledge_points = []
diagonal_area_value = sqrt(pow(conf.CELLS_ROWS, 2) + pow(conf.CELLS_COLS,
                               2))  # --> The diagonal is the maximum possible distance between two points, and then it will be used to normalize the distance when the table is initialized under the assumption of 'prior knowledge'. 0.5 is used because the UAV is assumed to move from the middle of a cell to the middle of another one.
for centroid in env_centroids:
centroid_x = centroid[0]
centroid_y = centroid[1]
for subarea in range(conf.N_SUBAREAS):
current_subarea = subareas_limits[subarea]
if (((centroid_x >= current_subarea[0][0]) and (centroid_x < current_subarea[0][1])) and (
(centroid_y >= current_subarea[1][0]) and (centroid_y < current_subarea[1][1]))):
best_prior_knowledge_points.append(subareas_middle_points[subarea])
DEFAULT_CLOSEST_CS = (None, None, None) if conf.DIMENSION_2D == False else (None, None)
# ___________________________________________________________________________________________________________________________________
def go_to_recharge(action, agent):
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Perform SIDE EFFECT on '_path_to_the_closest_CS' and '_current_pos_in_path_to_CS'. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# agent._coming_home is set equal to True inside 'move' method, which is called inside 'step' method (just after 'choose_action'). Thus, here it is not needed.
closest_CS = agent._cs_goal
# Compute the path to the closest CS only if needed:
if (closest_CS == DEFAULT_CLOSEST_CS):
_ = agent.compute_distances(
            cs_cells) # --> Update the closest CS just in case the agent needs to go to the CS (which is obviously the closest one).
agent._path_to_the_closest_CS = astar(env.cells_matrix, env.get_agent_pos(agent), agent._cs_goal)
agent._current_pos_in_path_to_CS = 0 # --> when compute the path, set to 0 the counter indicating the current position (which belongs to the computed path) you have to go.
def choose_action(uavs_q_tables, which_uav, obs, agent, battery_in_CS_history, cells_matrix):
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Return the chosen action according to the current set of actions, which depends on the considered scenario, battery level and other parameters: #
# SIDE EFFECT on different agents attributes are performed. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if ((ANALYZED_CASE == 1) or (ANALYZED_CASE == 3)): # --> UNLIMITED BATTERY:
# print("obsssssssssssssssssssssssssssssssssssss", obs)
if (conf.HOSP_SCENARIO == False):
obs = tuple([round(ob, 1) for ob in obs])
else: # --> LIMITED BATTERY:
if (conf.HOSP_SCENARIO == False):
coords = tuple([round(ob, 1) for ob in obs[0]])
obs = tuple([coords, obs[1]])
# obs = tuple([round(Decimal(ob), 1) for ob in obs])
# print("weeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", obs)
all_actions_values = [values for values in uavs_q_tables[which_uav][obs]]
current_actions_set = agent._action_set
if (conf.UAV_STANDARD_BEHAVIOUR == False):
if conf.UNLIMITED_BATTERY == False:
if (current_actions_set == action_set_min): # == ACTION_SPACE_3D_MIN
all_actions_values[GO_TO_CS_INDEX] = -inf
all_actions_values[CHARGE_INDEX] = -inf
elif (current_actions_set == come_home_set): # == ACTION_SPACE_3D_COME_HOME
if ((agent._coming_home == True) and (env.get_agent_pos(agent) != agent._cs_goal)):
action = GO_TO_CS_INDEX
if (conf.Q_LEARNING == True):
agent._current_pos_in_path_to_CS += 1
return action
elif (agent._coming_home == False):
all_actions_values[CHARGE_INDEX] = -inf
agent._required_battery_to_CS = agent.needed_battery_to_come_home()
elif ((agent._coming_home == True) and agent.check_if_on_CS()):
agent._n_recharges += 1
n_recharges = agent._n_recharges
if (n_recharges % conf.SHOW_BATTERY_LEVEL_FOR_CHARGING_INSTANT == 0):
battery_in_CS_history.append(agent._battery_level)
action = CHARGE_INDEX
return action
elif (current_actions_set == charging_set): # == ACTION_SPACE_3D_WHILE_CHARGING
if ((agent.check_if_on_CS()) and (agent._battery_level < conf.FULL_BATTERY_LEVEL)):
action = CHARGE_INDEX
# agent._coming_home is set equal to True inside 'move' method, which is called inside 'step' method (just after 'choose_action'). Thus, here it is not needed.
return action
elif (agent._battery_level >= conf.FULL_BATTERY_LEVEL):
agent._battery_level = conf.FULL_BATTERY_LEVEL
all_actions_values[CHARGE_INDEX] = -inf
all_actions_values[GO_TO_CS_INDEX] = -inf
rand = np.random.random()
if (conf.UAV_STANDARD_BEHAVIOUR == False):
if (rand > EPSILON):
# Select the best action so far (based on the current action set of the agent, assign actions values to a reduce action set):
action = np.argmax(all_actions_values)
# action = np.argmax(uavs_q_tables[0][obs])
# print(action, "action")
else:
n_actions_to_not_consider = n_actions - agent._n_actions
n_current_actions_to_consider = n_actions - n_actions_to_not_consider
prob_per_action = 1 / n_current_actions_to_consider
probabilities = [prob_per_action if act_idx < n_current_actions_to_consider else 0.0 for act_idx in
actions_indeces]
# Select the action randomly:
action = np.random.choice(actions_indeces, p=probabilities)
# action = np.random.randint(0, 4)
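            # Worked example (numbers are illustrative): with n_actions = 6 and
            # agent._n_actions = 4, prob_per_action = 0.25 and
            # probabilities = [0.25, 0.25, 0.25, 0.25, 0.0, 0.0], so only the actions
            # currently available to the agent can be sampled.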
else:
if (conf.UNLIMITED_BATTERY == False):
if ((agent._coming_home == True) and (env.get_agent_pos(agent) in cs_cells_coords_for_UAVs)):
action = conf.CHARGE
elif ((agent._charging == True) and (agent._battery_level < conf.FULL_BATTERY_LEVEL)):
action = conf.CHARGE
elif (agent._battery_level <= conf.CRITICAL_BATTERY_LEVEL):
action = GO_TO_CS
agent._current_pos_in_path_to_CS += 1
go_to_recharge(GO_TO_CS_INDEX, agent)
elif (which_uav == 2):
action = agent.action_for_standard_h(env.cells_matrix)
elif (which_uav == 1):
action = agent.action_for_standard_v(env.cells_matrix)
elif (which_uav == 0):
action = agent.action_for_standard_square_clockwise(env.cells_matrix)
else:
if (which_uav == 2):
action = agent.action_for_standard_h(env.cells_matrix)
elif (which_uav == 1):
action = agent.action_for_standard_v(env.cells_matrix)
elif (which_uav == 0):
action = agent.action_for_standard_square_clockwise(env.cells_matrix)
return action
if (action == GO_TO_CS_INDEX):
go_to_recharge(action, agent)
return action
# ________________________________________ Assign a number_id to each analyzed case in order to initialize the proper Q-table based on the current case: ________________________________________
ANALYZED_CASE = 0
if ((conf.USERS_PRIORITY == False) and (conf.CREATE_ENODEB == False)):
# 2D case with UNLIMITED UAVs battery autonomy:
if ((conf.DIMENSION_2D == True) and (conf.UNLIMITED_BATTERY == True)):
ANALYZED_CASE = 1
considered_case_directory = "2D_un_bat"
dimension_space = "2D"
battery_type = "Unlimited"
reward_func = "Reward function 1"
# 2D case with LIMITED UAVs battery autonomy:
elif ((conf.DIMENSION_2D == True) and (conf.UNLIMITED_BATTERY == False)):
ANALYZED_CASE = 2
considered_case_directory = "2D_lim_bat"
dimension_space = "2D"
battery_type = "Limited"
reward_func = "Reward function 2"
# 3D case with UNLIMITED UAVs battery autonomy:
elif ((conf.DIMENSION_2D == False) and (conf.UNLIMITED_BATTERY == True)):
ANALYZED_CASE = 3
considered_case_directory = "3D_un_bat"
dimension_space = "3D"
battery_type = "Unlimited"
reward_func = "Reward function 1"
# 3D case with LIMITED UAVs battery autonomy:
elif ((conf.DIMENSION_2D == False) and (conf.UNLIMITED_BATTERY == False)):
ANALYZED_CASE = 4
considered_case_directory = "3D_lim_bat"
dimension_space = "3D"
battery_type = "Limited"
reward_func = "Reward function 2"
if (conf.INF_REQUEST == True):
# setting_not_served_users = agent.Agent.set_not_served_users_inf_request
service_request_per_epoch = env.n_users * conf.ITERATIONS_PER_EPISODE
considered_case_directory += "_inf_req"
users_request = "Continue"
else:
# setting_not_served_users = agent.Agent.set_not_served_users
service_request_per_epoch = 0 # --> TO SET --> !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
considered_case_directory += "_lim_req"
users_request = "Discrete"
if (conf.MULTI_SERVICE == True):
considered_case_directory += "_multi_service_limited_bandwidth"
reward_func = "Reward function 3"
else:
# conf.CREATE_ENODEB = False
# conf.DIMENSION_2D = False
# conf.UNLIMITED_BATTERY = True
# conf.INF_REQUEST = True
# conf.STATIC_REQUEST = True
# conf.USERS_PRIORITY = False
assert False, "Environment parameters combination not implemented yet: conf.STATIC_REQUEST: %s, conf.DIMENSION_2D: %s, conf.UNLIMITED_BATTERY: %s, conf.INF_REQUEST: %s, conf.USERS_PRIORITY: %s, conf.CREATE_ENODEB: %s" % (
conf.STATIC_REQUEST, conf.DIMENSION_2D, conf.UNLIMITED_BATTERY, conf.INF_REQUEST, conf.USERS_PRIORITY,
conf.CREATE_ENODEB)
pass # TO DO --> !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ______________________________________________________________________________________________________________________________________________________________________________________________
# ________________________________________ Directory creation to save images and data: ________________________________________
considered_case_directory += "_" + str(conf.N_UAVS) + "UAVs" + "_" + str(len(env.cluster_centroids)) + "clusters"
cases_directory = "Cases"
if (conf.R_MAX == True):
sub_case_dir = "Max Initialization"
q_table_init = "Max Reward"
elif (conf.PRIOR_KNOWLEDGE == True):
sub_case_dir = "Prior Initialization"
q_table_init = "Prior Knowledge"
else:
sub_case_dir = "Random Initialization"
q_table_init = "Random Reward"
saving_directory = join(cases_directory, considered_case_directory, sub_case_dir)
if not isdir(cases_directory): mkdir(cases_directory)
if not isdir(join(cases_directory, considered_case_directory)): mkdir(join(cases_directory, considered_case_directory))
if not isdir(saving_directory): mkdir(saving_directory)
# _____________________________________________________________________________________________________________________________
# ________________________________________________________________________________ Q-table initialization: ________________________________________________________________________________________________
map_width = conf.CELLS_COLS
map_length = conf.CELLS_ROWS
map_height = conf.MAXIMUM_AREA_HEIGHT
# Agents initialization:
agents = env.agents
# It is called Q-table, but if conf.SARSA algorithm is used, it will be actually a conf.SARSA-table:
uavs_q_tables = None
uavs_e_tables = None
if uavs_q_tables is None:
print("Q-TABLES INITIALIZATION . . .")
uavs_q_tables = [None for uav in range(conf.N_UAVS)]
explored_states_q_tables = [None for uav in range(conf.N_UAVS)]
uavs_e_tables = [None for uav in range(conf.N_UAVS)]
uav_counter = 0
# print(uavs_q_tables)
# breakpoint()
for uav in range(conf.N_UAVS):
current_uav_q_table = {}
current_uav_explored_table = {}
for x_agent in np.arange(0, map_width + 1, 1): # range(map_width)
for y_agent in np.arange(0, map_length + 1, 1): # range(map_length)
x_agent = round(x_agent, 1)
y_agent = round(y_agent, 1)
# 2D case with UNLIMITED UAVs battery autonomy:
if (ANALYZED_CASE == 1):
if (conf.PRIOR_KNOWLEDGE == True):
prior_rewards = compute_prior_rewards((x_agent, y_agent), best_prior_knowledge_points)
current_uav_q_table[(x_agent, y_agent)] = [prior_rewards[action] for action in range(n_actions)]
elif (conf.R_MAX == True):
current_uav_q_table[(x_agent, y_agent)] = [max_value_for_Rmax for action in range(n_actions)]
elif (conf.HOSP_SCENARIO == True):
for prior in conf.HOSP_PRIORITIES:
# current_uav_q_table[((x_agent, y_agent), 1, prior)] = [np.random.uniform(0, 1) for action in range(n_actions)] #da vedere se mettere 0
# current_uav_q_table[((x_agent, y_agent), 0, prior)] = [np.random.uniform(0, 1) for action in range(n_actions)] # da vedere se mettere 0
current_uav_q_table[((x_agent, y_agent), 1, prior)] = [0 for action in range(
n_actions)] # da vedere se mettere 0
current_uav_q_table[((x_agent, y_agent), 0, prior)] = [0 for action in range(
n_actions)] # da vedere se mettere 0
# current_uav_q_table[((x_agent, y_agent), 1, 0)] = [np.random.uniform(0, 1) for action in range(n_actions)]
# current_uav_q_table[((x_agent, y_agent), 0, 0)] = [np.random.uniform(0, 1) for action in range(n_actions)]
current_uav_q_table[((x_agent, y_agent), 1, 0)] = [0 for action in range(n_actions)]
current_uav_q_table[((x_agent, y_agent), 0, 0)] = [0 for action in range(n_actions)]
else:
current_uav_q_table[(x_agent, y_agent)] = [np.random.uniform(0, 1) for action in
range(n_actions)]
current_uav_explored_table[(x_agent, y_agent)] = [False for action in range(n_actions)]
# 2D case with LIMITED UAVs battery autonomy:
elif (ANALYZED_CASE == 2):
for battery_level in np.arange(0, conf.FULL_BATTERY_LEVEL + 1, conf.PERC_CONSUMPTION_PER_ITERATION):
if (conf.PRIOR_KNOWLEDGE == True):
prior_rewards = compute_prior_rewards((x_agent, y_agent), best_prior_knowledge_points)
current_uav_q_table[((x_agent, y_agent), battery_level)] = [(1 - prior_rewards) for action
in range(n_actions)]
elif (conf.R_MAX == True):
current_uav_q_table[((x_agent, y_agent), battery_level)] = [max_value_for_Rmax for action in
range(n_actions)]
else:
current_uav_q_table[((x_agent, y_agent), battery_level)] = [np.random.uniform(0, 1) for
action in range(n_actions)]
current_uav_explored_table[(x_agent, y_agent), battery_level] = [False for action in
range(n_actions)]
# 3D case with UNLIMITED UAVs battery autonomy:
elif (ANALYZED_CASE == 3):
for z_agent in range(conf.MIN_UAV_HEIGHT, MAX_UAV_HEIGHT, conf.UAV_Z_STEP):
if (conf.PRIOR_KNOWLEDGE == True):
prior_rewards = compute_prior_rewards((x_agent, y_agent), best_prior_knowledge_points)
current_uav_q_table[(x_agent, y_agent, z_agent)] = [(1 - prior_rewards) for action in
range(n_actions)]
elif (conf.R_MAX == True):
current_uav_q_table[(x_agent, y_agent, z_agent)] = [max_value_for_Rmax for action in
range(n_actions)]
else:
current_uav_q_table[(x_agent, y_agent, z_agent)] = [np.random.uniform(0, 1) for action in
range(n_actions)]
current_uav_explored_table[(x_agent, y_agent, z_agent)] = [False for action in range(n_actions)]
# 3D case with LIMITED UAVs battery autonomy:
elif (ANALYZED_CASE == 4):
if (conf.UAV_STANDARD_BEHAVIOUR == False):
range_for_z = range(conf.MIN_UAV_HEIGHT, MAX_UAV_HEIGHT, conf.UAV_Z_STEP)
else:
range_for_z = range(conf.MIN_UAV_HEIGHT, MAX_UAV_HEIGHT + conf.UAV_Z_STEP + 1, conf.UAV_Z_STEP)
for z_agent in range_for_z:
for battery_level in np.arange(0, conf.FULL_BATTERY_LEVEL + 1,
conf.PERC_CONSUMPTION_PER_ITERATION):
if (conf.PRIOR_KNOWLEDGE == True):
prior_rewards = compute_prior_rewards((x_agent, y_agent), best_prior_knowledge_points)
current_uav_q_table[((x_agent, y_agent, z_agent), battery_level)] = [(1 - prior_rewards)
for action in
range(n_actions)]
elif (conf.R_MAX == True):
current_uav_q_table[((x_agent, y_agent, z_agent), battery_level)] = [max_value_for_Rmax
for action in
range(n_actions)]
else:
current_uav_q_table[((x_agent, y_agent, z_agent), battery_level)] = [
np.random.uniform(0, 1) for action in range(n_actions)]
current_uav_explored_table[(x_agent, y_agent, z_agent), battery_level] = [False for action
in
range(n_actions)]
uavs_q_tables[uav] = current_uav_q_table
# uavs_e_tables[uav] = current_uav_q_table
if conf.HOSP_SCENARIO == False:
explored_states_q_tables[uav] = current_uav_explored_table
print("Q-Table for Uav ", uav, " created")
print("Q-TABLES INITIALIZATION COMPLETED.")
else:
with open(start_q_table, "rb") as f:
q_table = pickle.load(f)
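# Q-table layout note (illustrative; key shapes are taken from the initialization above):
#   - hospital scenario: keys are ((x, y), flag, priority) with flag in {0, 1}, e.g.
#         uavs_q_tables[0][((3, 2), 1, 0)]  # -> list of n_actions Q-values
#     (the exact semantics of the 0/1 flag are not documented here and are left as-is)
#   - limited-battery 3D case: keys are ((x, y, z), battery_level) instead.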
# _________________________________________________________________________________________________________________________________________________________________________________________________________
show_and_save_info(q_table_init, uavs_q_tables[0], dimension_space, battery_type, users_request, reward_func,
saving_directory)
q_tables_dir = "QTables"
q_tables_directory = join(saving_directory, q_tables_dir)
uav_ID = "UAV"
uavs_directories = [0 for uav in range(conf.N_UAVS)]
q_tables_directories = [0 for uav in range(conf.N_UAVS)]
for uav in range(1, conf.N_UAVS + 1):
current_uav_dir = join(saving_directory, uav_ID + str(uav))
if not isdir(current_uav_dir): mkdir(current_uav_dir)
uavs_directories[uav - 1] = current_uav_dir
current_q_table_dir = join(current_uav_dir, q_tables_dir)
q_tables_directories[uav - 1] = join(current_uav_dir, q_tables_dir)
if not isdir(current_q_table_dir): mkdir(current_q_table_dir)
uav_directory = uav_ID
# ________________________________________ Variable initialization before training loop: __________________________________________
best_reward_episode = [[] for uav in range(conf.N_UAVS)]
uavs_episode_rewards = [[] for uav in range(conf.N_UAVS)]
users_in_foots = [[] for uav in range(conf.N_UAVS)]
best_policy = [0 for uav in range(conf.N_UAVS)]
best_policy_obs = [[] for uav in range(conf.N_UAVS)]
obs_recorder = [[() for it in range(conf.ITERATIONS_PER_EPISODE)] for uav in range(conf.N_UAVS)]
avg_QoE1_per_epoch = [0 for ep in range(conf.EPISODES)]
avg_QoE2_per_epoch = [0 for ep in range(conf.EPISODES)]
avg_QoE3_per_epoch = [0 for ep in range(conf.EPISODES)]
q_values = [[] for episode in range(conf.N_UAVS)]
if (conf.DIMENSION_2D == True):
GO_TO_CS_INDEX = conf.GO_TO_CS_2D_INDEX
CHARGE_INDEX = conf.CHARGE_2D_INDEX
else:
CHARGE_INDEX = conf.CHARGE_3D_INDEX
CHARGE_INDEX_WHILE_CHARGING = conf.CHARGE_3D_INDEX_WHILE_CHARGING
GO_TO_CS_INDEX = conf.GO_TO_CS_3D_INDEX
GO_TO_CS_INDEX_HOME_SPACE = conf.GO_TO_CS_3D_INDEX_HOME_SPACE
# time.sleep(5)
epsilon_history = [0 for ep in range(conf.EPISODES)]
crashes_history = [0 for ep in range(conf.EPISODES)]
battery_in_CS_history = [[] for uav in range(conf.N_UAVS)]
n_active_users_per_epoch = [0 for ep in range(conf.EPISODES)]
provided_services_per_epoch = [[0, 0, 0] for ep in range(conf.EPISODES)]
n_active_users_per_episode = [0 for ep in range(conf.EPISODES)]
UAVs_used_bandwidth = [[0 for ep in range(conf.EPISODES)] for uav in range(conf.N_UAVS)]
users_bandwidth_request_per_UAVfootprint = [[0 for ep in range(conf.EPISODES)] for uav in range(conf.N_UAVS)]
MOVE_USERS = False
# TEMPORAL GOAL --> !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if (conf.HOSP_SCENARIO == True):
print("Computing the temporal goal . . .")
tg = [RestrainingBolt.make_uavs_goal() for uav in range(conf.N_UAVS)] # tg = RestrainingBolt.make_uavs_goal()
# print("GUARDA QUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA :", tgs)
env = UAVsTemporalWrapper(env, temp_goals=tg) # env = UAVsTemporalWrapper(env, temp_goals=[tg])
print("Temporal goal computed.")
# q_values_current_episode = [0 for uav in range(conf.N_UAVS)]
# ________________________________________________________________________________ Training start: _____________________________________________________________________________________________________________
print("\nSTART TRAINING . . .\n")
for episode in range(1, conf.EPISODES + 1):
# Look here
# reset_uavs(agents[0]) # To decide whether to make two separate functions for the battery and for the position reset at each episode
# reset_uavs(agents[1])
if conf.HOSP_SCENARIO == True:
reset_priority()
env.reset()
else:
if (conf.STATIC_REQUEST == False):
if (episode % conf.MOVE_USERS_EACH_N_EPOCHS == 0):
env.compute_users_walk_steps()
MOVE_USERS = True
else:
MOVE_USERS = False
if (conf.INF_REQUEST == False):
if (episode % conf.UPDATE_USERS_REQUESTS_EACH_N_ITERATIONS == 0):
env.update_users_requests(env.users)
epsilon_history[episode - 1] = EPSILON
print("| EPISODE: {ep:3d} | Epsilon: {eps:6f}".format(ep=episode, eps=EPSILON), "BEST POLICY:", best_policy)
QoEs_store = [[], []] # --> Store QoE1 and QoE2
current_QoE3 = 0
users_served_time = 0
users_request_service_elapsed_time = 0
current_provided_services = [0, 0, 0]
current_UAV_bandwidth = [0 for uav in range(conf.N_UAVS)]
current_requested_bandwidth = [0 for uav in range(conf.N_UAVS)]
uavs_episode_reward = [0 for uav in range(conf.N_UAVS)]
crashes_current_episode = [False for uav in range(conf.N_UAVS)]
q_values_current_episode = [0 for uav in range(conf.N_UAVS)]
n_active_users_current_it = 0
tr_active_users_current_it = 0
ec_active_users_current_it = 0
dg_active_users_current_it = 0
if conf.SARSA_lambda == True:
for i in uavs_e_tables[0]:
# print(i, uavs_e_tables[0][i])
uavs_e_tables[0][i] = [0 for action in range(5)]
# Initialize the e_tables (eligibility traces) at each episode
# 30minutes
for i in range(conf.ITERATIONS_PER_EPISODE):
# print("\nITERATIONS: ", i, "----------------------------------------------")
if conf.HOSP_SCENARIO == False:
if (conf.INF_REQUEST == True):
n_active_users = env.n_users
else:
n_active_users, tr_active_users, ec_active_users, dg_active_users, n_tr_served, n_ec_served, n_dg_served = env.get_active_users()
tr_active_users_current_it += tr_active_users
ec_active_users_current_it += ec_active_users
dg_active_users_current_it += dg_active_users
n_active_users_current_it += n_active_users
if (MOVE_USERS == True):
env.move_users(i + 1)
env.all_users_in_all_foots = []
for UAV in range(conf.N_UAVS):
# print("ID AGENTE: ", agents[UAV]._uav_ID)
# Skip analyzing the current UAV features until it starts to work (you can set a delayed start for each uav):
current_iteration = i + 1
if (episode == 1):
# Case in which the UAVs from the 2nd to the n-th are considered:
if (UAV > 0):
if (current_iteration != (conf.DELAYED_START_PER_UAV * (UAV))):
pass
# continue
# drone_pos = (UAVS_POS[0][0], UAVS_POS[0][1])
# env.agents_paths[UAV][i] = env.get_agent_pos(agents[UAV])
if (conf.NOISE_ON_POS_MEASURE == True):
drone_pos = env.noisy_measure_or_not(env.get_agent_pos(agents[UAV]))
else:
drone_pos = env.get_agent_pos(agents[UAV]) # env.agents_paths[UAV][i]
# print(drone_pos, "drone_posdrone_posdrone_posdrone_posdrone_posdrone_posdrone_posdrone_posdrone_posdrone_posdrone_pos")
if (conf.HOSP_SCENARIO == True):
if (conf.UNLIMITED_BATTERY == True):
obs = (
drone_pos, agents[UAV].beep, hosp_f.get_color_id(env.cells_matrix[drone_pos[1]][drone_pos[0]]._priority))
else:
obs = (drone_pos, agents[UAV]._battery_level, agents[UAV].beep,
hosp_f.get_color_id(env.cells_matrix[drone_pos[1]][drone_pos[0]]._priority))
else:
if (conf.UNLIMITED_BATTERY == True):
obs = (drone_pos)
else:
obs = (drone_pos, agents[
UAV]._battery_level) # --> The observation will be different when switching from the 2D to the 3D scenario and vice versa.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# This 'obs' is passed to 'choose_action' without 'beep' and without the priority color (because in 'choose_action' these two values are not considered yet);
# 'beep' and the priority color are instead returned in 'obs_' by 'step_agent(..)'.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if (conf.UNLIMITED_BATTERY == False):
env.set_action_set(agents[UAV])
action = choose_action(uavs_q_tables, UAV, obs, agents[UAV], battery_in_CS_history[UAV], env.cells_matrix)
obs_, reward, done, info = env.step_agent(agents[UAV], action)
##print(reward, "env_wrapper")
# -----------------------------------------------NO-INTERFERENCE-REWARD--------------------------------------------------
if conf.NO_INTERFERENCE_REWARD == True:
drone_pos_ = list(drone_pos)
if drone_pos_ in traj_csv:
reward += -1.0
# print("drone_pos", drone_pos, "\n traj_csv", traj_csv)
# -----------------------------------------------------------------------------------------------------------------------
if conf.HOSP_SCENARIO == False:
crashes_current_episode[UAV] = agents[UAV]._crashed
print(" - Iteration: {it:1d} - Reward per UAV {uav:1d}: {uav_rew:6f}".format(it=i + 1, uav=UAV + 1,
uav_rew=reward), end="\r",
flush=True)
if (conf.UAV_STANDARD_BEHAVIOUR == True):
action = conf.ACTION_SPACE_STANDARD_BEHAVIOUR.index(action)
if ((ANALYZED_CASE == 1) or (ANALYZED_CASE == 3)): # --> UNLIMITED BATTERY
if (conf.HOSP_SCENARIO == False):
obs = tuple([round(ob, 1) for ob in obs])
obs_ = tuple([round(ob, 1) for ob in obs_])
# obs = tuple([round(ob, 1) for ob in obs])
# obs_ = tuple([round(ob, 1) for ob in obs_[0][0]]) #CASO MARCO
# obs_ = tuple([round(ob, 1) for ob in (obs_[0][0]['x'], obs_[0][0]['y'])]) #CASO MARCO
else:
# print("OBSSSSSSSSSSSSSSSSSSSSSSSSSS", obs)
obs_ = ((obs_[0][0]['x'], obs_[0][0]['y']), obs_[0][0]['beep'], obs_[0][0]['color'])
else: # --> LIMITED BATTERY
coords = tuple([round(ob, 1) for ob in obs[0]])
obs = tuple([coords, obs[1]])
coords_ = tuple([round(ob, 1) for ob in obs_[0]])
obs_ = tuple([coords_, obs_[1]])
if conf.HOSP_SCENARIO == False:
if not explored_states_q_tables[UAV][obs_][action]:
explored_states_q_tables[UAV][obs_][action] = True
# print("1111111111111111111111111111111111", obs, obs_)
if conf.HOSP_SCENARIO == True:
obs_recorder[UAV][i] = obs_ # conf.HOSP_SCENARIO == FALSE: obs_recorder[UAV][i] = obs
# print("1 - obs_recorder[UAV]", obs_recorder[UAV])
uavs_episode_reward[UAV] += reward
else:
obs_recorder[UAV][i] = obs
if (conf.UNLIMITED_BATTERY == False):
if (info == "IS CHARGING"):
# print("sbagliatoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo")
# breakpoint()
if uavs_episode_reward[UAV] > best_policy[UAV]:
best_policy[UAV] = uavs_episode_reward[UAV]
best_policy_obs[UAV] = obs_recorder[UAV]
obs_recorder[UAV] = [() for i in range(conf.ITERATIONS_PER_EPISODE)]
continue
else: # --> i.e., crashed case:
obs_recorder[UAV] = [() for i in range(conf.ITERATIONS_PER_EPISODE)]
else:
# print("0 - best_policy[UAV]", best_policy[UAV], ">", "uavs_episode_reward[UAV]???", uavs_episode_reward[UAV])
if conf.HOSP_SCENARIO == True:
if (current_iteration == conf.ITERATIONS_PER_EPISODE or done):
# print(uavs_episode_reward[UAV], best_policy[UAV])
if uavs_episode_reward[UAV] > best_policy[UAV]:
# print("2 - obs_recorder[UAV]", obs_recorder[UAV])
best_policy[UAV] = uavs_episode_reward[UAV]
best_policy_obs[UAV] = obs_recorder[UAV]
# print("2 - best_policy[UAV]", best_policy[UAV])
# print("SALVO LA POLICY !!!!!!!!!!!!!!!!!!!!!!!!!")
obs_recorder[UAV] = [() for i in range(conf.ITERATIONS_PER_EPISODE)]
# print("obs_recorder[UAV]", obs_recorder[UAV])
# print("best_policy_obs[UAV]", best_policy_obs[UAV])
# print("best_policy[UAV]", best_policy[UAV])
# print("best_policy_obs[UAV]", best_policy_obs[UAV])
else:
if (current_iteration == conf.ITERATIONS_PER_EPISODE):
if uavs_episode_reward[UAV] > best_policy[UAV]:
best_policy[UAV] = uavs_episode_reward[UAV]
best_policy_obs[UAV] = obs_recorder[UAV]
obs_recorder[UAV] = [() for i in range(conf.ITERATIONS_PER_EPISODE)]
# Set all the users which could no longer be served after the current UAV action (use different arguments according to the considered case!):
if conf.HOSP_SCENARIO == False:
agent.Agent.set_not_served_users(env.users, env.all_users_in_all_foots, UAV + 1, QoEs_store, i + 1,
current_provided_services)
# setting_not_served_users(env.users, env.all_users_in_all_foots, UAV+1, QoEs_store, i+1) # --> This makes a SIDE EFFECT on users by updating their info.
if not done or conf.HOSP_SCENARIO == False:
if (conf.Q_LEARNING == True):
# print("obs____________________________", obs_)
# print("obssssssssssssssssssssssssssssss", obs)
max_future_q = np.max(uavs_q_tables[UAV][obs_])
current_q = uavs_q_tables[UAV][obs_][action]
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
else:
if (conf.SARSA == True):
action_ = choose_action(uavs_q_tables, UAV, obs_, agents[UAV], battery_in_CS_history[UAV],
env.cells_matrix)
future_reward = uavs_q_tables[UAV][obs_][action_]
current_q = uavs_q_tables[UAV][obs_][action]
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * future_reward)
if (conf.SARSA_lambda == True):
"""Implements conf.SARSA-Lambda action value function update.
e(s,a) = lambda*gamma*e(s,a) + 1(s_t = s, a_t = a).
delta_t = reward_t + gamma*q(s_t+1, a_t+1) - q(s_t, a_t).
q(s,a) = q(s,a) + alpha*e(s,a)*delta_t.
Here we assume gamma=1 (undiscounted).
alpha = 1 / N(s,a)."""
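# A hedged one-step sketch of the update described in the docstring above, on a toy two-state
# table (the numbers and state names are illustrative, not taken from this training loop):
#   Q = {s0: [0.0, 0.0], s1: [0.0, 0.0]};  E = {s0: [0.0, 0.0], s1: [0.0, 0.0]}
#   take a0 in s0, observe reward r = 1, land in s1 and pick a1, then:
#     delta = r + gamma * Q[s1][a1] - Q[s0][a0]     # = 1.0 with all-zero tables
#     E[s0][a0] = 1                                 # mark the visited state-action pair
#     Q[s][a] += alpha * delta * E[s][a]            # for every (s, a)
#     E[s][a] *= gamma * lambda                     # decay the traces before the next step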
lambda_value = 0.9
action_ = choose_action(uavs_q_tables, UAV, obs_, agents[UAV], battery_in_CS_history[UAV],
env.cells_matrix)
future_reward = uavs_q_tables[UAV][obs_][action_]
current_q = uavs_q_tables[UAV][obs][action]
current_e = uavs_e_tables[UAV][obs][action]
# -------------------------PROVA1----------------------------------------------------------------------------
'''delta = reward + DISCOUNT * future_reward - current_q
print("uavs_e_tables[UAV][obs][action]", uavs_e_tables[UAV][obs][action])
current_e += 1
print("uavs_e_tables[UAV][obs][action]", uavs_e_tables[UAV][obs][action])
new_q = current_q + (1 - LEARNING_RATE) * delta * current_e
current_e = DISCOUNT * lambda_value * current_e'''
# -------------------------PROVA2----------------------------------------------------------------------------
print("ooooo", current_q)
# Computing the error
delta = reward + DISCOUNT * future_reward - current_q
# Setting the eligibility traces
print("222222", uavs_e_tables[UAV][obs])
uavs_e_tables[UAV][obs] = [i * DISCOUNT * lambda_value for i in uavs_e_tables[UAV][obs]]
print("333333", uavs_e_tables[UAV][obs])
uavs_e_tables[UAV][obs][action] = 1
# Updating the Q values
q_tabl = [i * (1 - LEARNING_RATE) * delta for i in uavs_e_tables[UAV][obs]]
uavs_q_tables[UAV][obs] = [x
+ y for x, y in zip(uavs_q_tables[UAV][obs], q_tabl)]
print("44444", uavs_q_tables[UAV][obs])
print("55555", uavs_e_tables[UAV][obs])
print("fine", uavs_e_tables[UAV][obs][action])
# uavs_e_tables[UAV][obs][action] = uavs_e_tables[UAV][obs][action] + (1 - LEARNING_RATE) * delta
# breakpoint()
'''for obs in (uavs_q_tables[UAV][obs]):
for action in (uavs_q_tables[UAV][action]):
print(uavs_q_tables[UAV][obs][action])
uavs_q_tables[UAV][s][a] += (1 - LEARNING_RATE) * delta * uavs_e_tables[UAV][s][a]
uavs_e_tables[UAV][s][a] = DISCOUNT * lambda_value * uavs_e_tables[UAV][s][a]'''
# -------------------------PROVA2----------------------------------------------------------------------------
elif (conf.SARSA_lambda == True) and (conf.SARSA == True) or (conf.SARSA_lambda == True) and (
conf.Q_LEARNING == True) or (conf.SARSA == True) and (conf.Q_LEARNING == True):
assert False, "Invalid algorithm selection."
if conf.SARSA_lambda == True:
q_values_current_episode[UAV] = uavs_q_tables[UAV][obs][action]
uavs_q_tables[UAV][obs][action] = uavs_q_tables[UAV][obs][action]
else:
q_values_current_episode[UAV] = new_q
uavs_q_tables[UAV][obs][action] = new_q
if conf.HOSP_SCENARIO == False:
# uavs_episode_reward[UAV] += reward
uavs_episode_reward[UAV] += reward
current_UAV_bandwidth[UAV] += conf.UAV_BANDWIDTH - agents[UAV]._bandwidth
current_requested_bandwidth[UAV] += env.current_requested_bandwidth
if conf.HOSP_SCENARIO == False:
reset_uavs(agents[UAV])
if done and conf.HOSP_SCENARIO == True:
# print("uavs_episode_reward[UAV]", uavs_episode_reward[UAV], reward)
# reset_uavs(agents[1])
break
if done and conf.HOSP_SCENARIO == True:
# print("uavs_episode_reward[UAV]", uavs_episode_reward[UAV], reward)
reset_uavs(agents[UAV])
break
# reset_uavs(agents[UAV])
if conf.HOSP_SCENARIO == False:
current_QoE3 += len(env.all_users_in_all_foots) / len(env.users) if len(
env.users) != 0 else 0 # --> Percentage of covered users (including also the users which are not requesting a service but which are considered to have the communication with UAVs on)
n_active_users_per_episode[episode - 1] = n_active_users
if conf.HOSP_SCENARIO == False:
if (conf.INF_REQUEST == False):
tr_active_users_current_ep = tr_active_users_current_it / conf.ITERATIONS_PER_EPISODE
ec_active_users_current_ep = ec_active_users_current_it / conf.ITERATIONS_PER_EPISODE
dg_active_users_current_ep = dg_active_users_current_it / conf.ITERATIONS_PER_EPISODE
for service_idx in range(N_SERVICES):
n_users_provided_for_current_service = current_provided_services[
service_idx] / conf.ITERATIONS_PER_EPISODE
if (service_idx == 0):
n_active_users_per_current_service = tr_active_users_current_ep # tr_active_users
elif (service_idx == 1):
n_active_users_per_current_service = ec_active_users_current_ep # ec_active_users
elif (service_idx == 2):
n_active_users_per_current_service = dg_active_users_current_ep # dg_active_users
perc_users_provided_for_current_service = n_users_provided_for_current_service / n_active_users_per_current_service if n_active_users_per_current_service != 0 else 0
provided_services_per_epoch[episode - 1][service_idx] = perc_users_provided_for_current_service
if (conf.UNLIMITED_BATTERY == False):
crashes_history[episode - 1] = crashes_current_episode
if conf.HOSP_SCENARIO == False:
n_active_users_current_ep = n_active_users_current_it / conf.ITERATIONS_PER_EPISODE
users_served_time = sum(QoEs_store[0]) / (len(QoEs_store[0])) if len(QoEs_store[0]) != 0 else 0 # --> Divided by its length, because this measures the percentage to which a service is completed (once it starts).
users_request_service_elapsed_time = sum(QoEs_store[1]) / n_active_users_current_ep if n_active_users_current_ep != 0 else 0 # --> Divided by the # of active users, because this measures the avg elapsed time (among the active users) between a service request and its provision.
QoE3_for_current_epoch = current_QoE3 / conf.ITERATIONS_PER_EPISODE
User.avg_QoE(episode, users_served_time, users_request_service_elapsed_time, QoE3_for_current_epoch,
avg_QoE1_per_epoch, avg_QoE2_per_epoch,
avg_QoE3_per_epoch) # --> users_request_service_elapsed_time has to be divided by the iterations number if you do not have the mean value!!!
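# Descriptive note on the three metrics computed above:
# QoE1 = average completion percentage of a service once it has started,
# QoE2 = average elapsed time between a service request and its provision (per active user),
# QoE3 = average fraction of users covered by at least one UAV footprint.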
print(" - Iteration: {it:1d} - Reward per UAV {uav:1d}: {uav_rew:6f}".format(it=i + 1, uav=UAV + 1, uav_rew=reward))
for UAV in range(conf.N_UAVS):
if conf.HOSP_SCENARIO == False:
UAVs_used_bandwidth[UAV][episode - 1] = current_UAV_bandwidth[UAV] / conf.ITERATIONS_PER_EPISODE
users_bandwidth_request_per_UAVfootprint[UAV][episode - 1] = current_requested_bandwidth[
UAV] / conf.ITERATIONS_PER_EPISODE
best_reward_episode[UAV].append(best_policy[UAV])
current_mean_reward = uavs_episode_reward[UAV] / conf.ITERATIONS_PER_EPISODE
uavs_episode_rewards[UAV].append(current_mean_reward)
current_q_mean = q_values_current_episode[UAV] / conf.ITERATIONS_PER_EPISODE
q_values[UAV].append(current_q_mean)
print(" - Mean reward per UAV{uav:3d}: {uav_rew:6f}".format(uav=UAV + 1, uav_rew=current_mean_reward), end=" ")
print()
# env.render(saving_directory, episode, 1)
# breakpoint()
# print("\nRendering animation for episode:", episode)
# env.render()
# print("Animation rendered.\n")
# if ((episode%500)==0): #(episode%250)==0 # --> You can change the condition to show (to save actually) or not the scenario of the current episode.
if conf.HOSP_SCENARIO == False:
env.render(saving_directory, episode, 500)
# env.render(saving_directory, episode, 10000) # --> DO not work properly when using UAVsTemporalWrapper --> !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# breakpoint()
'''if ((episode%500)==0):
env.render()
plot.users_wait_times(env.n_users, env.users, saving_directory, episode)'''
# env.agents_paths = [[0 for iteration in range(conf.ITERATIONS_PER_EPISODE)] for uav in range(conf.N_UAVS)] # --> Actually it should be useless because the values are overwritten over the previous ones.
if conf.HOSP_SCENARIO == False:
n_discovered_users = len(env.discovered_users)
if ((n_discovered_users / env.n_users) >= 0.85):
EPSILON = EPSILON * EPSILON_DECREMENT if EPSILON > EPSILON_MIN else EPSILON_MIN
else:
EPSILON = EPSILON * EPSILON_DECREMENT if EPSILON > EPSILON_MIN else EPSILON_MIN2
# print(fine)
else:
print("best_policy", best_policy[0])
if (EPSILON_DECR == True):
if ((best_policy[0] / 20) >= 0.95):
EPSILON = EPSILON * EPSILON_DECREMENT if EPSILON > EPSILON_MIN else EPSILON_MIN
else:
EPSILON = EPSILON * EPSILON_DECREMENT if EPSILON > EPSILON_MIN else EPSILON_MIN2
else:
pass
# EPSILON_MIN = 0.1
# EPSILON_MIN2 = 0.01
'''if ((best_policy[0]/ 20) >= 0.95):
if EPSILON > EPSILON_MIN2:
EPSILON = EPSILON * EPSILON_DECREMENT
else:
EPSILON_MIN2
else:
if EPSILON > EPSILON_MIN:
EPSILON = EPSILON * EPSILON_DECREMENT
else:
EPSILON_MIN'''
if (conf.REDUCE_ITERATION_PER_EPISODE == True):
if (done and uavs_episode_reward[UAV] / 20 >= 0.95): # best_policy[UAV] ):
conf.ITERATIONS_PER_EPISODE = current_iteration
if (conf.ITERATIONS_PER_EPISODE < 90):
print("conf.ITERATIONS_PER_EPISODE:", conf.ITERATIONS_PER_EPISODE)
if (uavs_episode_reward[UAV] > 2 and conf.ITERATIONS_PER_EPISODE > current_iteration): # best_policy[UAV] ):
conf.ITERATIONS_PER_EPISODE = current_iteration
conf.EPISODES_BP = episode
if (conf.ITERATIONS_PER_EPISODE < 90):
print("Number of iterations (best policy):", conf.ITERATIONS_PER_EPISODE, "Episode: ", conf.EPISODES_BP)
file = open(join(saving_directory, "env_and_train_info.txt"), "a")
print("\nTRAINING COMPLETED.\n")
# breakpoint()
# ________________________________________________________________________________ Training end ________________________________________________________________________________________________________________
# ________________________________________________________________________________ Results Saving: ______________________________________________________________________________________________________
for uav_idx in range(conf.N_UAVS):
if (conf.UNLIMITED_BATTERY == False):
print("\nSaving battery levels when start to charge . . .")
plot.battery_when_start_to_charge(battery_in_CS_history, uavs_directories)
print("Battery levels when start to charge saved.")
print("Saving UAVs crashes . . .")
plot.UAVS_crashes(conf.EPISODES, crashes_history, saving_directory)
print("UAVs crashes saved.")
if conf.HOSP_SCENARIO == False:
list_of_lists_of_actions = list(explored_states_q_tables[uav_idx].values())
actions_values = [val for sublist in list_of_lists_of_actions for val in sublist]
file.write("\nExploration percentage of the Q-Table for UAV:\n")
actual_uav_id = uav_idx + 1
value_of_interest = np.mean(actions_values)
file.write(str(actual_uav_id) + ": " + str(value_of_interest))
print("Exploration percentage of the Q-Table for UAV:\n", actual_uav_id, ":", value_of_interest)
print("Saving the best policy for each UAV . . .")
np.save(uavs_directories[uav_idx] + f"/best_policy.npy", best_policy_obs[uav_idx])
print("Best policies saved.")
for qoe_num in range(1, 4):
file.write("\nQoE" + str(qoe_num) + " : ")
if (qoe_num == 1):
file.write(str(np.mean(avg_QoE1_per_epoch)))
if (qoe_num == 2):
file.write(str(np.mean(avg_QoE2_per_epoch)))
if (qoe_num == 3):
file.write(str(np.mean(avg_QoE3_per_epoch)))
file.close()
print("\nBEST POLICY:\n")
print(len(best_policy_obs))
print(best_policy_obs)
print("\n")
# TO BE CLARIFIED
if conf.HOSP_SCENARIO == False:
print("\nSaving QoE charts, UAVs rewards and Q-values . . .")
legend_labels = []
plot.QoE_plot(avg_QoE1_per_epoch, conf.EPISODES, join(saving_directory, "QoE1"), "QoE1")
plot.QoE_plot(avg_QoE2_per_epoch, conf.EPISODES, join(saving_directory, "QoE2"), "QoE2")
plot.QoE_plot(avg_QoE3_per_epoch, conf.EPISODES, join(saving_directory, "QoE3"), "QoE3")
if (conf.INF_REQUEST == False):
plot.bandwidth_for_each_epoch(conf.EPISODES, saving_directory, UAVs_used_bandwidth,
users_bandwidth_request_per_UAVfootprint)
plot.users_covered_percentage_per_service(provided_services_per_epoch, conf.EPISODES,
join(saving_directory, "Services Provision"))
for uav in range(conf.N_UAVS):
# plot.UAVS_reward_plot(conf.EPISODES, best_reward_episode, saving_directory, best_reward_episodee=True)
plot.UAVS_reward_plot(conf.EPISODES, uavs_episode_rewards, saving_directory)
plot.UAVS_reward_plot(conf.EPISODES, q_values, saving_directory, q_values=True)
print("Qoe charts, UAVs rewards and Q-values saved.")
print("\nSaving Epsilon chart trend . . .")
plot.epsilon(epsilon_history, conf.EPISODES, saving_directory)
print("Epsilon chart trend saved.")
print("Saving Q-Tables for episode", episode, ". . .")
for uav in range(1, conf.N_UAVS + 1):
saving_qtable_name = q_tables_directories[uav - 1] + f"/qtable-ep{episode}.npy"
np.save(saving_qtable_name, uavs_q_tables)
print("Q-Tables saved.\n")
with open(join(saving_directory, "q_tables.pickle"), 'wb') as f:
pickle.dump(uavs_q_tables, f)
'''print("Saving Min and Max values related to the Q-Tables for episode", episode, ". . .")
for uav in range(1, conf.N_UAVS+1):
plot.actions_min_max_per_epoch(uavs_q_tables, q_tables_directories[uav-1], episode, uav)
print("Min and Max values related to the Q-Tables for episode saved.\n")'''
policy_n.append(best_policy_obs)
print("Policy:", policy_n, '\n')
print(best_policy_obs, "best_policy_obs", len(best_policy_obs[0]))
for i in range(len(best_policy_obs[0])): # Remove the empty tuples, if there are any.
if (best_policy_obs[0][i] != ()):
if conf.HOSP_SCENARIO == True:
traj_j.append(list(best_policy_obs[0][i][0]))
plot_policy.append(best_policy_obs[0][i][0])
traj_j_ID.append(list(best_policy_obs[0][i][0]))
else:
traj_j.append(list(best_policy_obs[0][i]))
plot_policy.append(best_policy_obs[0][i])
traj_j_ID.append(list(best_policy_obs[0][i]))
else:
break
policy_per_plot.append(plot_policy)
plot_policy = []
print("TRAJ_J:", traj_j, '\n')
print("policy_per_plot:", policy_per_plot, '\n')
traj_j_ = [traj_j]
traj = []
for i in range(len(traj_j_)):
for k in range(len(traj_j_[i])):
traj.append(traj_j_[i][k])
print("Old_traj_j", traj, '\n')
for i in range(len(traj_j_ID)):
traj_j_ID[i].insert(0, j)
traj_j_ID[i].insert(1, i)
traj_j_ID[i].insert(4, conf.UAV_HEIGHT_Z) # --> ?????????????????????????????????????????????????
CSV_writer("best_policy_obs.csv", best_policy_obs) # Policy con hovering su ospedali
with open('./' + BP_DIRECTORY_NAME + "policy_per_plot_.csv", "a") as my_csv:
csvWriter = csv.writer(my_csv, delimiter=',')
csvWriter.writerows(policy_per_plot)
CSV_writer("trajectory.csv", traj)
out = open('./' + BP_DIRECTORY_NAME + 'Best_policys.csv', 'a') # Best policy of the mission, with IDs
for row in traj_j_ID:
for column in row:
out.write('%d, ' % column)
out.write('\n')
out.close()
generate_trajectories('UAV-' + str(j) + '_BestPolicy.csv',
traj_j_ID) # Best policy stored inside the folder named after the UAV 'ID'
env.close()
if conf.HOSP_SCENARIO == True:
env.reset()
if j != conf.N_MISSION-1:
import shutil
myfile_initial_users = "./initial_users/"
myfile_map_data = "./map_data/"
myfile_map_status = "./map_status/"
myfile__pycache__ = "./__pycache__/"
shutil.rmtree(myfile_initial_users, ignore_errors=True)
shutil.rmtree(myfile_map_data, ignore_errors=True)
shutil.rmtree(myfile_map_status, ignore_errors=True)
shutil.rmtree(myfile__pycache__, ignore_errors=True)
else:
pass
#breakpoint()
# ________________________________________________________________________________________________________________________________________________________________________________________________________
| 50.03709
| 366
| 0.588679
|
6f8939dc27bfbddb906bd76cdcf69403ed788c1e
| 12,113
|
py
|
Python
|
owtf/net/scanner.py
|
Udbhavbisarya23/owtf
|
27623937677caf975569f8de8af7983ca57611bc
|
[
"BSD-3-Clause"
] | 1,514
|
2015-01-15T18:42:58.000Z
|
2022-03-25T08:14:40.000Z
|
owtf/net/scanner.py
|
justdvnsh/owtf
|
3a543b4eb2a7ad67155eb96dd2d99efbc181498d
|
[
"BSD-3-Clause"
] | 652
|
2015-01-09T18:27:37.000Z
|
2022-03-21T18:41:01.000Z
|
owtf/net/scanner.py
|
justdvnsh/owtf
|
3a543b4eb2a7ad67155eb96dd2d99efbc181498d
|
[
"BSD-3-Clause"
] | 506
|
2015-01-02T09:28:47.000Z
|
2022-03-10T23:27:27.000Z
|
"""
owtf.net.scanner
~~~~~~~~~~~~~~~~
scan_network scans the network for open ports and calls the network plugins for the different services running on the target
"""
import logging
import re
from owtf.config import config_handler
from owtf.db.session import get_scoped_session
from owtf.managers.plugin import get_plugins_by_group
from owtf.settings import NET_SCANS_PATH
from owtf.shell.base import shell
from owtf.utils.file import FileOperations
__all__ = ["Scanner"]
# Folder under which all scans will be saved
PING_SWEEP_FILE = "{}/00_ping_sweep".format(NET_SCANS_PATH)
DNS_INFO_FILE = "{}/01_dns_info".format(NET_SCANS_PATH)
FAST_SCAN_FILE = "{}/02_fast_scan".format(NET_SCANS_PATH)
STD_SCAN_FILE = "{}/03_std_scan".format(NET_SCANS_PATH)
FULL_SCAN_FILE = "{}/04_full_scan".format(NET_SCANS_PATH)
class Scanner(object):
def __init__(self):
self.shell = shell
self.session = get_scoped_session()
# Create the missing scans folder inside the owtf_review directory.
FileOperations.create_missing_dirs(NET_SCANS_PATH)
def ping_sweep(self, target, scantype):
"""Do a ping sweep
:param target: Target to scan
:type target: `str`
:param scantype: Type of scan
:type scantype: `str`
:return: None
:rtype: None
"""
if scantype == "full":
logging.info("Performing Intense Host discovery")
self.shell.shell_exec(
"nmap -n -v -sP -PE -PP -PS21,22,23,25,80,443,113,21339 -PA80,113,443,10042"
" --source_port 53 {!s} -oA {!s}".format(target, PING_SWEEP_FILE)
)
if scantype == "arp":
logging.info("Performing ARP host discovery")
self.shell.shell_exec(
"nmap -n -v -sP -PR {!s} -oA {!s}".format(target, PING_SWEEP_FILE)
)
self.shell.shell_exec(
'grep Up {!s}.gnmap | cut -f2 -d" " > {!s}.ips'.format(
PING_SWEEP_FILE, PING_SWEEP_FILE
)
)
def dns_sweep(self, file_with_ips, file_prefix):
"""Do a DNS sweep
:param file_with_ips: Path of file with IP addresses
:type file_with_ips: `str`
:param file_prefix: File name prefix
:type file_prefix: `str`
:return: None
:rtype: None
"""
logging.info(
"Finding misconfigured DNS servers that might allow zone transfers among live ips .."
)
self.shell.shell_exec(
"nmap -PN -n -sS -p 53 -iL {!s} -oA {!s}".format(file_with_ips, file_prefix)
)
# Step 2 - Extract IPs
dns_servers = "{!s}.dns_server.ips".format(file_prefix)
self.shell.shell_exec(
'grep "53/open/tcp" {!s}.gnmap | cut -f 2 -d " " > {!s}'.format(
file_prefix, dns_servers
)
)
file = FileOperations.open(dns_servers)
domain_names = "{!s}.domain_names".format(file_prefix)
self.shell.shell_exec("rm -f {!s}".format(domain_names))
num_dns_servers = 0
for line in file:
if line.strip("\n"):
dns_server = line.strip("\n")
self.shell.shell_exec(
"host {} {} | grep 'domain name' | cut -f 5 -d' ' | cut -f 2,3,4,5,6,7 -d. "
"| sed 's/\.$//' >> {}".format(dns_server, dns_server, domain_names)
)
num_dns_servers += 1
try:
file = FileOperations.open(domain_names, owtf_clean=False)
except IOError:
return
for line in file:
domain = line.strip("\n")
raw_axfr = "{!s}.{!s}.{!s}.axfr.raw".format(file_prefix, dns_server, domain)
self.shell.shell_exec(
"host -l {!s} {!s} | grep {!s} > {!s}".format(
domain, dns_server, domain, raw_axfr
)
)
success = self.shell.shell_exec(
"wc -l {!s} | cut -f 1 -d ' '".format(raw_axfr)
)
if success > 3:
logging.info(
"Attempting zone transfer on $dns_server using domain {!s}.. Success!".format(
domain
)
)
axfr = "{!s}.{!s}.{!s}.axfr".format(file_prefix, dns_server, domain)
self.shell.shell_exec("rm -f {!s}".format(axfr))
logging.info(
self.shell.shell_exec(
"grep 'has address' {!s} | cut -f 1,4 -d ' ' | sort -k 2 -t ' ' "
"| sed 's/ /#/g'".format(raw_axfr)
)
)
else:
logging.info(
"Attempting zone transfer on $dns_server using domain %s.. Success!",
domain,
)
self.shell.shell_exec("rm -f {!s}".format(raw_axfr))
if num_dns_servers == 0:
return
def scan_and_grab_banners(
self, file_with_ips, file_prefix, scan_type, nmap_options
):
"""Scan targets and grab service banners
:param file_with_ips: Path to file with IPs
:type file_with_ips: `str`
:param file_prefix: File name prefix
:type file_prefix: `str`
:param scan_type: Type of scan
:type scan_type: `str`
:param nmap_options: nmap options
:type nmap_options: `str`
:return: None
:rtype: None
"""
if scan_type == "tcp":
logging.info(
"Performing TCP portscan, OS detection, Service detection, banner grabbing, etc"
)
self.shell.shell_exec(
"nmap -PN -n -v --min-parallelism=10 -iL {!s} -sS -sV -O -oA {!s}.tcp {!s}".format(
file_with_ips, file_prefix, nmap_options
)
)
self.shell.shell_exec(
"amap -1 -i {!s}.tcp.gnmap -Abq -m -o {!s}.tcp.amap -t 90 -T 90 -c 64".format(
file_prefix, file_prefix
)
)
if scan_type == "udp":
logging.info(
"Performing UDP portscan, Service detection, banner grabbing, etc"
)
self.shell.shell_exec(
"nmap -PN -n -v --min-parallelism=10 -iL {!s} -sU -sV -O -oA {!s}.udp {!s}".format(
file_with_ips, file_prefix, nmap_options
)
)
self.shell.shell_exec(
"amap -1 -i {}.udp.gnmap -Abq -m -o {}.udp.amap".format(
file_prefix, file_prefix
)
)
@staticmethod
def get_nmap_services_file():
"""Return default NMAP services file
:return: Path to the file
:rtype: `str`
"""
return "/usr/share/nmap/nmap-services"
def get_ports_for_service(self, service, protocol):
"""Get ports for different services
:param service: Service name
:type service: `str`
:param protocol: Protocol
:type protocol: `str`
:return: List of ports
:rtype: `list`
"""
regexp = "(.*?)\t(.*?/.*?)\t(.*?)($|\t)(#.*){0,1}"
re.compile(regexp)
list = []
f = FileOperations.open(self.get_nmap_services_file())
for line in f.readlines():
if line.lower().find(service) >= 0:
match = re.findall(regexp, line)
if match:
port = match[0][1].split("/")[0]
prot = match[0][1].split("/")[1]
if (not protocol or protocol == prot) and port not in list:
list.append(port)
f.close()
return list
def target_service(self, nmap_file, service):
"""Services for a target
:param nmap_file: Path to nmap file
:type nmap_file: `str`
:param service: Service to get
:type service: `str`
:return: Response
:rtype: `str`
"""
ports_for_service = self.get_ports_for_service(service, "")
f = FileOperations.open(nmap_file.strip())
response = ""
for host_ports in re.findall("Host: (.*?)\tPorts: (.*?)[\t\n]", f.read()):
host = host_ports[0].split(" ")[0] # Remove junk at the end
ports = host_ports[1].split(",")
for port_info in ports:
if len(port_info) < 1:
continue
chunk = port_info.split("/")
port = chunk[0].strip()
port_state = chunk[1].strip()
# No point in wasting time probing closed/filtered ports!!
# (nmap sometimes adds these to the gnmap file for some reason ..)
if port_state in ["closed", "filtered"]:
continue
try:
prot = chunk[2].strip()
except BaseException:
continue
if port in ports_for_service:
response += "{!s}:{!s}:{!s}##".format(host, port, prot)
f.close()
return response
def probe_service_for_hosts(self, nmap_file, target):
"""Probe a service for a domain
:param nmap_file: Path to nmap file
:type nmap_file: `str`
:param target: Target name
:type target: `str`
:return: List of services
:rtype: `list`
"""
services = []
# Get all available plugins from network plugin order file
net_plugins = get_plugins_by_group(self.session, plugin_group="network")
for plugin in net_plugins:
services.append(plugin["Name"])
services.append("http")
total_tasks = 0
tasklist = ""
plugin_list = []
http = []
for service in services:
if plugin_list.count(service) > 0:
continue
tasks_for_service = len(
self.target_service(nmap_file, service).split("##")
) - 1
total_tasks += tasks_for_service
tasklist = "{!s} [ {!s} - {!s} tasks ]".format(
tasklist, service, str(tasks_for_service)
)
for line in self.target_service(nmap_file, service).split("##"):
if line.strip("\n"):
ip = line.split(":")[0]
port = line.split(":")[1]
plugin_to_invoke = service
service1 = plugin_to_invoke
config_handler.set(
"{!s}_PORT_NUMBER".format(service1.upper()), port
)
if service != "http":
plugin_list.append(plugin_to_invoke)
http.append(port)
logging.info(
"We have to probe %s:%s for service %s",
str(ip),
str(port),
plugin_to_invoke,
)
return http
def scan_network(self, target):
"""Do a ping sweep for a target
:param target: Target url
:type target: `str`
:return: None
:rtype: None
"""
self.ping_sweep(target.split("//")[1], "full")
self.dns_sweep("{}.ips".format(PING_SWEEP_FILE), DNS_INFO_FILE)
def probe_network(self, target, protocol, port):
"""Probe network for services
:param target: target url
:type target: `str`
:param protocol: Protocol scan
:type protocol: `str`
:param port: Port number for target
:type port: `str`
:return: List of services running
:rtype: list
"""
self.scan_and_grab_banners(
"{0}.ips".format(PING_SWEEP_FILE),
FAST_SCAN_FILE,
protocol,
"-p" + str(port),
)
return self.probe_service_for_hosts(
"{0}.{1}.gnmap".format(FAST_SCAN_FILE, protocol), target.split("//")[1]
)
| 36.158209
| 120
| 0.515562
|
e48c322cca861aed08126335bb59c3f13ea5d8ed
| 12,392
|
py
|
Python
|
avionix/chart/chart_builder.py
|
Maxiimeeb/avionix
|
c149e4319c8c8c00d50450ec1644545340ff7322
|
[
"BSD-3-Clause"
] | 51
|
2020-07-17T11:42:44.000Z
|
2022-03-17T23:51:28.000Z
|
avionix/chart/chart_builder.py
|
Maxiimeeb/avionix
|
c149e4319c8c8c00d50450ec1644545340ff7322
|
[
"BSD-3-Clause"
] | 55
|
2020-07-14T21:21:14.000Z
|
2022-03-04T22:43:10.000Z
|
avionix/chart/chart_builder.py
|
Maxiimeeb/avionix
|
c149e4319c8c8c00d50450ec1644545340ff7322
|
[
"BSD-3-Clause"
] | 9
|
2021-01-05T01:52:14.000Z
|
2022-02-16T12:42:18.000Z
|
from logging import error, info
import os
from pathlib import Path
import re
import shutil
import subprocess
from typing import Dict, List, Optional
import yaml
from avionix._process_utils import custom_check_output
from avionix.chart.chart_info import ChartInfo
from avionix.chart.utils import get_helm_installations
from avionix.chart.values_yaml import Values
from avionix.errors import (
ChartNotInstalledError,
ErrorFactory,
post_uninstall_handle_error,
)
from avionix.kube.base_objects import KubernetesBaseObject
class ChartBuilder:
"""
Main builder object. Accepts kubernetes objects and generates the helm chart
structure. Can also perform the installation onto the server
:param chart_info: Contains all chart metadata and dependency info
:param kubernetes_objects: A list of kubernetes objects
:param output_directory: A path to the directory in which to place the generated \
chart
:param keep_chart: Whether or not to keep the chart after installation
:param namespace: The namespace in which all chart components should be installed \
This allows the convenience of not passing the namespace option to both \
install and uninstall
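:Example:
A minimal usage sketch (the ChartInfo arguments, chart name and namespace shown here are illustrative):
>>> builder = ChartBuilder(
>>> ChartInfo(api_version="3.2.4", name="my-chart", version="0.1.0"),
>>> [],
>>> namespace="default",
>>> )
>>> builder.install_chart()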
"""
def __init__(
self,
chart_info: ChartInfo,
kubernetes_objects: List[KubernetesBaseObject],
output_directory: Optional[str] = None,
keep_chart: bool = False,
namespace: Optional[str] = None,
values: Optional[Values] = None,
):
self.chart_info = chart_info
self.kubernetes_objects = kubernetes_objects
self.chart_folder_path = Path(self.chart_info.name)
self.__templates_directory = self.chart_folder_path / "templates"
self.__chart_yaml = self.chart_folder_path / "Chart.yaml"
self.__keep_chart = keep_chart
self.__values = values
self.namespace = namespace
if output_directory:
self.__templates_directory = Path(output_directory) / str(
self.__templates_directory
)
self.__chart_yaml = Path(output_directory) / str(self.__chart_yaml)
self.chart_folder_path = Path(output_directory) / str(
self.chart_folder_path
)
def __delete_chart_directory(self):
if os.path.exists(self.chart_info.name):
shutil.rmtree(self.chart_info.name)
def generate_chart(self):
"""
Generates the chart but does not install it on kubernetes
:return: The templates directory
"""
self.__delete_chart_directory()
os.makedirs(self.__templates_directory, exist_ok=True)
with open(self.__chart_yaml, "w+") as chart_yaml_file:
chart_yaml_file.write(str(self.chart_info))
kind_count: Dict[str, int] = {}
for kubernetes_object in self.kubernetes_objects:
if kubernetes_object.kind not in kind_count:
kind_count[kubernetes_object.kind] = 0
else:
kind_count[kubernetes_object.kind] += 1
with open(
self.__templates_directory / f"{kubernetes_object.kind}-"
f"{kind_count[kubernetes_object.kind]}.yaml",
"w",
) as template:
template.write(str(kubernetes_object))
with open(
self.__templates_directory.parent / "values.yaml", "w"
) as values_file:
values_file.write(self.__get_values_yaml())
return self.__templates_directory
def _helm_list_repos(self) -> List[str]:
try:
return custom_check_output("helm repo list").split("\n")[1:]
except subprocess.CalledProcessError as err:
error_message = err.output.decode("utf-8").strip()
if error_message == "Error: no repositories to show":
return []
error(error_message)
raise err
def get_helm_repos(self):
repo_lines = self._helm_list_repos()
repo_to_url_dict = {}
for repo_line in repo_lines:
repo_line_no_extra_space = repo_line.strip()
match = re.match(
r"(?P<repo_name>.+?)\s+(?P<url>.+)", repo_line_no_extra_space
)
if not repo_line_no_extra_space:
continue
if not match:
raise Exception(
f"Could not match repo name pattern from output for "
f"line {repo_line_no_extra_space}"
)
repo_to_url_dict[match.group("repo_name")] = match.group("url")
return repo_to_url_dict
def add_dependency_repos(self):
"""
Adds repos for all dependencies listed
"""
info("Adding dependencies...")
installed_repos = self.get_helm_repos()
for dependency in self.chart_info.dependencies:
if (
installed_repos.get(dependency.local_repo_name) == dependency.repository
or dependency.is_local
):
continue
dependency.add_repo()
installed_repos[dependency.local_repo_name] = dependency.repository
def __get_values_yaml(self):
values = {}
for dependency in self.chart_info.dependencies:
values.update(dependency.get_values_yaml())
if self.__values:
values.update(self.__values.values)
return yaml.dump(values)
@staticmethod
def __parse_options(options: Optional[Dict[str, Optional[str]]] = None):
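# Builds the helm CLI flag string from an options dict; for example (illustrative),
# {"atomic": None, "version": "2.0"} becomes " --atomic --version 2.0".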
option_string = ""
if options is None:
return option_string
for option in options:
option_string += f" --{option}"
# Add value after flag if one is given
value = options[option]
if value:
option_string += f" {value}"
return option_string
def __get_helm_install_command(
self, options: Optional[Dict[str, Optional[str]]] = None
):
command = (
f"helm install {self.chart_info.name} {self.chart_folder_path.resolve()}"
)
return self.__handle_namespace(command) + self.__parse_options(options)
def run_helm_install(self, options: Optional[Dict[str, Optional[str]]] = None):
"""
Runs helm install on the chart
:param options: A dictionary of command line arguments to pass to helm
:Example:
To run an install with updated dependencies and with verbose logging:
>>> self.run_helm_install({"dependency_update": None, "v": "info"})
"""
custom_check_output(self.__get_helm_install_command(options))
def __handle_installation(self, options: Optional[Dict[str, Optional[str]]] = None):
try:
info(f"Installing helm chart {self.chart_info.name}...")
self.run_helm_install(options)
except subprocess.CalledProcessError as err:
decoded = err.output.decode("utf-8")
error = ErrorFactory(decoded).get_error()
if error is not None:
raise error
if self.is_installed:
self.uninstall_chart()
raise post_uninstall_handle_error(decoded)
def install_chart(self, options: Optional[Dict[str, Optional[str]]] = None):
"""
Generates and installs the helm chart onto kubernetes and handles all failures.
It will also add the repos of all listed dependencies.
Note that the generated chart will be deleted if *keep_chart* is not set to
true on ChartBuilder
WARNING: If the helm chart installation fails, the chart will be uninstalled,
so if working with an existing chart, please use upgrade_chart instead
:param options: A dictionary of command line arguments to pass to helm
For example, to run an install with updated dependencies and with verbose
logging:
>>> self.install_chart({"dependency_update": None, "v": "info"})
"""
self.generate_chart()
self.add_dependency_repos()
self.__handle_installation(options)
if not self.__keep_chart:
self.__delete_chart_directory()
def __get_helm_uninstall_command(
self, options: Optional[Dict[str, Optional[str]]] = None
):
command = f"helm uninstall {self.chart_info.name}"
return self.__handle_namespace(command) + self.__parse_options(options)
def run_helm_uninstall(self, options: Optional[Dict[str, Optional[str]]] = None):
"""
Runs helm uninstall
:param options: A dictionary of command line arguments to pass to helm
:Example:
>>> self.run_helm_uninstall(
>>> {"dry-run": None,
>>> "description": "My uninstall description"
>>> }
>>> )
"""
info(f"Uninstalling chart {self.chart_info.name}")
custom_check_output(self.__get_helm_uninstall_command(options))
def __check_if_installed(self):
info(f"Checking if helm chart {self.chart_info.name} is installed")
if not self.is_installed:
raise ChartNotInstalledError(
f'Error: chart "{self.chart_info.name}" is not installed'
)
def __handle_uninstallation(
self, options: Optional[Dict[str, Optional[str]]] = None
):
self.__check_if_installed()
self.run_helm_uninstall(options)
def uninstall_chart(self, options: Optional[Dict[str, Optional[str]]] = None):
"""
Uninstalls the chart if present, if not present, raises an error
:param options: A dictionary of command line arguments to pass to helm
:Example:
>>> self.uninstall_chart(
>>> {"dry-run": None,
>>> "description": "My uninstall description"
>>> }
>>> )
"""
self.__handle_uninstallation(options)
def __handle_namespace(self, command: str):
if self.namespace is not None:
return command + f" -n {self.namespace}"
return command
def __get_helm_upgrade_command(
self, options: Optional[Dict[str, Optional[str]]] = None
):
command = f"helm upgrade {self.chart_info.name} {self.chart_folder_path}"
return self.__handle_namespace(command) + self.__parse_options(options)
def __handle_upgrade(self, options: Optional[Dict[str, Optional[str]]] = None):
try:
self.run_helm_upgrade(options)
except subprocess.CalledProcessError as err:
decoded = err.output.decode("utf-8")
error = ErrorFactory(decoded).get_error()
if error is not None:
raise error
raise post_uninstall_handle_error(decoded)
def run_helm_upgrade(self, options: Optional[Dict[str, Optional[str]]] = None):
"""
Runs 'helm upgrade' on the chart
:param options: A dictionary of command line arguments to pass to helm
:Example:
>>> self.run_helm_upgrade(options={"atomic": None, "version": "2.0"})
"""
info(f"Upgrading helm chart {self.chart_info.name}")
custom_check_output(self.__get_helm_upgrade_command(options))
def upgrade_chart(self, options: Optional[Dict[str, Optional[str]]] = None):
"""
Generates and upgrades the helm chart
:param options: A dictionary of command line arguments to pass to helm
:Example:
>>> self.upgrade_chart(options={"atomic": None, "version": "2.0"})
"""
self.__check_if_installed()
self.generate_chart()
self.add_dependency_repos()
update_dependencies = "dependency-update"
if options is not None and update_dependencies in options:
custom_check_output(
f"helm dependency update {self.chart_folder_path.resolve()}"
)
del options[update_dependencies]
self.__handle_upgrade(options)
@property
def is_installed(self):
"""
:return: True if chart with the given name is already installed in the chart \
builders namespace, else False
"""
installations = get_helm_installations(self.namespace)
if not installations:
return False
return self.chart_info.name in installations["NAME"]
| 36.771513
| 88
| 0.63194
|
9e2c71366b0ea917833b6388c96d4201a9b7c068
| 3,760
|
py
|
Python
|
generate_plugins.py
|
jacobgb24/gog-galaxy-dolphin
|
69dc73b9f17e91c3b60e0377c926f073674c8f6a
|
[
"MIT"
] | 3
|
2021-08-10T09:27:03.000Z
|
2021-12-05T14:42:16.000Z
|
generate_plugins.py
|
jacobgb24/gog-galaxy-dolphin
|
69dc73b9f17e91c3b60e0377c926f073674c8f6a
|
[
"MIT"
] | 4
|
2020-12-23T14:17:12.000Z
|
2022-03-18T17:27:00.000Z
|
generate_plugins.py
|
jacobgb24/gog-galaxy-dolphin
|
69dc73b9f17e91c3b60e0377c926f073674c8f6a
|
[
"MIT"
] | null | null | null |
"""
This utility file generates the output binaries that should be placed in the GOG plugins folder.
Two binaries are generated, one for gamecube and another for wii. The only necessary changes are
the directory name and manifest.
"""
import json
import shutil
import argparse
import os
import sys
import utils
from galaxy.api.consts import Platform
gamecube = {"platform": Platform.NintendoGameCube.value, "guid": "c732be30-0407-463f-bc30-6d8b3809fef4"}
wii = {"platform": Platform.NintendoWii.value, "guid": "c732be30-0407-463f-bc30-6d8b3809fef5"}
if __name__ == '__main__':
if sys.platform == 'win32':
GOG_DIR = os.environ['localappdata'] + '\\GOG.com\\Galaxy\\plugins\\installed'
LOG_DIR = os.environ['programdata'] + '\\GOG.com\\Galaxy\\logs'
elif sys.platform == 'darwin':
GOG_DIR = os.path.realpath("~/Library/Application Support/GOG.com/Galaxy/plugins/installed")
LOG_DIR = os.path.realpath("/Users/Shared/GOG.com/Galaxy/Logs")
else:
GOG_DIR = None
LOG_DIR = None
parser = argparse.ArgumentParser(description="Generates output plugins. "
"By default places in GOG location. Modify with `-o`")
output = parser.add_mutually_exclusive_group()
output.add_argument('-o', '--output-dir', help="Directory to output to. Default is GOG installed folder",
default=GOG_DIR)
output.add_argument('-z', '--zip', action='store_true', help="Output a zip to current dir for github release")
output.add_argument('-c', '--clear-logs', action='store_true', help="If set, attempts to remove log files")
args = parser.parse_args()
base_manifest = utils.get_manifest()
if args.output_dir is not None and not args.zip:
base_dir = args.output_dir
else:
base_dir = base_manifest["name"]
if args.clear_logs:
print("Removing log files")
for f in [f'{LOG_DIR}/plugin-{gamecube["platform"]}-{gamecube["guid"]}.log',
f'{LOG_DIR}/plugin-{wii["platform"]}-{wii["guid"]}.log',
f'{LOG_DIR}/GalaxyClient.log']:
try:
os.remove(f)
except (FileNotFoundError, PermissionError) as e:
print(f"Can't delete log for {f}")
print(f' {e}')
continue
gc_path = f'{base_dir}/gog-dolphin-{gamecube["platform"]}-{gamecube["guid"]}'
wii_path = f'{base_dir}/gog-dolphin-{wii["platform"]}-{wii["guid"]}'
ignored_files = shutil.ignore_patterns(f'{base_manifest["name"]}*', ".*", "__*", "manifest.json", __file__)
# gc
print("Creating ncube files...")
print(f" path: {gc_path}")
shutil.rmtree(gc_path, ignore_errors=True)
shutil.copytree(".", gc_path, ignore=ignored_files)
gc_manifest = base_manifest.copy()
gc_manifest["name"] = base_manifest["name"] + f"_Gamecube"
gc_manifest.update(gamecube)
with open(f"{gc_path}/manifest.json", "w") as m:
json.dump(gc_manifest, m, indent=2)
# wii
print("Creating nwii files...")
print(f" path: {wii_path}")
shutil.rmtree(wii_path, ignore_errors=True)
shutil.copytree(".", wii_path, ignore=ignored_files)
wii_manifest = base_manifest.copy()
wii_manifest["name"] = base_manifest["name"] + f"_Wii"
wii_manifest.update(wii)
with open(f"{wii_path}/manifest.json", "w") as m:
json.dump(wii_manifest, m, indent=2)
if args.zip:
print("Outputting to zip")
zip_name = f'{base_dir}_v{base_manifest["version"]}'
if os.path.exists(f'{zip_name}.zip'):
os.remove(f'{zip_name}.zip')
shutil.make_archive(zip_name, 'zip', base_dir)
shutil.rmtree(base_dir)
print("Done!")
| 40
| 114
| 0.639894
|
493b0560df13e1c7e91509057ae96e741f5c6d82
| 768
|
py
|
Python
|
setup.py
|
cmihai/winmoncon
|
656c47aeeec5a1fbaa11ad72be575491cc0e315f
|
[
"Unlicense"
] | 1
|
2018-02-05T14:42:56.000Z
|
2018-02-05T14:42:56.000Z
|
setup.py
|
cmihai/winmoncon
|
656c47aeeec5a1fbaa11ad72be575491cc0e315f
|
[
"Unlicense"
] | null | null | null |
setup.py
|
cmihai/winmoncon
|
656c47aeeec5a1fbaa11ad72be575491cc0e315f
|
[
"Unlicense"
] | null | null | null |
from os import path
from setuptools import setup, find_packages
import codecs
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst')) as f:
long_description = codecs.decode(f.read(), 'utf-8')
setup(
name='winmoncon',
version='0.0.1',
description='A Python inteface to the Windows Monitor Configuration API',
long_description=long_description,
author='Mihai Ciumeică',
license='Public Domain',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: Public Domain',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
packages=find_packages(exclude=['tests', 'docs', 'examples'])
)
| 25.6
| 77
| 0.657552
|
1cee08feb0a27aabc62482104e6fa6304da3ad74
| 8,050
|
py
|
Python
|
tests/components/deconz/test_init.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 7
|
2018-08-03T10:15:36.000Z
|
2019-03-25T13:31:55.000Z
|
tests/components/deconz/test_init.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | null | null | null |
tests/components/deconz/test_init.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 3
|
2018-10-09T08:37:48.000Z
|
2019-11-16T08:32:27.000Z
|
"""Test deCONZ component setup process."""
from unittest.mock import Mock, patch
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
from homeassistant.components import deconz
from tests.common import mock_coro
async def test_config_with_host_passed_to_config_entry(hass):
"""Test that configured options for a host are loaded via config entry."""
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(deconz, 'configured_hosts', return_value=[]), \
patch.object(deconz, 'load_json', return_value={}):
assert await async_setup_component(hass, deconz.DOMAIN, {
deconz.DOMAIN: {
deconz.CONF_HOST: '1.2.3.4',
deconz.CONF_PORT: 80
}
}) is True
# Import flow started
assert len(mock_config_entries.flow.mock_calls) == 2
async def test_config_file_passed_to_config_entry(hass):
"""Test that configuration file for a host are loaded via config entry."""
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(deconz, 'configured_hosts', return_value=[]), \
patch.object(deconz, 'load_json',
return_value={'host': '1.2.3.4'}):
assert await async_setup_component(hass, deconz.DOMAIN, {
deconz.DOMAIN: {}
}) is True
# Import flow started
assert len(mock_config_entries.flow.mock_calls) == 2
async def test_config_without_host_not_passed_to_config_entry(hass):
"""Test that a configuration without a host does not initiate an import."""
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(deconz, 'configured_hosts', return_value=[]), \
patch.object(deconz, 'load_json', return_value={}):
assert await async_setup_component(hass, deconz.DOMAIN, {
deconz.DOMAIN: {}
}) is True
# No flow started
assert len(mock_config_entries.flow.mock_calls) == 0
async def test_config_already_registered_not_passed_to_config_entry(hass):
"""Test that an already registered host does not initiate an import."""
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(deconz, 'configured_hosts',
return_value=['1.2.3.4']), \
patch.object(deconz, 'load_json', return_value={}):
assert await async_setup_component(hass, deconz.DOMAIN, {
deconz.DOMAIN: {
deconz.CONF_HOST: '1.2.3.4',
deconz.CONF_PORT: 80
}
}) is True
# No flow started
assert len(mock_config_entries.flow.mock_calls) == 0
async def test_config_discovery(hass):
"""Test that a discovered bridge does not initiate an import."""
with patch.object(hass, 'config_entries') as mock_config_entries:
assert await async_setup_component(hass, deconz.DOMAIN, {}) is True
# No flow started
assert len(mock_config_entries.flow.mock_calls) == 0
async def test_setup_entry_already_registered_bridge(hass):
"""Test setup entry doesn't allow more than one instance of deCONZ."""
hass.data[deconz.DOMAIN] = True
assert await deconz.async_setup_entry(hass, {}) is False
async def test_setup_entry_no_available_bridge(hass):
"""Test setup entry fails if deCONZ is not available."""
entry = Mock()
entry.data = {'host': '1.2.3.4', 'port': 80, 'api_key': '1234567890ABCDEF'}
with patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(False)):
assert await deconz.async_setup_entry(hass, entry) is False
async def test_setup_entry_successful(hass):
"""Test setup entry is successful."""
entry = Mock()
entry.data = {'host': '1.2.3.4', 'port': 80, 'api_key': '1234567890ABCDEF'}
with patch.object(hass, 'async_add_job') as mock_add_job, \
patch.object(hass, 'config_entries') as mock_config_entries, \
patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(True)):
assert await deconz.async_setup_entry(hass, entry) is True
assert hass.data[deconz.DOMAIN]
assert hass.data[deconz.DATA_DECONZ_ID] == {}
assert len(hass.data[deconz.DATA_DECONZ_UNSUB]) == 1
assert len(mock_add_job.mock_calls) == 4
assert len(mock_config_entries.async_forward_entry_setup.mock_calls) == 4
assert mock_config_entries.async_forward_entry_setup.mock_calls[0][1] == \
(entry, 'binary_sensor')
assert mock_config_entries.async_forward_entry_setup.mock_calls[1][1] == \
(entry, 'light')
assert mock_config_entries.async_forward_entry_setup.mock_calls[2][1] == \
(entry, 'scene')
assert mock_config_entries.async_forward_entry_setup.mock_calls[3][1] == \
(entry, 'sensor')
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
entry = Mock()
entry.data = {'host': '1.2.3.4', 'port': 80, 'api_key': '1234567890ABCDEF'}
with patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(True)):
assert await deconz.async_setup_entry(hass, entry) is True
assert deconz.DATA_DECONZ_EVENT in hass.data
hass.data[deconz.DATA_DECONZ_EVENT].append(Mock())
hass.data[deconz.DATA_DECONZ_ID] = {'id': 'deconzid'}
assert await deconz.async_unload_entry(hass, entry)
assert deconz.DOMAIN not in hass.data
assert len(hass.data[deconz.DATA_DECONZ_UNSUB]) == 0
assert len(hass.data[deconz.DATA_DECONZ_EVENT]) == 0
assert len(hass.data[deconz.DATA_DECONZ_ID]) == 0
async def test_add_new_device(hass):
"""Test adding a new device generates a signal for platforms."""
new_event = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": {
"config": {
"on": "True",
"reachable": "True"
},
"name": "event",
"state": {},
"type": "ZHASwitch"
}
}
entry = Mock()
entry.data = {'host': '1.2.3.4', 'port': 80, 'api_key': '1234567890ABCDEF'}
with patch.object(deconz, 'async_dispatcher_send') as mock_dispatch_send, \
patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(True)):
assert await deconz.async_setup_entry(hass, entry) is True
hass.data[deconz.DOMAIN].async_event_handler(new_event)
await hass.async_block_till_done()
assert len(mock_dispatch_send.mock_calls) == 1
assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_add_new_remote(hass):
"""Test new added device creates a new remote."""
entry = Mock()
entry.data = {'host': '1.2.3.4', 'port': 80, 'api_key': '1234567890ABCDEF'}
remote = Mock()
remote.name = 'name'
remote.type = 'ZHASwitch'
remote.register_async_callback = Mock()
with patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(True)):
assert await deconz.async_setup_entry(hass, entry) is True
async_dispatcher_send(hass, 'deconz_new_sensor', [remote])
await hass.async_block_till_done()
assert len(hass.data[deconz.DATA_DECONZ_EVENT]) == 1
async def test_do_not_allow_clip_sensor(hass):
"""Test that clip sensors can be ignored."""
entry = Mock()
entry.data = {'host': '1.2.3.4', 'port': 80,
'api_key': '1234567890ABCDEF', 'allow_clip_sensor': False}
remote = Mock()
remote.name = 'name'
remote.type = 'CLIPSwitch'
remote.register_async_callback = Mock()
with patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(True)):
assert await deconz.async_setup_entry(hass, entry) is True
async_dispatcher_send(hass, 'deconz_new_sensor', [remote])
await hass.async_block_till_done()
assert len(hass.data[deconz.DATA_DECONZ_EVENT]) == 0
| 41.709845
| 79
| 0.667826
|
94377d881a827375ada1237d6e4916342b0aed87
| 5,108
|
py
|
Python
|
pjproject-2.3/pjsip-apps/src/swig/importsym.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 4
|
2016-09-29T00:04:31.000Z
|
2021-12-02T08:39:51.000Z
|
pjproject-2.3/pjsip-apps/src/swig/importsym.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2019-06-21T09:52:18.000Z
|
2019-06-21T09:52:18.000Z
|
pjproject-2.3/pjsip-apps/src/swig/importsym.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 5
|
2019-07-02T02:03:24.000Z
|
2022-03-30T09:58:52.000Z
|
# $Id: importsym.py 4704 2014-01-16 05:30:46Z ming $
#
# importsym.py: Import C symbol decls (structs, enums, etc) and write them
# to another file
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pycparser
from pycparser import c_generator
import sys
import os
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
if sys.platform == 'win32' and not program.endswith(".exe"):
program += ".exe"
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
#
PJ_ROOT_PATH = "../../../"
# CPP is needed by pycparser.
CPP_PATH = which("cpp")
if not CPP_PATH:
print 'Error: need to have cpp in PATH'
sys.exit(1)
# Hardcoded!
if sys.platform == 'win32':
PYCPARSER_DIR="C:/devs/tools/pycparser"
elif sys.platform == "linux2":
PYCPARSER_DIR="/home/bennylp/Desktop/opt/src/pycparser-master"
else:
PYCPARSER_DIR="/Library/Python/2.7/site-packages/pycparser"
if not os.path.exists(PYCPARSER_DIR + '/utils/fake_libc_include'):
print "Error: couldn't find pycparser utils in '%s'" % PYPARSER_DIR
sys.exit(1)
# Heading, to be placed before the source files
C_HEADING_SECTION = """
#define PJ_AUTOCONF 1
#define jmp_buf int
#define __attribute__(x)
"""
# CPP (C preprocessor) settings
CPP_CFLAGS = [
'-I' + PYCPARSER_DIR + '/utils/fake_libc_include',
"-I" + PJ_ROOT_PATH + "pjlib/include",
"-I" + PJ_ROOT_PATH + "pjlib-util/include",
"-I" + PJ_ROOT_PATH + "pjnath/include",
"-I" + PJ_ROOT_PATH + "pjmedia/include",
"-I" + PJ_ROOT_PATH + "pjsip/include"
]
class SymbolVisitor(pycparser.c_ast.NodeVisitor):
def __init__(self, names):
self.nodeDict = {}
for name in names:
self.nodeDict[name] = None
def _add(self, node):
if self.nodeDict.has_key(node.name):
self.nodeDict[node.name] = node
def visit_Struct(self, node):
self._add(node)
def visit_Enum(self, node):
self._add(node)
def visit_Typename(self, node):
self._add(node)
def visit_Typedef(self, node):
self._add(node)
TEMP_FILE="tmpsrc.h"
class SymbolImporter:
"""
Import C selected declarations from C source file and move it
to another file.
Parameters:
- listfile Path of file containing list of C source file
and identifier names to be imported. The format
of the listfile is:
filename name1 name2 name3
for example:
pj/sock_qos.h pj_qos_type pj_qos_flag
pj/types.h pj_status_t PJ_SUCCESS
"""
def __init__(self):
pass
def process(self, listfile, outfile):
# Read listfile
f = open(listfile)
lines = f.readlines()
f.close()
# Process each line in list file, while generating the
# temporary C file to be processed by pycparser
f = open(TEMP_FILE, "w")
f.write(C_HEADING_SECTION)
names = []
fcnt = 0
for line in lines:
spec = line.split()
if len(spec) < 2:
continue
fcnt += 1
f.write("#include <%s>\n" % spec[0])
names.extend(spec[1:])
f.close()
print 'Parsing %d symbols from %d files..' % (len(names), fcnt)
# Parse the temporary C file
ast = pycparser.parse_file(TEMP_FILE, use_cpp=True, cpp_path=CPP_PATH, cpp_args=CPP_CFLAGS)
os.remove(TEMP_FILE)
# Filter the declarations that we wanted
print 'Filtering..'
visitor = SymbolVisitor(names)
visitor.visit(ast)
# Print symbol declarations to outfile
print 'Writing declarations..'
f = open(outfile, 'w')
f.write("// This file is autogenerated by importsym script, do not modify!\n\n")
gen = pycparser.c_generator.CGenerator()
for name in names:
node = visitor.nodeDict[name]
if not node:
print " ** Warning: declaration for '%s' is not found **" % k
else:
print " writing '%s'.." % name
output = gen.visit(node) + ";\n\n"
f.write(output)
f.close()
print "Done."
if __name__ == "__main__":
print "Importing symbols: 'symbols.lst' --> 'symbols.i'"
si = SymbolImporter()
si.process("symbols.lst", "symbols.i")
try:
os.remove("lextab.py")
except OSError:
pass
try:
os.remove("yacctab.py")
except OSError:
pass
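# Hedged illustration, not part of the original script: a symbols.lst entry of the form
#   pj/types.h pj_status_t
# is expected to yield a symbols.i containing the matching declaration, roughly
#   typedef int pj_status_t;
# The exact output depends on the PJSIP headers actually parsed, so treat the line
# above as an assumption rather than guaranteed output.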
| 26.466321
| 93
| 0.671887
|
2e2037c100d3892e498d3be3b76bfc202b012ae7
| 2,131
|
py
|
Python
|
tests/contrib/sqlalchemy/test_mysql.py
|
thieman/dd-trace-py
|
1e87c9bdf7769032982349c4ccc0e1c2e6866a16
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/sqlalchemy/test_mysql.py
|
thieman/dd-trace-py
|
1e87c9bdf7769032982349c4ccc0e1c2e6866a16
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/sqlalchemy/test_mysql.py
|
thieman/dd-trace-py
|
1e87c9bdf7769032982349c4ccc0e1c2e6866a16
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-02-11T10:20:14.000Z
|
2021-02-11T10:20:14.000Z
|
from sqlalchemy.exc import ProgrammingError
import pytest
from .mixins import SQLAlchemyTestMixin
from ..config import MYSQL_CONFIG
from ...base import BaseTracerTestCase
class MysqlConnectorTestCase(SQLAlchemyTestMixin, BaseTracerTestCase):
"""TestCase for mysql-connector engine"""
VENDOR = 'mysql'
SQL_DB = 'test'
SERVICE = 'mysql'
ENGINE_ARGS = {'url': 'mysql+mysqlconnector://%(user)s:%(password)s@%(host)s:%(port)s/%(database)s' % MYSQL_CONFIG}
def setUp(self):
super(MysqlConnectorTestCase, self).setUp()
def tearDown(self):
super(MysqlConnectorTestCase, self).tearDown()
def check_meta(self, span):
# check database connection tags
self.assertEqual(span.get_tag('out.host'), MYSQL_CONFIG['host'])
self.assertEqual(span.get_metric('out.port'), MYSQL_CONFIG['port'])
def test_engine_execute_errors(self):
# ensures that SQL errors are reported
with pytest.raises(ProgrammingError):
with self.connection() as conn:
conn.execute('SELECT * FROM a_wrong_table').fetchall()
traces = self.tracer.writer.pop_traces()
# trace composition
self.assertEqual(len(traces), 1)
self.assertEqual(len(traces[0]), 1)
span = traces[0][0]
# span fields
self.assertEqual(span.name, '{}.query'.format(self.VENDOR))
self.assertEqual(span.service, self.SERVICE)
self.assertEqual(span.resource, 'SELECT * FROM a_wrong_table')
self.assertEqual(span.get_tag('sql.db'), self.SQL_DB)
self.assertIsNone(span.get_tag('sql.rows') or span.get_metric('sql.rows'))
self.check_meta(span)
self.assertEqual(span.span_type, 'sql')
self.assertTrue(span.duration > 0)
# check the error
self.assertEqual(span.error, 1)
self.assertEqual(span.get_tag('error.type'), 'mysql.connector.errors.ProgrammingError')
self.assertTrue("Table 'test.a_wrong_table' doesn't exist" in span.get_tag('error.msg'))
self.assertTrue("Table 'test.a_wrong_table' doesn't exist" in span.get_tag('error.stack'))
| 40.980769
| 119
| 0.674801
|
09041d6f945517d75bc1297e36e736d92e7cd394
| 1,210
|
py
|
Python
|
var/spack/repos/builtin/packages/evemu/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/evemu/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/evemu/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Evemu(AutotoolsPackage):
"""The evemu library and tools are used to describe devices, record data,
create devices and replay data from kernel evdev devices."""
homepage = "https://github.com/freedesktop/evemu"
url = "https://github.com/freedesktop/evemu/archive/v2.7.0.tar.gz"
version('2.7.0', sha256='aee1ecc2b6761134470316d97208b173adb4686dc72548b82b2c2b5d1e5dc259')
version('2.6.0', sha256='dc2382bee4dcb6c413271d586dc11d9b4372a70fa2b66b1e53a7107f2f9f51f8')
version('2.5.0', sha256='ab7cce32800db84ab3504789583d1be0d9b0a5f2689389691367b18cf059b09f')
version('2.4.0', sha256='d346ec59289f588bd93fe3cfa40858c7e048660164338787da79b9ebe3256069')
version('2.3.1', sha256='f2dd97310520bc7824adc38b69ead22c53944a666810c60a3e49592914e14e8a')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('libevdev@1.2.99.902:')
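# Hedged usage note, not part of the original recipe: with this package file in a
# Spack repository, installation would typically be requested with a spec such as
#   spack install evemu@2.7.0
# where the explicit version constraint is optional and shown only as an example.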
| 44.814815
| 95
| 0.754545
|
b056b5f4914d045952988f7dfe09b269ff57e994
| 6,475
|
py
|
Python
|
tarantella/accuracySvgAndTotals.py
|
brobertson/tarantella
|
4eb7485c727bf52ac4907116d983b75dca3550ad
|
[
"BSD-3-Clause"
] | 1
|
2021-01-26T10:14:09.000Z
|
2021-01-26T10:14:09.000Z
|
tarantella/accuracySvgAndTotals.py
|
brobertson/tarantella
|
4eb7485c727bf52ac4907116d983b75dca3550ad
|
[
"BSD-3-Clause"
] | null | null | null |
tarantella/accuracySvgAndTotals.py
|
brobertson/tarantella
|
4eb7485c727bf52ac4907116d983b75dca3550ad
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
# given a directory of xhtml files and a directory of corresponding images,
# this generates an svg strip representing the accuracy
# of the ocr output and saves it in the xhtml directory as 'accuracyReport.svg'
def makeTotalsFile(hocrPath):
import os, sys, lxml
from lxml import etree
total_ocr_words = 0
total_correct_words = 0
namespaces = {"html": "http://www.w3.org/1999/xhtml"}
if not os.path.isdir(hocrPath):
sys.exit("Directory '" + hocrPath + "' does not exist.")
if not os.access(hocrPath, os.W_OK):
sys.exit("Directory '" + hocrPath + "' is not writeable.")
for filename in os.listdir(hocrPath):
if filename.endswith((".html", ".xhtml", ".hocr")):
filePath = os.path.join(hocrPath, filename)
try:
tree = etree.parse(filePath)
root = tree.getroot()
number_in_this = len(
root.findall(".//html:span[@class='ocr_word']", namespaces)
)
correct_words_in_this = len(
root.findall(
".//html:span[@class='ocr_word'][@data-spellcheck-mode='True']",
namespaces,
)
)
except lxml.etree.XMLSyntaxError:
print("Error reading XML in", filename)
number_in_this = 0
correct_words_in_this = 0
total_ocr_words += number_in_this
total_correct_words += correct_words_in_this
print("total: ", total_ocr_words, "; total correct:", total_correct_words)
out = (
"<lace:totals xmlns:lace='http://heml.mta.ca/2019/lace'><lace:total_words>"
+ str(total_ocr_words)
+ "</lace:total_words><lace:total_accurate_words>"
+ str(total_correct_words)
+ "</lace:total_accurate_words></lace:totals>"
)
print("writing this data to ", hocrPath + "total.xml")
with open(os.path.join(hocrPath, "totals.xml"), "w") as text_file:
text_file.write(out)
def percentageToHSLString(percentage):
saturation = "73%"
lightness = "55%"
burgundy = "hsl(1, 91%, 50%)"
black = "hsl(0, 0%, 0%)"
if percentage == 0:
return black
else:
out = (
"hsl("
+ str(int(percentage * 200))
+ ","
+ saturation
+ ","
+ lightness
+ ")"
)
return out
percentageToHSLString(0.2)
def pageAccuracy(pageIn):
import lxml
from lxml import etree
namespaces = {"html": "http://www.w3.org/1999/xhtml"}
try:
tree = etree.parse(pageIn)
root = tree.getroot()
number_in_this = len(
root.findall(".//html:span[@data-spellcheck-mode]", namespaces)
)
correct_words_in_this = len(
root.findall(
".//html:span[@class='ocr_word'][@data-spellcheck-mode='True']",
namespaces,
)
)
except lxml.etree.XMLSyntaxError:
print("Error reading XML in", pageIn)
number_in_this = 0
correct_words_in_this = 0
if number_in_this == 0:
return 0
else:
return correct_words_in_this / number_in_this
# given a directory of xhtml files and a directory of corresponding images,
# this generates an svg strip representing the accuracy
# of the ocr output and saves it in the xhtml directory as 'accuracyReport.svg'
def makeAccuracySVG(hocrPath, imagesXar):
import os, sys, zipfile
from lxml import etree
from pathlib import Path
from zipfile import BadZipfile
x_strip_width = 2
svg_height = str(20)
total_ocr_words = 0
total_correct_words = 0
namespaces = {"svg": "http://www.w3.org/2000/svg"}
if not os.path.isfile(imagesXar):
sys.exit(imagesXar + " is not a file. Exiting")
if not os.path.isdir(hocrPath):
sys.exit("Directory '" + hocrPath + "' does not exist. Exiting.")
if not os.access(hocrPath, os.W_OK):
sys.exit("Directory '" + dirPath + "' is not writeable. Exiting.")
try:
z = zipfile.ZipFile(imagesXar)
except BadZipfile:
sys.exit("File " + imagesXar + " is not a proper zip file. Exiting.")
imageFiles = z.namelist()
imageFiles.sort()
# print(imageFiles)
count = 0
width = str(len(imageFiles) * x_strip_width)
svg_root = etree.XML(
"<svg:svg xmlns:svg='http://www.w3.org/2000/svg' width='"
+ width
+ "' height='"
+ svg_height
+ "' id='svg_accuracy_report'></svg:svg>"
)
tree = etree.ElementTree(svg_root)
for filename in imageFiles:
if filename.endswith((".jpg", ".jpeg", ".png")):
# print(filename)
count += 1
corresponding_text_file = Path(filename).stem + ".html"
correspondingfilePath = os.path.join(hocrPath, corresponding_text_file)
if os.path.isfile(correspondingfilePath):
accuracy_percentage_for_page = pageAccuracy(correspondingfilePath)
fill = percentageToHSLString(accuracy_percentage_for_page)
else:
fill = "hsl(0, 0%, 86%)" # light grey
svg_rect = """<svg:a xmlns:svg='http://www.w3.org/2000/svg'
href="side_by_side_view.html?positionInCollection={}">
<svg:rect data-doc-name="{}" x="{}" y="0" width="{}" height="{}" style="fill:{}">
<svg:title>{}</svg:title>
</svg:rect>
</svg:a>""".format(
str(count),
corresponding_text_file,
str(count * x_strip_width),
str(x_strip_width),
svg_height,
fill,
str(count),
)
svg_root.append(etree.XML(svg_rect))
# print(str(etree.tostring(tree.getroot(), encoding='unicode', method='xml')))
with open(os.path.join(hocrPath, "accuracyReport.svg"), "w") as text_file:
text_file.write(
str(etree.tostring(tree.getroot(), encoding="unicode", method="xml"))
)
def main():
import sys, os
if not (len(sys.argv) == 3):
print("usage:", os.path.basename(sys.argv[0]), "hocr_dir_path images_file.xar")
exit(1)
else:
makeTotalsFile(sys.argv[1])
makeAccuracySVG(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
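# Illustrative invocation, matching the usage string printed by main() above;
# both paths below are placeholders rather than files shipped with this script:
#   python accuracySvgAndTotals.py ./hocr_output ./page_images.xar
# This is expected to write totals.xml and accuracyReport.svg into ./hocr_output.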
| 35.382514
| 97
| 0.572664
|
8a2f3ac23feac75609a4f718c310e6131fb3e98b
| 7,138
|
py
|
Python
|
selfdrive/car/car_helpers.py
|
joeyv821/openpilot
|
7bb4699c0b184f6daf1096aac9dc18ee1168b892
|
[
"MIT"
] | null | null | null |
selfdrive/car/car_helpers.py
|
joeyv821/openpilot
|
7bb4699c0b184f6daf1096aac9dc18ee1168b892
|
[
"MIT"
] | null | null | null |
selfdrive/car/car_helpers.py
|
joeyv821/openpilot
|
7bb4699c0b184f6daf1096aac9dc18ee1168b892
|
[
"MIT"
] | 1
|
2021-05-09T11:43:01.000Z
|
2021-05-09T11:43:01.000Z
|
import os
from common.params import Params
from common.basedir import BASEDIR
from selfdrive.version import comma_remote, tested_branch
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from selfdrive.car.tesla.readconfig import CarSettings
from cereal import car, log
EventName = car.CarEvent.EventName
HwType = log.HealthData.HwType
def get_startup_event(car_recognized, controller_available):
if comma_remote and tested_branch:
event = EventName.startup
else:
event = EventName.startupMaster
if not car_recognized:
event = EventName.startupNoCar
elif car_recognized and not controller_available:
event = EventName.startupNoControl
return event
def get_one_can(logcan):
while True:
can = messaging.recv_one_retry(logcan)
if len(can.can) > 0:
return can
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
# - keys are all the car names for which we have an interface
# - values are lists of specific car models for a given car (an illustrative shape is sketched just after this function)
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
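# Illustrative shape of the dict returned by _get_interface_names(); the brand and
# model strings below are examples only, not a verified list:
#   {'toyota': ['TOYOTA PRIUS 2017', ...], 'honda': ['HONDA CIVIC 2016', ...]}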
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan, has_relay):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
if has_relay and not fixed_fingerprint and not skip_fw_query:
# VIN query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
fw_candidates, car_fw = set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_known_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
while not done:
a = get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
# Toyota needs higher time to fingerprint, since DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1:
if frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
if (car_fingerprint is None) and CarSettings().forceFingerprintTesla:
print ("Fingerprinting Failed: Returning Tesla (based on branch)")
car_fingerprint = "TESLA MODEL S"
vin = "TESLAFAKEVIN12345"
# bail if no cars left or we've been waiting for more than 2s
failed = all(len(cc) == 0 for cc in candidate_cars.values()) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.warning("fingerprinted %s", car_fingerprint)
return car_fingerprint, finger, vin, car_fw, source
def get_car(logcan, sendcan, has_relay=False):
if CarSettings().forceFingerprintTesla:
candidate="TESLA MODEL S"
fingerprints=["","",""]
vin="TESLAFORCED123456"
#BB
car_fw = []
source=car.CarParams.FingerprintSource.fixed
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
else:
candidate, fingerprints, vin, car_fw, source = fingerprint(logcan, sendcan, has_relay)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
return CarInterface(car_params, CarController, CarState), car_params
| 35.336634
| 110
| 0.711544
|
10f1cba6d33e9ed48b7403ea80cf6822f1033131
| 2,176
|
py
|
Python
|
app/contents/management/commands/crawling.py
|
hbyyy/Netflix_Clone_Backend
|
ae0d2bbc436543e32f43bdb16c890f60dee02309
|
[
"MIT"
] | 2
|
2020-03-19T17:17:15.000Z
|
2020-03-19T17:18:27.000Z
|
app/contents/management/commands/crawling.py
|
hbyyy/Netflix_Clone_Backend
|
ae0d2bbc436543e32f43bdb16c890f60dee02309
|
[
"MIT"
] | 58
|
2020-03-17T05:20:58.000Z
|
2022-01-13T02:33:53.000Z
|
app/contents/management/commands/crawling.py
|
hbyyy/Netflix_Clone_Backend
|
ae0d2bbc436543e32f43bdb16c890f60dee02309
|
[
"MIT"
] | 7
|
2020-04-04T12:33:40.000Z
|
2022-03-28T05:56:34.000Z
|
import mimetypes
import magic
import requests
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import BaseCommand
from contents.management.commands.crawler import get_url, get_item, get_page_url
from contents.models import Contents, Category, Actor, Director
class Command(BaseCommand):
def handle(self, *args, **options):
page_url_list = get_page_url()
all_url_list = []
for url in page_url_list:
all_url_list.append(get_url(url))
for url_list in all_url_list:
for url in url_list:
item = get_item(url)
if not item:
continue
contents, is_create = Contents.objects.get_or_create(contents_title=item['title'])
if is_create:
response = requests.get(item['image_url'])
binary_data = response.content
mime_type = magic.from_buffer(binary_data, mime=True)
ext = mimetypes.guess_extension(mime_type)
file = SimpleUploadedFile(f'{item["title"]}{ext}', binary_data)
contents.contents_summary = item['summary']
contents.contents_title_english = item['title_english']
contents.contents_image = file
contents.contents_rating = item['rating']
contents.contents_length = item['length']
contents.contents_pub_year = item['pub_year']
contents.save()
for category in item['genre']:
c1, _ = Category.objects.get_or_create(category_name=category)
contents.categories.add(c1)
for actor in item['actor']:
a1, _ = Actor.objects.get_or_create(actor_name=actor)
contents.actors.add(a1)
for director in item['director']:
d1, _ = Director.objects.get_or_create(director_name=director)
contents.directors.add(d1)
return self.stdout.write('크롤링 완료')  # "Crawling complete"
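# Hedged usage note, not part of the original command: assuming the `contents` app is
# listed in INSTALLED_APPS, this management command would normally be invoked as
#   python manage.py crawling
# following the standard Django convention for modules under management/commands/.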
| 39.563636
| 98
| 0.578125
|
6ecd6dcb3b02a8a828bd4ac60007fecaad5599ac
| 784
|
py
|
Python
|
test/tests.py
|
Triballian/candiapps
|
0602039e48b51e2752da6f8a4a3d93cfeb29b3cf
|
[
"MIT"
] | null | null | null |
test/tests.py
|
Triballian/candiapps
|
0602039e48b51e2752da6f8a4a3d93cfeb29b3cf
|
[
"MIT"
] | null | null | null |
test/tests.py
|
Triballian/candiapps
|
0602039e48b51e2752da6f8a4a3d93cfeb29b3cf
|
[
"MIT"
] | null | null | null |
# tests
import os
os.chdir('\\Users\\Noe\\workspace\\stakenanny\\candiapps')
#import utils.py
import imp
imp.load_source('utils','utils.py')
import utils
import unittest
class NewConfTest(unittest.TestCase):
def test_can_grab_env_from_conf_n_use_it_later(self):
self.assertIn('turbostake, bottlecaps', utils.envars['coinlist'])
self.assertIn('coinlist', utils.envars)
self.assertEqual(utils.envars['coinlist'], 'turbostake, bottlecaps')
self.assertEqual(utils.envars['gitdir'], '/Program Files/Git/bin')
self.assertEqual(utils.envars['gitexe'], 'git.exe')
self.assertEqual(utils.envars['sclone'], '/Users/Noe/workspace/cvim')
#self.fail('Finish the test!')
if __name__ == '__main__':
unittest.main()
| 25.290323
| 77
| 0.686224
|
9e3bde29ab38fc23b45d2b9145a0b6d47a73f432
| 182
|
py
|
Python
|
project/web/steps/home.py
|
albalabkin/PyCats
|
b3fa09e116f38cebbb66faef16785e2795d80aa9
|
[
"Apache-2.0"
] | null | null | null |
project/web/steps/home.py
|
albalabkin/PyCats
|
b3fa09e116f38cebbb66faef16785e2795d80aa9
|
[
"Apache-2.0"
] | null | null | null |
project/web/steps/home.py
|
albalabkin/PyCats
|
b3fa09e116f38cebbb66faef16785e2795d80aa9
|
[
"Apache-2.0"
] | null | null | null |
from project.web.pages.home_page import HomePage
class HomePageSteps(HomePage):
def get_api_key(self):
self.click_api_keys_menu()
return self.lbl_api_key.text
| 20.222222
| 48
| 0.736264
|
04139be3b8c478496806435379b8835c00d904b8
| 1,166
|
py
|
Python
|
bcipy/language_model/lm_modes.py
|
theGreenJedi/BciPy
|
222ac9b79f1ab3374e888be9e9b8d86f88d1bc82
|
[
"MIT"
] | null | null | null |
bcipy/language_model/lm_modes.py
|
theGreenJedi/BciPy
|
222ac9b79f1ab3374e888be9e9b8d86f88d1bc82
|
[
"MIT"
] | 5
|
2021-06-08T23:56:19.000Z
|
2022-03-12T00:57:00.000Z
|
bcipy/language_model/lm_modes.py
|
theGreenJedi/BciPy
|
222ac9b79f1ab3374e888be9e9b8d86f88d1bc82
|
[
"MIT"
] | null | null | null |
from enum import Enum
from bcipy.language_model.lm_server import LmServerConfig
from bcipy.language_model import oclm_language_model
from bcipy.language_model import prelm_language_model
from bcipy.helpers.system_utils import dot
class LmType(Enum):
"""Enum of the registered language model types. The types are associated
with constructors for creating the model.
Ex.
>>> LmType.PRELM.model()
"""
PRELM = prelm_language_model.LangModel
OCLM = oclm_language_model.LangModel
# pylint: disable=unused-argument,protected-access
def __new__(cls, *args, **kwds):
"""Autoincrements the value of each item added to the enum."""
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __init__(self, model):
self.model = model
def LangModel(lmtype: LmType, logfile: str = "log", port: int = None):
"""Creates a new Language Model given the LmType."""
assert lmtype, "Language Model type is required"
model = lmtype.model
config = model.DEFAULT_CONFIG
if port:
config.port = port
return model(config, logfile)
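# Minimal usage sketch (hypothetical log file name and port; the real values depend
# on the deployment):
#   lm = LangModel(LmType.PRELM, logfile="prelm_session.log", port=5000)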
| 30.684211
| 76
| 0.698971
|
3f2dc25da6b49b6bcb3188ad86a123afc1d829b7
| 664
|
py
|
Python
|
FLASH4.2.1_save/tools/yt/sphere_quantities.py
|
mtsafarzadeh/FLASHverES
|
4c644e63efa7ee49e35293acb72dd2ea75da77f4
|
[
"Apache-2.0"
] | 1
|
2021-04-22T09:24:08.000Z
|
2021-04-22T09:24:08.000Z
|
FLASH4.2.1_save/tools/yt/sphere_quantities.py
|
mtsafarzadeh/FLASHverES
|
4c644e63efa7ee49e35293acb72dd2ea75da77f4
|
[
"Apache-2.0"
] | null | null | null |
FLASH4.2.1_save/tools/yt/sphere_quantities.py
|
mtsafarzadeh/FLASHverES
|
4c644e63efa7ee49e35293acb72dd2ea75da77f4
|
[
"Apache-2.0"
] | null | null | null |
from yt.mods import *
pf = load("sloshing_low_res_hdf5_plt_cnt_0400")
# Define a sphere object with a center at the domain center and a radius of 100 kpc.
# Note that we can specify a tuple (radius, unit) instead of calculating the radius
# in cgs units
sp = pf.h.sphere(pf.domain_center, (100.0, "kpc"))
# Compute the mass-weighted temperature in the sphere
t_mw = sp.quantities["WeightedAverageQuantity"]("Temperature", "CellMass")
# Compute the total gas mass in the sphere
m_gas = sp.quantities["TotalQuantity"]("CellMassMsun")
# Compute the angular momentum vector of the sphere
L_vec = sp.quantities["AngularMomentumVector"]()
print t_mw, m_gas, L_vec
| 33.2
| 84
| 0.760542
|
05f3bc4ce331b9b05e18f424616a7cd2eed2cdd8
| 154,883
|
bzl
|
Python
|
repositories.bzl
|
bsdnet/boskos
|
557df917e2fba88ec4708f15924dfe9bd41f6e35
|
[
"Apache-2.0"
] | null | null | null |
repositories.bzl
|
bsdnet/boskos
|
557df917e2fba88ec4708f15924dfe9bd41f6e35
|
[
"Apache-2.0"
] | null | null | null |
repositories.bzl
|
bsdnet/boskos
|
557df917e2fba88ec4708f15924dfe9bd41f6e35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_repositories():
go_repository(
name = "ag_pack_amqp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "pack.ag/amqp",
sum = "h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg=",
version = "v0.11.2",
)
go_repository(
name = "cc_mvdan_xurls_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "mvdan.cc/xurls/v2",
sum = "h1:r1zSOSNS/kqtpmATyMMMvaZ4/djsesbYz5kr0+qMRWc=",
version = "v2.0.0",
)
go_repository(
name = "co_honnef_go_tools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "honnef.co/go/tools",
sum = "h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=",
version = "v0.0.1-2019.2.3",
)
go_repository(
name = "com_github_agnivade_levenshtein",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/agnivade/levenshtein",
sum = "h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=",
version = "v1.0.1",
)
go_repository(
name = "com_github_alcortesm_tgz",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/alcortesm/tgz",
sum = "h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=",
version = "v0.0.0-20161220082320-9c5fe88206d7",
)
go_repository(
name = "com_github_alecthomas_template",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/alecthomas/template",
sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=",
version = "v0.0.0-20190718012654-fb15b899a751",
)
go_repository(
name = "com_github_alecthomas_units",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/alecthomas/units",
sum = "h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=",
version = "v0.0.0-20190717042225-c3de453c63f4",
)
go_repository(
name = "com_github_andreyvit_diff",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/andreyvit/diff",
sum = "h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=",
version = "v0.0.0-20170406064948-c7f18ee00883",
)
go_repository(
name = "com_github_andybalholm_brotli",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/andybalholm/brotli",
sum = "h1:bZ28Hqta7TFAK3Q08CMvv8y3/8ATaEqv2nGoc6yff6c=",
version = "v0.0.0-20190621154722-5f990b63d2d6",
)
go_repository(
name = "com_github_andygrunwald_go_gerrit",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/andygrunwald/go-gerrit",
sum = "h1:uUuUZipfD5nPl2L/i0I3N4iRKJcoO2CPjktaH/kP9gQ=",
version = "v0.0.0-20190120104749-174420ebee6c",
)
go_repository(
name = "com_github_anmitsu_go_shlex",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/anmitsu/go-shlex",
sum = "h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=",
version = "v0.0.0-20161002113705-648efa622239",
)
go_repository(
name = "com_github_apache_thrift",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/apache/thrift",
sum = "h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=",
version = "v0.12.0",
)
go_repository(
name = "com_github_armon_circbuf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/circbuf",
sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=",
version = "v0.0.0-20150827004946-bbbad097214e",
)
go_repository(
name = "com_github_armon_consul_api",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/consul-api",
sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
version = "v0.0.0-20180202201655-eb2c6b5be1b6",
)
go_repository(
name = "com_github_armon_go_metrics",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/go-metrics",
sum = "h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=",
version = "v0.0.0-20180917152333-f0300d1749da",
)
go_repository(
name = "com_github_armon_go_radix",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/go-radix",
sum = "h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=",
version = "v0.0.0-20180808171621-7fddfc383310",
)
go_repository(
name = "com_github_armon_go_socks5",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/go-socks5",
sum = "h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=",
version = "v0.0.0-20160902184237-e75332964ef5",
)
go_repository(
name = "com_github_asaskevich_govalidator",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/asaskevich/govalidator",
sum = "h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=",
version = "v0.0.0-20200108200545-475eaeb16496",
)
go_repository(
name = "com_github_aws_aws_k8s_tester",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/aws/aws-k8s-tester",
sum = "h1:Zr5NWiRK5fhmRIlhrsTwrY8yB488FyN6iulci2D7VaI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_aws_aws_sdk_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/aws/aws-sdk-go",
sum = "h1:i+sSesaMrSxiUt3NJddOApe2mXK+VNBgfcmRTvNFrXM=",
version = "v1.30.5",
)
go_repository(
name = "com_github_azure_azure_amqp_common_go_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/azure-amqp-common-go/v2",
sum = "h1:+QbFgmWCnPzdaRMfsI0Yb6GrRdBj5jVL8N3EXuEUcBQ=",
version = "v2.1.0",
)
go_repository(
name = "com_github_azure_azure_pipeline_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/azure-pipeline-go",
sum = "h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=",
version = "v0.2.1",
)
go_repository(
name = "com_github_azure_azure_sdk_for_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/azure-sdk-for-go",
sum = "h1:3D2O4g8AwDwyWkM1HpMFVux/ccQJmGJHXsE004Wsu1Q=",
version = "v38.0.0+incompatible",
)
go_repository(
name = "com_github_azure_azure_service_bus_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/azure-service-bus-go",
sum = "h1:G1qBLQvHCFDv9pcpgwgFkspzvnGknJRR0PYJ9ytY/JA=",
version = "v0.9.1",
)
go_repository(
name = "com_github_azure_azure_storage_blob_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/azure-storage-blob-go",
sum = "h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=",
version = "v0.8.0",
)
go_repository(
name = "com_github_azure_go_ansiterm",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-ansiterm",
sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=",
version = "v0.0.0-20170929234023-d6e3b3328b78",
)
go_repository(
name = "com_github_azure_go_autorest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest",
replace = "github.com/Azure/go-autorest",
sum = "h1:2Fxszbg492oAJrcvJlgyVaTqnQYRkxmEK6VPCLLVpBI=",
version = "v12.2.0+incompatible",
)
go_repository(
name = "com_github_azure_go_autorest_autorest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/autorest",
sum = "h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0=",
version = "v0.9.6",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_adal",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/autorest/adal",
sum = "h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=",
version = "v0.8.2",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_date",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/autorest/date",
sum = "h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=",
version = "v0.2.0",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_mocks",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/autorest/mocks",
sum = "h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=",
version = "v0.3.0",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_to",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/autorest/to",
sum = "h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=",
version = "v0.3.0",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_validation",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/autorest/validation",
sum = "h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_azure_go_autorest_logger",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/logger",
sum = "h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_azure_go_autorest_tracing",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Azure/go-autorest/tracing",
sum = "h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=",
version = "v0.5.0",
)
go_repository(
name = "com_github_bazelbuild_buildtools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bazelbuild/buildtools",
sum = "h1:3B/ZE1a6eEJ/4Jf/M6RM2KBouN8yKCUcMmXzSyWqa3g=",
version = "v0.0.0-20190917191645-69366ca98f89",
)
go_repository(
name = "com_github_beorn7_perks",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/beorn7/perks",
sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=",
version = "v1.0.1",
)
go_repository(
name = "com_github_bgentry_speakeasy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bgentry/speakeasy",
sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_bitly_go_simplejson",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bitly/go-simplejson",
sum = "h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=",
version = "v0.5.0",
)
go_repository(
name = "com_github_bketelsen_crypt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bketelsen/crypt",
sum = "h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=",
version = "v0.0.3-0.20200106085610-5cbc8cc4026c",
)
go_repository(
name = "com_github_blang_semver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/blang/semver",
sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=",
version = "v3.5.1+incompatible",
)
go_repository(
name = "com_github_bmizerany_assert",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bmizerany/assert",
sum = "h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=",
version = "v0.0.0-20160611221934-b7ed37b82869",
)
go_repository(
name = "com_github_bshuster_repo_logrus_logstash_hook",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bshuster-repo/logrus-logstash-hook",
sum = "h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=",
version = "v0.4.1",
)
go_repository(
name = "com_github_bugsnag_bugsnag_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bugsnag/bugsnag-go",
sum = "h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=",
version = "v0.0.0-20141110184014-b1d153021fcd",
)
go_repository(
name = "com_github_bugsnag_osext",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bugsnag/osext",
sum = "h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=",
version = "v0.0.0-20130617224835-0dd3f918b21b",
)
go_repository(
name = "com_github_bugsnag_panicwrap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bugsnag/panicwrap",
sum = "h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=",
version = "v0.0.0-20151223152923-e2c28503fcd0",
)
go_repository(
name = "com_github_burntsushi_toml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_bwmarrin_snowflake",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bwmarrin/snowflake",
sum = "h1:dRbqXFjM10uA3wdrVZ8Kh19uhciRMOroUYJ7qAqDLhY=",
version = "v0.0.0",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_cespare_xxhash",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_cespare_xxhash_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cespare/xxhash/v2",
sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=",
version = "v2.1.1",
)
go_repository(
name = "com_github_chai2010_gettext_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/chai2010/gettext-go",
sum = "h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=",
version = "v0.0.0-20160711120539-c6fed771bfd5",
)
go_repository(
name = "com_github_cihub_seelog",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cihub/seelog",
sum = "h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=",
version = "v0.0.0-20170130134532-f561c5e57575",
)
go_repository(
name = "com_github_clarketm_json",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/clarketm/json",
sum = "h1:0JketcMdLC16WGnRGJiNmTXuQznDEQaiknxSPRBxg+k=",
version = "v1.13.4",
)
go_repository(
name = "com_github_client9_misspell",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_cloudevents_sdk_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cloudevents/sdk-go",
sum = "h1:gS5I0s2qPmdc4GBPlUmzZU7RH30BaiOdcRJ1RkXnPrc=",
version = "v1.0.0",
)
go_repository(
name = "com_github_cockroachdb_datadriven",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cockroachdb/datadriven",
sum = "h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=",
version = "v0.0.0-20190809214429-80d97fb3cbaa",
)
go_repository(
name = "com_github_containerd_cgroups",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/cgroups",
sum = "h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=",
version = "v0.0.0-20190919134610-bf292b21730f",
)
go_repository(
name = "com_github_containerd_console",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/console",
sum = "h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=",
version = "v0.0.0-20180822173158-c12b1e7919c1",
)
go_repository(
name = "com_github_containerd_containerd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/containerd",
sum = "h1:LoIzb5y9x5l8VKAlyrbusNPXqBY0+kviRloxFUMFwKc=",
version = "v1.3.3",
)
go_repository(
name = "com_github_containerd_continuity",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/continuity",
sum = "h1:kIFnQBO7rQ0XkMe6xEwbybYHBEaWmh/f++laI6Emt7M=",
version = "v0.0.0-20200107194136-26c1120b8d41",
)
go_repository(
name = "com_github_containerd_fifo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/fifo",
sum = "h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=",
version = "v0.0.0-20190226154929-a9fb20d87448",
)
go_repository(
name = "com_github_containerd_go_runc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/go-runc",
sum = "h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw=",
version = "v0.0.0-20180907222934-5a6d9f37cfa3",
)
go_repository(
name = "com_github_containerd_ttrpc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/ttrpc",
sum = "h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4=",
version = "v0.0.0-20190828154514-0e0f228740de",
)
go_repository(
name = "com_github_containerd_typeurl",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/containerd/typeurl",
sum = "h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=",
version = "v0.0.0-20180627222232-a93fcdb778cd",
)
go_repository(
name = "com_github_coreos_bbolt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/bbolt",
sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
version = "v1.3.2",
)
go_repository(
name = "com_github_coreos_etcd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/etcd",
sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=",
version = "v3.3.10+incompatible",
)
go_repository(
name = "com_github_coreos_go_etcd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-etcd",
sum = "h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_coreos_go_oidc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-oidc",
sum = "h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_coreos_go_semver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-semver",
sum = "h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=",
version = "v0.3.0",
)
go_repository(
name = "com_github_coreos_go_systemd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-systemd",
sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
version = "v0.0.0-20190321100706-95778dfbb74e",
)
go_repository(
name = "com_github_coreos_pkg",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/pkg",
sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
version = "v0.0.0-20180928190104-399ea9e2e55f",
)
go_repository(
name = "com_github_cpuguy83_go_md2man",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cpuguy83/go-md2man",
sum = "h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=",
version = "v1.0.10",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_creack_pty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/creack/pty",
sum = "h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=",
version = "v1.1.7",
)
go_repository(
name = "com_github_cyphar_filepath_securejoin",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cyphar/filepath-securejoin",
sum = "h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=",
version = "v0.2.2",
)
go_repository(
name = "com_github_datadog_zstd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/DataDog/zstd",
sum = "h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=",
version = "v1.4.1",
)
go_repository(
name = "com_github_davecgh_go_spew",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_daviddengcn_go_colortext",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/daviddengcn/go-colortext",
sum = "h1:uVsMphB1eRx7xB1njzL3fuMdWRN8HtVzoUOItHMwv5c=",
version = "v0.0.0-20160507010035-511bcaf42ccd",
)
go_repository(
name = "com_github_deislabs_oras",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/deislabs/oras",
sum = "h1:If674KraJVpujYR00rzdi0QAmW4BxzMJPVAZJKuhQ0c=",
version = "v0.8.1",
)
go_repository(
name = "com_github_denisenkom_go_mssqldb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/denisenkom/go-mssqldb",
sum = "h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=",
version = "v0.0.0-20191124224453-732737034ffd",
)
go_repository(
name = "com_github_denverdino_aliyungo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/denverdino/aliyungo",
sum = "h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=",
version = "v0.0.0-20190125010748-a747050bb1ba",
)
go_repository(
name = "com_github_devigned_tab",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/devigned/tab",
sum = "h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=",
version = "v0.1.1",
)
go_repository(
name = "com_github_dgrijalva_jwt_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dgrijalva/jwt-go",
sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
version = "v3.2.0+incompatible",
)
go_repository(
name = "com_github_dgryski_go_sip13",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dgryski/go-sip13",
sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=",
version = "v0.0.0-20181026042036-e10d5fee7954",
)
go_repository(
name = "com_github_dimchansky_utfbom",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dimchansky/utfbom",
sum = "h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=",
version = "v1.1.0",
)
go_repository(
name = "com_github_djherbis_atime",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/djherbis/atime",
sum = "h1:ySLvBAM0EvOGaX7TI4dAM5lWj+RdJUCKtGSEHN8SGBg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_dnaeon_go_vcr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dnaeon/go-vcr",
sum = "h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=",
version = "v1.0.1",
)
go_repository(
name = "com_github_docker_cli",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/cli",
sum = "h1:FwssHbCDJD025h+BchanCwE1Q8fyMgqDr2mOQAWOLGw=",
version = "v0.0.0-20200130152716-5d0cf8839492",
)
go_repository(
name = "com_github_docker_distribution",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/distribution",
sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=",
version = "v2.7.1+incompatible",
)
go_repository(
name = "com_github_docker_docker",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/docker",
sum = "h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=",
version = "v1.4.2-0.20200203170920-46ec8731fbce",
)
go_repository(
name = "com_github_docker_docker_credential_helpers",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/docker-credential-helpers",
sum = "h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=",
version = "v0.6.3",
)
go_repository(
name = "com_github_docker_go_connections",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/go-connections",
sum = "h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=",
version = "v0.4.0",
)
go_repository(
name = "com_github_docker_go_metrics",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/go-metrics",
sum = "h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=",
version = "v0.0.0-20180209012529-399ea8c73916",
)
go_repository(
name = "com_github_docker_go_units",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/go-units",
sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=",
version = "v0.4.0",
)
go_repository(
name = "com_github_docker_libtrust",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/libtrust",
sum = "h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=",
version = "v0.0.0-20150114040149-fa567046d9b1",
)
go_repository(
name = "com_github_docker_spdystream",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docker/spdystream",
sum = "h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=",
version = "v0.0.0-20160310174837-449fdfce4d96",
)
go_repository(
name = "com_github_docopt_docopt_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/docopt/docopt-go",
sum = "h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=",
version = "v0.0.0-20180111231733-ee0de3bc6815",
)
go_repository(
name = "com_github_dsnet_compress",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dsnet/compress",
sum = "h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=",
version = "v0.0.1",
)
go_repository(
name = "com_github_dsnet_golib",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dsnet/golib",
sum = "h1:tFh1tRc4CA31yP6qDcu+Trax5wW5GuMxvkIba07qVLY=",
version = "v0.0.0-20171103203638-1ea166775780",
)
go_repository(
name = "com_github_dustin_go_humanize",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dustin/go-humanize",
sum = "h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_eapache_go_resiliency",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/eapache/go-resiliency",
sum = "h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=",
version = "v1.2.0",
)
go_repository(
name = "com_github_eapache_go_xerial_snappy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/eapache/go-xerial-snappy",
sum = "h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=",
version = "v0.0.0-20180814174437-776d5712da21",
)
go_repository(
name = "com_github_eapache_queue",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/eapache/queue",
sum = "h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=",
version = "v1.1.0",
)
go_repository(
name = "com_github_elazarl_goproxy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/elazarl/goproxy",
sum = "h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=",
version = "v0.0.0-20170405201442-c4fc26588b6e",
)
go_repository(
name = "com_github_emicklei_go_restful",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/emicklei/go-restful",
sum = "h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=",
version = "v2.9.5+incompatible",
)
go_repository(
name = "com_github_emirpasic_gods",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/emirpasic/gods",
sum = "h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=",
version = "v1.12.0",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=",
version = "v0.9.1-0.20191026205805-5f8ba28d4473",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_erikstmartin_go_testdb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/erikstmartin/go-testdb",
sum = "h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=",
version = "v0.0.0-20160219214506-8d10e4a1bae5",
)
go_repository(
name = "com_github_evanphx_json_patch",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/evanphx/json-patch",
sum = "h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=",
version = "v4.5.0+incompatible",
)
go_repository(
name = "com_github_exponent_io_jsonpath",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/exponent-io/jsonpath",
sum = "h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=",
version = "v0.0.0-20151013193312-d6023ce2651d",
)
go_repository(
name = "com_github_fatih_camelcase",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fatih/camelcase",
sum = "h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_fatih_color",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fatih/color",
sum = "h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=",
version = "v1.9.0",
)
go_repository(
name = "com_github_flynn_go_shlex",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/flynn/go-shlex",
sum = "h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=",
version = "v0.0.0-20150515145356-3f9db97f8568",
)
go_repository(
name = "com_github_fortytw2_leaktest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fortytw2/leaktest",
sum = "h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=",
version = "v1.3.0",
)
go_repository(
name = "com_github_frankban_quicktest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/frankban/quicktest",
sum = "h1:PvpJR0Uq8SdX+zagCMsarBMlhz6ysGTf1+pRmCsRXqY=",
version = "v1.8.1",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=",
version = "v1.4.7",
)
go_repository(
name = "com_github_fsouza_fake_gcs_server",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fsouza/fake-gcs-server",
sum = "h1:3iml5UHzQtk3cpnYfqW16Ia+1xSuu9tc4BElZu5470M=",
version = "v0.0.0-20180612165233-e85be23bdaa8",
)
go_repository(
name = "com_github_garyburd_redigo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/garyburd/redigo",
sum = "h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=",
version = "v0.0.0-20150301180006-535138d7bcd7",
)
go_repository(
name = "com_github_ghodss_yaml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_gliderlabs_ssh",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gliderlabs/ssh",
sum = "h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=",
version = "v0.2.2",
)
go_repository(
name = "com_github_globalsign_mgo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/globalsign/mgo",
sum = "h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=",
version = "v0.0.0-20181015135952-eeefdecb41b8",
)
go_repository(
name = "com_github_go_bindata_go_bindata_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-bindata/go-bindata/v3",
sum = "h1:F0nVttLC3ws0ojc7p60veTurcOm//D4QBODNM7EGrCI=",
version = "v3.1.3",
)
go_repository(
name = "com_github_go_gl_glfw",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-gl/glfw",
sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
version = "v0.0.0-20190409004039-e6da0acd62b1",
)
go_repository(
name = "com_github_go_ini_ini",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-ini/ini",
sum = "h1:0wVcG9udk2C3TGgmdIGKK9ScOZHZB5nbG+gwji9fhhc=",
version = "v1.55.0",
)
go_repository(
name = "com_github_go_kit_kit",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-kit/kit",
sum = "h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=",
version = "v0.9.0",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=",
version = "v0.4.0",
)
go_repository(
name = "com_github_go_logr_logr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-logr/logr",
sum = "h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=",
version = "v0.1.0",
)
go_repository(
name = "com_github_go_logr_zapr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-logr/zapr",
sum = "h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE=",
version = "v0.1.1",
)
go_repository(
name = "com_github_go_openapi_analysis",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/analysis",
sum = "h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=",
version = "v0.19.5",
)
go_repository(
name = "com_github_go_openapi_errors",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/errors",
sum = "h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=",
version = "v0.19.2",
)
go_repository(
name = "com_github_go_openapi_jsonpointer",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/jsonpointer",
sum = "h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_jsonreference",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/jsonreference",
sum = "h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_loads",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/loads",
sum = "h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=",
version = "v0.19.4",
)
go_repository(
name = "com_github_go_openapi_runtime",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/runtime",
sum = "h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=",
version = "v0.19.4",
)
go_repository(
name = "com_github_go_openapi_spec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/spec",
sum = "h1:rMMMj8cV38KVXK7SFc+I2MWClbEfbK705+j+dyqun5g=",
version = "v0.19.6",
)
go_repository(
name = "com_github_go_openapi_strfmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/strfmt",
sum = "h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_swag",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/swag",
sum = "h1:VRuXN2EnMSsZdauzdss6JBC29YotDqG59BZ+tdlIL1s=",
version = "v0.19.7",
)
go_repository(
name = "com_github_go_openapi_validate",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-openapi/validate",
sum = "h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0=",
version = "v0.19.5",
)
go_repository(
name = "com_github_go_sql_driver_mysql",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-sql-driver/mysql",
sum = "h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=",
version = "v1.5.0",
)
go_repository(
name = "com_github_go_stack_stack",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_go_test_deep",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-test/deep",
sum = "h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=",
version = "v1.0.4",
)
go_repository(
name = "com_github_gobuffalo_envy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gobuffalo/envy",
sum = "h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8=",
version = "v1.6.5",
)
go_repository(
name = "com_github_gobwas_glob",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gobwas/glob",
sum = "h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=",
version = "v0.2.3",
)
go_repository(
name = "com_github_godbus_dbus",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/godbus/dbus",
sum = "h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=",
version = "v0.0.0-20190422162347-ade71ed3457e",
)
go_repository(
name = "com_github_gofrs_flock",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gofrs/flock",
sum = "h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=",
version = "v0.7.1",
)
go_repository(
name = "com_github_gogo_googleapis",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gogo/googleapis",
sum = "h1:kFkMAZBNAn4j7K0GiZr8cRYzejq68VbheufiV3YuyFI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_gogo_protobuf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gogo/protobuf",
sum = "h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=",
version = "v1.3.1",
)
go_repository(
name = "com_github_golang_gddo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/gddo",
sum = "h1:KRMr9A3qfbVM7iV/WcLY/rL5LICqwMHLhwRXKu99fXw=",
version = "v0.0.0-20190419222130-af0f2af80721",
)
go_repository(
name = "com_github_golang_glog",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/groupcache",
sum = "h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=",
version = "v0.0.0-20190702054246-869f871628b6",
)
go_repository(
name = "com_github_golang_mock",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/mock",
sum = "h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=",
version = "v1.3.1",
)
go_repository(
name = "com_github_golang_protobuf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/protobuf",
sum = "h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=",
version = "v1.3.4",
)
go_repository(
name = "com_github_golang_snappy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/snappy",
sum = "h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=",
version = "v0.0.1",
)
go_repository(
name = "com_github_golang_sql_civil",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang-sql/civil",
sum = "h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=",
version = "v0.0.0-20190719163853-cb61b32ac6fe",
)
go_repository(
name = "com_github_golangplus_bytes",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangplus/bytes",
sum = "h1:7xqw01UYS+KCI25bMrPxwNYkSns2Db1ziQPpVq99FpE=",
version = "v0.0.0-20160111154220-45c989fe5450",
)
go_repository(
name = "com_github_golangplus_fmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangplus/fmt",
sum = "h1:f5gsjBiF9tRRVomCvrkGMMWI8W1f2OBFar2c5oakAP0=",
version = "v0.0.0-20150411045040-2a5d6d7d2995",
)
go_repository(
name = "com_github_golangplus_testing",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangplus/testing",
sum = "h1:KhcknUwkWHKZPbFy2P7jH5LKJ3La+0ZeknkkmrSgqb0=",
version = "v0.0.0-20180327235837-af21d9c3145e",
)
go_repository(
name = "com_github_gomodule_redigo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gomodule/redigo",
sum = "h1:ZKld1VOtsGhAe37E7wMxEDgAlGM5dvFY+DiOhSkhP9Y=",
version = "v1.7.0",
)
go_repository(
name = "com_github_google_btree",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_go_cmp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-cmp",
sum = "h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=",
version = "v0.4.0",
)
go_repository(
name = "com_github_google_go_containerregistry",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-containerregistry",
sum = "h1:oGqapkPUiypdS9ch/Vu0npPe03RQ0BhVDYli+OEKNAA=",
version = "v0.0.0-20200115214256-379933c9c22b",
)
go_repository(
name = "com_github_google_go_github",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-github",
sum = "h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=",
version = "v17.0.0+incompatible",
)
go_repository(
name = "com_github_google_go_licenses",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-licenses",
sum = "h1:ZIb3nb+/mHAGRkyuxfPykmYdUi21mr8YTGpr/xGPJ8o=",
version = "v0.0.0-20191112164736-212ea350c932",
)
go_repository(
name = "com_github_google_go_querystring",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-querystring",
sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_go_replayers_grpcreplay",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-replayers/grpcreplay",
sum = "h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=",
version = "v0.1.0",
)
go_repository(
name = "com_github_google_go_replayers_httpreplay",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-replayers/httpreplay",
sum = "h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=",
version = "v0.1.0",
)
go_repository(
name = "com_github_google_gofuzz",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/gofuzz",
sum = "h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=",
version = "v1.1.0",
)
go_repository(
name = "com_github_google_licenseclassifier",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/licenseclassifier",
sum = "h1:nVgx26pAe6l/02mYomOuZssv28XkacGw/0WeiTVorqw=",
version = "v0.0.0-20190926221455-842c0d70d702",
)
go_repository(
name = "com_github_google_martian",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/martian",
sum = "h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE=",
version = "v2.1.1-0.20190517191504-25dcb96d9e51+incompatible",
)
go_repository(
name = "com_github_google_pprof",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/pprof",
sum = "h1:XTnP8fJpa4Kvpw2qARB4KS9izqxPS0Sd92cDlY3uk+w=",
version = "v0.0.0-20190723021845-34ac40c74b70",
)
go_repository(
name = "com_github_google_renameio",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/renameio",
sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
version = "v0.1.0",
)
go_repository(
name = "com_github_google_subcommands",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/subcommands",
sum = "h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k=",
version = "v1.0.1",
)
go_repository(
name = "com_github_google_uuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/uuid",
sum = "h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=",
version = "v1.1.1",
)
go_repository(
name = "com_github_google_wire",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/wire",
sum = "h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=",
version = "v0.3.0",
)
go_repository(
name = "com_github_googleapis_gax_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/googleapis/gax-go",
sum = "h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=",
version = "v2.0.2+incompatible",
)
go_repository(
name = "com_github_googleapis_gax_go_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/googleapis/gax-go/v2",
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
version = "v2.0.5",
)
go_repository(
name = "com_github_googleapis_gnostic",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/googleapis/gnostic",
sum = "h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=",
version = "v0.3.1",
)
go_repository(
name = "com_github_googlecloudplatform_cloud_builders_gcs_fetcher",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher",
sum = "h1:Pjo3SOZigEnIGevhFqcbFndnqyCH8WimcREd3hRM9vU=",
version = "v0.0.0-20191203181535-308b93ad1f39",
)
go_repository(
name = "com_github_googlecloudplatform_cloudsql_proxy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/GoogleCloudPlatform/cloudsql-proxy",
sum = "h1:sTOp2Ajiew5XIH92YSdwhYc+bgpUX5j5TKK/Ac8Saw8=",
version = "v0.0.0-20191009163259-e802c2cb94ae",
)
go_repository(
name = "com_github_googlecloudplatform_k8s_cloud_provider",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/GoogleCloudPlatform/k8s-cloud-provider",
sum = "h1:N7lSsF+R7wSulUADi36SInSQA3RvfO/XclHQfedr0qk=",
version = "v0.0.0-20190822182118-27a4ced34534",
)
go_repository(
name = "com_github_googlecloudplatform_testgrid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/GoogleCloudPlatform/testgrid",
sum = "h1:tKM75ScxinVqDkguwG5AnsQZn9XCNpxAdEey1OfJgiE=",
version = "v0.0.7",
)
go_repository(
name = "com_github_gophercloud_gophercloud",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gophercloud/gophercloud",
sum = "h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gopherjs_gopherjs",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gopherjs/gopherjs",
sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=",
version = "v0.0.0-20181017120253-0766667cb4d1",
)
go_repository(
name = "com_github_gorilla_context",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/context",
sum = "h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=",
version = "v1.1.1",
)
go_repository(
name = "com_github_gorilla_csrf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/csrf",
sum = "h1:QqQ/OWwuFp4jMKgBFAzJVW3FMULdyUW7JoM4pEWuqKg=",
version = "v1.6.2",
)
go_repository(
name = "com_github_gorilla_handlers",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/handlers",
sum = "h1:893HsJqtxp9z1SF76gg6hY70hRY1wVlTSnC/h1yUDCo=",
version = "v0.0.0-20150720190736-60c7bfde3e33",
)
go_repository(
name = "com_github_gorilla_mux",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/mux",
sum = "h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=",
version = "v1.7.3",
)
go_repository(
name = "com_github_gorilla_securecookie",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/securecookie",
sum = "h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=",
version = "v1.1.1",
)
go_repository(
name = "com_github_gorilla_sessions",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/sessions",
sum = "h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=",
version = "v1.2.0",
)
go_repository(
name = "com_github_gorilla_websocket",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/websocket",
sum = "h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gosuri_uitable",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gosuri/uitable",
sum = "h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=",
version = "v0.0.4",
)
go_repository(
name = "com_github_gregjones_httpcache",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gregjones/httpcache",
sum = "h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q=",
version = "v0.0.0-20190212212710-3befbb6ad0cc",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_middleware",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
sum = "h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=",
version = "v1.0.1-0.20190118093823-f849b5445de4",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_prometheus",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
version = "v1.2.0",
)
go_repository(
name = "com_github_grpc_ecosystem_grpc_gateway",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/grpc-ecosystem/grpc-gateway",
sum = "h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=",
version = "v1.9.5",
)
go_repository(
name = "com_github_h2non_gock",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/h2non/gock",
sum = "h1:17gCehSo8ZOgEsFKpQgqHiR7VLyjxdAG3lkhVvO9QZU=",
version = "v1.0.9",
)
go_repository(
name = "com_github_hashicorp_consul_api",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/consul/api",
sum = "h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=",
version = "v1.1.0",
)
go_repository(
name = "com_github_hashicorp_consul_sdk",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/consul/sdk",
sum = "h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=",
version = "v0.1.1",
)
go_repository(
name = "com_github_hashicorp_errwrap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/errwrap",
sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_cleanhttp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-cleanhttp",
sum = "h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hashicorp_go_immutable_radix",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-immutable-radix",
sum = "h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_msgpack",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-msgpack",
sum = "h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=",
version = "v0.5.3",
)
go_repository(
name = "com_github_hashicorp_go_multierror",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-multierror",
sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_net",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go.net",
sum = "h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=",
version = "v0.0.1",
)
go_repository(
name = "com_github_hashicorp_go_rootcerts",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-rootcerts",
sum = "h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_sockaddr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-sockaddr",
sum = "h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_syslog",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-syslog",
sum = "h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_uuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-uuid",
sum = "h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=",
version = "v1.0.1",
)
go_repository(
name = "com_github_hashicorp_golang_lru",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/golang-lru",
sum = "h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=",
version = "v0.5.4",
)
go_repository(
name = "com_github_hashicorp_hcl",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/hcl",
sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_logutils",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/logutils",
sum = "h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_mdns",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/mdns",
sum = "h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_memberlist",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/memberlist",
sum = "h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=",
version = "v0.1.3",
)
go_repository(
name = "com_github_hashicorp_serf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/serf",
sum = "h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=",
version = "v0.8.2",
)
go_repository(
name = "com_github_hpcloud_tail",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hpcloud/tail",
sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_huandu_xstrings",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/huandu/xstrings",
sum = "h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=",
version = "v1.2.0",
)
go_repository(
name = "com_github_imdario_mergo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/imdario/mergo",
sum = "h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=",
version = "v0.3.8",
)
go_repository(
name = "com_github_inconshreveable_mousetrap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/inconshreveable/mousetrap",
sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_influxdata_influxdb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/influxdata/influxdb",
sum = "h1:AciJ2ei/llFRundm7CtqwF6B2aOds1A7QG3sMW8QiaQ=",
version = "v0.0.0-20161215172503-049f9b42e9a5",
)
go_repository(
name = "com_github_jbenet_go_context",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jbenet/go-context",
sum = "h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=",
version = "v0.0.0-20150711004518-d14ea06fba99",
)
go_repository(
name = "com_github_jcmturner_gofork",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jcmturner/gofork",
sum = "h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jenkins_x_go_scm",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jenkins-x/go-scm",
sum = "h1:+HIEkc/Dzdq0buJF8q0Keip2wexW40BfkrDXKx88T78=",
version = "v1.5.79",
)
go_repository(
name = "com_github_jessevdk_go_flags",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jessevdk/go-flags",
sum = "h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=",
version = "v1.4.0",
)
go_repository(
name = "com_github_jinzhu_gorm",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jinzhu/gorm",
sum = "h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q=",
version = "v1.9.12",
)
go_repository(
name = "com_github_jinzhu_inflection",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jinzhu/inflection",
sum = "h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jinzhu_now",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jinzhu/now",
sum = "h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E=",
version = "v1.1.1",
)
go_repository(
name = "com_github_jmespath_go_jmespath",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jmespath/go-jmespath",
sum = "h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=",
version = "v0.3.0",
)
go_repository(
name = "com_github_joefitzgerald_rainbow_reporter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/joefitzgerald/rainbow-reporter",
sum = "h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_joho_godotenv",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/joho/godotenv",
sum = "h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=",
version = "v1.3.0",
)
go_repository(
name = "com_github_jonboulle_clockwork",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jonboulle/clockwork",
sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_json_iterator_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/json-iterator/go",
sum = "h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=",
version = "v1.1.9",
)
go_repository(
name = "com_github_jstemmer_go_junit_report",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jstemmer/go-junit-report",
sum = "h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=",
version = "v0.0.0-20190106144839-af01ea7f8024",
)
go_repository(
name = "com_github_jtolds_gls",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jtolds/gls",
sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=",
version = "v4.20.0+incompatible",
)
go_repository(
name = "com_github_julienschmidt_httprouter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/julienschmidt/httprouter",
sum = "h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=",
version = "v1.2.0",
)
go_repository(
name = "com_github_kelseyhightower_envconfig",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kelseyhightower/envconfig",
sum = "h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=",
version = "v1.4.0",
)
go_repository(
name = "com_github_kevinburke_ssh_config",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kevinburke/ssh_config",
sum = "h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=",
version = "v0.0.0-20190725054713-01f96b0aa0cd",
)
go_repository(
name = "com_github_kisielk_errcheck",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kisielk/errcheck",
sum = "h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=",
version = "v1.2.0",
)
go_repository(
name = "com_github_kisielk_gotool",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kisielk/gotool",
sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_klauspost_compress",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/klauspost/compress",
sum = "h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=",
version = "v1.10.2",
)
go_repository(
name = "com_github_klauspost_cpuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/klauspost/cpuid",
sum = "h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=",
version = "v1.2.0",
)
go_repository(
name = "com_github_klauspost_pgzip",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/klauspost/pgzip",
sum = "h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=",
version = "v1.2.1",
)
go_repository(
name = "com_github_konsorten_go_windows_terminal_sequences",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/konsorten/go-windows-terminal-sequences",
sum = "h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=",
version = "v1.0.2",
)
go_repository(
name = "com_github_kr_logfmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/logfmt",
sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
version = "v0.0.0-20140226030751-b84e30acd515",
)
go_repository(
name = "com_github_kr_pretty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/pretty",
sum = "h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=",
version = "v0.2.0",
)
go_repository(
name = "com_github_kr_pty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/pty",
sum = "h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=",
version = "v1.1.8",
)
go_repository(
name = "com_github_kr_text",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/text",
sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
version = "v0.1.0",
)
go_repository(
name = "com_github_lib_pq",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/lib/pq",
sum = "h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=",
version = "v1.1.1",
)
go_repository(
name = "com_github_liggitt_tabwriter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/liggitt/tabwriter",
sum = "h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=",
version = "v0.0.0-20181228230101-89fcab3d43de",
)
go_repository(
name = "com_github_lithammer_dedent",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/lithammer/dedent",
sum = "h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=",
version = "v1.1.0",
)
go_repository(
name = "com_github_lyft_protoc_gen_validate",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/lyft/protoc-gen-validate",
sum = "h1:KNt/RhmQTOLr7Aj8PsJ7mTronaFyx80mRTT9qF261dA=",
version = "v0.0.13",
)
go_repository(
name = "com_github_magiconair_properties",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/magiconair/properties",
sum = "h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=",
version = "v1.8.1",
)
go_repository(
name = "com_github_mailru_easyjson",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mailru/easyjson",
sum = "h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=",
version = "v0.7.0",
)
go_repository(
name = "com_github_makenowjust_heredoc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/MakeNowJust/heredoc",
sum = "h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=",
version = "v0.0.0-20170808103936-bb23615498cd",
)
go_repository(
name = "com_github_markbates_inflect",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/markbates/inflect",
sum = "h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g=",
version = "v1.0.4",
)
go_repository(
name = "com_github_marstr_guid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/marstr/guid",
sum = "h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_masterminds_goutils",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Masterminds/goutils",
sum = "h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=",
version = "v1.1.0",
)
go_repository(
name = "com_github_masterminds_semver_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Masterminds/semver/v3",
sum = "h1:znjIyLfpXEDQjOIEWh+ehwpTU14UzUPub3c3sm36u14=",
version = "v3.0.3",
)
go_repository(
name = "com_github_masterminds_sprig_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Masterminds/sprig/v3",
sum = "h1:wz22D0CiSctrliXiI9ZO3HoNApweeRGftyDN+BQa3B8=",
version = "v3.0.2",
)
go_repository(
name = "com_github_masterminds_vcs",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Masterminds/vcs",
sum = "h1:NL3G1X7/7xduQtA2sJLpVpfHTNBALVNSjob6KEjPXNQ=",
version = "v1.13.1",
)
go_repository(
name = "com_github_mattbaird_jsonpatch",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattbaird/jsonpatch",
sum = "h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8=",
version = "v0.0.0-20171005235357-81af80346b1a",
)
go_repository(
name = "com_github_mattn_go_colorable",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-colorable",
sum = "h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=",
version = "v0.1.4",
)
go_repository(
name = "com_github_mattn_go_ieproxy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-ieproxy",
sum = "h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=",
version = "v0.0.0-20190610004146-91bb50d98149",
)
go_repository(
name = "com_github_mattn_go_isatty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-isatty",
sum = "h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=",
version = "v0.0.11",
)
go_repository(
name = "com_github_mattn_go_runewidth",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-runewidth",
sum = "h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=",
version = "v0.0.8",
)
go_repository(
name = "com_github_mattn_go_shellwords",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-shellwords",
sum = "h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=",
version = "v1.0.9",
)
go_repository(
name = "com_github_mattn_go_sqlite3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-sqlite3",
sum = "h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=",
version = "v2.0.1+incompatible",
)
go_repository(
name = "com_github_mattn_go_zglob",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-zglob",
sum = "h1:xsEx/XUoVlI6yXjqBK062zYhRTZltCNmYPx6v+8DNaY=",
version = "v0.0.1",
)
go_repository(
name = "com_github_matttproud_golang_protobuf_extensions",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/matttproud/golang_protobuf_extensions",
sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=",
version = "v1.0.1",
)
go_repository(
name = "com_github_maxbrunsfeld_counterfeiter_v6",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/maxbrunsfeld/counterfeiter/v6",
sum = "h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=",
version = "v6.2.2",
)
go_repository(
name = "com_github_mholt_archiver_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mholt/archiver/v3",
sum = "h1:vWjhY8SQp5yzM9P6OJ/eZEkmi3UAbRrxCq48MxjAzig=",
version = "v3.3.0",
)
go_repository(
name = "com_github_microsoft_go_winio",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Microsoft/go-winio",
sum = "h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=",
version = "v0.4.15-0.20190919025122-fc70bd9a86b5",
)
go_repository(
name = "com_github_microsoft_hcsshim",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Microsoft/hcsshim",
sum = "h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=",
version = "v0.8.7",
)
go_repository(
name = "com_github_miekg_dns",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/miekg/dns",
sum = "h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=",
version = "v1.0.14",
)
go_repository(
name = "com_github_mitchellh_cli",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/cli",
sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_copystructure",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/copystructure",
sum = "h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_go_testing_interface",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/go-testing-interface",
sum = "h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_go_wordwrap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/go-wordwrap",
sum = "h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_gox",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/gox",
sum = "h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=",
version = "v0.4.0",
)
go_repository(
name = "com_github_mitchellh_iochan",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/iochan",
sum = "h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_ioprogress",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/ioprogress",
sum = "h1:Qa6dnn8DlasdXRnacluu8HzPts0S1I9zvvUPDbBnXFI=",
version = "v0.0.0-20180201004757-6a23b12fa88e",
)
go_repository(
name = "com_github_mitchellh_mapstructure",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/mapstructure",
sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=",
version = "v1.1.2",
)
go_repository(
name = "com_github_mitchellh_osext",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/osext",
sum = "h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=",
version = "v0.0.0-20151018003038-5e2d6d41470f",
)
go_repository(
name = "com_github_mitchellh_reflectwalk",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/reflectwalk",
sum = "h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_modern_go_concurrent",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/modern-go/concurrent",
sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=",
version = "v0.0.0-20180306012644-bacd9c7ef1dd",
)
go_repository(
name = "com_github_modern_go_reflect2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/modern-go/reflect2",
sum = "h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_morikuni_aec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/morikuni/aec",
sum = "h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=",
version = "v1.0.0",
)
go_repository(
name = "com_github_munnerz_goautoneg",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/munnerz/goautoneg",
sum = "h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=",
version = "v0.0.0-20191010083416-a7dc8b61c822",
)
go_repository(
name = "com_github_mwitkow_go_conntrack",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mwitkow/go-conntrack",
sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=",
version = "v0.0.0-20190716064945-2f068394615f",
)
go_repository(
name = "com_github_mxk_go_flowrate",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mxk/go-flowrate",
sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=",
version = "v0.0.0-20140419014527-cca7078d478f",
)
go_repository(
name = "com_github_nats_io_jwt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/jwt",
sum = "h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=",
version = "v0.3.2",
)
go_repository(
name = "com_github_nats_io_nats_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/nats.go",
sum = "h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=",
version = "v1.9.1",
)
go_repository(
name = "com_github_nats_io_nats_server_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/nats-server/v2",
sum = "h1:i2Ly0B+1+rzNZHHWtD4ZwKi+OU5l+uQo1iDHZ2PmiIc=",
version = "v2.1.2",
)
go_repository(
name = "com_github_nats_io_nkeys",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/nkeys",
sum = "h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=",
version = "v0.1.3",
)
go_repository(
name = "com_github_nats_io_nuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/nuid",
sum = "h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=",
version = "v1.0.1",
)
go_repository(
name = "com_github_nbio_st",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nbio/st",
sum = "h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=",
version = "v0.0.0-20140626010706-e9e8d9816f32",
)
go_repository(
name = "com_github_ncw_swift",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ncw/swift",
sum = "h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=",
version = "v1.0.47",
)
go_repository(
name = "com_github_nwaples_rardecode",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nwaples/rardecode",
sum = "h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMBOs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_nytimes_gziphandler",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/NYTimes/gziphandler",
sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=",
version = "v0.0.0-20170623195520-56545f4a5d46",
)
go_repository(
name = "com_github_oklog_ulid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/oklog/ulid",
sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_olekukonko_tablewriter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/olekukonko/tablewriter",
sum = "h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78=",
version = "v0.0.0-20170122224234-a0225b3f23b5",
)
go_repository(
name = "com_github_oneofone_xxhash",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/OneOfOne/xxhash",
sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
version = "v1.2.2",
)
go_repository(
name = "com_github_onsi_ginkgo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/onsi/ginkgo",
sum = "h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=",
version = "v1.11.0",
)
go_repository(
name = "com_github_onsi_gomega",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/onsi/gomega",
sum = "h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=",
version = "v1.8.1",
)
go_repository(
name = "com_github_opencontainers_go_digest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/opencontainers/go-digest",
sum = "h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=",
version = "v1.0.0-rc1",
)
go_repository(
name = "com_github_opencontainers_image_spec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/opencontainers/image-spec",
sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_opencontainers_runc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/opencontainers/runc",
sum = "h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=",
version = "v0.1.1",
)
go_repository(
name = "com_github_opencontainers_runtime_spec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/opencontainers/runtime-spec",
sum = "h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=",
version = "v0.1.2-0.20190507144316-5b71a03e2700",
)
go_repository(
name = "com_github_opencontainers_runtime_tools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/opencontainers/runtime-tools",
sum = "h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=",
version = "v0.0.0-20181011054405-1d69bd0f9c39",
)
go_repository(
name = "com_github_openzipkin_zipkin_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/openzipkin/zipkin-go",
sum = "h1:33/f6xXB6YlOQ9tgTsXVOkdLCJsHTcZJnMy4DnSd6FU=",
version = "v0.2.0",
)
go_repository(
name = "com_github_otiai10_copy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/otiai10/copy",
sum = "h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc=",
version = "v1.0.2",
)
go_repository(
name = "com_github_otiai10_curr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/otiai10/curr",
sum = "h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=",
version = "v0.0.0-20150429015615-9b4961190c95",
)
go_repository(
name = "com_github_otiai10_mint",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/otiai10/mint",
sum = "h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc=",
version = "v1.3.0",
)
go_repository(
name = "com_github_pascaldekloe_goe",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pascaldekloe/goe",
sum = "h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=",
version = "v0.0.0-20180627143212-57f6aae5913c",
)
go_repository(
name = "com_github_pborman_uuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pborman/uuid",
sum = "h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=",
version = "v1.2.0",
)
go_repository(
name = "com_github_pelletier_go_buffruneio",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pelletier/go-buffruneio",
sum = "h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=",
version = "v0.2.0",
)
go_repository(
name = "com_github_pelletier_go_toml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pelletier/go-toml",
sum = "h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=",
version = "v1.6.0",
)
go_repository(
name = "com_github_peterbourgon_diskv",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/peterbourgon/diskv",
sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=",
version = "v2.0.1+incompatible",
)
go_repository(
name = "com_github_phayes_freeport",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/phayes/freeport",
sum = "h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=",
version = "v0.0.0-20180830031419-95f893ade6f2",
)
go_repository(
name = "com_github_pierrec_lz4",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pierrec/lz4",
sum = "h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=",
version = "v2.2.6+incompatible",
)
go_repository(
name = "com_github_pkg_errors",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pkg/errors",
sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
version = "v0.9.1",
)
go_repository(
name = "com_github_pkg_profile",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pkg/profile",
sum = "h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=",
version = "v1.2.1",
)
go_repository(
name = "com_github_pmezard_go_difflib",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_posener_complete",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/posener/complete",
sum = "h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=",
version = "v1.1.1",
)
go_repository(
name = "com_github_pquerna_cachecontrol",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pquerna/cachecontrol",
sum = "h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM=",
version = "v0.0.0-20171018203845-0dec1b30a021",
)
go_repository(
name = "com_github_prometheus_client_golang",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/client_golang",
sum = "h1:Ctq0iGpCmr3jeP77kbF2UxgvRwzWWz+4Bh9/vJTyg1A=",
version = "v1.5.0",
)
go_repository(
name = "com_github_prometheus_client_model",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/client_model",
sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=",
version = "v0.2.0",
)
go_repository(
name = "com_github_prometheus_common",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/common",
sum = "h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=",
version = "v0.9.1",
)
go_repository(
name = "com_github_prometheus_procfs",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/procfs",
sum = "h1:QJQN3jYQhkamO4mhfUWqdDH2asK7ONOI9MTWjyAxNKM=",
version = "v0.0.10",
)
go_repository(
name = "com_github_prometheus_tsdb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/tsdb",
sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=",
version = "v0.7.1",
)
go_repository(
name = "com_github_puerkitobio_purell",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/PuerkitoBio/purell",
sum = "h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=",
version = "v1.1.1",
)
go_repository(
name = "com_github_puerkitobio_urlesc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/PuerkitoBio/urlesc",
sum = "h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=",
version = "v0.0.0-20170810143723-de5bf2ad4578",
)
go_repository(
name = "com_github_rcrowley_go_metrics",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/rcrowley/go-metrics",
sum = "h1:eUm8ma4+yPknhXtkYlWh3tMkE6gBjXZToDned9s2gbQ=",
version = "v0.0.0-20190706150252-9beb055b7962",
)
go_repository(
name = "com_github_remyoudompheng_bigfft",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/remyoudompheng/bigfft",
sum = "h1:/NRJ5vAYoqz+7sG51ubIDHXeWO8DlTSrToPu6q11ziA=",
version = "v0.0.0-20170806203942-52369c62f446",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng=",
version = "v0.0.0-20150106093220-6724a57986af",
)
go_repository(
name = "com_github_rogpeppe_go_internal",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:XU784Pr0wdahMY2bYcyK6N1KuaRAdLtqD4qd8D18Bfs=",
version = "v1.3.2",
)
go_repository(
name = "com_github_rubiojr_go_vhd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/rubiojr/go-vhd",
sum = "h1:ht7N4d/B7Ezf58nvMNVF3OlvDlz9pp+WHVcRNS0nink=",
version = "v0.0.0-20160810183302-0bfd3b39853c",
)
go_repository(
name = "com_github_russross_blackfriday",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/russross/blackfriday",
sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=",
version = "v1.5.2",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_ryanuber_columnize",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ryanuber/columnize",
sum = "h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M=",
version = "v0.0.0-20160712163229-9b3edd62028f",
)
go_repository(
name = "com_github_satori_go_uuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/satori/go.uuid",
sum = "h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=",
version = "v1.2.0",
)
go_repository(
name = "com_github_sclevine_spec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sclevine/spec",
sum = "h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=",
version = "v1.2.0",
)
go_repository(
name = "com_github_sean_seed",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sean-/seed",
sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=",
version = "v0.0.0-20170313163322-e2103e2c3529",
)
go_repository(
name = "com_github_sergi_go_diff",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sergi/go-diff",
sum = "h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_shopify_logrus_bugsnag",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Shopify/logrus-bugsnag",
sum = "h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=",
version = "v0.0.0-20171204204709-577dee27f20d",
)
go_repository(
name = "com_github_shopify_sarama",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Shopify/sarama",
sum = "h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs=",
version = "v1.23.1",
)
go_repository(
name = "com_github_shopify_toxiproxy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/Shopify/toxiproxy",
sum = "h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=",
version = "v2.1.4+incompatible",
)
go_repository(
name = "com_github_shurcool_githubv4",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shurcooL/githubv4",
sum = "h1:Cocq9/ZZxCoiybhygOR7hX4E3/PkV8eNbd1AEcUvaHM=",
version = "v0.0.0-20191102174205-af46314aec7b",
)
go_repository(
name = "com_github_shurcool_graphql",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shurcooL/graphql",
sum = "h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk=",
version = "v0.0.0-20181231061246-d48a9a75455f",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_sirupsen_logrus",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sirupsen/logrus",
sum = "h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=",
version = "v1.4.2",
)
go_repository(
name = "com_github_smartystreets_assertions",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/smartystreets/assertions",
sum = "h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=",
version = "v0.0.0-20180927180507-b2de0cb4f26d",
)
go_repository(
name = "com_github_smartystreets_goconvey",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/smartystreets/goconvey",
sum = "h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=",
version = "v1.6.4",
)
go_repository(
name = "com_github_soheilhy_cmux",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/soheilhy/cmux",
sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
version = "v0.1.4",
)
go_repository(
name = "com_github_spaolacci_murmur3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spaolacci/murmur3",
sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
version = "v0.0.0-20180118202830-f09979ecbc72",
)
go_repository(
name = "com_github_spf13_afero",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/afero",
sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=",
version = "v1.2.2",
)
go_repository(
name = "com_github_spf13_cast",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/cast",
sum = "h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=",
version = "v1.3.1",
)
go_repository(
name = "com_github_spf13_cobra",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/cobra",
sum = "h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs=",
version = "v0.0.6",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=",
version = "v1.1.0",
)
go_repository(
name = "com_github_spf13_pflag",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/pflag",
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_spf13_viper",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/viper",
sum = "h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=",
version = "v1.6.2",
)
go_repository(
name = "com_github_src_d_gcfg",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/src-d/gcfg",
sum = "h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=",
version = "v1.4.0",
)
go_repository(
name = "com_github_streadway_amqp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/streadway/amqp",
sum = "h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k=",
version = "v0.0.0-20190404075320-75d898a42a94",
)
go_repository(
name = "com_github_stretchr_objx",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/stretchr/objx",
sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=",
version = "v0.2.0",
)
go_repository(
name = "com_github_stretchr_testify",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/stretchr/testify",
sum = "h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=",
version = "v1.5.1",
)
go_repository(
name = "com_github_subosito_gotenv",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/subosito/gotenv",
sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_syndtr_gocapability",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/syndtr/gocapability",
sum = "h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI=",
version = "v0.0.0-20170704070218-db04d3cc01c8",
)
go_repository(
name = "com_github_tektoncd_pipeline",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tektoncd/pipeline",
sum = "h1:kGeWm53R5ggajD/L2KU8kcsZ2lVd4ruN3kdqK1A/NwQ=",
version = "v0.11.0",
)
go_repository(
name = "com_github_tektoncd_plumbing",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tektoncd/plumbing",
sum = "h1:BksmpUwtap3THXJ8Z4KGcotsvpRdFQKySjDHgtc22lA=",
version = "v0.0.0-20200217163359-cd0db6e567d2",
)
go_repository(
name = "com_github_tektoncd_plumbing_pipelinerun_logs",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tektoncd/plumbing/pipelinerun-logs",
sum = "h1:9qeyrQsoPZbHOyOPt0OeB1TCYXfYb5swrxlFWzTIYYk=",
version = "v0.0.0-20191206114338-712d544c2c21",
)
go_repository(
name = "com_github_tidwall_pretty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tidwall/pretty",
sum = "h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_tmc_grpc_websocket_proxy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tmc/grpc-websocket-proxy",
sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
version = "v0.0.0-20190109142713-0ad062ec5ee5",
)
go_repository(
name = "com_github_ugorji_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ugorji/go",
sum = "h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=",
version = "v1.1.4",
)
go_repository(
name = "com_github_ugorji_go_codec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ugorji/go/codec",
sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=",
version = "v0.0.0-20181204163529-d75b2dcb6bc8",
)
go_repository(
name = "com_github_ulikunitz_xz",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ulikunitz/xz",
sum = "h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=",
version = "v0.5.6",
)
go_repository(
name = "com_github_urfave_cli",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/urfave/cli",
sum = "h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=",
version = "v1.20.0",
)
go_repository(
name = "com_github_valyala_bytebufferpool",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/valyala/bytebufferpool",
sum = "h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=",
version = "v1.0.0",
)
go_repository(
name = "com_github_vdemeester_k8s_pkg_credentialprovider",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/vdemeester/k8s-pkg-credentialprovider",
sum = "h1:IBEhRIcu5HP+Pkhzn9E9z3wV0tp3TFjDkiAQtX2FXFM=",
version = "v1.13.12-1",
)
go_repository(
name = "com_github_vektah_gqlparser",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/vektah/gqlparser",
sum = "h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68=",
version = "v1.1.2",
)
go_repository(
name = "com_github_vmware_govmomi",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/vmware/govmomi",
sum = "h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU=",
version = "v0.20.3",
)
go_repository(
name = "com_github_xanzy_ssh_agent",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xanzy/ssh-agent",
sum = "h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=",
version = "v0.2.1",
)
go_repository(
name = "com_github_xdg_scram",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xdg/scram",
sum = "h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=",
version = "v0.0.0-20180814205039-7eeb5667e42c",
)
go_repository(
name = "com_github_xdg_stringprep",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xdg/stringprep",
sum = "h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_xeipuuv_gojsonpointer",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xeipuuv/gojsonpointer",
sum = "h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=",
version = "v0.0.0-20180127040702-4e3ac2762d5f",
)
go_repository(
name = "com_github_xeipuuv_gojsonreference",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xeipuuv/gojsonreference",
sum = "h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=",
version = "v0.0.0-20180127040603-bd5ef7bd5415",
)
go_repository(
name = "com_github_xeipuuv_gojsonschema",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xeipuuv/gojsonschema",
sum = "h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=",
version = "v1.1.0",
)
go_repository(
name = "com_github_xi2_xz",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xi2/xz",
sum = "h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=",
version = "v0.0.0-20171230120015-48954b6210f8",
)
go_repository(
name = "com_github_xiang90_probing",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xiang90/probing",
sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
version = "v0.0.0-20190116061207-43a291ad63a2",
)
go_repository(
name = "com_github_xlab_handysort",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xlab/handysort",
sum = "h1:j2hhcujLRHAg872RWAV5yaUrEjHEObwDv3aImCaNLek=",
version = "v0.0.0-20150421192137-fb3537ed64a1",
)
go_repository(
name = "com_github_xordataexchange_crypt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xordataexchange/crypt",
sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=",
version = "v0.0.3-0.20170626215501-b2862e3d0a77",
)
go_repository(
name = "com_github_yvasiyarov_go_metrics",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/yvasiyarov/go-metrics",
sum = "h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=",
version = "v0.0.0-20140926110328-57bccd1ccd43",
)
go_repository(
name = "com_github_yvasiyarov_gorelic",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/yvasiyarov/gorelic",
sum = "h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=",
version = "v0.0.0-20141212073537-a9bba5b9ab50",
)
go_repository(
name = "com_github_yvasiyarov_newrelic_platform_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/yvasiyarov/newrelic_platform_go",
sum = "h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=",
version = "v0.0.0-20140908184405-b21fdbd4370f",
)
go_repository(
name = "com_google_cloud_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go",
replace = "cloud.google.com/go",
sum = "h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=",
version = "v0.44.3",
)
go_repository(
name = "com_google_cloud_go_datastore",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go/datastore",
sum = "h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=",
version = "v1.0.0",
)
go_repository(
name = "com_google_cloud_go_firestore",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go/firestore",
sum = "h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=",
version = "v1.1.0",
)
go_repository(
name = "com_google_cloud_go_logging",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go/logging",
sum = "h1:kaunpnoEh9L4hu6JUsBa8Y20LBfKnCuDhKUgdZp7oK8=",
version = "v1.0.0",
)
go_repository(
name = "com_google_cloud_go_storage",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go/storage",
sum = "h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=",
version = "v1.0.0",
)
go_repository(
name = "com_shuralyov_dmitri_gpu_mtl",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "dmitri.shuralyov.com/gpu/mtl",
sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=",
version = "v0.0.0-20190408044501-666a987793e9",
)
go_repository(
name = "dev_gocloud",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gocloud.dev",
sum = "h1:EDRyaRAnMGSq/QBto486gWFxMLczAfIYUmusV7XLNBM=",
version = "v0.19.0",
)
go_repository(
name = "dev_knative_caching",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "knative.dev/caching",
sum = "h1:mxrur6DsVK8uIjhIq7c1OMls4YjBcRlyvnh3Vx13a0M=",
version = "v0.0.0-20200116200605-67bca2c83dfa",
)
go_repository(
name = "dev_knative_eventing_contrib",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "knative.dev/eventing-contrib",
sum = "h1:xncT+JrokPG+hPUFJwue8ubPpzmziV9GUIZqYt01JDo=",
version = "v0.11.2",
)
go_repository(
name = "dev_knative_pkg",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "knative.dev/pkg",
sum = "h1:52b67wiu9B62n+ZsDAMjHt84sZfiR0CUBTvtF1UEGmo=",
version = "v0.0.0-20200207155214-fef852970f43",
)
go_repository(
name = "in_gopkg_airbrake_gobrake_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/airbrake/gobrake.v2",
sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=",
version = "v2.0.9",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_check_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/check.v1",
sum = "h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=",
version = "v1.0.0-20190902080502-41f04d3bba15",
)
go_repository(
name = "in_gopkg_cheggaaa_pb_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/cheggaaa/pb.v1",
sum = "h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=",
version = "v1.0.25",
)
go_repository(
name = "in_gopkg_errgo_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_gcfg_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/gcfg.v1",
sum = "h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0=",
version = "v1.2.0",
)
go_repository(
name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=",
version = "v2.1.2",
)
go_repository(
name = "in_gopkg_inf_v0",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/inf.v0",
sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
version = "v0.9.1",
)
go_repository(
name = "in_gopkg_ini_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/ini.v1",
sum = "h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4=",
version = "v1.52.0",
)
go_repository(
name = "in_gopkg_jcmturner_aescts_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/jcmturner/aescts.v1",
sum = "h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=",
version = "v1.0.1",
)
go_repository(
name = "in_gopkg_jcmturner_dnsutils_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/jcmturner/dnsutils.v1",
sum = "h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=",
version = "v1.0.1",
)
go_repository(
name = "in_gopkg_jcmturner_gokrb5_v7",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/jcmturner/gokrb5.v7",
sum = "h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=",
version = "v7.3.0",
)
go_repository(
name = "in_gopkg_jcmturner_rpc_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/jcmturner/rpc.v1",
sum = "h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=",
version = "v1.1.0",
)
go_repository(
name = "in_gopkg_natefinch_lumberjack_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/natefinch/lumberjack.v2",
sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
version = "v2.0.0",
)
go_repository(
name = "in_gopkg_resty_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "in_gopkg_robfig_cron_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/robfig/cron.v2",
sum = "h1:E846t8CnR+lv5nE+VuiKTDG/v1U2stad0QzddfJC7kY=",
version = "v2.0.0-20150107220207-be2e0b0deed5",
)
go_repository(
name = "in_gopkg_square_go_jose_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/square/go-jose.v2",
sum = "h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=",
version = "v2.2.2",
)
go_repository(
name = "in_gopkg_src_d_go_billy_v4",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/src-d/go-billy.v4",
sum = "h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=",
version = "v4.3.2",
)
go_repository(
name = "in_gopkg_src_d_go_git_fixtures_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/src-d/go-git-fixtures.v3",
sum = "h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=",
version = "v3.5.0",
)
go_repository(
name = "in_gopkg_src_d_go_git_v4",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/src-d/go-git.v4",
sum = "h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=",
version = "v4.13.1",
)
go_repository(
name = "in_gopkg_tomb_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "in_gopkg_warnings_v0",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/warnings.v0",
sum = "h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=",
version = "v0.1.2",
)
go_repository(
name = "in_gopkg_yaml_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/yaml.v2",
sum = "h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=",
version = "v2.2.8",
)
go_repository(
name = "in_gopkg_yaml_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/yaml.v3",
sum = "h1:0efs3hwEZhFKsCoP8l6dDB1AZWMgnEl3yWXWRZTOaEA=",
version = "v3.0.0-20190709130402-674ba3eaed22",
)
go_repository(
name = "io_etcd_go_bbolt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/bbolt",
sum = "h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=",
version = "v1.3.3",
)
go_repository(
name = "io_etcd_go_etcd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/etcd",
sum = "h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=",
version = "v0.0.0-20191023171146-3cf2f69b5738",
)
go_repository(
name = "io_k8s_api",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/api",
replace = "k8s.io/api",
sum = "h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0=",
version = "v0.17.3",
)
go_repository(
name = "io_k8s_apiextensions_apiserver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/apiextensions-apiserver",
sum = "h1:cP579D2hSZNuO/rZj9XFRzwJNYb41DbNANJb6Kolpss=",
version = "v0.17.2",
)
go_repository(
name = "io_k8s_apimachinery",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/apimachinery",
replace = "k8s.io/apimachinery",
sum = "h1:f+uZV6rm4/tHE7xXgLyToprg6xWairaClGVkm2t8omg=",
version = "v0.17.3",
)
go_repository(
name = "io_k8s_apiserver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/apiserver",
sum = "h1:NssVvPALll6SSeNgo1Wk1h2myU1UHNwmhxV0Oxbcl8Y=",
version = "v0.17.2",
)
go_repository(
name = "io_k8s_cli_runtime",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/cli-runtime",
sum = "h1:0ZlDdJgJBKsu77trRUynNiWsRuAvAVPBNaQfnt/1qtc=",
version = "v0.17.3",
)
go_repository(
name = "io_k8s_client_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/client-go",
replace = "k8s.io/client-go",
sum = "h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU=",
version = "v0.17.3",
)
go_repository(
name = "io_k8s_cloud_provider",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/cloud-provider",
sum = "h1:BQZPD1Ja/vnTOj1GKI9/wSpd3qgIDZp9q2NAS3568Ac=",
version = "v0.17.0",
)
go_repository(
name = "io_k8s_code_generator",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/code-generator",
replace = "k8s.io/code-generator",
sum = "h1:q/hDMk2cvFzSxol7k/VA1qCssR7VSMXHQHhzuX29VJ8=",
version = "v0.17.3",
)
go_repository(
name = "io_k8s_component_base",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/component-base",
sum = "h1:0XHf+cerTvL9I5Xwn9v+0jmqzGAZI7zNydv4tL6Cw6A=",
version = "v0.17.2",
)
go_repository(
name = "io_k8s_csi_translation_lib",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/csi-translation-lib",
sum = "h1:8hwWJDMOBCAogaWXtNWy0dYGQ2dZYzOnOzjQMiDaY+E=",
version = "v0.17.0",
)
go_repository(
name = "io_k8s_gengo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/gengo",
sum = "h1:iraFntD6FA5K/hBaPW2z/ZItJZEG63uc3ak5S0oDVEo=",
version = "v0.0.0-20191108084044-e500ee069b5c",
)
go_repository(
name = "io_k8s_klog",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/klog",
sum = "h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=",
version = "v1.0.0",
)
go_repository(
name = "io_k8s_kube_openapi",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/kube-openapi",
sum = "h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=",
version = "v0.0.0-20191107075043-30be4d16710a",
)
go_repository(
name = "io_k8s_kubectl",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/kubectl",
sum = "h1:QZR8Q6lWiVRjwKslekdbN5WPMp53dS/17j5e+oi5XVU=",
version = "v0.17.2",
)
go_repository(
name = "io_k8s_kubernetes",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/kubernetes",
sum = "h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=",
version = "v1.13.0",
)
go_repository(
name = "io_k8s_legacy_cloud_providers",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/legacy-cloud-providers",
sum = "h1:ITm7sUthpxQyP96MU7K4Ra9M9M1k9eywUWv9IiTaxzc=",
version = "v0.17.0",
)
go_repository(
name = "io_k8s_metrics",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/metrics",
sum = "h1:cuN1ScyUS9/tj4YFI8d0/7yO0BveFHhyQpPNWS8uLr8=",
version = "v0.17.2",
)
go_repository(
name = "io_k8s_sigs_controller_runtime",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "sigs.k8s.io/controller-runtime",
sum = "h1:CbqIy5fbUX+4E9bpnBFd204YAzRYlM9SWW77BbrcDQo=",
version = "v0.5.0",
)
go_repository(
name = "io_k8s_sigs_kustomize",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "sigs.k8s.io/kustomize",
sum = "h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=",
version = "v2.0.3+incompatible",
)
go_repository(
name = "io_k8s_sigs_structured_merge_diff",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "sigs.k8s.io/structured-merge-diff",
sum = "h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=",
version = "v1.0.1-0.20191108220359-b1b620dd3f06",
)
go_repository(
name = "io_k8s_sigs_yaml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "sigs.k8s.io/yaml",
sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
version = "v1.2.0",
)
go_repository(
name = "io_k8s_utils",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/utils",
sum = "h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=",
version = "v0.0.0-20191114184206-e782cd3c129f",
)
go_repository(
name = "io_opencensus_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.opencensus.io",
sum = "h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=",
version = "v0.22.3",
)
go_repository(
name = "io_opencensus_go_contrib_exporter_aws",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "contrib.go.opencensus.io/exporter/aws",
sum = "h1:YsbWYxDZkC7x2OxlsDEYvvEXZ3cBI3qBgUK5BqkZvRw=",
version = "v0.0.0-20181029163544-2befc13012d0",
)
go_repository(
name = "io_opencensus_go_contrib_exporter_ocagent",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "contrib.go.opencensus.io/exporter/ocagent",
sum = "h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=",
version = "v0.5.0",
)
go_repository(
name = "io_opencensus_go_contrib_exporter_prometheus",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "contrib.go.opencensus.io/exporter/prometheus",
sum = "h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg=",
version = "v0.1.0",
)
go_repository(
name = "io_opencensus_go_contrib_exporter_stackdriver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "contrib.go.opencensus.io/exporter/stackdriver",
sum = "h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=",
version = "v0.12.8",
)
go_repository(
name = "io_opencensus_go_contrib_integrations_ocsql",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "contrib.go.opencensus.io/integrations/ocsql",
sum = "h1:kfg5Yyy1nYUrqzyfW5XX+dzMASky8IJXhtHe0KTYNS4=",
version = "v0.1.4",
)
go_repository(
name = "io_opencensus_go_contrib_resource",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "contrib.go.opencensus.io/resource",
sum = "h1:4r2CANuYhKGmYWP02+5E94rLRcS/YeD+KlxSrOsMxk0=",
version = "v0.1.1",
)
go_repository(
name = "io_rsc_binaryregexp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "rsc.io/binaryregexp",
sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=",
version = "v0.2.0",
)
go_repository(
name = "io_rsc_letsencrypt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "rsc.io/letsencrypt",
sum = "h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM=",
version = "v0.0.3",
)
go_repository(
name = "ml_vbom_util",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "vbom.ml/util",
sum = "h1:O69FD9pJA4WUZlEwYatBEEkRWKQ5cKodWpdKTrCS/iQ=",
version = "v0.0.0-20180919145318-efcd4e0f9787",
)
go_repository(
name = "org_apache_git_thrift_git",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "git.apache.org/thrift.git",
sum = "h1:CMxsZlAmxKs+VAZMlDDL0wXciMblJcutQbEe3A9CYUM=",
version = "v0.12.0",
)
go_repository(
name = "org_bazil_fuse",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "bazil.org/fuse",
sum = "h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=",
version = "v0.0.0-20180421153158-65cc252bf669",
)
go_repository(
name = "org_golang_google_api",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/api",
sum = "h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=",
version = "v0.15.0",
)
go_repository(
name = "org_golang_google_appengine",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/appengine",
sum = "h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=",
version = "v1.6.5",
)
go_repository(
name = "org_golang_google_cloud",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/cloud",
sum = "h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=",
version = "v0.0.0-20151119220103-975617b05ea8",
)
go_repository(
name = "org_golang_google_genproto",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/genproto",
sum = "h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=",
version = "v0.0.0-20190911173649-1774047e7e51",
)
go_repository(
name = "org_golang_google_grpc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/grpc",
sum = "h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=",
version = "v1.27.0",
)
go_repository(
name = "org_golang_google_protobuf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/protobuf",
sum = "h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=",
version = "v1.21.0",
)
go_repository(
name = "org_golang_x_crypto",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/crypto",
sum = "h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=",
version = "v0.0.0-20200302210943-78000ba7a073",
)
go_repository(
name = "org_golang_x_exp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/exp",
sum = "h1:estk1glOnSVeJ9tdEZZc5mAMDZk5lNJNyJ6DvrBkTEU=",
version = "v0.0.0-20190731235908-ec7cb31e5a56",
)
go_repository(
name = "org_golang_x_image",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/image",
sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=",
version = "v0.0.0-20190802002840-cff245a6509b",
)
go_repository(
name = "org_golang_x_lint",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/lint",
sum = "h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=",
version = "v0.0.0-20200302205851-738671d3881b",
)
go_repository(
name = "org_golang_x_mobile",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/mobile",
sum = "h1:b373EGXtj0o+ssqkOkdVphTCZ/fVg2LwhctJn2QQbqA=",
version = "v0.0.0-20190806162312-597adff16ade",
)
go_repository(
name = "org_golang_x_mod",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/mod",
sum = "h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_x_net",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/net",
sum = "h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=",
version = "v0.0.0-20200301022130-244492dfa37a",
)
go_repository(
name = "org_golang_x_oauth2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/oauth2",
sum = "h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=",
version = "v0.0.0-20200107190931-bf48bf16ab8d",
)
go_repository(
name = "org_golang_x_sync",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/sync",
sum = "h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=",
version = "v0.0.0-20190911185100-cd5d95a43a6e",
)
go_repository(
name = "org_golang_x_sys",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/sys",
sum = "h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=",
version = "v0.0.0-20200302150141-5c8b2ff67527",
)
go_repository(
name = "org_golang_x_text",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/text",
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
version = "v0.3.2",
)
go_repository(
name = "org_golang_x_time",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/time",
sum = "h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=",
version = "v0.0.0-20191024005414-555d28b269f0",
)
go_repository(
name = "org_golang_x_tools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/tools",
sum = "h1:Kh3iY7o/2bMfQXZdwLdL9jDMU1k9HoVn0P1mGCfoFLc=",
version = "v0.0.0-20200303214625-2b0b585e22fe",
)
go_repository(
name = "org_golang_x_xerrors",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/xerrors",
sum = "h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=",
version = "v0.0.0-20191204190536-9bdfabe68543",
)
go_repository(
name = "org_gonum_v1_gonum",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gonum.org/v1/gonum",
sum = "h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=",
version = "v0.0.0-20190331200053-3d26580ed485",
)
go_repository(
name = "org_gonum_v1_netlib",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gonum.org/v1/netlib",
sum = "h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=",
version = "v0.0.0-20190331212654-76723241ea4e",
)
go_repository(
name = "org_modernc_cc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "modernc.org/cc",
sum = "h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=",
version = "v1.0.0",
)
go_repository(
name = "org_modernc_golex",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "modernc.org/golex",
sum = "h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=",
version = "v1.0.0",
)
go_repository(
name = "org_modernc_mathutil",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "modernc.org/mathutil",
sum = "h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I=",
version = "v1.0.0",
)
go_repository(
name = "org_modernc_strutil",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "modernc.org/strutil",
sum = "h1:XVFtQwFVwc02Wk+0L/Z/zDDXO81r5Lhe6iMKmGX3KhE=",
version = "v1.0.0",
)
go_repository(
name = "org_modernc_xc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "modernc.org/xc",
sum = "h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU=",
version = "v1.0.0",
)
go_repository(
name = "org_mongodb_go_mongo_driver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.mongodb.org/mongo-driver",
sum = "h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=",
version = "v1.1.2",
)
go_repository(
name = "org_uber_go_atomic",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/atomic",
sum = "h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=",
version = "v1.6.0",
)
go_repository(
name = "org_uber_go_multierr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/multierr",
sum = "h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=",
version = "v1.5.0",
)
go_repository(
name = "org_uber_go_tools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/tools",
sum = "h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=",
version = "v0.0.0-20190618225709-2cfd321de3ee",
)
go_repository(
name = "org_uber_go_zap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/zap",
sum = "h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=",
version = "v1.14.1",
)
go_repository(
name = "sh_helm_helm_v3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "helm.sh/helm/v3",
sum = "h1:aykwPMVyQyncZ8iLNVMXgJ1l3c6W0+LSOPmqp8JdCjs=",
version = "v3.1.1",
)
go_repository(
name = "tools_gotest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gotest.tools",
sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=",
version = "v2.2.0+incompatible",
)
go_repository(
name = "xyz_gomodules_jsonpatch_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gomodules.xyz/jsonpatch/v2",
sum = "h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=",
version = "v2.1.0",
)
| 38.318407 | 81 | 0.636777 |
8727018f847560fee0998f399bca0340b9f8401a | 3,104 | py | Python | DQM/Integration/python/config/visualizationPreFilter.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | DQM/Integration/python/config/visualizationPreFilter.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | DQM/Integration/python/config/visualizationPreFilter.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z |
import FWCore.ParameterSet.Config as cms
#process.hltTriggerTypeFilter
TAG_HLTSEL = 'HLT_*'
hltHighLevel = cms.EDFilter("HLTHighLevel",
TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"),
HLTPaths = cms.vstring( # provide list of HLT paths (or patterns) you want
TAG_HLTSEL
),
eventSetupPathsKey = cms.string(''), # not empty => use read paths from AlCaRecoTriggerBitsRcd via this key
andOr = cms.bool(True), # how to deal with multiple triggers: True (OR) accept if ANY is true, False (AND) accept if ALL are true
throw = cms.bool(False) # throw exception on unknown path names
)
hltfilter = cms.Sequence(hltHighLevel)
from RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizer_cfi import siPixelClusters
filtersiPixelClusters = siPixelClusters.clone()
filtersiPixelClusters.src = cms.InputTag("filtersiPixelDigis")
from EventFilter.SiPixelRawToDigi.SiPixelRawToDigi_cfi import siPixelDigis
filtersiPixelDigis = siPixelDigis.clone()
filtersiPixelDigis.InputLabel = cms.InputTag("rawDataCollector")
import HLTrigger.special.hltPixelActivityFilter_cfi
multFilter = HLTrigger.special.hltPixelActivityFilter_cfi.hltPixelActivityFilter.clone(
inputTag = cms.InputTag('filtersiPixelClusters'),
minClusters = cms.uint32(10000),
maxClusters = cms.uint32(50000)
)
pixelClusterFilter = cms.Sequence(filtersiPixelDigis * filtersiPixelClusters * multFilter)
# process.hltfilter=cms.Path(process.hltHighLevel)
# process.load("HLTrigger.special.HLTTriggerTypeFilter_cfi")
# # 0=random, 1=physics, 2=calibration, 3=technical
# process.hltTriggerTypeFilter.SelectedTriggerType = TAG_TRIGGERTYPE
# process.triggertype=cms.Path(process.hltTriggerTypeFilter)
# # this is for filtering on L1 technical trigger bit
# process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
# process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
# process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
# process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string(TAG_L1TTSEL)
# process.techtrigger=cms.Path(process.hltLevel1GTSeed)
# #this is for filtering/tagging PhysDecl bit
# process.physdecl = cms.EDFilter("PhysDecl",
# applyfilter = cms.untracked.bool(False),
# debugOn = cms.untracked.bool(True),
# # the following needs V00-01-19 of DPGAnalysis/Skims!!!
# HLTriggerResults = cms.InputTag("TriggerResults","","HLT")
# )
# process.Monitoring=cms.Path(process.physdecl)
# this is how the path was configured in the initial script from TB
#process.hltinspect
#process.hltTriggerTypeFilter
#RAWTODIGI
#process.hltLevel1GTSeed
#RECO
#process.l1GtTriggerMenuLite+process.beamsplash+process.physdecl+process.dcsstatus
| 43.71831 | 169 | 0.691366 |
4aac410f65ea30f988b59ce4b7a03ec85d192c81 | 20,986 | py | Python | scripts/recoding.py | Juliane-W/gene-sparc | c6f284c02a7eace88d46fdaf3eb6bf6bdee5e2af | ["MIT"] | null | null | null | scripts/recoding.py | Juliane-W/gene-sparc | c6f284c02a7eace88d46fdaf3eb6bf6bdee5e2af | ["MIT"] | null | null | null | scripts/recoding.py | Juliane-W/gene-sparc | c6f284c02a7eace88d46fdaf3eb6bf6bdee5e2af | ["MIT"] | null | null | null |
import re
import logging
from Bio import SeqFeature
from operator import itemgetter
def recode(gff_iterator, codonmap, endprotect, set_mane):
""" Replaces the codons based on codonmap in the Sequence Record,
but protects nucleotides at the beginning or end of the coding sequence
    and handles overlaps based on the MANE transcript list and rules defined by set_mane.
"""
complement_dict = {'A':'T','T':'A','C':'G','G':'C'}
for record in gff_iterator:
cds_ranges = getAnnotRange(record,target = 'CDS') # takes into account transcript ID, returns [[start,end,phase,transcript_id, strand]]
        # Handle overlaps --> select the cds that should be taken into account. Alternatively, in the long term, a duplication of the overlapping sequence could be introduced here.
# Use set_mane to only choose the well supported transcripts / default transcript per gene.
selected_cds_ranges, conflict_cds_ranges = selectCDS(cds_ranges,set_mane)
logging.debug('Selected cds ranges: {}'.format(selected_cds_ranges))
# Create annotations for conflict cds ranges --> user should look at them manually
logging.info('Conflict cds ranges: {}'.format(conflict_cds_ranges))
for x in conflict_cds_ranges:
create_feature_annot([x[0],x[1]], "recoding-conflict",x[4])
logging.debug(' Recoding conflict annotation created for {}'.format(x))
# Selected CDS: Create a dictionary for every transcript (key) and the corresponding ranges that have been selected (value: list of ranges (list) with 0 - start, 1 - end, 2 - phase, 3 - transcript id, 4 - strand)
cds_ranges_for, cds_ranges_rev = createRangeDict(selected_cds_ranges)
logging.debug('Ranges of relevance: \n Forward: {} \n Reverse: {}'.format(cds_ranges_for, cds_ranges_rev))
# All CDS: Create a dictionary for every transcript (key) and the corresponding ranges (value: list of ranges (list) with 0 - start, 1 - end, 2 - phase, 3 - transcript id, 4 - strand)
        cds_ranges_for_all, cds_ranges_rev_all = createRangeDict(cds_ranges) # we need all ranges to take into account split ORFs
logging.debug('all ranges: \n Forward: {} \n Reverse: {}'.format(cds_ranges_for_all,cds_ranges_rev_all))
        # Create a dictionary with positions where the codon should be inserted, key: codon that should be inserted, value: list of positions where it should be inserted
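        # Illustrative shape (hypothetical positions, not from the original source):
        # {'CTG': [[101, 102, 103], [250, 251, 300]], ...} -- each inner list holds the three
        # (possibly non-contiguous) nucleotide positions of one occurrence.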
replace_map_for = getReplaceMap(record,'forward',cds_ranges_for,cds_ranges_for_all,codonmap,endprotect)
replace_map_rev = getReplaceMap(record,'reverse',cds_ranges_rev,cds_ranges_rev_all,codonmap,endprotect)
        logging.debug('Following codons will be inserted: \n Forward: {} \n Reverse: {}'.format(replace_map_for, replace_map_rev))
# replace codons
mutable_seq = record.seq.tomutable() # makes sequence mutable
for x in replace_map_for.keys():
codon = list(x)
for pos in replace_map_for[x]:
# create a new annotation for each recoded nucleotide with original nucleotide
for k in pos:
new_feature = create_feature_annot([k,k+1],'replaced-'+mutable_seq[k], 1)
record.features.append(new_feature)
# Recode
mutable_seq = replaceCODON(pos,codon,mutable_seq)
for x in replace_map_rev.keys():
codon = [complement_dict[y] for y in list(x)]
for pos in replace_map_rev[x]:
                # create a new annotation for each recoded nucleotide with original nucleotide
for k in pos:
new_feature = create_feature_annot([k,k+1],'replaced-'+complement_dict[mutable_seq[k]], 0)
record.features.append(new_feature)
mutable_seq = replaceCODON(pos,codon,mutable_seq)
# finish editing
record.seq = mutable_seq.toseq()
yield record
def replaceCODON(positionlist, codon, sequence):
""" Replacing codon in sequence at position """
sequence[positionlist[0]] = codon[0]
sequence[positionlist[1]] = codon[1]
sequence[positionlist[2]] = codon[2]
return(sequence)
def getReplaceMap(record, strand, rangedict, all_ranges_dict, codonmap, endprotect):
""" Find the positions for the codons of interest in the + strand
"""
replace_map = {}
for tID, regionlist in rangedict.items():
i = 0
for region in regionlist:
# determine shift based on region information in gff file
shift = region[2]
# check if ORF is complete or if part of next CDS has to be included
curr_remainder = (region[1] - region[0] - shift) % 3
            # Create sub-region with full codons (include/exclude partial codons that are split between CDSs)
sub_reg = getSubRegion(record, region, tID, regionlist, all_ranges_dict,curr_remainder, shift, strand)
# find the positions of the nucleotides that should be replaced for each codon
for x in codonmap:
# Find occurences in substring
if strand == 'forward':
occ_list = find_occurence(x[0],str(sub_reg.seq),endprotect) # [[pos of 1st codon, 1 for being last codon / 0 for every codon within CDS]]
# add start of region to have the real position
occ_list = [[x+region[0]+shift,n] for x,n in occ_list]
elif strand == 'reverse':
occ_list = find_occurence(x[0],str(sub_reg.seq.reverse_complement()),endprotect) # [[pos of 1st codon, 1 for being last codon / 0 for every codon within CDS]]
# get the real position
occ_list = [[region[1]-shift-x-1,n] for x,n in occ_list]
else:
logging.error('Strand direction not clearly defined. strand = {}'.format(strand))
# get list of nucleotides that should be replaced
nt_list = getNTReplace(occ_list,regionlist,curr_remainder,strand,i)
# add the nucleotide list to the dictionary
replace_map[x[1]] = replace_map.get(x[1], []) + nt_list
# update i
i += 1
return(replace_map)
def getSubRegion(record, region, tID, regionlist, all_ranges_dict,curr_remainder, shift, strand):
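    """ Return the sub-record of region restricted to complete codons: when the reading
    frame is split across CDS boundaries (curr_remainder != 0) and the next CDS of the same
    transcript is also selected, the partial codon from that next CDS is included; otherwise
    the split codon is dropped.
    """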
# Find target codon nucleotides in target sequence
if curr_remainder != 0:
# Find current region in all region list
m = all_ranges_dict[tID].index(region) # position of this region in all ranges for this transcript
# Find next region in all region list
try:
next_reg = all_ranges_dict[tID][m+1]
except:
            logging.warning('next region not found for current region {}. It seems like the transcript stops without finishing the reading frame. The last {}nt were not considered for recoding.'.format(region, (3-curr_remainder)))
# If next region is in regionlist of interest, all of them should be taken into account
if next_reg in regionlist:
if strand == 'forward':
next_sub_reg = record[next_reg[0] : next_reg[0] + (3-curr_remainder)] # get the first partial codon from next range
curr_sub_reg = record[region[0]+shift : region[1]]
elif strand == 'reverse':
next_sub_reg = record[next_reg[1]-(3-curr_remainder) : next_reg[1]] # get the last partial codon from next range
                curr_sub_reg = record[region[0]:region[1]-shift] # region stops earlier since the strand is read in reverse
else:
logging.error('Strand direction is not clearly indicated')
# Get target subset of rec but without remainder from previous region
sub_reg = next_sub_reg + curr_sub_reg # join to create a subregion with complete codons
        # else, the split codon won't be taken into account
else:
if strand == 'forward':
                sub_reg = record[region[0]+shift : region[1] - curr_remainder] # region starts later since it is the forward strand, and ends earlier because the split codon is not taken into account
elif strand == 'reverse':
                sub_reg = record[region[0]+curr_remainder:region[1]-shift] # region stops earlier since the strand is read in reverse, and starts later because the split codon is not taken into account
else:
logging.error('Strand direction is not clearly indicated')
else:
# Get target subset of rec but without remainder from previous region
if strand == 'forward':
sub_reg = record[region[0]+shift : region[1]]
elif strand == 'reverse':
sub_reg = record[region[0] : region[1]-shift]
else:
logging.error('Strand direction is not clearly indicated')
return sub_reg
def getNTReplace(occ_list,regionlist,curr_remainder,strand, i):
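    """ Collect, for each codon occurrence in occ_list, the three nucleotide positions to
    replace. Codons whose last one or two nucleotides fall in the next CDS region
    (curr_remainder 1 or 2) are resolved using regionlist. Returns a list of
    [first, second, third] positions.
    """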
nt_list = []
for occ in occ_list:
if occ[1] == 1:
if curr_remainder == 2:
if strand == 'forward':
second_nt = occ[0]+1 # still part of current region
third_nt = regionlist[i+1][0]+1 # part of next region
elif strand == 'reverse':
second_nt = occ[0]-1 # still part of current region
third_nt = regionlist[i+1][1]-1 # part of next region starting from the end
else:
logging.error('Strand direction is not clearly indicated')
logging.debug(' last codon positions: {}, remainder 2'.format([occ[0],second_nt,third_nt]))
elif curr_remainder == 1: #current remainder should be one
if strand == 'forward':
second_nt = regionlist[i+1][0]+1 # part of next region
third_nt = regionlist[i+1][0]+2 # also part of next region
elif strand == 'reverse':
second_nt = regionlist[i+1][1]-1 # part of next region starting from the end
third_nt = regionlist[i+1][1]-2 # part of next region starting from the end
logging.debug(' last codon positions: {}, remainder 1'.format([occ[0],second_nt,third_nt]))
elif curr_remainder == 0:
if strand == 'forward':
second_nt = occ[0]+1 # still part of current region
third_nt = occ[0]+2 # part of next region
elif strand == 'reverse':
second_nt = occ[0]-1
third_nt = occ[0]-2
logging.debug(' last codon positions: {}, remainder 0'.format([occ[0],second_nt,third_nt]))
else:
logging.warning("Problem: remainder should be 1 or 2, but is {}".format(curr_remainder))
logging.warning('The codon at {} should be replaced, but failed.'.format(occ))
continue
else:
if strand == 'forward':
second_nt = occ[0]+1 # still part of current region
third_nt = occ[0]+2 # part of next region
elif strand == 'reverse':
second_nt = occ[0]-1
third_nt = occ[0]-2
else:
logging.error('Strand direction not clearly indicated.')
nt_list.append([occ[0],second_nt,third_nt])
return(nt_list)
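# A minimal usage sketch (hypothetical values, not part of the original pipeline): for a
# codon that lies fully inside the current region (occ[1] == 0) on the forward strand,
# getNTReplace simply emits the three consecutive positions:
#     getNTReplace([[9, 0]], [], 0, 'forward', 0)
#     # -> [[9, 10, 11]]
# The split-codon branches instead pull the missing positions from the neighbouring
# region in `regionlist`.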
def selectCDS(rangelist, set_mane):
# sort the list based on start
rangelist.sort(key=lambda interval: interval[0])
# Get settings for transcript priority and list of MANE transcripts
transcriptpriority = set_mane[0]
mane_list = set_mane[1]
# Store selected ranges in list
keep_cds = []
conflict_cds = []
j = 0 # to skip items that have already been taken into account
for i in range(len(rangelist)):
# skip the cds that have already been taken into account
if i < j and i != 0: continue
# for last cds, no comparison with next cds can be made. If it's not skipped, it needs to be recoded
elif i == len(rangelist)-1:
keep_cds.append(rangelist[i])
continue
# if we are only interested in MANE transcripts for recoding: loop to the next range if it's not a MANE transcript
elif transcriptpriority == 'maneonly' and rangelist[i][3] not in mane_list: continue
# check if current start is before the end of the last appended cds --> overlap with added
elif len(keep_cds) != 0 and rangelist[i][0] < keep_cds[-1][1]:
            # check the orf: end of the last cds plus its phase, minus the start of the current cds plus its phase, must be divisible by 3
subremainder = (keep_cds[-1][1] + keep_cds[-1][2] - rangelist[i][0] + rangelist[i][2]) % 3
# if the end is also before the end of the last appended cds and they are in the reading frame
if rangelist[i][1] <= keep_cds[-1][1] and subremainder == 0: continue
# if soft, last one has to be mane. So if current is mane add to conflict
elif transcriptpriority == 'soft':
if rangelist[i][3] in mane_list: conflict_cds.append(rangelist[i]) # both are MANE, so add to conflict
else: continue # it overlaps with a MANE, so it's not recoded
# if maximize, last one doesn't have to be MANE
elif transcriptpriority == 'max':
# if both are MANE add the current one to conflict
                if rangelist[i][3] in mane_list and keep_cds[-1][3] in mane_list: conflict_cds.append(rangelist[i])
# if only last one is MANE, don't recode current one
elif keep_cds[-1][3] in mane_list: continue
# if only current one is MANE, don't recode last one
elif rangelist[i][3] in mane_list: keep_cds[-1] = rangelist[i]
else: logging.warning("It looks like you have a special case that has not been considered.")
else:
conflict_cds.append(rangelist[i])
logging.warning("It looks like you have a special case that has not been considered. The current cds will be added to conflict")
# check if end of current cds is higher than the start of the next one --> overlap with next
elif rangelist[i][1] > rangelist[i+1][0]:
# check if more transcripts start at the same position
n = i + 1
subrangelist = [rangelist[i]]
while n <= len(rangelist)-1 and rangelist[n][0] == rangelist[i+1][0]:
subrangelist.append(rangelist[n])
n += 1
j = n
# check which transcript is a MANE transcript
submanelist = []
for k in range(len(subrangelist)):
if subrangelist[k][3] in mane_list:
submanelist.append(k) # k is the position of transcript in the subrangelist
# if there is only one mane transcript, everything is ok and this will be used for recoding
if len(submanelist) == 1:
                keep_cds.append(subrangelist[submanelist[0]])  # index of the single MANE transcript
# if no transcript is the mane transcript, check if they are in the same reading frame
elif len(submanelist) == 0:
if transcriptpriority == 'soft':
# in the soft regime, in case of an overlap of two non-MANE transcripts, nothing is recoded
continue
elif transcriptpriority == 'max':
# decide which one to recode
orf_shiftlist = [item[2] for item in subrangelist]
# if they have the same orf shift
if (len(set(orf_shiftlist))==1):
# if they have also the same end:
                        if len(set([item[1] for item in subrangelist])) == 1:
keep_cds.append(subrangelist[0])
# they have different end positions --> take the longer one
else:
cds_id, cds_maxvalue = max(enumerate(map(itemgetter(1), subrangelist)),key=itemgetter(1))
keep_cds.append(subrangelist[cds_id])
# if they don't have the same orf, create annotation that this hasn't been recoded --> add to conflict list
else:
conflict_cds = conflict_cds + subrangelist
else:
logging.warning("This looks like an error. You are neither following the maximize nor the soft regime for recoding.")
# if there are too many mane transcripts, only the longest will be taken into account, but this should never happen
else:
submanes = []
for l in submanelist:
submanes.append(subrangelist[l])
orf_shiftlist = [item[2] for item in submanes]
# if they have the same orf shift
if (len(set(orf_shiftlist))==1):
# if they have also the same end:
                    if len(set([item[1] for item in submanes])) == 1:
keep_cds.append(submanes[0])
logging.info("Too many MANE transcript identified, but they have same length and orf. Appended first transcript {}.".format(submanes[0]))
else:
cds_id, cds_maxvalue = max(enumerate(map(itemgetter(1), submanes)),key=itemgetter(1))
keep_cds.append(submanes[cds_id])
logging.info("Several MANE transcripts identified. Appended longest transcript {} with end at {}".format(submanes[cds_id][3],cds_maxvalue))
# if they don't have the same orf, create annotation that this hasn't been recoded --> add to conflict list
else:
conflict_cds = conflict_cds + submanes
# no overlap --> recode
else:
keep_cds.append(rangelist[i])
return(keep_cds, conflict_cds)
def createRangeDict(ranges):
    # Every range consists of 0 - start, 1 - end, 2 - phase, 3 - transcript id, 4 - strand
for_dict = {}
rev_dict = {}
for cds in ranges:
if cds[4] == 1: for_dict[cds [3]] = for_dict.get(cds [3], []) + [[cds[0],cds[1],cds[2]]]
elif cds[4] == -1: rev_dict[cds [3]] = rev_dict.get(cds [3], []) + [[cds[0],cds[1],cds[2]]]
else: logging.warning("Problem finding the strand of {} - not included".format(cds))
# sort them
for transcript in for_dict: for_dict[transcript].sort(key=lambda interval: interval[0])
for transcript in rev_dict: rev_dict[transcript].sort(key=lambda interval: interval[0],reverse = True)
return for_dict, rev_dict
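# An illustrative sketch (hypothetical values): with one forward and one reverse CDS,
#     createRangeDict([[10, 40, 0, 'T1', 1], [60, 90, 0, 'T2', -1]])
#     # -> ({'T1': [[10, 40, 0]]}, {'T2': [[60, 90, 0]]})
# Forward ranges are sorted by start ascending, reverse ranges by start descending.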
def find_occurence(find_strings, in_string,protect):
""" Find startposition of find_strings in in_strings, exclude n = endprotect nt from start and end """
occ_pos = []
occ_list = [y.start() for x in find_strings for y in re.finditer('(?=' + x +')', in_string)]
occ_list = list(filter(lambda x: (x % 3 == 0), occ_list)) # filter by ORF (divisible by 3)
for x in occ_list:
# don't recode codons if it's at the beginning or end
if x < protect or x >= len(in_string)-protect: continue
if x >= len(in_string)-3:
occ_pos.append([x,1]) # the 1 indicates that it is the last codon in the substring
else: occ_pos.append([x,0]) # the 0 indicates that it is not the last codon in the substring
return(occ_pos)
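# A small worked example (hypothetical input, for illustration only): in 'ATGCGACGATAA'
# the motif 'CGA' occurs at positions 3 and 6; both are in frame (divisible by 3),
# outside a 3 nt protection window, and neither is the last codon of the string, so:
#     find_occurence(['CGA'], 'ATGCGACGATAA', 3)
#     # -> [[3, 0], [6, 0]]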
def create_feature_annot(loc_range, featuretype,s):
""" Create a new feature annotation at loc_range with featuretype on strand s. """
location = SeqFeature.FeatureLocation(SeqFeature.ExactPosition(loc_range[0]),SeqFeature.ExactPosition(loc_range[1]))
new_feature = SeqFeature.SeqFeature(location,type=featuretype,strand=s)
return(new_feature)
def getAnnotRange(record,target = "CDS"):
""" Get position range for annotation type of interest."""
annot_range = []
# Get location ranges of interesting features
for i in range(len(record.features)):
# determine which annotations should be taken into account
if record.features[i].type == target:
# get their start and end location and phase: [[[start,end,phase,transcript_id, strand]]
loc_list = [int(record.features[i].location.start),\
int(record.features[i].location.end),\
int(record.features[i].qualifiers['phase'][0]),\
record.features[i].qualifiers['Parent'][0].split(":")[1],\
record.features[i].location.strand]
annot_range.append(loc_list)
if len(annot_range) > 0 and annot_range[0] == '':
del annot_range[0]
return(annot_range)
| 58.949438
| 231
| 0.609883
|
4ac7f551f6037163d9356d6e4edc2336c1b208ee
| 5,514
|
py
|
Python
|
tensorflow_federated/python/core/templates/iterative_process_test.py
|
munhyunsu/federated
|
71052f457014d8ec00ec2b27128428b8763b3d47
|
[
"Apache-2.0"
] | 1
|
2020-05-02T05:08:14.000Z
|
2020-05-02T05:08:14.000Z
|
tensorflow_federated/python/core/templates/iterative_process_test.py
|
munhyunsu/federated
|
71052f457014d8ec00ec2b27128428b8763b3d47
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/core/templates/iterative_process_test.py
|
munhyunsu/federated
|
71052f457014d8ec00ec2b27128428b8763b3d47
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import values
from tensorflow_federated.python.core.impl.executors import default_executor
from tensorflow_federated.python.core.impl.executors import executor_stacks
from tensorflow_federated.python.core.templates import iterative_process
# Create tff.Computations that perform a sum over a sequence: initialize the
# state to 0 and add each item in the sequence to the state.
@computations.tf_computation
def initialize():
return tf.constant(0)
@computations.tf_computation(tf.int32, tf.int32)
def add_int32(current, val):
return current + val
@computations.tf_computation(tf.int32, tf.int32)
def add_mul_int32(current, val):
return current + val, current * val
@computations.tf_computation(tf.int32)
def count_int32(current):
return current + 1
class IterativeProcessTest(test.TestCase):
def test_constructor_with_state_only(self):
ip = iterative_process.IterativeProcess(initialize, count_int32)
state = ip.initialize()
iterations = 10
for _ in range(iterations):
# TODO(b/122321354): remove the .item() call on `state` once numpy.int32
# type is supported.
state = ip.next(state.item())
self.assertEqual(state, iterations)
def test_constructor_with_tensors_unknown_dimensions(self):
@computations.tf_computation
def init():
return tf.constant([], dtype=tf.string)
@computations.tf_computation(
computation_types.TensorType(shape=[None], dtype=tf.string))
def next_fn(strings):
return tf.concat([strings, tf.constant(['abc'])], axis=0)
try:
iterative_process.IterativeProcess(init, next_fn)
except: # pylint: disable=bare-except
self.fail('Could not construct an IterativeProcess with parameter types '
                'including unknown dimension tensors.')
def test_constructor_with_state_tuple_arg(self):
ip = iterative_process.IterativeProcess(initialize, add_int32)
state = ip.initialize()
iterations = 10
for val in range(iterations):
state = ip.next(state, val)
self.assertEqual(state, sum(range(iterations)))
def test_constructor_with_state_multiple_return_values(self):
ip = iterative_process.IterativeProcess(initialize, add_mul_int32)
state = ip.initialize()
iterations = 10
for val in range(iterations):
state, product = ip.next(state, val)
self.assertEqual(state, sum(range(iterations)))
self.assertEqual(product, sum(range(iterations - 1)) * (iterations - 1))
def test_constructor_with_initialize_bad_type(self):
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
iterative_process.IterativeProcess(initialize_fn=None, next_fn=add_int32)
with self.assertRaisesRegex(
TypeError, r'initialize_fn must be a no-arg tff.Computation'):
@computations.federated_computation(tf.int32)
def one_arg_initialize(one_arg):
del one_arg # Unused.
return values.to_value(0)
iterative_process.IterativeProcess(
initialize_fn=one_arg_initialize, next_fn=add_int32)
def test_constructor_with_next_bad_type(self):
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
iterative_process.IterativeProcess(initialize_fn=initialize, next_fn=None)
def test_constructor_with_type_mismatch(self):
with self.assertRaisesRegex(
TypeError, r'The return type of initialize_fn must be assignable.*'):
@computations.federated_computation(tf.float32, tf.float32)
def add_float32(current, val):
return current + val
iterative_process.IterativeProcess(
initialize_fn=initialize, next_fn=add_float32)
with self.assertRaisesRegex(
TypeError,
'The return type of next_fn must be assignable to the first parameter'):
@computations.federated_computation(tf.int32)
def add_bad_result(_):
return 0.0
iterative_process.IterativeProcess(
initialize_fn=initialize, next_fn=add_bad_result)
with self.assertRaisesRegex(
TypeError,
'The return type of next_fn must be assignable to the first parameter'):
@computations.federated_computation(tf.int32)
def add_bad_multi_result(_):
return 0.0, 0
iterative_process.IterativeProcess(
initialize_fn=initialize, next_fn=add_bad_multi_result)
if __name__ == '__main__':
# Note: num_clients must be explicit here to correctly test the broadcast
# behavior. Otherwise TFF will infer there are zero clients, which is an
# error.
executor = executor_stacks.local_executor_factory(num_clients=3)
default_executor.set_default_executor(executor)
test.main()
| 35.121019
| 80
| 0.745194
|
f080a4077ad58fee5c74d1b7d24ad3a9809b85e7
| 635
|
py
|
Python
|
problems/A/NearestInterestingNumber.py
|
deveshbajpai19/CodeForces
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 55
|
2016-06-19T05:45:15.000Z
|
2022-03-31T15:18:53.000Z
|
problems/A/NearestInterestingNumber.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | null | null | null |
problems/A/NearestInterestingNumber.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 25
|
2016-07-29T13:03:15.000Z
|
2021-09-17T01:45:45.000Z
|
__author__ = 'Devesh Bajpai'
'''
https://codeforces.com/problemset/problem/1183/A
Solution: Using the brute-force method to generate all integers >= n, and checking if their digits' sum is divisible
by 4. The answer is bound to occur, so using a while-True loop doesn't hurt.
'''
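# Worked example (values chosen here for illustration): for n = 432 the digit sums of
# 432, 433, 434, 435 are 9, 10, 11, 12, so 435 is the first number whose digit sum is
# divisible by 4 and is the answer.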
def solve(n):
candidate = n
while True:
if is_digit_sum_div_4(candidate):
return candidate
else:
candidate += 1
def is_digit_sum_div_4(n):
sum = 0
while n > 0:
sum += n % 10
n /= 10
return sum % 4 == 0
if __name__ == "__main__":
n = int(raw_input())
print solve(n)
| 18.142857
| 116
| 0.611024
|
e7b66d64c922a86d2dc1febd6296914971fb685d
| 1,872
|
py
|
Python
|
tools/telemetry/PRESUBMIT.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-01-16T03:57:28.000Z
|
2021-01-23T15:29:45.000Z
|
tools/telemetry/PRESUBMIT.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/PRESUBMIT.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-04-17T13:19:09.000Z
|
2021-10-21T12:55:15.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
PYLINT_BLACKLIST = []
PYLINT_DISABLED_WARNINGS = ['R0923', 'R0201', 'E1101']
def _CommonChecks(input_api, output_api):
results = []
# TODO(nduca): This should call update_docs.IsUpdateDocsNeeded().
# Disabled due to crbug.com/255326.
if False:
update_docs_path = os.path.join(
input_api.PresubmitLocalPath(), 'update_docs')
assert os.path.exists(update_docs_path)
results.append(output_api.PresubmitError(
'Docs are stale. Please run:\n' +
'$ %s' % os.path.abspath(update_docs_path)))
# Importing telemetry.web_components actually brings tvcm into the path.
import telemetry.web_components # pylint: disable=W0612
from tvcm import presubmit_checker
checker = presubmit_checker.PresubmitChecker(input_api, output_api)
results += checker.RunChecks()
results.extend(input_api.canned_checks.RunPylint(
input_api, output_api,
black_list=PYLINT_BLACKLIST,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
return results
def GetPathsToPrepend(input_api):
return [input_api.PresubmitLocalPath()]
def RunWithPrependedPath(prepended_path, fn, *args):
old_path = sys.path
try:
sys.path = prepended_path + old_path
return fn(*args)
finally:
sys.path = old_path
def CheckChangeOnUpload(input_api, output_api):
def go():
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def CheckChangeOnCommit(input_api, output_api):
def go():
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
| 31.2
| 74
| 0.744124
|
af818ce8ed3fdbb0835903defb23c21edb8c006e
| 90
|
py
|
Python
|
weebhooks/errors.py
|
Vashione/weebhooks
|
2faaec274479edb65419d1c78dbd736d7e80d883
|
[
"MIT"
] | null | null | null |
weebhooks/errors.py
|
Vashione/weebhooks
|
2faaec274479edb65419d1c78dbd736d7e80d883
|
[
"MIT"
] | null | null | null |
weebhooks/errors.py
|
Vashione/weebhooks
|
2faaec274479edb65419d1c78dbd736d7e80d883
|
[
"MIT"
] | null | null | null |
class InvalidArgument(Exception):
"""Raised when given improper arguments."""
pass
| 30
| 47
| 0.722222
|
2ef35040a6f9a84ea955bbe007fa13a54437da0d
| 21,645
|
py
|
Python
|
sparse_mixers/run_classifier.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
sparse_mixers/run_classifier.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
sparse_mixers/run_classifier.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run sequence-level classification (and regression) fine-tuning."""
import functools
import math
import os
from typing import Callable, Dict, Mapping, Optional, Tuple, Union
from absl import logging
from flax import jax_utils
from flax.metrics import tensorboard
from flax.training import common_utils
import jax
from jax import random
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
from scipy import stats as scipy_stats
from sklearn import metrics as skl_metrics
import tensorflow_datasets as tfds
from sparse_mixers import checkpoints
from sparse_mixers import core_utils
from sparse_mixers import input_pipeline
from sparse_mixers import models
from sparse_mixers import train_utils
import sentencepiece as spm
# Type Stubs
Batch = train_utils.Batch
ClassificationStats = models.ClassificationStats
Loss = train_utils.Loss
Params = train_utils.Params
PRNGKey = train_utils.PRNGKey
FlaxTrainState = train_utils.FlaxTrainState
def _replicate_and_shard_target(target,
sharded_match_fn,
not_sharded_match_fn):
"""Replicates and shards parameters and state accordingly.
Args:
target: Train state or parameters to replicate and shard.
sharded_match_fn: Filter function for identifying sharded (mixture of
expert) parameters.
not_sharded_match_fn: Filter function for identifying replicated parameters.
Returns:
Replicated and (potentially) sharded target.
"""
if sharded_match_fn:
target = core_utils.tree_replicate_by_name(target, not_sharded_match_fn)
target = core_utils.tree_shard_by_name(target, sharded_match_fn)
else:
target = jax_utils.replicate(target)
return target
def _clear_pretrained_output_layer(state_cpu,
ckpt_state):
"""Clear ("classification") output layer weights.
We use a fresh output layer because the classification tasks differ from the
MLM and NSP pre-training tasks.
Args:
state_cpu: CPU-initialized train state, containing shape initialized
parameters.
ckpt_state: Initialized model state (parameters) from restored checkpoint.
Returns:
Inputs parameters, but with output layer cleared.
"""
ckpt_state["params"]["classification"] = state_cpu.params["classification"]
ckpt_state["opt_state"] = core_utils.tree_map_with_names(
jnp.zeros_like,
ckpt_state["opt_state"],
filter_fn=core_utils.match_fn(r".*classification.*"))
return ckpt_state
def _restore_state_from_checkpoint(
workdir, state_cpu,
sharded_match_fn,
not_sharded_match_fn,
config):
"""Attempts to restore train state from latest checkpoint or config.
Args:
workdir: Working directory for model training. We first attempt to resume
training from this directory.
state_cpu: CPU-initialized train state, containing shape initialized
parameters.
sharded_match_fn: Filter function for identifying sharded (mixture of
expert) parameters.
not_sharded_match_fn: Filter function for identifying replicated parameters.
config: Model and training configuration.
Returns:
- Restored and replicated train state.
- Start step based on restored model.
"""
# If current job restarts, attempt to continue from most recent checkpoint.
state = checkpoints.restore_checkpoint(workdir, state_cpu, sharded_match_fn)
if state:
start_step = int(state.step)
state = _replicate_and_shard_target(state, sharded_match_fn,
not_sharded_match_fn)
else:
start_step = 0
if "init_checkpoint_dir" in config and config.init_checkpoint_dir:
# Otherwise, try to restore model state from config checkpoint.
ckpt_state = checkpoints.restore_checkpoint(
config.init_checkpoint_dir,
target=None,
sharded_match_fn=sharded_match_fn)
ckpt_state = _clear_pretrained_output_layer(state_cpu, ckpt_state)
ckpt_state = _replicate_and_shard_target(ckpt_state, sharded_match_fn,
not_sharded_match_fn)
state = jax_utils.replicate(state_cpu)
state = state.restore_state(ckpt_state)
else:
# Failing the above attempts, we replicate all parameters (including any
# experts) equally across all devices.
state = jax_utils.replicate(state_cpu)
return state, start_step
def _init_params(model, key,
config):
"""Initializes model state.
Args:
model: Model to initialize.
key: Random number generator key.
config: Model specifications; used to configure model input shapes.
Returns:
Initial model parameters.
"""
init_batch = {
"input_ids": jnp.ones((1, config.max_seq_length), jnp.int32),
"type_ids": jnp.ones((1, config.max_seq_length), jnp.int32),
"labels": jnp.ones((1, 1), jnp.int32)
}
key, dropout_key, jitter_key = random.split(key, num=3)
# Ensure parameters created in host RAM. Send them to devices as needed.
jit_init = jax.jit(model.init, backend="cpu")
initial_variables = jit_init(
{
"params": key,
"dropout": dropout_key,
"jitter": jitter_key
}, **init_batch)
return initial_variables["params"]
def _compute_loss_and_metrics(
params, batch, rng,
model, is_experts_model,
auxiliary_loss_factor,
router_z_loss_factor):
"""Computes cross-entropy loss and metrics for classification tasks.
Args:
params: Model state (parameters).
batch: Current batch of examples.
rng: Random number generator key.
model: The model itself. Flax separates model state and architecture.
is_experts_model: If true, treat this model as a mixture of experts model
and attempt to retrieve expert diversity metrics.
auxiliary_loss_factor: Factor by which to scale auxiliary load balancing
loss for mixture of experts models.
router_z_loss_factor: Factor by which to scale router z-loss for mixture of
experts models.
Returns:
- Model loss.
- Raw metrics.
"""
inputs = {
"input_ids": batch["input_ids"],
"type_ids": batch["type_ids"],
"labels": batch["label"]
}
dropout_key, jitter_key = random.split(rng)
output, state = model.apply({"params": params},
rngs={
"dropout": dropout_key,
"jitter": jitter_key
},
mutable=["intermediates"],
**inputs)
# To correctly normalize loss, we must first sum the model output across all
# devices.
output = jax.lax.psum(output, axis_name="batch")
total_loss = output.batch_loss / output.num_labels
if is_experts_model:
# Experts are sharded so we can gather their metrics independently on each
# device.
expert_metrics = train_utils.summarize_expert_metrics(
state, auxiliary_loss_factor, router_z_loss_factor)
total_loss += expert_metrics.auxiliary_loss + expert_metrics.router_z_loss
output = output.replace(expert_metrics=expert_metrics)
return total_loss, output
def _compute_stats(
params, batch, model,
scoring_fn):
"""Runs inference and computes model predictions.
Args:
params: Model state (parameters).
batch: Current batch of examples.
model: The model itself. Flax separates model state and architecture.
scoring_fn: Task dependent function mapping raw model output logits to
prediction.
Returns:
Model predictions along with example labels.
"""
inputs = {
"input_ids": batch["input_ids"],
"type_ids": batch["type_ids"],
"deterministic": True
}
y = model.apply({"params": params}, **inputs)
result = {
"idx": batch["idx"],
"label": batch["label"],
"prediction": scoring_fn(y),
"input_ids": batch["input_ids"], # Required for SQuAD F1 metric
}
return result
def _create_eval_metrics_fn(
dataset_name
):
"""Creates a function that computes task-relevant metrics.
Args:
dataset_name: TFDS name of dataset.
Returns:
Relevant metric function.
"""
def get_accuracy(gold, guess):
"""Computes accuracy."""
return (gold == guess).mean()
def get_spearmanr(x, y):
"""Computes Spearman correlation coefficient."""
return scipy_stats.spearmanr(x, y).correlation
eval_metrics = {}
if dataset_name == "glue/stsb":
eval_metrics["spearmanr"] = get_spearmanr
elif (dataset_name == "super_glue/multirc" or
dataset_name == "super_glue/record"):
# MultiRC and ReCoRD answers are grouped by premise/query (see
# maybe_group_stats()), so accuracy over the question group is equivalent to
# the exact match for all question answers.
eval_metrics["exact_match"] = get_accuracy
else:
eval_metrics["accuracy"] = get_accuracy
if dataset_name in ("glue/mrpc", "glue/qqp", "super_glue/record"):
eval_metrics["f1"] = lambda gold, guess, ids: skl_metrics.f1_score( # pylint:disable=g-long-lambda
gold, guess)
elif dataset_name == "super_glue/cb":
eval_metrics["f1"] = lambda gold, guess, ids: skl_metrics.f1_score( # pylint:disable=g-long-lambda
gold, guess, average="macro")
elif dataset_name == "super_glue/multirc":
# F1 on all answer-options.
eval_metrics["f1a"] = lambda gold, guess, ids: skl_metrics.f1_score( # pylint:disable=g-long-lambda
np.concatenate(gold),
np.concatenate(guess),
average="micro")
def maybe_group_stats(
stats):
"""Task-dependent pre-processing of raw model stats.
The original COPA, MultiRC, ReCoRD tasks contain multiple answer examples,
which our data pipeline has split into single answer examples. Here, we
regroup the examples by idx.
- For COPA and ReCoRD, we then use the most likely candidate (per idx) as
the model's True prediction.
    - For MultiRC, we simply group results and run metrics over the groups.
- All other tasks use the raw (ungrouped) stats.
Args:
stats: Raw model predictions and input batch ids.
Returns:
- For COPA/ReCoRD: Most likely model candidate predictions and labels.
- For MultiRC: Grouped predictions and labels.
- For all other tasks: Unprocessed predictions and labels.
"""
if (dataset_name == "super_glue/copa" or
dataset_name == "super_glue/multirc" or
dataset_name == "super_glue/record"):
grouped = { # pylint:disable=g-complex-comprehension
idx: {
"prediction": [],
"label": [],
} for idx in stats["idx"]
}
for idx, prediction, label in zip(stats["idx"], stats["prediction"],
stats["label"]):
grouped[idx]["prediction"].append(prediction)
grouped[idx]["label"].append(label)
if (dataset_name == "super_glue/record" or
dataset_name == "super_glue/copa"):
predictions = []
labels = []
for idx in grouped:
i = np.asarray(grouped[idx]["prediction"]).argmax()
labels.append(grouped[idx]["label"][i])
# The most likely prediction is always our True prediction.
predictions.append(True)
return np.array(labels), np.array(predictions)
else:
idxs = grouped.keys()
predictions = np.array([grouped[idx]["prediction"] for idx in idxs])
labels = np.array([grouped[idx]["label"] for idx in idxs])
return labels, predictions
else:
return stats["label"], stats["prediction"]
def metrics_fn(stats):
labels, predictions = maybe_group_stats(stats)
res = {}
for name, fn in eval_metrics.items():
res[name] = fn(labels, predictions)
return res
return metrics_fn
def _evaluate(p_eval_step,
params, eval_batch):
"""Computes evaluation metrics.
Args:
p_eval_step: Parallelized evaluation step computation.
params: Model state.
eval_batch: Batch of evaluation examples.
Returns:
Raw model predictions and metrics.
"""
n_devices_per_host = jax.local_device_count()
batch_size = eval_batch["idx"].shape[0]
remainder = batch_size % n_devices_per_host
if remainder:
pad_amount = n_devices_per_host - remainder
def pad(x):
assert x.shape[0] == batch_size
return np.concatenate([x] + [x[:1]] * pad_amount, axis=0)
eval_batch = jax.tree_map(pad, eval_batch)
eval_batch = common_utils.shard(eval_batch)
metrics = p_eval_step(params, eval_batch)
metrics = jax.tree_map(np.array, metrics)
metrics = jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:]), metrics)
if remainder:
metrics = jax.tree_map(lambda x: x[:-pad_amount], metrics)
return metrics
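# Illustrative note (hypothetical sizes): with 4 local devices and an eval batch of 7
# examples, `remainder` is 3 and `pad_amount` is 1, so the batch is padded to 8 by
# repeating the first example, sharded to shape (4, 2, ...), and the single padded row
# is dropped again after the metrics are reshaped back to a flat batch.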
def train_and_evaluate(config, workdir,
vocab_filepath):
"""Runs a training and evaluation loop.
Args:
config: Model and training configuration.
workdir: Working directory for checkpoints and TensorBoard summaries. If
this contains a checkpoint, training will be resumed from the latest
checkpoint.
vocab_filepath: Absolute path to SentencePiece vocab model.
Raises:
ValueError: If training or eval batch sizes won't fit number of hosts and
devices, or config is underspecified.
"""
# Update config before config validation.
with config.unlocked():
# Numeric floating point type to use for model computations.
config.dtype = jnp.float32
train_utils.validate_config(config)
per_host_train_batch_size = config.train_batch_size // jax.process_count()
per_host_eval_batch_size = config.eval_batch_size // jax.process_count()
if jax.process_index() == 0:
train_summary_writer = tensorboard.SummaryWriter(
os.path.join(workdir, "train"))
eval_summary_writer = tensorboard.SummaryWriter(
os.path.join(workdir, "eval"))
else:
train_summary_writer = None
eval_summary_writer = None
tokenizer = spm.SentencePieceProcessor()
tokenizer.Load(vocab_filepath)
ds_info = tfds.builder(config.dataset_name).info
num_train_examples = ds_info.splits[tfds.Split.TRAIN].num_examples
num_train_steps = int(num_train_examples * config.num_train_epochs //
config.train_batch_size)
num_warmup_steps = int(config.warmup_proportion * num_train_steps)
  # Round up evaluation frequency to a multiple of 10.
eval_frequency = int(
math.ceil(config.eval_proportion * num_train_steps / 10)) * 10
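  # For example (hypothetical numbers): eval_proportion=0.05 with 12345 training steps
  # gives int(ceil(617.25 / 10)) * 10 = 620 steps between evaluations.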
# STSB is a regression task. COPA and ReCoRD are treated as scalar/regression
# tasks during training.
is_regression_task = (
config.dataset_name == "glue/stsb" or
config.dataset_name == "super_glue/copa" or
config.dataset_name == "super_glue/record")
if is_regression_task:
num_classes = 1
else:
num_classes = ds_info.features["label"].num_classes
with config.unlocked():
config.vocab_size = tokenizer.GetPieceSize()
config.pad_id = tokenizer.pad_id()
config = ml_collections.FrozenConfigDict(config)
model = models.SequenceClassificationModel(config, num_classes)
rng = random.PRNGKey(config.seed)
rng, init_rng = random.split(rng)
params = _init_params(model, init_rng, config)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
factors="constant * linear_warmup * linear_decay",
base_learning_rate=config.learning_rate,
warmup_steps=num_warmup_steps,
decay_steps=num_train_steps - num_warmup_steps,
)
tx = optax.adamw(
learning_rate_fn, b1=0.9, b2=0.999, eps=1e-6, weight_decay=0.01)
if config.clipped_grad_norm:
tx = optax.chain(optax.clip_by_global_norm(config.clipped_grad_norm), tx)
# jit state creation to ensure arrays are created on same device as input
# (i.e. CPU).
state_cpu = jax.jit(
functools.partial(
FlaxTrainState.create, apply_fn=model.apply, params=params, tx=tx))()
# We access model params only via state.params
del params
if config.num_experts > 1:
sharded_match_fn = core_utils.match_fn(r".*expert.*")
not_sharded_match_fn = lambda name: not sharded_match_fn(name)
else:
sharded_match_fn = None
not_sharded_match_fn = lambda name: True
state, start_step = _restore_state_from_checkpoint(workdir, state_cpu,
sharded_match_fn,
not_sharded_match_fn,
config)
if is_regression_task:
scoring_fn = lambda y: y[Ellipsis, 0]
else:
scoring_fn = lambda y: y.argmax(-1)
compute_stats = functools.partial(
_compute_stats, model=model, scoring_fn=scoring_fn)
classification_inputs = functools.partial(
input_pipeline.classification_inputs,
dataset_name=config.dataset_name,
max_seq_length=config.max_seq_length,
tokenizer=tokenizer)
train_ds = classification_inputs(
split=tfds.Split.TRAIN,
batch_size=per_host_train_batch_size,
training=True)
train_iter = iter(train_ds)
if config.dataset_name == "glue/mnli":
# MNLI contains two validation and test datasets.
split_suffixes = ["_matched", "_mismatched"]
else:
split_suffixes = [""]
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
rngs = random.split(rng, jax.local_device_count())
loss_and_metrics_fn = functools.partial(
_compute_loss_and_metrics,
model=model,
is_experts_model=config.num_experts > 1,
auxiliary_loss_factor=config.auxiliary_loss_factor,
router_z_loss_factor=config.router_z_loss_factor)
train_step = functools.partial(
train_utils.pmap_train_step,
loss_and_metrics_fn=loss_and_metrics_fn,
axis_name="batch",
sharded_match_fn=sharded_match_fn,
gradient_accum_steps=config.gradient_accum_steps)
p_train_step = jax.pmap(train_step, axis_name="batch")
p_eval_step = jax.pmap(compute_stats, axis_name="batch")
eval_metrics_fn = _create_eval_metrics_fn(config.dataset_name)
train_stats = []
logging.info("Starting training loop.")
logging.info("====================")
for step in range(start_step, num_train_steps):
with jax.profiler.StepTraceContext("train", step_num=step):
train_batch = next(train_iter)
train_batch = common_utils.shard(train_batch)
state, train_step_stats, rngs = p_train_step(state, train_batch, rng=rngs)
train_stats.append(train_step_stats)
if ((step > 0 and config.save_checkpoints_steps and
step % config.save_checkpoints_steps == 0) or
step == num_train_steps - 1):
# We allow all hosts to potentially save checkpoints because some model
# parameters are sharded across devices. Parameters replicated across
# devices (i.e. not sharded) will only be checkpointed by host 0.
unreplicated_train_state = jax.tree_map(
np.array,
core_utils.tree_unreplicate_by_name(state, not_sharded_match_fn))
checkpoints.save_checkpoint(
workdir,
unreplicated_train_state,
sharded_match_fn,
step,
keep=config.checkpoints_to_keep)
del unreplicated_train_state # Only used for checkpointing.
# Periodic metric handling.
if step % eval_frequency != 0 and step < num_train_steps - 1:
continue
logging.info("Gathering training metrics at step: %d", step)
train_metrics = train_utils.collect_metrics(train_stats)
train_summary = train_utils.compute_classification_metrics(
train_metrics, is_regression_task)
train_summary["learning_rate"] = learning_rate_fn(step)
if jax.process_index() == 0:
assert train_summary_writer
for key, val in train_summary.items():
train_summary_writer.scalar(key, val, step)
train_summary_writer.flush()
# Reset metric accumulation for next training evaluation cycle.
train_stats = []
logging.info("Gathering validation metrics at step: %d", step)
for split_suffix in split_suffixes:
eval_ds = classification_inputs(
split=tfds.Split.VALIDATION + split_suffix,
batch_size=per_host_eval_batch_size,
training=False)
eval_stats = []
for _, eval_batch in zip(range(config.max_num_eval_steps), eval_ds):
eval_stats.append(_evaluate(p_eval_step, state.params, eval_batch))
eval_metrics = {}
for k in eval_stats[0]: # All batches of output stats are the same size
eval_metrics[k] = np.concatenate([stat[k] for stat in eval_stats],
axis=0)
eval_summary = eval_metrics_fn(eval_metrics)
if jax.process_index() == 0:
assert eval_summary_writer
for key, val in eval_summary.items():
eval_summary_writer.scalar(f"{key}{split_suffix}", val, step)
eval_summary_writer.flush()
| 34.632
| 104
| 0.693139
|
8b225f8600d283676ceb7b82e64caaf11a090d73
| 14,872
|
py
|
Python
|
tests/app/main/views/test_add_service.py
|
davidbgk/notification-admin
|
ad1a7734f84492a085420ee052c243d9fb189adc
|
[
"MIT"
] | null | null | null |
tests/app/main/views/test_add_service.py
|
davidbgk/notification-admin
|
ad1a7734f84492a085420ee052c243d9fb189adc
|
[
"MIT"
] | null | null | null |
tests/app/main/views/test_add_service.py
|
davidbgk/notification-admin
|
ad1a7734f84492a085420ee052c243d9fb189adc
|
[
"MIT"
] | null | null | null |
import pytest
from flask import session, url_for
from app.main.forms import FieldWithLanguageOptions
from app.utils import is_gov_user
from tests import organisation_json
from tests.conftest import mock_get_organisation_by_domain, normalize_spaces
def test_non_gov_user_cannot_see_add_service_button(
client,
mock_login,
mock_get_non_govuser,
api_nongov_user_active,
mock_get_organisations,
mock_get_organisations_and_services_for_user,
):
client.login(api_nongov_user_active)
response = client.get(url_for('main.choose_account'))
assert 'Add a new service' not in response.get_data(as_text=True)
assert response.status_code == 200
@pytest.mark.parametrize('org_json', (
None,
organisation_json(organisation_type=None),
))
def test_get_should_render_add_service_template(
client_request,
mocker,
org_json,
):
mocker.patch(
'app.organisations_client.get_organisation_by_domain',
return_value=org_json,
)
page = client_request.get('main.add_service')
assert page.select_one('h1').text.strip() == 'Name your service'
assert page.select_one('input[name=name]')['value'] == ''
assert [
label.text.strip() for label in page.select('.multiple-choice label')
] == []
assert [
radio['value'] for radio in page.select('.multiple-choice input')
] == []
def test_get_should_not_render_radios_if_org_type_known(
client_request,
mocker,
):
mock_get_organisation_by_domain(mocker, organisation_type='central')
page = client_request.get('main.add_service')
assert page.select_one('h1').text.strip() == 'Name your service'
assert page.select_one('input[name=name]')['value'] == ''
assert not page.select('.multiple-choice')
def test_visible_branding_choices_on_service_email_from_step(
client_request
):
page = client_request.post(
'main.add_service',
_data={
'name': 'testing the post',
},
current_step='choose_email_from',
_expected_status=200,
)
default_branding = page.select_one('input[name=email_from]')
assert default_branding['type'] == 'text'
def test_form_with_no_branding_should_warn_this_cant_be_empty(
client_request,
):
page = client_request.post(
'main.add_service',
_data={
'default_branding': '',
},
current_step='choose_logo',
_expected_status=200,
)
assert normalize_spaces(page.select_one('.error-message').text) == (
'This cannot be empty'
)
def test_form_with_invalid_branding_should_request_another_valid_value(
client_request,
):
page = client_request.post(
'main.add_service',
_data={
'name': 'Show me the branding Jerry',
'default_branding': '__portuguese__',
},
current_step='choose_logo',
_expected_status=200,
)
assert normalize_spaces(page.select_one('.error-message').text) == (
'You need to choose an option'
)
def test_wizard_no_flow_information_should_go_to_step1(
client_request,
):
page = client_request.post(
'main.add_service',
_data={
},
_expected_status=200,
)
assert page.select_one('h1').text.strip() == 'Name your service'
assert page.select_one('input[name=name]')['value'] == ''
def test_wizard_flow_with_step_1_should_display_service_name_form(
client_request,
):
page = client_request.post(
'main.add_service',
_data={
'name': '',
'next_step': 'choose_service_name',
},
_expected_status=200,
)
assert page.select_one('h1').text.strip() == 'Name your service'
def test_wizard_flow_with_step_1_should_call_service_name_is_unique(
client_request,
mock_service_name_is_unique,
):
client_request.post(
'main.add_service',
_data={
'name': 'Service One'
},
current_step='choose_service_name',
_expected_status=302,
)
assert mock_service_name_is_unique.called is True
def test_wizard_flow_with_step_2_should_display_email_from(
client_request,
):
page = client_request.post(
'main.add_service',
_data={
},
current_step='choose_email_from',
_expected_status=200,
)
assert page.select_one('h1').text.strip() == 'Create sending email address'
def test_wizard_flow_with_step_2_should_call_email_from_is_unique(
client_request,
mock_service_email_from_is_unique,
):
client_request.post(
'main.add_service',
_data={
'email_from': 'email.from.here'
},
current_step='choose_email_from',
_expected_status=302,
)
assert mock_service_email_from_is_unique.called is True
def test_wizard_flow_with_step_3_should_display_branding_form(
client_request,
mock_service_email_from_is_unique
):
page = client_request.get(
'main.add_service',
_data={
},
current_step='choose_logo',
_expected_status=200,
)
assert page.select_one('h1').text.strip() == 'Choose a logo for your service'
def test_wizard_flow_with_non_matching_steps_info_should_fallback_to_step1(
client_request,
):
page = client_request.post(
'main.add_service',
_data={
'current_step': '',
},
_expected_status=200,
)
assert page.select_one('h1').text.strip() == 'Name your service'
def test_wizard_flow_with_junk_step_info_should_fallback_to_step1(
client_request,
mock_service_name_is_unique
):
page = client_request.post(
'main.add_service',
_data={
'name': '',
'email_from': 'junk_from',
'default_branding': FieldWithLanguageOptions.FRENCH_OPTION_VALUE,
},
_follow_redirects=True,
current_step='junk_step',
_expected_status=200,
)
assert page.select_one('h1').text.strip() == 'Name your service'
assert mock_service_name_is_unique.called is False
@pytest.mark.parametrize('email_address', (
# User’s email address doesn’t matter when the organisation is known
'test@tbs-sct.gc.ca',
'test@canada.ca',
))
@pytest.mark.parametrize('inherited, posted, persisted, sms_limit', (
(None, 'central', 'central', 250000),
(None, 'nhs_central', 'nhs_central', 250000),
(None, 'nhs_gp', 'nhs_gp', 25000),
(None, 'nhs_local', 'nhs_local', 25000),
(None, 'local', 'local', 25000),
(None, 'emergency_service', 'emergency_service', 25000),
(None, 'school_or_college', 'school_or_college', 25000),
(None, 'other', 'other', 25000),
('central', None, 'central', 250000),
('nhs_central', None, 'nhs_central', 250000),
('nhs_local', None, 'nhs_local', 25000),
('local', None, 'local', 25000),
('emergency_service', None, 'emergency_service', 25000),
('school_or_college', None, 'school_or_college', 25000),
('other', None, 'other', 25000),
('central', 'local', 'central', 250000),
))
@pytest.mark.skip(reason="feature not in use - defaults to central")
def test_should_add_service_and_redirect_to_tour_when_no_services(
mocker,
client_request,
mock_create_service,
mock_create_service_template,
mock_get_services_with_no_services,
api_user_active,
mock_create_or_update_free_sms_fragment_limit,
mock_get_all_email_branding,
inherited,
email_address,
posted,
persisted,
sms_limit,
):
api_user_active['email_address'] = email_address
client_request.login(api_user_active)
mock_get_organisation_by_domain(mocker, organisation_type=inherited)
client_request.post(
'main.add_service',
_data={
'name': 'testing the post',
'organisation_type': posted,
},
_expected_status=302,
_expected_redirect=url_for(
'main.start_tour',
service_id=101,
template_id="Example%20text%20message%20template",
_external=True,
),
)
assert mock_get_services_with_no_services.called
mock_create_service.assert_called_once_with(
service_name='testing the post',
organisation_type=persisted,
message_limit=50,
restricted=True,
user_id=api_user_active['id'],
email_from='testing.the.post',
)
mock_create_service_template.assert_called_once_with(
'Example text message template',
'sms',
(
'Hey ((name)), I’m trying out GC Notify. Today is '
'((day of week)) and my favourite colour is ((colour)).'
),
101,
)
assert session['service_id'] == 101
mock_create_or_update_free_sms_fragment_limit.assert_called_once_with(101, sms_limit)
@pytest.mark.skip(reason="feature not in use - defaults to central")
def test_add_service_has_to_choose_org_type(
mocker,
client_request,
mock_create_service,
mock_create_service_template,
mock_get_services_with_no_services,
api_user_active,
mock_create_or_update_free_sms_fragment_limit,
mock_get_all_email_branding,
):
mocker.patch(
'app.organisations_client.get_organisation_by_domain',
return_value=None,
)
page = client_request.post(
'main.add_service',
_data={
'name': 'testing the post',
},
_expected_status=200,
)
assert normalize_spaces(page.select_one('.error-message').text) == (
'You need to choose an option'
)
assert mock_create_service.called is False
assert mock_create_service_template.called is False
assert mock_create_or_update_free_sms_fragment_limit.called is False
@pytest.mark.parametrize('email_address', (
'test@canada.ca',
'test@tbs-sct.gc.ca',
'test@canada.ca',
pytest.param(
'test@not-canada.ca',
marks=pytest.mark.xfail(raises=AssertionError)
)
))
def test_get_should_only_show_nhs_org_types_radios_if_user_has_nhs_email(
client_request,
mocker,
api_user_active,
email_address,
):
api_user_active['email_address'] = email_address
client_request.login(api_user_active)
mocker.patch(
'app.organisations_client.get_organisation_by_domain',
return_value=None,
)
page = client_request.get('main.add_service')
assert page.select_one('h1').text.strip() == 'Name your service'
assert page.select_one('input[name=name]')['value'] == ''
assert [
label.text.strip() for label in page.select('.multiple-choice label')
] == []
assert [
radio['value'] for radio in page.select('.multiple-choice input')
] == []
@pytest.mark.parametrize('organisation_type, free_allowance', [
('central', 25 * 1000)
])
def test_should_add_service_and_redirect_to_dashboard_along_with_proper_side_effects(
app_,
client_request,
mock_create_service,
mock_create_service_template,
mock_get_organisation_by_domain,
api_user_active,
organisation_type,
free_allowance,
mock_create_or_update_free_sms_fragment_limit,
mock_get_all_email_branding,
mock_service_name_is_unique,
mock_service_email_from_is_unique
):
client_request.post(
'main.add_service',
_data={
'name': 'testing the post',
},
_expected_status=200,
_follow_redirects=True,
)
assert mock_service_name_is_unique.called is True
client_request.post(
'main.add_service',
_data={
'email_from': 'testing.the.post',
},
current_step='choose_email_from',
_expected_status=200,
_follow_redirects=True,
)
assert mock_service_email_from_is_unique.called is True
client_request.post(
'main.add_service',
_data={
'default_branding': FieldWithLanguageOptions.FRENCH_OPTION_VALUE,
},
current_step='choose_logo',
_expected_status=302,
_expected_redirect=url_for(
'main.service_dashboard',
service_id=101,
_external=True,
)
)
mock_create_service.assert_called_once_with(
service_name='testing the post',
message_limit=app_.config['DEFAULT_SERVICE_LIMIT'],
restricted=True,
user_id=api_user_active['id'],
email_from='testing.the.post',
default_branding_is_french=True,
organisation_type=organisation_type,
)
mock_create_or_update_free_sms_fragment_limit.assert_called_once_with(101, free_allowance)
assert len(mock_create_service_template.call_args_list) == 0
assert session['service_id'] == 101
def test_should_return_form_errors_when_service_name_is_empty(
client_request,
mock_get_organisation_by_domain,
):
page = client_request.post(
'main.add_service',
_data={
'current_step': 'choose_service_name',
},
_expected_status=200,
)
assert 'This cannot be empty' in page.text
def test_should_return_form_errors_when_service_email_from_is_empty(
client_request,
mock_get_organisation_by_domain,
):
page = client_request.post(
'main.add_service',
_data={
'email_from': ''
},
current_step='choose_email_from',
_expected_status=200,
)
assert 'This cannot be empty' in page.text
def test_should_return_form_errors_with_duplicate_service_name_regardless_of_case(
client_request,
mock_create_duplicate_service,
mock_get_organisation_by_domain,
mock_service_name_is_not_unique
):
page = client_request.post(
'main.add_service',
_data={
'current_step': 'choose_logo',
'email_from': 'servicE1',
'name': 'SERVICE ONE',
'default_branding': FieldWithLanguageOptions.FRENCH_OPTION_VALUE,
'organisation_type': 'central',
},
_expected_status=200,
)
assert page.select_one('.error-message').text.strip() == (
'This service name is already in use'
)
assert mock_service_name_is_not_unique.called is True
def test_non_safelist_user_cannot_access_create_service_page(
client_request,
mock_get_non_govuser,
api_nongov_user_active,
mock_get_organisations,
):
assert is_gov_user(api_nongov_user_active['email_address']) is False
client_request.get(
'main.add_service',
_expected_status=403,
)
def test_non_safelist_user_cannot_create_service(
client_request,
mock_get_non_govuser,
api_nongov_user_active,
mock_get_organisations,
):
assert is_gov_user(api_nongov_user_active['email_address']) is False
client_request.post(
'main.add_service',
_data={'name': 'SERVICE TWO'},
_expected_status=403,
)
| 29.391304
| 94
| 0.67496
|
aef69aeec7b0cc1d98ccdc2674ab8c0b0756a26a
| 874
|
py
|
Python
|
tests/test_postgresql_config.py
|
rohit-dimagi/commcare-cloud
|
55576713f3a12acc3f2df4f24c405df9c30143b3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_postgresql_config.py
|
rohit-dimagi/commcare-cloud
|
55576713f3a12acc3f2df4f24c405df9c30143b3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_postgresql_config.py
|
rohit-dimagi/commcare-cloud
|
55576713f3a12acc3f2df4f24c405df9c30143b3
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import os
import yaml
from parameterized import parameterized
from commcare_cloud.environment.main import Environment
from commcare_cloud.environment.paths import DefaultPaths
from nose.tools import assert_equal
TEST_ENVIRONMENTS_DIR = os.path.join(os.path.dirname(__file__), 'postgresql_config')
TEST_ENVIRONMENTS = os.listdir(TEST_ENVIRONMENTS_DIR)
@parameterized(TEST_ENVIRONMENTS)
def test_postgresql_config(env_name):
env = Environment(DefaultPaths(env_name, environments_dir=TEST_ENVIRONMENTS_DIR))
with open(env.paths.generated_yml) as f:
generated = yaml.safe_load(f)
assert generated.keys() == ['postgresql_dbs']
expected_json = generated['postgresql_dbs']
actual_json = env.postgresql_config.to_generated_variables(env)['postgresql_dbs']
assert_equal(actual_json, expected_json)
| 29.133333
| 85
| 0.802059
|
92368e927002992b0630d4d066ca105bab28835b
| 493
|
py
|
Python
|
Leap Year.py
|
Tulip2MF/100_Days_Challenge
|
026073bd16fab5746b36eb1d3174a20011b2fbd1
|
[
"Unlicense"
] | null | null | null |
Leap Year.py
|
Tulip2MF/100_Days_Challenge
|
026073bd16fab5746b36eb1d3174a20011b2fbd1
|
[
"Unlicense"
] | null | null | null |
Leap Year.py
|
Tulip2MF/100_Days_Challenge
|
026073bd16fab5746b36eb1d3174a20011b2fbd1
|
[
"Unlicense"
] | null | null | null |
def divisionFunction (year,denominator):
if(year % denominator) == 0:
return "Yes"
else:
return "No"
year=int(input("Type the year: \n"))
divideBy4= divisionFunction(year,4)
divideBy400 = divisionFunction(year,400)
divideBy100 = divisionFunction(year,100)
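# Gregorian leap-year rule (context for the checks below): a year is a leap year if it
# is divisible by 4, unless it is a century year that is not divisible by 400.
# For example, 2024 and 2000 are leap years, while 1900 is not.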
if divideBy4 == "Yes":
if divideBy400 == "Yes":
print("Leap year")
elif divideBy100 == "Yes":
print("Not Leap Year")
else:
print("Leap Year")
else:
print("Not Leap year")
| 23.47619
| 40
| 0.626775
|
801a3704eaf8b0db0869723d66119c75167580c0
| 20,653
|
py
|
Python
|
orm_alchemy/alchemy.py
|
HKer-MuCoi/orm-alchemy
|
a6948d71aa7dbcd54f43c7422016b31cd7ec528a
|
[
"MIT"
] | null | null | null |
orm_alchemy/alchemy.py
|
HKer-MuCoi/orm-alchemy
|
a6948d71aa7dbcd54f43c7422016b31cd7ec528a
|
[
"MIT"
] | null | null | null |
orm_alchemy/alchemy.py
|
HKer-MuCoi/orm-alchemy
|
a6948d71aa7dbcd54f43c7422016b31cd7ec528a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
==================
Active-Alchemy
==================
A framework-agnostic wrapper for SQLAlchemy that makes it really easy
to use by implementing a simple active-record-like API, while still using the db.session underneath
:copyright: © 2014/2016 by `Mardix`.
:license: MIT, see LICENSE for more details.
"""
import enum
from math import ceil
NAME = "Active-Alchemy"
# ------------------------------------------------------------------------------
import threading
import json
import datetime
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.orm import scoped_session, sessionmaker, Query
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
import inflection
import sqlalchemy_utils as sa_utils
import arrow
DEFAULT_PER_PAGE = 10
utcnow = arrow.utcnow
def _create_scoped_session(db, query_cls):
session = sessionmaker(autoflush=True, autocommit=True,
bind=db.engine, query_cls=query_cls)
return scoped_session(session)
def _tablemaker(db):
def make_sa_table(*args, **kwargs):
if len(args) > 1 and isinstance(args[1], db.Column):
args = (args[0], db.metadata) + args[1:]
kwargs.setdefault('bind_key', None)
info = kwargs.pop('info', None) or {}
info.setdefault('bind_key', None)
kwargs['info'] = info
return sqlalchemy.Table(*args, **kwargs)
return make_sa_table
def _include_sqlalchemy(db):
for module in sqlalchemy, sqlalchemy.orm:
for key in module.__all__:
if not hasattr(db, key):
setattr(db, key, getattr(module, key))
db.Table = _tablemaker(db)
db.event = sqlalchemy.event
db.utils = sa_utils
db.arrow = arrow
db.utcnow = utcnow
db.SADateTime = db.DateTime
db.DateTime = sa_utils.ArrowType
db.JSONType = sa_utils.JSONType
db.EmailType = sa_utils.EmailType
class BaseQuery(Query):
def get_or_error(self, uid, error):
"""Like :meth:`get` but raises an error if not found instead of
returning `None`.
"""
rv = self.get(uid)
if rv is None:
if isinstance(error, Exception):
raise error
return error()
return rv
def first_or_error(self, error):
"""Like :meth:`first` but raises an error if not found instead of
returning `None`.
"""
rv = self.first()
if rv is None:
if isinstance(error, Exception):
raise error
return error()
return rv
# def paginate(self, **kwargs):
# """Paginate this results.
# Returns an :class:`Paginator` object.
# """
# return Paginator(self, **kwargs)
def paginate(self, page=None, per_page=None, error_out=True, max_per_page=None, count=True):
"""Returns ``per_page`` items from page ``page``.
        If ``page`` or ``per_page`` are ``None``, they default to 1 and 20
        respectively. If ``max_per_page`` is specified, ``per_page`` will be
        limited to that value. If ``count`` is ``False``, no query to help
        determine total page count will be run.
When ``error_out`` is ``True`` (default), the following rules will
cause a 404 response:
* No items are found and ``page`` is not 1.
* ``page`` is less than 1, or ``per_page`` is negative.
* ``page`` or ``per_page`` are not ints.
When ``error_out`` is ``False``, ``page`` and ``per_page`` default to
1 and 20 respectively.
Returns a :class:`Pagination` object.
"""
if page is None:
page = 1
if per_page is None:
per_page = 20
if max_per_page is not None:
per_page = min(per_page, max_per_page)
if page < 1:
if error_out:
raise Exception(404)
else:
page = 1
if per_page < 0:
if error_out:
raise Exception(404)
else:
per_page = 20
items = self.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
raise Exception(404)
if not count:
total = None
else:
total = self.order_by(None).count()
return Pagination(self, page, per_page, total, items)
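# Illustrative pagination sketch (``Note`` is a hypothetical model):
#
#   page = Note.query().order_by(Note.id).paginate(page=2, per_page=10)
#   page.items      # the up-to-10 records on page 2
#   page.total      # total number of matching records
#   page.pages      # total number of pages
#   page.has_next   # whether a page 3 exists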
class ModelTableNameDescriptor(object):
"""
    Derive the table name from the class name when ``__tablename__`` is not set.
"""
def __get__(self, obj, type):
tablename = type.__dict__.get('__tablename__')
if not tablename:
tablename = inflection.underscore(type.__name__)
setattr(type, '__tablename__', tablename)
return tablename
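# Illustrative sketch: with no explicit ``__tablename__``, the name is derived
# from the class name via ``inflection.underscore`` (``UserProfile`` is a
# hypothetical model):
#
#   class UserProfile(db.Model):
#       pass
#
#   UserProfile.__tablename__   # -> "user_profile"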
class EngineConnector(object):
def __init__(self, sa_obj):
self._sa_obj = sa_obj
self._engine = None
self._connected_for = None
self._lock = threading.Lock()
def get_engine(self):
with self._lock:
uri = self._sa_obj.uri
info = self._sa_obj.info
options = self._sa_obj.options
echo = options.get('echo')
if (uri, echo) == self._connected_for:
return self._engine
self._engine = engine = sqlalchemy.create_engine(info, **options)
self._connected_for = (uri, echo)
return engine
class BaseModel(object):
"""
Baseclass for custom user models.
"""
__tablename__ = ModelTableNameDescriptor()
__primary_key__ = "id" # String
to_json_filter = ()
def __iter__(self):
"""Returns an iterable that supports .next()
so we can do dict(sa_instance).
"""
for k in self.__dict__.keys():
if not k.startswith('_'):
yield (k, getattr(self, k))
def __repr__(self):
return '<%s>' % self.__class__.__name__
def dict_as_json(self):
"""
Convert the entity to JSON
:returns str:
"""
data = {}
for k, v in self.to_dict().items():
if isinstance(v, (datetime.datetime, sa_utils.ArrowType, arrow.Arrow)):
v = v.isoformat()
if isinstance(v, enum.Enum):
v = v.value
data[k] = v
return data
def to_dict(self):
"""
Return an entity as dict
:returns dict:
"""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def to_json(self):
"""
Convert the entity to JSON
:returns str:
"""
data = {}
for k, v in self.to_dict().items():
if isinstance(v, (datetime.datetime, sa_utils.ArrowType, arrow.Arrow)):
v = v.isoformat()
data[k] = v
return json.dumps(data)
@property
def json(self):
""" Define a base way to jsonify models
Columns inside `to_json_filter` are excluded """
return {
column: value.isoformat()
if isinstance(value, (datetime.datetime, sa_utils.ArrowType, arrow.Arrow))
else (
                value.strftime("%Y-%m-%d") if isinstance(value, datetime.date)
else (
value.value if isinstance(value, enum.Enum)
else value
)
)
for column, value in self.to_dict().items()
if column not in self.to_json_filter
}
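    # Illustrative serialization sketch (``Note`` is a hypothetical model):
    #
    #   class Note(db.Model):
    #       title = db.Column(db.String(120))
    #       secret = db.Column(db.String(120))
    #       to_json_filter = ("secret",)   # hidden from the ``json`` property
    #
    #   note.to_dict()   # plain dict of column values
    #   note.to_json()   # JSON string, datetimes rendered via isoformat()
    #   note.json        # JSON-ready dict without the "secret" column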
@classmethod
def get(cls, pk):
"""
        Select an entry by its primary key. The key column must be defined as
        ``__primary_key__`` (string)
"""
return cls._query(cls).filter(getattr(cls, cls.__primary_key__) == pk).first()
@classmethod
def create(cls, **kwargs):
"""
To create a new record
:returns object: The new record
"""
record = cls(**kwargs).save()
return record
def update(self, **kwargs):
"""
Update an entry
"""
for k, v in kwargs.items():
setattr(self, k, v)
self.save()
return self
@classmethod
def query(cls, *args):
"""
:returns query:
"""
if not args:
query = cls._query(cls)
else:
query = cls._query(*args)
return query
def save(self):
"""
        Shortcut to add and commit, rolling back on error
"""
try:
self.db.add(self)
self.db.commit()
return self
except Exception as e:
self.db.rollback()
raise
def delete(self, delete=True, hard_delete=False):
"""
Soft delete a record
:param delete: Bool - To soft-delete/soft-undelete a record
:param hard_delete: Bool - *** Not applicable under BaseModel
"""
try:
self.db.session.delete(self)
return self.db.commit()
except Exception as e:
self.db.rollback()
raise
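# ``BaseModel`` adds no columns of its own, so the schema -- including the
# primary key named by ``__primary_key__`` -- is entirely up to the subclass.
# Illustrative sketch with hypothetical names:
#
#   class ApiToken(db.BaseModel):
#       __primary_key__ = "token"
#       token = db.Column(db.String(64), primary_key=True)
#
#   ApiToken.get("abc123")   # looks up by the ``token`` column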
class Model(BaseModel):
"""
    Default model base: integer primary key, created/updated timestamps,
    and soft-delete support
"""
id = Column(Integer, primary_key=True)
created_at = Column(sa_utils.ArrowType, default=utcnow)
updated_at = Column(sa_utils.ArrowType, default=utcnow, onupdate=utcnow)
is_deleted = Column(Boolean, default=False, index=True)
deleted_at = Column(sa_utils.ArrowType, default=None)
@classmethod
def query(cls, *args, **kwargs):
"""
:returns query:
:**kwargs:
            - include_deleted (bool): Set to True to include soft-deleted
              records. Defaults to False
"""
if not args:
query = cls._query(cls)
else:
query = cls._query(*args)
if "include_deleted" not in kwargs or kwargs["include_deleted"] is False:
query = query.filter(cls.is_deleted != True)
return query
@classmethod
def get(cls, id, include_deleted=False):
"""
Select entry by id
:param id: The id of the entry
        :param include_deleted: Deleted records are excluded by default. Set to True to include them
"""
return cls.query(include_deleted=include_deleted) \
.filter(cls.id == id) \
.first()
def delete(self, delete=True, hard_delete=False):
"""
Soft delete a record
:param delete: Bool - To soft-delete/soft-undelete a record
:param hard_delete: Bool - If true it will completely delete the record
"""
# Hard delete
if hard_delete:
try:
self.db.session.delete(self)
return self.db.commit()
except:
self.db.rollback()
raise
else:
data = {
"is_deleted": delete,
"deleted_at": utcnow() if delete else None
}
self.update(**data)
return self
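# Illustrative soft-delete sketch (``Note`` is a hypothetical model):
#
#   note = Note.create(title="draft")
#   note.delete()                            # sets is_deleted=True and deleted_at
#   Note.query().all()                       # soft-deleted record is excluded
#   Note.query(include_deleted=True).all()   # ...and included here
#   Note.get(note.id, include_deleted=True)
#   note.delete(delete=False)                # un-delete
#   note.delete(hard_delete=True)            # remove the row permanently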
class ActiveAlchemy(object):
"""This class is used to instantiate a SQLAlchemy connection to
a database.
db = ActiveAlchemy(_uri_to_database_)
The class also provides access to all the SQLAlchemy
functions from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules.
So you can declare models like this::
class User(db.Model):
login = db.Column(db.String(80), unique=True)
passw_hash = db.Column(db.String(80))
In a web application you need to call `db.session.remove()`
after each response, and `db.session.rollback()` if an error occurs.
    If your application object has `after_request` and `on_exception`
    decorators, just pass that object at creation::
app = Flask(__name__)
db = ActiveAlchemy('sqlite://', app=app)
or later::
db = ActiveAlchemy()
app = Flask(__name__)
db.init_app(app)
.. admonition:: Check types carefully
Don't perform type or `isinstance` checks against `db.Table`, which
emulates `Table` behavior but is not a class. `db.Table` exposes the
`Table` interface, but is a function which allows omission of metadata.
"""
def __init__(self, uri='sqlite://',
app=None,
echo=False,
pool_size=None,
pool_timeout=None,
pool_recycle=60,
convert_unicode=True,
query_cls=BaseQuery):
self.uri = uri
self.info = make_url(uri)
self.options = self._cleanup_options(
echo=echo,
pool_size=pool_size,
pool_timeout=pool_timeout,
pool_recycle=pool_recycle,
convert_unicode=convert_unicode,
)
self.connector = None
self._engine_lock = threading.Lock()
self.session = _create_scoped_session(self, query_cls=query_cls)
self.Model = declarative_base(cls=Model, name='Model')
self.BaseModel = declarative_base(cls=BaseModel, name='BaseModel')
self.Model.db, self.BaseModel.db = self, self
self.Model._query, self.BaseModel._query = self.session.query, self.session.query
if app is not None:
self.init_app(app)
_include_sqlalchemy(self)
def _cleanup_options(self, **kwargs):
options = dict([
(key, val)
for key, val in kwargs.items()
if val is not None
])
return self._apply_driver_hacks(options)
def _apply_driver_hacks(self, options):
if "mysql" in self.info.drivername:
# self.info.query.setdefault('charset', 'utf8')
options.setdefault('pool_size', 10)
options.setdefault('pool_recycle', 7200)
elif self.info.drivername == 'sqlite':
no_pool = options.get('pool_size') == 0
memory_based = self.info.database in (None, '', ':memory:')
if memory_based and no_pool:
raise ValueError(
                    'SQLite in-memory database with an empty connection pool'
                    ' (pool_size = 0) is not possible due to data loss.'
)
return options
def init_app(self, app):
"""This callback can be used to initialize an application for the
use with this database setup. In a web application or a multithreaded
        environment, never use a database without initializing it first,
or connections will leak.
"""
if not hasattr(app, 'databases'):
app.databases = []
if isinstance(app.databases, list):
if self in app.databases:
return
app.databases.append(self)
def shutdown(response=None):
self.session.remove()
return response
def rollback(error=None):
try:
self.session.rollback()
except Exception:
pass
self.set_flask_hooks(app, shutdown, rollback)
def set_flask_hooks(self, app, shutdown, rollback):
if hasattr(app, 'after_request'):
app.after_request(shutdown)
if hasattr(app, 'on_exception'):
app.on_exception(rollback)
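    # The hooks are duck-typed: any object exposing ``after_request`` and
    # ``on_exception`` decorators can be passed to ``init_app``; Flask is just
    # one example. Illustrative sketch, ``MyApp`` is hypothetical:
    #
    #   class MyApp:
    #       def after_request(self, fn): ...   # receives the session-remove hook
    #       def on_exception(self, fn): ...    # receives the rollback hook
    #
    #   db = ActiveAlchemy("sqlite://")
    #   db.init_app(MyApp())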
@property
def engine(self):
"""Gives access to the engine. """
with self._engine_lock:
connector = self.connector
if connector is None:
connector = EngineConnector(self)
self.connector = connector
return connector.get_engine()
@property
def metadata(self):
"""Proxy for Model.metadata"""
return self.Model.metadata
@property
def query(self):
"""Proxy for session.query"""
return self.session.query
def add(self, *args, **kwargs):
"""Proxy for session.add"""
return self.session.add(*args, **kwargs)
def flush(self, *args, **kwargs):
"""Proxy for session.flush"""
return self.session.flush(*args, **kwargs)
def commit(self):
"""Proxy for session.commit"""
return self.session.commit()
def rollback(self):
"""Proxy for session.rollback"""
return self.session.rollback()
def create_all(self):
"""Creates all tables. """
self.Model.metadata.create_all(bind=self.engine)
def drop_all(self):
"""Drops all tables. """
self.Model.metadata.drop_all(bind=self.engine)
def reflect(self, meta=None):
"""Reflects tables from the database. """
meta = meta or MetaData()
meta.reflect(bind=self.engine)
return meta
def __repr__(self):
return "<SQLAlchemy('{0}')>".format(self.uri)
class Pagination:
"""Internal helper class returned by :meth:`BaseQuery.paginate`. You
can also construct it from any other SQLAlchemy query object if you are
working with other libraries. Additionally it is possible to pass `None`
as query object in which case the :meth:`prev` and :meth:`next` will
no longer work.
"""
def __init__(self, query, page, per_page, total, items):
#: the unlimited query object that was used to create this
#: pagination object.
self.query = query
#: the current page number (1 indexed)
self.page = page
#: the number of items to be displayed on a page.
self.per_page = per_page
#: the total number of items matching the query
self.total = total
#: the items for the current page
self.items = items
@property
def pages(self):
"""The total number of pages"""
if self.per_page == 0 or self.total is None:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
assert (
self.query is not None
), "a query object is required for this method to work"
return self.query.paginate(self.page - 1, self.per_page, error_out)
@property
def prev_num(self):
"""Number of the previous page."""
if not self.has_prev:
return None
return self.page - 1
@property
def has_prev(self):
"""True if a previous page exists"""
return self.page > 1
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
assert (
self.query is not None
), "a query object is required for this method to work"
return self.query.paginate(self.page + 1, self.per_page, error_out)
@property
def has_next(self):
"""True if a next page exists."""
return self.page < self.pages
@property
def next_num(self):
"""Number of the next page"""
if not self.has_next:
return None
return self.page + 1
def iter_pages(self, left_edge=2, left_current=2, right_current=5, right_edge=2):
"""Iterates over the page numbers in the pagination. The four
        parameters control how many page numbers are produced at the edges and
        around the current page. Skipped page numbers are represented as `None`.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>…</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in range(1, self.pages + 1):
if (
num <= left_edge
or (
num > self.page - left_current - 1
and num < self.page + right_current
)
or num > self.pages - right_edge
):
if last + 1 != num:
yield None
yield num
last = num
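# Illustrative sketch of rendering pager links without a template engine
# (``Note`` is a hypothetical model):
#
#   pagination = Note.query().order_by(Note.id).paginate(page=3, per_page=10)
#   for num in pagination.iter_pages(left_edge=1, right_edge=1):
#       print(num if num is not None else "...")
#   if pagination.has_next:
#       next_page = pagination.next_num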
| 30.825373
| 101
| 0.563889
|