max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
etc/test/CryoconM32_test.py | MarkRivers/CryoconM32 | 0 | 12758251 | #!/bin/env dls-python2.4
# Test suite to use with pyUnit
from pkg_resources import require
require('dls.autotestframework')
from dls.autotestframework import *
################################################
# Test suite for the CryoconM32 temperature controller
class CryoconM32TestSuite(TestSuite):
    """Test suite for the Cryocon M32 temperature controller EPICS support.

    Declares a "simulation" target (device simulator + example IOC) and a
    "hardware" target, then registers every test case class in this module.
    """
    def createTests(self):
        # Define the targets for this test suite
        # The ioc has to be started with screen for the sake of interfacing with Hudson, the integration engine.
        # Something to do with what happens to the IOC stdin.
        Target("simulation", self, simulationCmds=['data/CryoconM32_sim.py -i 8001 -r 9017'],
               iocDirectory="iocs/example_sim",
               iocBootCmd="bin/linux-x86/stexample.sh",
               epicsDbFiles="db/example_expanded.db",
               runIocInScreenUnderHudson=True,
               # simulationCmds=['python2.4 data/CryoconM32_sim.py'],
               simDevices=[SimDevice("SIM-TS-TCTRL-01", 9017, rpc=True)],
               guiCmds=['edm -m "P=SIM-EA-TCTRL-01,tctrlr=SIM-TS-TCTRL-01,device=SIM-TS-TCTRL-01,record1=T1,record2=T2" -eolc -x data/CryoconM32_detail.edl'])
        Target("hardware", self,
               iocDirectory="iocs/example",
               iocBootCmd="bin/linux-x86/stexample.sh",
               epicsDbFiles="db/example_expanded.db",
               guiCmds=['edm -m "P=SIM-TS-TCTRL-01,tctrlr=SIM-TS-TCTRL-01,device=SIM-TS-TCTRL-01,record1=T1,record2=T2" -eolc -x data/CryoconM32_detail.edl'])
        # The tests.  Constructing each case with `self` registers it with
        # this suite (framework convention used throughout this module).
        CaseIdentifySystem(self)
        CaseGetSensorTemperatures(self)
        CaseGetSensorRawReadings(self)
        CaseGetSensorTemperatureStatistics(self)
        CaseChangeLoopSetpoints(self)
        CaseChangeLoopTypes(self)
        CaseChangeLoopManualOutputs(self)
        CaseChangeLoopPIDs(self)
        CaseSystemStats(self)
        CaseResetSystemStats(self)
        return
################################################
# Intermediate test case class that provides some utility functions
# for this suite
class CryoconM32Case(TestCase):
    """Intermediate base class for all Cryocon M32 test cases.

    Provides the common PV name prefix used to address the (simulated or
    real) controller's EPICS records.
    """
    # This arg_a is needed for the parent class constructor.
    def __init__(self, arg_a):
        '''Constructor. First calls the base class constructor.'''
        TestCase.__init__(self , arg_a)
        # Prefix shared by every PV this suite touches.
        self.pvPrefix = "SIM-TS-TCTRL-01:"
        return
    def dummy(self):
        ''' Dummy for now til I think of some common functions.
        '''
        return
################################################
# Test cases
#
# The EPICS implementation uses 3 templates.
# The tests are split up according to which template they are exercising.
# System tests M32_system.template
# Control loop tests M32_control.template
# Sensor tests M32_sensor.template
#
# -----------------------------------------------
# System tests
# Tests for stuff on the M32_system template.
class CaseIdentifySystem( CryoconM32Case ):
    """Read back the controller's static identification PVs (M32_system template)."""
    def runTest(self):
        '''Get the unit to identify itself.'''
        # Switch on trace processing (TPRO) for the status records so their
        # activity is visible in the IOC log for the rest of the suite.
        self.putPv(self.pvPrefix + "STS:SINKTEMP.TPRO" , 1)
        self.putPv(self.pvPrefix + "STS:AMBIENTTEMP.TPRO" , 1)
        self.putPv(self.pvPrefix + "STS:STATS:TIME.TPRO" , 1)
        print "Model = " + self.getPv(self.pvPrefix + "SYS:MODEL")
        print "Hardware Revision = " + self.getPv(self.pvPrefix + "SYS:HWREV")
        print "Firmware Revision = " + self.getPv(self.pvPrefix + "SYS:FWREV")
        print "Controller Name = " + self.getPv(self.pvPrefix + "SYS:CONTROLLERNAME")
        return
class CaseSystemStats( CryoconM32Case ):
    """Read the system-level statistics PVs and trigger the filter reseed."""
    def runTest(self):
        '''Perform tests on stats functions which belong to the system and not a loop.'''
        print "Stats time = " + str(self.getPv(self.pvPrefix + "STS:STATS:TIME"))
        # The tests in these 2 calls are not actually working in that they return blank information.
        # The protocol call does not appear to be made. There seem to be potential issues
        # in the protocol and template file but need to test against the real instrument
        # to find out what format it really returns the information in.
        print "Sink temperature = " + str(self.getPv(self.pvPrefix + "STS:SINKTEMP"))
        print "Ambient temperature = " + self.getPv(self.pvPrefix + "STS:AMBIENTTEMP")
        print "Testing averaging filter reseed now."
        # This test mysteriously fails because the protocol call is not made.
        # The CryoconM14 has the same command and the same problem in its test suite and also
        # the same problem
        self.putPv(self.pvPrefix + "DMD:RESEED.PROC" , 1)
        print "Returned from reseed"
        return
class CaseResetSystemStats( CryoconM32Case ):
    """Reset the accumulated statistics and verify the stats clock restarts near zero."""
    def runTest(self):
        '''Rezero the accumulated stats, reset the zero of the time over which the stats have been collected.'''
        # Construct names of PVs to be used.
        my_demandPv = self.pvPrefix + "DMD:STATS:RESET.PROC"
        my_statusPv = self.pvPrefix + "STS:STATS:TIME"
        # Grab existing status value.
        my_before = self.getPv(my_statusPv)
        # Write new demand value. Since it is a proc field, it does not make much sense to verify it
        # because it will revert to zero when processing is finished.
        self.putPv(my_demandPv , 1 )
        # Have to wait long enough to be sure the EPICS status record has polled the instrument (SCAN field).
        my_sleeptime = 5
        self.sleep(my_sleeptime)
        # Verify the change has happened as we expect: after a reset, the
        # elapsed stats time must lie between 0 and the time we waited.
        my_after = self.verifyPvInRange(my_statusPv , 0 , my_sleeptime + 1 )
        print "Stats time " + my_statusPv + " changed from " + str(my_before) + " to " + str(my_after)
        return
# End of system tests
# -----------------------------------------------
# Control loop tests.
# Stuff for the M32_control template.
# Note control loop types in use restricted to PID and Man and Off but the real instrument
# supports other loop types.
# The change cases are effectively testing the readback also.
class CaseChangeLoopSetpoints( CryoconM32Case ):
    """Write new setpoints to both control loops and verify via the status PVs."""
    def changeLoopSetpoint(self, arg_loopID, arg_value):
        '''Change setpoint temperature (or other controlled output) on given control loop.'''
        # print "changeLoopSetpoint: loopID = " + arg_loopID + " arg_value = " + str(arg_value)
        # Need a delta for accepting numeric comparison of PV float values.
        my_delta = 0.0001
        # Construct names of PVs to be used.
        my_demandPv = self.pvPrefix + "DMD:LOOP" + arg_loopID + ":SETPOINT"
        my_statusPv = self.pvPrefix + "STS:LOOP" + arg_loopID + ":SETPOINT"
        # Grab existing status value.
        my_before = self.getPv(my_statusPv)
        # Write new demand value and verify it was OK.
        self.putPv(my_demandPv , arg_value )
        self.verifyPvFloat(my_demandPv , arg_value , my_delta )
        # Wait for status to be updated and then verify the new status.
        self.sleep(2)
        my_after = self.verifyPvFloat(my_statusPv , arg_value , my_delta )
        print "Loop " + arg_loopID + " setpoint " + my_statusPv + " changed from " + str(my_before) + " to " + str(my_after)
        return
    def runTest(self):
        '''Change the setpoint temperatures for both control loops.'''
        self.changeLoopSetpoint ( "1" , 3.5 )
        self.changeLoopSetpoint ( "2" , 9.0 )
        return
class CaseChangeLoopTypes( CryoconM32Case ):
    """Cycle both control loops through every supported loop type."""
    def changeLoopType(self, arg_loopID, arg_loopType):
        '''
        Change the loop type. Expect this test to work for all loop types.
        '''
        print "changeLoopType: loopID = " + arg_loopID + " arg_loopType = " + arg_loopType
        # Construct names of PVs to be used.
        my_demandPv = self.pvPrefix + "DMD:LOOP" + arg_loopID + ":LOOPTYPE"
        my_statusPv = self.pvPrefix + "STS:LOOP" + arg_loopID + ":LOOPTYPE"
        print "changeLoopType: my_demandPv = " + my_demandPv + " my_statusPv = " + my_statusPv
        my_sleeptime = 2
        # Grab existing status value.
        my_before = self.getPv(my_statusPv)
        # Write new demand value and verify it was OK.
        self.putPv(my_demandPv , arg_loopType )
        # Debug readback of the demand value (diagnostic leftovers).
        wilma = self.getPv(my_demandPv)
        print "wilma = " + str(wilma)
        self.sleep(my_sleeptime)
        print "####NOW###"
        fred = self.verifyPv(my_demandPv , arg_loopType)
        print "####SLEEPING###"
        # Wait for status to be updated and then verify the new status.
        self.sleep(my_sleeptime)
        # This is complicated by the fact that I am not sure about the interaction
        # between PID and RampP and ramp status in real instrument - guessing.
        exception_loopTypes=["PID","RampP"]
        # if arg_loopType in exception_loopTypes :
        #     my_after = self.getPv(my_statusPv )
        #     if my_after not in exception_loopTypes :
        #         self.fail("#" + my_statusPv + " value of " + my_after + "is not in exception types list after attempt to change to loop type" + arg_loopType )
        # else :
        my_after = self.verifyPv(my_statusPv , arg_loopType)
        print "Loop " + arg_loopID + " loop type" + my_statusPv + " changed from " + my_before + " to " + my_after
        return
    def runTest(self):
        '''Change the looptype for both control loops to each possible loop type in turn.'''
        # It might be better to fetch the allowed looptype strings from the mbbo fields
        # here but lets cheat and use a fixed list.
        for my_loopType in ["PID", "Man", "Table", "RampP", "Off" ]:
            self.changeLoopType( "1" , my_loopType )
            self.changeLoopType( "2" , my_loopType )
        return
class CaseChangeLoopManualOutputs( CryoconM32Case ):
    """Write manual output demands to both loops and verify the readbacks."""
    def changeLoopManualOutput(self, arg_loopID, arg_value):
        '''
        Change manual output request on given control loop. Expect this test to work for all loop types.
        However it will only actually change the control loop output if the loop type is MAN for manual
        If the looptype is not MAN, the device is simply storing the parameter in case it is subsequently told to change the looptype to MAN.
        (Currently the EPICS database does not support setting the looptype as it is not wished to expose this functionality to the users).
        '''
        # print "changeLoopManualOutput: loopID = " + arg_loopID + " arg_value = " + str(arg_value)
        # Need a delta for accepting numeric comparison of PV float values.
        my_delta = 0.0001
        # Construct names of PVs to be used.
        my_demandPv = self.pvPrefix + "DMD:LOOP" + arg_loopID + ":MANUAL"
        my_statusPv = self.pvPrefix + "STS:LOOP" + arg_loopID + ":MANUAL"
        my_looptypePv = self.pvPrefix + "STS:LOOP" + arg_loopID + ":LOOPTYPE"
        # Grab and report the looptype, which may well be important to understand what happens.
        my_looptype = self.getPv(my_looptypePv)
        # print "changeLoopManualOutput: Loop type = " + my_looptype
        # Grab existing status value.
        my_before = self.getPv(my_statusPv)
        # Write new demand value and verify it was OK.
        self.putPv(my_demandPv , arg_value )
        self.verifyPvFloat(my_demandPv , arg_value , my_delta )
        # Wait for status to be updated and then verify the new status.
        self.sleep(2)
        my_after = self.verifyPvFloat(my_statusPv , arg_value , my_delta )
        print "Loop " + arg_loopID + " manual output " + my_statusPv + " changed from " + str(my_before) + " to " + str(my_after)
        return
    def runTest(self):
        '''Change the manual output request for both control loops.'''
        self.changeLoopManualOutput( "1" , 7.3 )
        self.changeLoopManualOutput( "2" , 4.9 )
        return
class CaseChangeLoopPIDs ( CryoconM32Case ) :
    """Write the P, I and D gain demands for both loops and verify the readbacks."""
    def changeLoopPID ( self , arg_loopID, arg_value_P , arg_value_I , arg_value_D ) :
        '''
        Change control loop PID parameters for given control loop. Expect this test to work for all looptypes.
        However it will only actually change the control output if the loop type is PID. If the looptype is not PID,
        the device is simply storing the parameters in case it is subsequently told to change the looptype to PID.
        (Currently the EPICS database does not support setting the looptype as it is not wished to expose this functionality to the users).
        '''
        #print "changeLoopPID: loopID = " + arg_loopID + " arg_value_P = " + str(arg_value_P) + " arg_value_I = " + str(arg_value_I) + " arg_value_D = " + str(arg_value_D)
        # Put the 3 argument values in a lookup we can loop over.
        my_PIDs = { "P" : arg_value_P , "I" : arg_value_I , "D" : arg_value_D }
        # Need a delta for accepting numeric comparison of PV float values.
        my_delta = 0.0001
        # Construct names of loop type PV to be used.
        my_looptypePv = self.pvPrefix + "STS:LOOP" + arg_loopID + ":LOOPTYPE"
        # Grab and report the looptype, which may well be important to understand what happens.
        my_looptype = self.getPv(my_looptypePv)
        # print "changeLoopPID: Loop type = " + my_looptype
        for my_key in my_PIDs.keys() :
            # print "changeLoopPID: " + my_key + " gain"
            # Construct names of PVs to be used, e.g. DMD:LOOP1:PGAIN.
            my_demandPv = self.pvPrefix + "DMD:LOOP" + arg_loopID + ":" + my_key + "GAIN"
            my_statusPv = self.pvPrefix + "STS:LOOP" + arg_loopID + ":" + my_key + "GAIN"
            # print "changeLoopPID: Demand PV = " + my_demandPv + " Status PV = " + my_statusPv
            # Grab existing status value.
            my_before = self.getPv(my_statusPv)
            # print "changeLoopPID: Existing value = " + str(my_before)
            # Write new demand value and verify it was OK.
            self.putPv(my_demandPv , my_PIDs[my_key] )
            self.verifyPvFloat(my_demandPv , my_PIDs[my_key] , my_delta )
            # Wait for status to be updated and then verify the new status.
            self.sleep(3)
            my_after = self.verifyPvFloat(my_statusPv , my_PIDs[my_key] , my_delta )
            print "Loop " + arg_loopID + " " + my_key + " gain " + my_statusPv + " changed from " + str(my_before) + " to " + str(my_after)
        return
    def runTest(self):
        '''Change the PID parameters for both control loops.'''
        self.changeLoopPID( "1" , 2.57 , 1.8 , 0.02 )
        self.changeLoopPID( "2" , 3 , 2.2 , 0.0001 )
        return
# End of control loop tests.
# -----------------------------------------------
# Sensor channel tests.
#
# Testing stuff on M32_sensor template
class CaseGetSensorTemperatures( CryoconM32Case ):
    """Read back temperature and units for both sensor channels (M32_sensor template)."""
    def getSensorTemperature(self, arg_sensorTemperatureRecordName):
        '''Read back the temperature and units on a given sensor temperature record name, which has to be T1 or T2.'''
        #print "getSensorTemperature: sensorTemperatureRecordName = " + arg_sensorTemperatureRecordName
        # Construct names of PVs to be used.
        my_valuePv = self.pvPrefix + "STS:" + arg_sensorTemperatureRecordName
        my_unitsPv = my_valuePv + ":UNITS"
        # Grab existing values.
        my_value = self.getPv(my_valuePv)
        my_units = self.getPv(my_unitsPv)
        print "Sensor temperature " + my_valuePv + ", a.k.a. " + arg_sensorTemperatureRecordName + " = " + str(my_value) + " " + str(my_units)
        return
    def runTest(self):
        '''Readback the temperatures and units for both sensor channels.'''
        self.getSensorTemperature ( "T1" )
        self.getSensorTemperature ( "T2" )
        return
class CaseGetSensorTemperatureStatistics( CryoconM32Case ):
    """Read back the accumulated temperature statistics for both sensor channels."""
    def getSensorTemperatureStats(self, arg_sensorTemperatureRecordName):
        '''Read back the accumulated statistics for a given sensor temperature record name, which has to be T1 or T2.'''
        #print "getSensorTemperature: sensorTemperatureRecordName = " + arg_sensorTemperatureRecordName
        # Construct stem of names of PVs to be used.
        my_temperaturePv = self.pvPrefix + "STS:" + arg_sensorTemperatureRecordName
        # Create a hash of statistics and record name components.
        my_stats = { "variance" : "VARIANCE" , "slope" : "SLOPE" , "offset" : "OFFSET" , "maximum" : "MAX" , "minimum" : "MIN" }
        for my_key in my_stats.keys() :
            # print "getSensorTemperatureStats: " + my_key
            # Construct names of PVs to be used, e.g. STS:T1:VARIANCE.
            my_valuePv = my_temperaturePv + ":" + my_stats[my_key]
            # print "getSensorTemperatureStats: " + my_key + " statistic status PV = " + my_valuePv
            # Grab existing status value.
            my_value = self.getPv(my_valuePv)
            print "Sensor temperature " + arg_sensorTemperatureRecordName + " " + my_key + " statistic" ", " + my_valuePv + " = " + str(my_value)
        return
    def runTest(self):
        '''Read back the accumulated temperature statistics for both sensor channels.'''
        self.getSensorTemperatureStats ( "T1" )
        self.getSensorTemperatureStats ( "T2" )
        return
class CaseGetSensorRawReadings( CryoconM32Case ):
    """Read back the raw (uncalibrated) sensor readings for both channels."""
    def getSensorRawReading(self, arg_sensorChannelName):
        '''Read back the raw sensor reading on a given sensor channel, which has to be A or B.'''
        # print "getSensorRawReading: sensorChannelName = " + arg_sensorChannelName
        # Construct names of PV to be used.
        my_valuePv = self.pvPrefix + "STS:" + arg_sensorChannelName + ":RAW"
        # Grab existing value.
        my_value = self.getPv(my_valuePv)
        print "Sensor raw reading " + my_valuePv + " for sensor channel " + arg_sensorChannelName + " = " + str(my_value)
        return
    def runTest(self):
        '''Readback the raw sensor readings for both sensor channels.'''
        self.getSensorRawReading ( "A" )
        self.getSensorRawReading ( "B" )
        return
# End of sensor channel tests.
################################################
# Main entry point
if __name__ == "__main__":
    # Create and run the test sequence.  Constructing the suite triggers the
    # framework's target selection and execution (see createTests above).
    CryoconM32TestSuite()
| 2.09375 | 2 |
planner/migrations/0006_auto_20210410_1547.py | wxw12150/Spring21_Four_Year_Planner | 0 | 12758252 | <filename>planner/migrations/0006_auto_20210410_1547.py<gh_stars>0
# Generated by Django 3.1.7 on 2021-04-10 20:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the old ``Plan`` model with ``Student_Plan``.

    ``Student_Plan`` links a user to a (Semester, Course) pair, and a unique
    constraint over (Semester, Course, Student) prevents adding the same
    course to the same semester twice for one student.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('planner', '0005_auto_20210410_1527'),
    ]
    operations = [
        migrations.CreateModel(
            name='Student_Plan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # SET_NULL keeps the plan row if a course/semester is deleted.
                ('Course', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='planner.course')),
                ('Semester', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='planner.semester')),
                ('Student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.DeleteModel(
            name='Plan',
        ),
        migrations.AddConstraint(
            model_name='student_plan',
            constraint=models.UniqueConstraint(fields=('Semester', 'Course', 'Student'), name='Course exists'),
        ),
    ]
| 1.570313 | 2 |
api/graphql/opening_hours/opening_hours_types.py | SuviVappula/tilavarauspalvelu-core | 0 | 12758253 | import graphene
from django.conf import settings
from django.utils.timezone import get_default_timezone
from opening_hours.utils.opening_hours_client import OpeningHoursClient
DEFAULT_TIMEZONE = get_default_timezone()
class TimeSpanType(graphene.ObjectType):
    """A single opening-hours time span of a resource (Hauki data).

    ``start_time``/``end_time`` are naive times as delivered by the client;
    the resolvers attach the span's own timezone, falling back to the
    server's default timezone.
    """
    start_time = graphene.Time()
    end_time = graphene.Time()
    weekdays = graphene.List(graphene.Int)
    resource_state = graphene.String()
    end_time_on_next_day = graphene.Boolean()
    name_fi = graphene.String()
    name_en = graphene.String()
    name_sv = graphene.String()
    description_fi = graphene.String()
    description_en = graphene.String()
    description_sv = graphene.String()

    def resolve_start_time(self, info):
        """Return start_time localized to its own tzinfo (or the default zone)."""
        if not self.start_time:
            return None
        tzinfo = self.start_time.tzinfo or DEFAULT_TIMEZONE
        start = tzinfo.localize(self.start_time)
        return start

    def resolve_end_time(self, info):
        """Return end_time localized to its own tzinfo (or the default zone)."""
        if not self.end_time:
            return None
        # Bug fix: this previously read self.start_time.tzinfo, which used the
        # wrong zone and raised AttributeError when start_time was None while
        # end_time was set.
        tzinfo = self.end_time.tzinfo or DEFAULT_TIMEZONE
        end = tzinfo.localize(self.end_time)
        return end
class PeriodType(graphene.ObjectType):
    """An opening-hours period (date range) and its time spans (Hauki data)."""
    period_id = graphene.Int()
    start_date = graphene.Date()
    end_date = graphene.Date()
    resource_state = graphene.String()
    time_spans = graphene.List(TimeSpanType)
    name_fi = graphene.String()
    name_en = graphene.String()
    name_sv = graphene.String()
    description_fi = graphene.String()
    description_en = graphene.String()
    description_sv = graphene.String()
class OpeningTimesType(graphene.ObjectType):
    """Concrete opening times of a resource on one date, plus the period ids
    that produced them."""
    date = graphene.Date()
    start_time = graphene.Time()
    end_time = graphene.Time()
    state = graphene.String()
    periods = graphene.List(graphene.Int)

    def resolve_date(self, info):
        return self.date

    def resolve_start_time(self, info):
        """Return start_time localized to its own tzinfo (or the default zone)."""
        if not self.start_time:
            return None
        tzinfo = self.start_time.tzinfo or DEFAULT_TIMEZONE
        start = tzinfo.localize(self.start_time)
        return start

    def resolve_end_time(self, info):
        """Return end_time localized to its own tzinfo (or the default zone)."""
        if not self.end_time:
            return None
        # Bug fix: use end_time's own tzinfo here; the previous code read
        # start_time's tzinfo and crashed when start_time was None.
        tzinfo = self.end_time.tzinfo or DEFAULT_TIMEZONE
        end = tzinfo.localize(self.end_time)
        return end

    def resolve_periods(self, info, **kwargs):
        return self.periods
class OpeningHoursType(graphene.ObjectType):
    """Container pairing per-date opening times with the periods defining them."""
    opening_times = graphene.List(OpeningTimesType)
    opening_time_periods = graphene.List(PeriodType)
class OpeningHoursMixin:
    """Mixin adding an ``openingHours`` GraphQL field backed by the Hauki
    opening-hours service.

    The consuming type must provide ``hauki_resource_origin_id`` and
    ``hauki_resource_data_source_id`` attributes (TODO confirm — both are
    read below but not defined here).
    """
    hauki_origin_id = settings.HAUKI_ORIGIN_ID

    opening_hours = graphene.Field(
        OpeningHoursType,
        opening_times=graphene.Boolean(),
        periods=graphene.Boolean(),
        start_date=graphene.Date(),
        end_date=graphene.Date(),
    )

    def resolve_opening_hours(self, info, **kwargs):
        """Fetch opening times and/or periods from Hauki, as requested by the
        ``opening_times``/``periods`` flags, for the [start_date, end_date] range."""
        start = kwargs.get("start_date")
        end = kwargs.get("end_date")
        init_periods = kwargs.get("periods", False)
        init_times = kwargs.get("opening_times", False)
        # Concrete opening times require a date range; periods do not.
        if not (start and end):
            init_times = False
        opening_hours_client = OpeningHoursClient(
            self.hauki_resource_origin_id,
            start,
            end,
            single=True,
            init_periods=init_periods,
            init_opening_hours=init_times,
            hauki_origin_id=self.hauki_resource_data_source_id,
        )
        return_object = OpeningHoursType()
        if init_times:
            # Flatten the {date: [times]} mapping into OpeningTimesType rows.
            hours = opening_hours_client.get_opening_hours_for_date_range(
                str(self.hauki_resource_origin_id), start, end
            )
            opening_hours = []
            for date, times in hours.items():
                for time in times:
                    oh = OpeningTimesType(
                        date=date,
                        start_time=time.start_time.time(),
                        end_time=time.end_time.time(),
                        state=time.resource_state,
                        periods=time.periods,
                    )
                    opening_hours.append(oh)
            return_object.opening_times = opening_hours
        if init_periods:
            # Convert each client period (and its time spans) to GraphQL types.
            periods = []
            for period in opening_hours_client.get_resource_periods(
                str(self.hauki_resource_origin_id)
            ):
                time_spans = []
                for time_span in period.time_spans:
                    time_spans.append(
                        TimeSpanType(
                            start_time=time_span.start_time,
                            end_time=time_span.end_time,
                            resource_state=time_span.resource_state,
                            weekdays=time_span.weekdays,
                            name_fi=time_span.name.get("fi"),
                            name_sv=time_span.name.get("sv"),
                            name_en=time_span.name.get("en"),
                            description_fi=time_span.description.get("fi"),
                            description_sv=time_span.description.get("sv"),
                            description_en=time_span.description.get("en"),
                        )
                    )
                periods.append(
                    PeriodType(
                        period_id=period.id,
                        start_date=period.start_date,
                        end_date=period.end_date,
                        time_spans=time_spans,
                        name_fi=period.name.get("fi"),
                        name_sv=period.name.get("sv"),
                        name_en=period.name.get("en"),
                        description_fi=period.description.get("fi"),
                        description_sv=period.description.get("sv"),
                        description_en=period.description.get("en"),
                    )
                )
            return_object.opening_time_periods = periods
        return return_object
| 2.234375 | 2 |
format.py | crazyender/configs | 0 | 12758254 | #!/usr/bin/python
from commands import getstatusoutput
import sys
# ANSI colour escape codes for terminal output.
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple

# File extensions that get run through clang-format.
format_file_type = [
    ".c",
    ".h",
    ".cpp"
]

# List the files touched by the most recent commit (HEAD).
status, output = getstatusoutput("git show --pretty=\"\" --name-only")
lines = output.split("\n")
files = [line for line in lines if len(line) != 0]
for file_name in files:
    # Skip files whose extension is not in the whitelist above.
    needs = False
    for ext in format_file_type:
        if file_name.endswith(ext):
            needs = True;
    if not needs:
        continue
    # Reformat only the lines changed by HEAD, using the project .clang-format.
    cmd = "git-clang-format --commit HEAD~1 --style=file " + file_name
    sys.stdout.write(cmd)
    status, output = getstatusoutput(cmd)
    sys.stdout.write(" -- ");
    statustxt = "";  # NOTE(review): unused — candidate for removal
    if status != 0:
        print(R + "FAIL" + W)
    else:
        print(G + "DONE" + W)
| 2.578125 | 3 |
addresses/models.py | DKMDebugin/ecommerce | 0 | 12758255 | from django.db import models
from billing.models import BillingProfile
# (stored value, human-readable label) choices for Addresses.address_type.
ADDRESS_TYPES = (
    ('billing', 'Billing'),
    ('shipping', 'Shipping'),
)
class Addresses(models.Model):
    """A postal address (billing or shipping) owned by a BillingProfile."""
    billing_profile = models.ForeignKey(BillingProfile, on_delete=models.DO_NOTHING)
    address_type = models.CharField(max_length=120, choices=ADDRESS_TYPES)
    address_line_1 = models.CharField(max_length=120)
    # Second address line is optional.
    address_line_2 = models.CharField(max_length=120, null=True, blank=True)
    city = models.CharField(max_length=120)
    country = models.CharField(max_length=120, default='Nigeria')
    state = models.CharField(max_length=120)
    postal_code = models.CharField(max_length=120)

    def __str__(self):
        # Show the owning billing profile's string form (e.g. in the admin).
        return str(self.billing_profile)

    def get_address(self):
        """Return the address formatted as a multi-line postal block."""
        return f"{self.address_line_1}\n{self.address_line_2 or ''}\n{self.city}\n{self.state}, {self.postal_code}\n{self.country}"
| 2.484375 | 2 |
internet_engineer/Geogram/Geogram/map/migrations/0004_auto_20200131_1132.py | moghadas76/Geogram | 2 | 12758256 | <filename>internet_engineer/Geogram/Geogram/map/migrations/0004_auto_20200131_1132.py
# Generated by Django 3.0.1 on 2020-01-31 11:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change ``Profile.lang`` and ``Profile.lat`` to plain FloatField.

    NOTE(review): 'lang' presumably stores longitude alongside 'lat' —
    confirm against the map app's model.
    """
    dependencies = [
        ('map', '0003_auto_20200129_1634'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='lang',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='profile',
            name='lat',
            field=models.FloatField(),
        ),
    ]
| 1.492188 | 1 |
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotIFC2X3_Product_t.py | YKato521/ironpython-stubs | 0 | 12758257 | class dotIFC2X3_Product_t(object):
# no doc
Description = None
IFC2X3_OwnerHistory = None
Name = None
ObjectType = None
| 1.304688 | 1 |
datadog_checks_base/tests/base/checks/win/test_winpdh.py | mchelen-gov/integrations-core | 663 | 12758258 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
from collections import defaultdict
import pytest
from datadog_checks.dev.testing import requires_windows
try:
from datadog_test_libs.win.pdh_mocks import ( # noqa: F401
initialize_pdh_tests,
pdh_mocks_fixture,
pdh_mocks_fixture_bad_perf_strings,
)
from datadog_checks.checks.win.winpdh import SINGLE_INSTANCE_KEY, WinPDHCounter
except ImportError:
import platform
if platform.system() != 'Windows':
pass
logger = logging.getLogger(__file__)
'''
WinPDHCounter tests.
Test specific behavior of the WinPDHCounter class, which provides
the interface to the OS API.
'''
@requires_windows
def test_winpdhcounter_bad_strings_english(pdh_mocks_fixture_bad_perf_strings):  # noqa F811
    """A single-instance counter still resolves on an English system even when
    the performance-strings database is corrupt."""
    initialize_pdh_tests()
    counter = WinPDHCounter('System', 'Processor Queue Length', logger)

    vals = counter.get_all_values()
    assert len(vals) == 1  # single instance key, should only have one value
    assert SINGLE_INSTANCE_KEY in vals
@requires_windows
def test_winpdhcounter_throws_on_bad_input(pdh_mocks_fixture):  # noqa F811
    """Misspelled class or counter names must raise AttributeError."""
    initialize_pdh_tests()
    with pytest.raises(AttributeError):
        WinPDHCounter('Ssystem', 'Processor Queue Length', logger)

    with pytest.raises(AttributeError):
        WinPDHCounter('System', 'PProcessor Queue Length', logger)
@requires_windows
def test_winpdhcounter_throws_on_bad_input_with_bad_strings(pdh_mocks_fixture_bad_perf_strings):  # noqa F811
    """Bad names still raise AttributeError when the perf-strings DB is corrupt."""
    initialize_pdh_tests()
    with pytest.raises(AttributeError):
        WinPDHCounter('Ssystem', 'Processor Queue Length', logger)

    with pytest.raises(AttributeError):
        WinPDHCounter('System', 'PProcessor Queue Length', logger)
@requires_windows
def test_winpdhcounter_bad_strings_not_english(pdh_mocks_fixture_bad_perf_strings):  # noqa F811
    """On a non-English locale with a corrupt strings DB, construction must fail."""
    # Reset the class-level English-name fallback and counter-name cache so
    # this test is not affected by earlier tests in the session.
    WinPDHCounter._use_en_counter_names = False
    WinPDHCounter.pdh_counter_dict = defaultdict(list)

    initialize_pdh_tests(lang="se-sv")
    '''
    expectation is that the initialization will fail. We attempt to fall
    back to english counters if the strings database isn't present; however,
    on non-english windows the english counters won't work
    '''
    with pytest.raises(AttributeError):
        WinPDHCounter('System', 'Processor Queue Length', logger)
@requires_windows
def test_winpdhcounter_non_english(pdh_mocks_fixture):  # noqa F811
    """With a healthy strings DB, a non-English locale resolves counters normally."""
    # Reset class-level state left over from other tests (see bad_strings test).
    WinPDHCounter._use_en_counter_names = False
    WinPDHCounter.pdh_counter_dict = defaultdict(list)
    initialize_pdh_tests(lang="se-sv")
    counter = WinPDHCounter('System', 'Processor Queue Length', logger)

    vals = counter.get_all_values()
    assert len(vals) == 1  # single instance key, should only have one value
    assert SINGLE_INSTANCE_KEY in vals
| 2.015625 | 2 |
decode.py | angelog/handlecrash.h | 26 | 12758259 | <reponame>angelog/handlecrash.h
#!/usr/bin/python
#
# handlecrash.h
# https://github.com/angelog/handlecrash.h
import re
import sys
import zlib
import base64
import subprocess
def addr2line(name, ptr, intext = False):
    """Resolve address `ptr` inside binary `name` via the addr2line tool.

    Returns the stripped "function at file:line" output string, or False when
    the tool cannot be launched or cannot resolve the address (its output
    then contains '??').
    """
    args = ['addr2line', '-Cfpe', name, ptr]
    if intext:
        # Interpret ptr as an offset into the .text section rather than
        # an absolute address.
        args.append('-j')
        args.append('.text')
    try:
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except:
        # addr2line not installed / not on PATH.
        return False
    out, err = p.communicate()
    if out.find('??') != -1:
        return False
    return out.strip()
if len(sys.argv) == 1:
    print 'Usage: ./decode.py crash_1475679908.log'
    sys.exit(1)

# Parse the crash log: symbolicate the backtrace section and collect the
# base64/zlib-encoded stack memory dump.
with open(sys.argv[1]) as f:
    readingBacktrace = False
    backtrace = []
    readingStackMemory = False
    stackMemory = ''
    for line in f:
        line = line.strip()
        if readingBacktrace:
            # A (near-)empty line terminates the backtrace section; resolve
            # and print all collected frames.
            if line[4:] == '':
                readingBacktrace = False
                for frame in backtrace:
                    # frame[2] looks like "binary(+0xoffset)"; split it up.
                    parse = re.match('([^\\(]+)\\((\\+0x[0-9a-f]+|)\\)', frame[2])
                    addr = False
                    if parse != None:
                        # First try the absolute address, then the .text offset.
                        addr = addr2line(parse.group(1), frame[1])
                        if addr == False and parse.group(2) != '':
                            addr = addr2line(parse.group(1), parse.group(2), True)
                    if addr == False:
                        # Unresolvable: echo the raw frame.
                        print '*** ' + frame[0] + '\t' + frame[1] + '\t' + frame[2]
                    else:
                        parse = addr.split(' at ')
                        print '*** ' + frame[0] + '\t' + parse[0] + '\t' + parse[1]
                print line
                continue
            backtrace.append(line[6:].split('\t'))
        elif readingStackMemory:
            # Accumulate the base64 payload (everything after the prefix).
            stackMemory += line[4:]
        else:
            if line[4:13] == 'Backtrace':
                readingBacktrace = True
            elif line[4:17] == 'Stack memory:':
                readingStackMemory = True
                continue
            print line

# Decode and decompress the stack dump next to the log file.
# NOTE(review): if the log has no "Stack memory:" section this raises on the
# empty payload — confirm whether that case should be guarded.
with open(sys.argv[1] + '.bin', 'wb') as fbin:
    compressed = base64.b64decode(stackMemory)
    fbin.write(zlib.decompress(compressed))
| 2.3125 | 2 |
nevow/taglibrary/cal.py | wthie/nevow | 49 | 12758260 | import calendar
import datetime
from nevow import tags as t, url, itaglibrary, rend, static
# Minimal stylesheet highlighting today's cell in the rendered calendar.
_calendar_css = """
.calendar tbody td.today { background-color: #aaaaaa; }
"""

# The same CSS exposed both as an inline <style> tag and as a static resource.
calendarCSS = t.style(type_="text/css")[_calendar_css]
calendarCSSFile = static.File(_calendar_css, "text/css")
class CalendarComponent(object):
    """Nevow renderer producing a month-view HTML calendar table.

    Options (today_class, strftime, width, prev/next, base_url,
    calendar_class) are taken from the ICalendarOptions adapter on the
    rendering context.
    """
    # Set by calendar() on each render; used to highlight today's cell.
    current_date = None

    def days(self, year, month):
        """Return a data-directive yielding the month's weeks as rows of
        datetime.date (None for padding cells outside the month)."""
        def _(ctx, data):
            return [[day and datetime.date(year, month, day) or None
                     for day in row]
                    for row in calendar.monthcalendar(year, month)]
        return _

    def render_calendarDay(self, ctx, data):
        """Render one day cell; tags today's cell with the configured class."""
        options = itaglibrary.ICalendarOptions(ctx, {})
        today_class = options.get('today_class', 'today')
        if data is None:
            # Padding cell before the 1st / after the last day of the month.
            return ctx.tag['']
        if self.current_date.day == data.day and \
           self.current_date.month == data.month and \
           self.current_date.year == data.year:
            return ctx.tag(class_=today_class)[data.day]
        return ctx.tag[data.day]

    def calendar(self, ctx, data):
        """Render the full calendar table for the month given by `data`
        (None = current month, (year, month) tuple, or a date/datetime)."""
        now = datetime.datetime.now()
        self.current_date = now
        # ~1 month step used to derive previous/next month links.
        month_delta = datetime.timedelta(31)
        options = itaglibrary.ICalendarOptions(ctx, {})
        strftime = options.get('strftime', '%b %d, %Y @ %I:%M %p')
        width = options.get('width', 2)
        prev = options.get('prev', None)
        next = options.get('next', None)
        base = options.get('base_url', None)
        calendar_class = options.get('calendar_class', 'calendar')
        # Normalise `data` to a reference date `d` and a (year, month) pair.
        if data is None:
            d = now
            current = d.year, d.month
        elif isinstance(data, tuple):
            year, month = data
            d = datetime.date(year, month, 4)
            current = data
        elif isinstance(data, (datetime.date, datetime.datetime)):
            d = data
            current = d.year, d.month
        if prev is None or next is None:
            # Derive prev/next (year, month) and build navigation URLs.
            p = d - month_delta
            n = d + month_delta
            prev = p.year, p.month
            next = n.year, n.month
            if base is None:
                # No base URL configured: derive one from the request URL,
                # stripping a trailing slash and any trailing year/month pair.
                u = url.URL.fromContext(ctx)
                segments = u.pathList()
                if segments[-1] == '':
                    u = u.up()
                    segments = segments[:-1]
                if segments[-1].isdigit() and segments[-2].isdigit():
                    u = u.up().up()
                prev_url = u
                next_url = u
            else:
                prev_url = base
                next_url = base
            add_query_params = False
            # Appends one nav element to a URL; a leading '?' element in the
            # sequence switches from path segments to query parameters.
            def buildUrl(u, el):
                if add_query_params:
                    param_name, param_value = el
                    u = u.add(param_name, str(param_value))
                else:
                    u = u.child(str(el))
                return u
            for el in prev:
                if el == '?':
                    add_query_params = True
                    continue
                prev_url = buildUrl(prev_url, el)
            add_query_params = False
            for el in next:
                if el == '?':
                    add_query_params = True
                    continue
                next_url = buildUrl(next_url, el)
        else:
            # Caller supplied ready-made URL objects for navigation.
            if isinstance(prev, (url.URL, url.URLOverlay)) and \
               isinstance(next, (url.URL, url.URLOverlay)):
                next_url = next
                prev_url = prev
        # Assemble the table: header with nav links and weekday names, a body
        # of day cells, and a footer showing the render timestamp.
        return t.table(class_=calendar_class)[
            t.thead[
                t.tr[
                    t.th(colspan="7")[
                        t.a(href=prev_url)[t.xml("&#8592;")],
                        t.xml(" "),
                        t.xml('-'.join([str(el) for el in current])),
                        t.xml(" "),
                        t.a(href=next_url)[t.xml("&#8594;")]
                    ]
                ],
                [
                    t.tr[[t.td[dayname] for dayname in calendar.weekheader(width).split()]]
                ]
            ],
            t.tbody[
                t.invisible(data=self.days(*current), render=rend.sequence)[
                    t.tr(pattern='item', render=rend.sequence)[
                        t.td(pattern='item', render=self.render_calendarDay)
                    ]
                ]
            ],
            t.tfoot[
                t.tr[
                    t.td(colspan="7")[
                        now.strftime(strftime)
                    ]
                ]
            ]
        ]
# Shared component instance; `cal` is the ready-to-use render method.
c = CalendarComponent()
cal = c.calendar

__all__ = ["cal", "CalendarComponent", "calendarCSS", "calendarCSSFile"]
| 2.953125 | 3 |
test/test_Util/test_param_util.py | tvelagapudi/lenstronomy | 0 | 12758261 | import numpy as np
import pytest
import numpy.testing as npt
from lenstronomy.Util import util
import lenstronomy.Util.param_util as param_util
def test_cart2polar():
    """Cartesian -> polar conversion works for scalars and for arrays."""
    cx, cy = 0, 0
    # single 2d coordinate
    radius, angle = param_util.cart2polar(1, 1, cx, cy)
    assert radius == np.sqrt(2)  # radial part
    assert angle == np.arctan(1)
    # vectorized 2d coordinates
    xs = np.array([1, 2])
    ys = np.array([1, 1])
    radius, angle = param_util.cart2polar(xs, ys, cx, cy)
    assert radius[0] == np.sqrt(2)  # radial part
    assert angle[0] == np.arctan(1)
def test_polar2cart():
    """Polar -> Cartesian conversion of a single point at angle pi."""
    origin = np.array([0, 0])
    x, y = param_util.polar2cart(1, np.pi, origin)
    assert x == -1
    # y should be numerically zero (sin(pi) up to float error)
    assert abs(y) < 10e-14
def test_phi_q2_ellipticity():
    """phi_q2_ellipticity: circular profiles (q=1) map to zero ellipticity;
    spot-check hard-coded regression values for q < 1."""
    phi, q = 0, 1
    e1,e2 = param_util.phi_q2_ellipticity(phi, q)
    assert e1 == 0
    assert e2 == 0
    phi, q = 1, 1
    e1,e2 = param_util.phi_q2_ellipticity(phi, q)
    assert e1 == 0
    assert e2 == 0
    phi, q = 2.,0.95
    e1, e2 = param_util.phi_q2_ellipticity(phi, q)
    # regression values recorded from a trusted run — TODO confirm provenance
    assert e1 == -0.016760092842656733
    assert e2 == -0.019405192187382792
    phi, q = 0, 0.9
    e1, e2 = param_util.phi_q2_ellipticity(phi, q)
    assert e1 == 0.05263157894736841
    assert e2 == 0
def test_ellipticity2phi_q():
    """ellipticity2phi_q: scalar regression check plus vectorized (ndarray) input."""
    e1, e2 = 0.3,0
    phi,q = param_util.ellipticity2phi_q(e1, e2)
    assert phi == 0
    assert q == 0.53846153846153844
    # Works on np arrays as well
    e1 = np.array([0.3, 0.9])
    e2 = np.array([0.0, 0.9 ])
    phi, q = param_util.ellipticity2phi_q(e1, e2)
    assert np.allclose(phi, [0.0, 0.39269908], atol=1.e-08)
    assert np.allclose(q, [0.53846153, 5.00025001e-05], atol=1.e-08)
def test_ellipticity2phi_q_symmetry():
    """Round-trips between (phi, q) and (e1, e2) are inverses of each other,
    including negative angles and out-of-gamut ellipticities."""
    phi,q = 1.5, 0.8
    e1,e2 = param_util.phi_q2_ellipticity(phi, q)
    phi_new,q_new = param_util.ellipticity2phi_q(e1, e2)
    assert phi == phi_new
    assert q == q_new
    phi,q = -1.5, 0.8
    e1,e2 = param_util.phi_q2_ellipticity(phi, q)
    phi_new,q_new = param_util.ellipticity2phi_q(e1, e2)
    assert phi == phi_new
    assert q == q_new
    e1, e2 = 0.1, -0.1
    phi, q = param_util.ellipticity2phi_q(e1, e2)
    e1_new, e2_new = param_util.phi_q2_ellipticity(phi, q)
    npt.assert_almost_equal(e1, e1_new, decimal=10)
    npt.assert_almost_equal(e2, e2_new, decimal=10)
    # |e| > 1 is unphysical; only the (phi, q) round-trip is required to hold
    e1, e2 = 2.99, -0.0
    phi, q = param_util.ellipticity2phi_q(e1, e2)
    print(phi, q)
    e1_new, e2_new = param_util.phi_q2_ellipticity(phi, q)
    phi_new, q_new = param_util.ellipticity2phi_q(e1_new, e2_new)
    npt.assert_almost_equal(phi, phi_new, decimal=10)
    npt.assert_almost_equal(q, q_new, decimal=10)
    #npt.assert_almost_equal(e1, e1_new, decimal=10)
    #npt.assert_almost_equal(e2, e2_new, decimal=10)
def test_transform_e1e2():
    """transform_e1e2 matches the explicit shear matrix (1-e1, -e2; -e2, 1+e1)
    normalized by sqrt of its determinant."""
    e1 = 0.01
    e2 = 0.
    x = 0.
    y = 1.
    x_, y_ = param_util.transform_e1e2(x, y, e1, e2, center_x=0, center_y=0)
    # hand-computed expectation from the shear matrix definition
    x_new = (1-e1) * x - e2 * y
    y_new = -e2 * x + (1 + e1) * y
    det = np.sqrt((1 - e1) * (1 + e1) + e2 ** 2)
    npt.assert_almost_equal(x_, x_new / det, decimal=5)
    npt.assert_almost_equal(y_, y_new / det, decimal=5)
def test_phi_gamma_ellipticity():
    """Shear polar -> cartesian -> polar round-trip is exact."""
    angle, shear = -1., 0.1
    g1, g2 = param_util.shear_polar2cartesian(angle, shear)
    print(g1, g2, 'e1, e2')
    angle_back, shear_back = param_util.shear_cartesian2polar(g1, g2)
    assert angle == angle_back
    assert shear == shear_back
def test_phi_gamma_ellipticity_2():
    """Inverse round-trip: cartesian -> polar -> cartesian recovers (e1, e2)."""
    e1, e2 = -0.04, -0.01
    phi, gamma = param_util.shear_cartesian2polar(e1, e2)
    e1_out, e2_out = param_util.shear_polar2cartesian(phi, gamma)
    npt.assert_almost_equal(e1, e1_out, decimal=10)
    npt.assert_almost_equal(e2, e2_out, decimal=10)
def test_displace_eccentricity():
    """transform_e1e2 agrees with the explicit rotate-then-scale construction:
    rotate by the position angle phi_G, then stretch axes by sqrt(q) / 1/sqrt(q)."""
    #x, y = np.array([1, 0]), np.array([0, 1])
    x, y = util.make_grid(numPix=10, deltapix=1)
    e1 = 0.1#.1
    e2 = -0#.1
    center_x, center_y = 0, 0
    x_, y_ = param_util.transform_e1e2(x, y, e1, e2, center_x=center_x, center_y=center_y)
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    x_shift = x - center_x
    y_shift = y - center_y
    cos_phi = np.cos(phi_G)
    sin_phi = np.sin(phi_G)
    print(cos_phi, sin_phi)
    # rotate into the frame aligned with the major/minor axes
    xt1 = cos_phi * x_shift + sin_phi * y_shift
    xt2 = -sin_phi * x_shift + cos_phi * y_shift
    # apply the axis-ratio stretch
    xt1 *= np.sqrt(q)
    xt2 /= np.sqrt(q)
    npt.assert_almost_equal(x_, xt1, decimal=8)
    npt.assert_almost_equal(y_, xt2, decimal=8)
    # second case: unit basis points, positive e1 only
    x, y = util.make_grid(numPix=10, deltapix=1)
    x, y = np.array([1, 0]), np.array([0, 1])
    e1 = 0.1#.1#.1
    e2 = 0
    center_x, center_y = 0, 0
    x_, y_ = param_util.transform_e1e2(x, y, e1, e2, center_x=center_x, center_y=center_y)
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    x_shift = x - center_x
    y_shift = y - center_y
    cos_phi = np.cos(phi_G)
    sin_phi = np.sin(phi_G)
    print(cos_phi, sin_phi)
    xt1 = cos_phi * x_shift + sin_phi * y_shift
    xt2 = -sin_phi * x_shift + cos_phi * y_shift
    xt1 *= np.sqrt(q)
    xt2 /= np.sqrt(q)
    npt.assert_almost_equal(x_, xt1, decimal=8)
    npt.assert_almost_equal(y_, xt2, decimal=8)
# allow running this test module directly: python test_param_util.py
if __name__ == '__main__':
    pytest.main()
# ---
# app/dependencies.py
from functools import partial
import logging
from typing import Callable, Optional
import warnings
from fastapi import Request, Response, Cookie, HTTPException, status, Depends
from fastapi.templating import Jinja2Templates
from .database import get_db_session as _get_db_session
from .model import User, UserSession
from . import settings
logger = logging.getLogger(__name__)
_warned = False
def get_db():
    """The database session can alternatively be injected as a
    dependency.

    NOTE: This is not advisable due to possible deadlock issues under
    concurrent load!

    See: https://github.com/tiangolo/fastapi/issues/3205

    TL;DR - mixing the Session lifetime with fastapi's Depends machinery
    reults in unpredicatable behavior. It is difficult to reason about,
    especially when nested/chained dependencies are used (i.e. your
    dependency depends on a Depends(get_db).)

    I would suggest starting a session in the body of each route.

    Yields:
        An open database session; closed when the generator is finalized.
    """
    global _warned
    if not _warned:
        # Logger.warn() is a deprecated alias; warning() is the supported API.
        # Warn only once per process so logs aren't flooded per-request.
        logger.warning(
            "get_db dependency is unstable, prefer get_db_session instead!"
        )
        _warned = True
    with _get_db_session() as db:
        yield db
SESSION_KEY = "session_id"
def wsgi_admin(request: Request):
    """Resolve the admin user for a mounted WSGI app from the session cookie.

    Returns:
        The admin ``User``, or ``None`` when the cookie is missing, the
        session is invalid/inactive, or the user is not an admin.
    """
    try:
        session_id = request.cookies[SESSION_KEY]
        session = require_session(session_id)
        user = require_login(session)
        admin = require_admin(user)
        return admin
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt.
        # Any auth failure (KeyError, HTTPException, ValueError) means "no admin".
        return None
def auth_optional(session_id: Optional[str] = Cookie(None)):
    """Return the active UserSession for the session cookie, or None.

    Unlike require_session, a missing/invalid session is not an error.
    """
    with _get_db_session() as db:
        if not session_id:
            return None
        record = db.query(UserSession).filter_by(session_id=session_id).first()
        return record if (record and record.active) else None
def require_session(session_id: Optional[str] = Cookie(None)):
    """Dependency: look up the UserSession for the session cookie.

    Raises:
        HTTPException: 401 when the cookie is absent or the session is
            unknown or no longer active.
    """
    with _get_db_session() as db:
        if not session_id:
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
        session = db.query(UserSession).filter_by(session_id=session_id).first()
        if not session or not session.active:
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
        return session
def require_login(auth: UserSession = Depends(require_session)):
    """Dependency: return the User owning the (already validated) session.

    Raises:
        ValueError: if the session references a user that no longer exists
            (data inconsistency, not an auth failure).
    """
    with _get_db_session() as db:
        user = db.query(User).get(auth.user_id)
        if not user:
            raise ValueError("Could not find user for current session")
        return user
def require_admin(user: User = Depends(require_login)):
    """Dependency: require the logged-in user to be an admin (401 otherwise)."""
    if user.is_admin:
        return user
    raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
Render = Callable[[str, dict], Response]
class RenderTemplate:
    """Factory for per-request Jinja2 render callables.

    Instances are used as FastAPI dependencies: calling the instance with a
    Request returns a ``render(name, context)`` function bound to that request.
    """

    def __init__(self, template_dir=None, **globals):
        # fall back to the configured template directory when none is given
        template_dir = template_dir or settings.TEMPLATE_DIR
        self._templates = Jinja2Templates(directory=template_dir)
        self._set_globals(self._templates, globals)

    @staticmethod
    def _set_globals(template_cls, globals: dict):
        # expose the given values to every template rendered by this env
        for key, value in globals.items():
            template_cls.env.globals[key] = value

    def _render(self, request, name, context):
        # Jinja2Templates.TemplateResponse requires "request" in the context
        context.update({"request": request})
        return self._templates.TemplateResponse(name, context)

    def __call__(self, request: Request) -> Render:
        """Return a render function with the request pre-bound."""
        return partial(self._render, request)
Templates = RenderTemplate(APP_NAME=settings.APP_NAME)
# ---
#!/usr/bin/env python3
import math
import operator
from pywizlight.bulb import PilotBuilder, PilotParser, wizlight
try: from .vec import *;
except: from vec import *;
# can't actually change a value like this from another file once it's been loaded. this
# is actually a very bad design on python's part IMNSHO, and this is an ugly workaround
verbose = [False]
def setVerbose (val):
    """Enable/disable debug printing; mutates the module-level `verbose` cell
    so the change is visible to importers (see comment above)."""
    verbose[0] = val
def debug (msg, end = "\n"):
    """Print *msg* only when verbose mode was enabled via setVerbose()."""
    if (verbose[0]): print (msg, end = end);
# red, green, blue basis vectors from the hue/saturation color wheel - just vectors at 3
# angles (0, 120, 240)
angle = (math.pi * 2) / 3
basis = (
vecFromAngle (0),
vecFromAngle (angle),
vecFromAngle (angle * 2)
)
def printBasis (basis, prefix = ""):
    """Debug-print a sequence of 2d vectors on one line, with optional indent prefix.

    NOTE(review): the parameter `basis` shadows the module-level `basis` tuple.
    """
    debug ("{}Basis Vectors: ".format (prefix), end = "")
    for vector in basis:
        debug ("{} ".format (vecFormat (vector)), end="")
    debug ("")
# this function computes the linear combination of two basis vectors that define a trapezoid
# hueVec - a normalized vector in the hue color wheel (0..1, 0..1, 0..1)
# saturation - a single value representing the length of the hue vector (0..1)
# brightness - a separate value that may be passed in, and should be used in the Pilot
def trapezoid (hueVec, saturation, brightness):
    """Convert a hue direction + saturation + brightness into a wiz PilotBuilder.

    Args:
        hueVec: normalized 2d vector on the hue color wheel.
        saturation: length of the hue vector, in [0..1].
        brightness: passed through to the PilotBuilder unchanged.

    Returns:
        A PilotBuilder with rgb in 0..255 and warm/cold white in 0..cwMax
        (None when the white channel would be 0).
    """
    # if saturation is essentially 0, just go to the full on
    if (saturation <= epsilon):
        rgb = (0, 0, 0)
    else:
        # we want to compute the actual RGB color of the saturated point as a linear
        # combination of no more than two of the basis vectors. first we have to figure
        # out which of the basis vectors we will use
        maxAngle = math.cos ((math.pi * 2 / 3) - epsilon)
        mask = tuple([(1 if (vecDot (hueVec, vector) > maxAngle) else 0) for vector in basis])
        count = sum(mask)
        debug ("  Max Angle: {:0.3f}, Mask: ({}, {}, {}), Count: {}".format (maxAngle, mask[0], mask[1], mask[2], count))
        if (count == 1):
            # easy case, it's just one color component
            rgb = mask
        else:
            # recast as a ray-line intersection using the two found basis vectors, note
            # the basis vectors are normalized by definition
            subBasis = [basis[i] for i, maskVal in enumerate(mask) if (maskVal == 1)]
            printBasis (subBasis, "  ")
            # define the line from the origin along the second vector, computing its
            # equation in the form Ax + C = 0, but C is always 0 for this line
            AB = (subBasis[1][1], subBasis[1][0] * -1)
            # intersect the ray from the saturation point along the first basis vector
            # with the line we just computed, these are definitely not co-linear, so there
            # should always be an intersection point, and the result should always be in
            # the range [-1 .. 1], this is the first basis coefficient
            coeff = [0, 0]
            coeff[0] = vecDot (hueVec, AB) / vecDot (subBasis[0], AB)
            # compute the intersection point, and the second basis coefficient, note that
            # we compute the coefficients to always be positive, but the intersection calculation
            # needs to be in the opposite direction from the basis vector (hence the negative on
            # coeff[0]).
            intersection = vecAdd (vecMul (subBasis[0], -coeff[0]), hueVec)
            coeff[1] = vecDot (intersection, subBasis[1])
            debug ("  Intersection Point: {}, Coefficients: {}".format (vecFormat (intersection), vecFormat (coeff)))
            # there's a bit of a gamut problem here, as the area outside the hexagon defined by
            # the three unit basis vectors is not actually reachable. this manifests as
            # coefficients greater than 1, which will always happen unless the target color is
            # either one of the basis vectors or a bisector of two basis vectors. we scale both
            # coefficients by 1/maxCoefficient to make valid colors
            maxCoeff = max (coeff[0], coeff[1])
            coeff = [c / maxCoeff for c in coeff]
            debug ("  Scaled Coefficients: {}".format (vecFormat (coeff)))
            # now rebuild the rgb vector by putting the coefficients into the correct place
            j = 0
            rgbList = []
            for i in range (3):
                if (mask[i] == 1):
                    rgbList.append (min (coeff[j], 1))
                    j += 1
                else:
                    rgbList.append (0)
            rgb = tuple (rgbList)
    # we want a discontinuous behavior. if saturation >= 0.5, we want the color to remain saturated
    # and we scale the cw value down to 0 as saturation goes from 0.5 to 1. if saturation < 0.5, we
    # want to saturate cw, and scale the rgb down to (0, 0, 0) as saturation goes from 0.5 - 0
    if (saturation >= 0.5):
        # rgb remains saturated
        # scale the cw value down to 0 as saturation goes from 0.5 to 1
        cw = 1 - ((saturation - 0.5) * 2)
    else:
        cw = 1
        rgb = vecMul (rgb, saturation * 2)
    # scale back to the pilot color space
    rgb = vecInt (vecMul (rgb, 255))
    cw = int (max (0, cw * cwMax))
    if (cw == 0): cw = None;
    # scale cw back to 1-255 and return the Pilot Builder that includes the white light
    debug ("  RGB OUT: {}, CW: {}".format (rgb, cw))
    # the wiz light appears to have 5 different LEDs, r, g, b, warm_white, and cold_white
    # there appears to be a max power supplied across the 5 LEDs, which explains why all-
    # on full isn't the brightest configuration
    # warm_white appears to be 2800k, and cold_white appears to be 6200k, somewhat neutral
    # brightness is achieved by turning both of them on
    return PilotBuilder(rgb = rgb, warm_white = cw, cold_white = cw, brightness = brightness)
# the max value we will use for c and w
cwMax = 128
def rgb2rgbcw (rgb, brightness):
    """Convert an (r, g, b) tuple in 0..255 plus brightness into a PilotBuilder
    that also drives the white LEDs (see trapezoid for the mapping)."""
    debug ("RGB IN: {}, BRIGHTNESS: {}".format (rgb, brightness))
    # scale the vector into canonical space ([0-1])
    rgb = vecMul (rgb, 1 / 255)
    # compute the hue vector as a linear combination of the basis vectors, and extract the
    # saturation, there's probably a better pythonese way of doing this
    hueVec = vecAdd (vecAdd (vecMul (basis[0], rgb[0]), vecMul (basis[1], rgb[1])), vecMul (basis[2], rgb[2]))
    saturation = vecLen (hueVec)
    if (saturation > epsilon):
        # normalize so trapezoid receives a unit hue direction
        hueVec = vecMul (hueVec, 1 / saturation)
    return trapezoid(hueVec, saturation, brightness)
# given a tuple that is r,g,b and cw in 0-255 range, convert that to a hue, saturation tuple in the
# range (0..360, 0..100)
def rgbcw2hs (rgb, cw):
    """Convert a wiz (rgb, cw) state back to (hue, saturation).

    Args:
        rgb: (r, g, b) tuple in 0..255.
        cw: cold/warm white channel value, clamped to cwMax.

    Returns:
        (hue, saturation) in (0..360, 0..100).
    """
    # scale the rgb and cw values into canonical space (the wiz app might set cw to higher than the
    # value we use, so we have to allow for that
    rgb = vecMul (rgb, 1 / 255)
    cw = min (cw, cwMax) / cwMax
    # compute the hue vector as a linear combination of the basis vectors, there's probably a
    # better pythonese way of doing this
    hueVec = vecAdd (vecAdd (vecMul (basis[0], rgb[0]), vecMul (basis[1], rgb[1])), vecMul (basis[2], rgb[2]))
    # BUGFIX: vecFormat returns a string, so the old "{:.3f}" spec would raise
    # "Unknown format code 'f'" whenever this line executed; use plain "{}"
    debug ("RGB IN: {}, CW: {:.5f}, HUE VECTOR: {}".format (vecFormat(rgb), cw, vecFormat (hueVec)))
    # the discontinuous nature of the wiz bulb setting means we have two different states:
    # 1) the cw value is 1, and the hue vector is scaled (from 50% saturation to white)
    # 2) the hue vector is saturated, and cw is scaled down (from 50% saturation to full color)
    if (cw == 1):
        # hue scales down to (0, 0) at saturation 0, up to unit length at 50% saturation, so we get
        # that length, normalize the vector, and scale the saturation to reflect the half range
        hueVecLength = vecLen(hueVec)
        if (hueVecLength > epsilon):
            # BUGFIX: the normalized result was previously discarded (bare
            # "vecMul (...)" call); assign it back as intended
            hueVec = vecMul (hueVec, 1 / hueVecLength)
        saturation = hueVecLength * 0.5
    else:
        # the hue vector is already fully saturated, and cw scales from 0 - 0.5 to add in white light
        saturation = 1 - (cw / 2)
    # we have a saturated version of the hue vector now, which we convert to a hue vector and
    # then extract the angle of the vector in radians. We add P2 pi to the angle if it is less than
    # 0 to put the hue angle in the range from 0 to 2 Pi
    hue = math.atan2 (hueVec[1], hueVec[0])
    while (hue < 0): hue += (math.pi * 2)
    # scale the hue/saturation values back to their native ranges and return the tuple
    hue *= (180 / math.pi)
    saturation *= 100
    debug ("  HUE OUT: {:.5f}, SATURATION: {:.3f}".format (hue, saturation))
    return hue, saturation
# given a canonical value, a width, and a number of divisions, snap the value to the nearest subdivision
def snapToDiscreteValue(canonicalValue, divisions, scale):
    """Snap a canonical [0..1] value onto the nearest of `divisions` evenly
    spaced steps (endpoints included), then multiply by `scale`."""
    step = 1 / (divisions - 1)
    nearest = int((canonicalValue + step / 2) / step)
    return nearest * step * scale
# given a hue, saturation tuple in the range (0..360, 0..100), convert that to a rgbcw for the wiz light
def hs2rgbcw (hs, brightness):
    """Convert a (hue, saturation) tuple in (0..360, 0..100) plus brightness
    into a PilotBuilder, snapping hue to 24 and saturation to 8 discrete steps."""
    # convert hue to a canonical value
    hueCanonical = hs[0] / 360
    while (hueCanonical >= 1): hueCanonical -= 1;
    # compute hue in a discretized space and convert to radians, then a vector
    hueRadians = snapToDiscreteValue (hueCanonical, 3 * 8, math.pi * 2)
    hueVec = vecFromAngle(hueRadians)
    # convert saturation to a canonical value in a discretized space
    # we take the square root to give the user more visual control
    saturationCanonical = hs[1] / 100
    saturation = snapToDiscreteValue (saturationCanonical, 8, 1)
    debug ("HS IN: {}, HUE: {:.5f}, SATURATION: {:.3f}, BRIGHTNESS: {}".format (vecFormat(hs), hueRadians, saturation, brightness))
    return trapezoid (hueVec, saturation, brightness)
# ---
#!/usr/bin/python
#
# Copyright (c) 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
# Author: <NAME>
#
import os, sys, re
# WLST (Jython 2.x) script: parse CLI credentials, then start an OID system
# component via the WebLogic Node Manager. nmConnect/nmServerStatus/nmStart
# and exit() are WLST built-ins injected into the script's global namespace.
domain_name = os.environ.get("DOMAIN_NAME", "oid_domain")
oracle_home = os.environ.get("ORACLE_HOME", "/u01/oracle/")
weblogic_home = '/u01/oracle/wlserver'
# walk the argument list two tokens at a time (switch + value)
i = 1
while i < len(sys.argv):
    if sys.argv[i] == '-username':
        user = sys.argv[i + 1]
        i += 2
    elif sys.argv[i] == '-adminpassword':
        password = sys.argv[i + 1]
        i += 2
    elif sys.argv[i] == '-instance_Name':
        instanceName= sys.argv[i + 1]
        i += 2
    else:
        print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i])
        sys.exit(1)
try:
    # connect to Node Manager over SSL and start the OID component
    nmConnect(domainName=domain_name,username=user,password=password,nmType='ssl')
    nmServerStatus(serverName=instanceName,serverType='OID')
    nmStart(serverName=instanceName,serverType='OID')
    exit()
except:
    # NOTE(review): bare except + success exit code hides failures from callers
    print 'Unable to start '+instanceName
    exit()
# ---
# kfac/examples/mnist.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading MNIST into TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
__all__ = [
'load_mnist_as_tensors',
'load_mnist_as_dataset',
'load_mnist_as_iterator',
]
def load_mnist_as_tensors(flatten_images=True, dtype=tf.float32):
  """Loads MNIST as Tensors.

  Args:
    flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into
      [784]-shaped vectors.
    dtype: The TF dtype to return the images as.

  Returns:
    images, labels, num_examples
  """
  # (removed: dead commented-out tf.contrib.learn loader, which no longer
  # exists in TF 2.x even under compat.v1)
  (images, labels), _ = tf.keras.datasets.mnist.load_data()
  num_examples = images.shape[0]
  if flatten_images:
    images = images.reshape(images.shape[0], 28**2)
  else:
    images = images.reshape(images.shape[0], 28, 28, 1)
  # normalize pixel values from [0, 255] into [0, 1] in float64 before the
  # final cast to `dtype`
  images = images.astype('float64')
  labels = labels.astype('int32')
  images /= 255.
  images = tf.constant(images, dtype=dtype)
  labels = tf.constant(labels)
  return images, labels, num_examples
def load_mnist_as_dataset(flatten_images=True):
  """Loads MNIST as a Dataset object.

  Args:
    flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into
      [784]-shaped vectors.

  Returns:
    dataset, num_examples, where dataset is a Dataset object containing the
    whole MNIST training dataset and num_examples is the number of examples
    in the MNIST dataset (should be 60000).
  """
  imgs, lbls, count = load_mnist_as_tensors(flatten_images=flatten_images)
  return tf.data.Dataset.from_tensor_slices((imgs, lbls)), count
def load_mnist_as_iterator(num_epochs, batch_size,
                           use_fake_data=False,
                           flatten_images=True):
  """Loads MNIST dataset as an iterator Tensor.

  Args:
    num_epochs: int. Number of passes to make over the dataset.
    batch_size: int. Number of examples per minibatch.
    use_fake_data: bool. If True, generate a synthetic dataset rather than
      reading MNIST in.
    flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into
      [784]-shaped vectors.

  Returns:
    examples: Tensor of shape [batch_size, 784] if 'flatten_images' is
      True, else [batch_size, 28, 28, 1]. Each row is one example.
      Values in [0, 1].
    labels: Tensor of shape [batch_size]. Indices of integer corresponding to
      each example. Values in {0...9}.
  """
  if use_fake_data:
    # fixed seed so fake data is reproducible across runs
    rng = np.random.RandomState(42)
    num_examples = batch_size * 4
    images = rng.rand(num_examples, 28 * 28)
    if not flatten_images:
      images = np.reshape(images, [num_examples, 28, 28, 1])
    labels = rng.randint(10, size=num_examples)
    dataset = tf.data.Dataset.from_tensor_slices((np.asarray(
        images, dtype=np.float32), np.asarray(labels, dtype=np.int64)))
  else:
    dataset, num_examples = load_mnist_as_dataset(flatten_images=flatten_images)
  # full-dataset shuffle, repeat for num_epochs, then batch with prefetch
  dataset = (dataset.shuffle(num_examples).repeat(num_epochs)
             .batch(batch_size).prefetch(5))
  return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
# ---
# -- coding: utf-8 --
import json
from datetime import datetime, timedelta
from test.factories import ProjectFactory, OrganizationFactory, IssueFactory
from test.harness import IntegrationTest
from app import db, Issue
class TestProjects(IntegrationTest):
    def test_all_projects_order(self):
        '''
        Test that projects gets returned in order of last_updated
        '''
        # last_updated values deliberately span several years; the newest
        # project must come back first regardless of creation order
        ProjectFactory(name=u'Project 1', last_updated='Mon, 01 Jan 2010 00:00:00 GMT')
        ProjectFactory(name=u'Project 2', last_updated='Tue, 01 Jan 2011 00:00:00 GMT')
        ProjectFactory(name=u'Non Github Project', last_updated='Wed, 01 Jan 2013 00:00:00', github_details=None)
        ProjectFactory(name=u'Project 3', last_updated='Thu, 01 Jan 2014 00:00:00 GMT')
        db.session.commit()
        response = self.app.get('/api/projects')
        response = json.loads(response.data)
        self.assertEqual(response['objects'][0]['name'], u'Project 3')
        self.assertEqual(response['objects'][1]['name'], u'Non Github Project')
        self.assertEqual(response['objects'][2]['name'], u'Project 2')
        self.assertEqual(response['objects'][3]['name'], u'Project 1')
    def test_projects(self):
        ''' The /api/projects response is a paginated envelope whose fields
            all have the expected types
        '''
        ProjectFactory()
        db.session.commit()
        response = self.app.get('/api/projects')
        response = json.loads(response.data)
        # envelope shape
        assert isinstance(response, dict)
        assert isinstance(response['pages'], dict)
        assert isinstance(response['total'], int)
        assert isinstance(response['objects'], list)
        # per-project field types (Python 2: text values are `unicode`)
        assert isinstance(response['objects'][0]['categories'], unicode)
        assert isinstance(response['objects'][0]['tags'], list)
        assert isinstance(response['objects'][0]['code_url'], unicode)
        assert isinstance(response['objects'][0]['description'], unicode)
        assert isinstance(response['objects'][0]['github_details'], dict)
        assert isinstance(response['objects'][0]['id'], int)
        assert isinstance(response['objects'][0]['api_url'], unicode)
        assert isinstance(response['objects'][0]['link_url'], unicode)
        assert isinstance(response['objects'][0]['name'], unicode)
        assert isinstance(response['objects'][0]['organization'], dict)
        assert isinstance(response['objects'][0]['organization_name'], unicode)
        assert isinstance(response['objects'][0]['type'], unicode)
        assert isinstance(response['objects'][0]['status'], unicode)
        assert isinstance(response['objects'][0]['languages'], list)
    def test_project_search_nonexisting_text(self):
        ''' Searching for non-existing text in the project and org/project
            endpoints returns no results
        '''
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'Coder')
        db.session.commit()
        # 'ruby' appears nowhere in the only project's searchable fields
        project_response = self.app.get('/api/projects?q=ruby')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 0)
        self.assertEqual(len(project_response['objects']), 0)
        org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 0)
        self.assertEqual(len(org_project_response['objects']), 0)
    def test_project_search_existing_text(self):
        ''' Searching for existing text in the project and org/project endpoints
            returns expected results
        '''
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'ruby')
        ProjectFactory(organization_name=organization.name, description=u'python')
        db.session.commit()
        # exactly one of the two projects should match 'ruby'
        project_response = self.app.get('/api/projects?q=ruby')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 1)
        self.assertEqual(len(project_response['objects']), 1)
        org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 1)
        self.assertEqual(len(org_project_response['objects']), 1)
    def test_project_search_escaped_text(self):
        ''' Searching for escaped text in the project and org/project endpoints
            returns expected results
        '''
        # descriptions deliberately contain quotes, percent signs and
        # non-ASCII characters to exercise query escaping
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'What\'s My \'District')
        ProjectFactory(organization_name=organization.name, description=u'Cöde%%for%%Ameriça')
        db.session.commit()
        project_response = self.app.get('/api/projects?q=What\'s My \'District')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 1)
        self.assertEqual(len(project_response['objects']), 1)
        org_project_response = self.app.get("/api/organizations/Code-for-San-Francisco/projects?q='District")
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 1)
        self.assertEqual(len(org_project_response['objects']), 1)
        project_response = self.app.get('/api/projects?q=%Ameriça')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 1)
        self.assertEqual(len(project_response['objects']), 1)
        org_project_response = self.app.get("/api/organizations/Code-for-San-Francisco/projects?q=Cöde%")
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 1)
        self.assertEqual(len(org_project_response['objects']), 1)
    def test_project_search_existing_phrase(self):
        ''' Searching for an existing phrase in the project and org/project endpoints
            returns expected results
        '''
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'ruby on rails')
        ProjectFactory(organization_name=organization.name, description=u'i love lamp')
        db.session.commit()
        # the full multi-word phrase should match only the first project
        project_response = self.app.get('/api/projects?q=ruby on rails')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 1)
        self.assertEqual(len(project_response['objects']), 1)
        org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby on rails')
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 1)
        self.assertEqual(len(org_project_response['objects']), 1)
    def test_project_search_existing_part_of_phrase(self):
        ''' Searching for a partial phrase in the project and org/project endpoints
            returns expected results
        '''
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'ruby on rails')
        ProjectFactory(organization_name=organization.name, description=u'i love lamp')
        db.session.commit()
        # a single word from the phrase is enough to match
        project_response = self.app.get('/api/projects?q=ruby')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 1)
        self.assertEqual(len(project_response['objects']), 1)
        org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 1)
        self.assertEqual(len(org_project_response['objects']), 1)
    def test_project_search_nonexisting_phrase(self):
        ''' Searching for a term that is not part of an existing phrase in the project and
            org/project endpoints returns no results
        '''
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'ruby on rails')
        db.session.commit()
        # 'joomla' never appears in the seeded description
        project_response = self.app.get('/api/projects?q=joomla')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(project_response['total'], 0)
        self.assertEqual(len(project_response['objects']), 0)
        org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=joomla')
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(org_project_response['total'], 0)
        self.assertEqual(len(org_project_response['objects']), 0)
    def test_project_search_order_by_relevance(self):
        ''' Search results from the project and org/project endpoints are returned
            in order of relevance
        '''
        # the older project repeats the term, so relevance ordering must beat
        # last_updated ordering here
        organization = OrganizationFactory(name=u"Code for San Francisco")
        ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
        ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
        db.session.commit()
        project_response = self.app.get('/api/projects?q=ruby')
        project_response = json.loads(project_response.data)
        assert isinstance(project_response['total'], int)
        assert isinstance(project_response['objects'], list)
        self.assertEqual(len(project_response["objects"]), 2)
        self.assertEqual(project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
        org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
        org_project_response = json.loads(org_project_response.data)
        assert isinstance(org_project_response['total'], int)
        assert isinstance(org_project_response['objects'], list)
        self.assertEqual(len(org_project_response["objects"]), 2)
        self.assertEqual(org_project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
def test_project_search_order_by_relevance_requested(self):
''' Search results from the project and org/project endpoints are returned
in order of relevance when explicitly requested via sort_by=relevance
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
# Same fixture as the default-ordering test: five "ruby" hits vs one.
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
# Explicit sort_by=relevance must match the default behavior.
project_response = self.app.get('/api/projects?q=ruby&sort_by=relevance')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=relevance')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
def test_project_search_order_by_last_updated(self):
''' Search results from the project and org/project endpoints are returned
in order of last_updated, if requested via sort_by=last_updated
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
# The single-hit project is the most recently updated, so with
# sort_by=last_updated it must come first despite being less relevant.
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby&sort_by=last_updated')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=last_updated')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby')
def test_project_search_order_by_last_updated_sort_desc(self):
''' Search results from the project and org/project endpoints are returned
in descending order of last_updated, if requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
# sort_dir=desc: newest first, i.e. the same ordering as plain
# sort_by=last_updated (descending appears to be the default direction).
project_response = self.app.get('/api/projects?q=ruby&sort_by=last_updated&sort_dir=desc')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=last_updated&sort_dir=desc')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby')
def test_project_search_order_by_last_updated_sort_asc(self):
''' Search results from the project and org/project endpoints are returned
in ascending order of last_updated, if requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
# sort_dir=asc: oldest first, so the 10-day-old five-hit project leads.
project_response = self.app.get('/api/projects?q=ruby&sort_by=last_updated&sort_dir=asc')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=last_updated&sort_dir=asc')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
def test_project_search_ranked_order(self):
''' Search results from the project and org/project endpoints are returned
with correct ranking values
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
# Three projects match "TEST" through different fields; the fourth has no
# match at all and must be excluded.  Expected rank: status > tags >
# description, regardless of last_updated recency.
ProjectFactory(organization_name=organization.name, status=u'TEST', last_updated=datetime.now() - timedelta(10000))
ProjectFactory(organization_name=organization.name, description=u'testing a new thing', last_updated=datetime.now() - timedelta(1))
ProjectFactory(organization_name=organization.name, tags=[u'test,tags,what,ever'], last_updated=datetime.now() - timedelta(100))
ProjectFactory(organization_name=organization.name, last_updated=datetime.now())
db.session.commit()
project_response = self.app.get('/api/projects?q=TEST')
project_response = json.loads(project_response.data)
self.assertEqual(project_response['total'], 3)
self.assertEqual(project_response['objects'][0]['status'], u'TEST')
self.assertEqual(project_response['objects'][1]['tags'], [u'test,tags,what,ever'])
self.assertEqual(project_response['objects'][2]['description'], u'testing a new thing')
def test_project_return_only_ids(self):
''' Search results from the project and org/project endpoints are returned
as only IDs if requested via only_ids=true
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
project_one = ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
project_two = ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
# Capture the ids after commit so the later HTTP round-trips can't
# invalidate the ORM instances underneath us.
project_one_id = project_one.id
project_two_id = project_two.id
project_response = self.app.get('/api/projects?q=ruby&only_ids=true')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
# With only_ids the objects list holds bare integers, not dicts.
assert isinstance(project_response['objects'][0], int)
assert isinstance(project_response['objects'][1], int)
self.assertEqual(project_response['objects'][0], project_one_id)
self.assertEqual(project_response['objects'][1], project_two_id)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&only_ids=true')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
assert isinstance(org_project_response['objects'][0], int)
assert isinstance(org_project_response['objects'][1], int)
self.assertEqual(org_project_response['objects'][0], project_one_id)
self.assertEqual(org_project_response['objects'][1], project_two_id)
def test_project_search_empty_string(self):
''' Searching an empty string on the project and org/project endpoints returns all projects
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
# An empty q parameter must act as "no filter", not "match nothing".
project_response = self.app.get('/api/projects?q=')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 2)
self.assertEqual(len(project_response['objects']), 2)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 2)
self.assertEqual(len(org_project_response['objects']), 2)
def test_project_search_tsv_body_not_in_response(self):
''' The tsv_body field (the internal full-text-search vector) is not
exposed in responses from the project and org/project endpoints
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 2)
self.assertFalse('tsv_body' in project_response['objects'][0])
self.assertFalse('tsv_body' in project_response['objects'][1])
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 2)
self.assertFalse('tsv_body' in org_project_response['objects'][0])
self.assertFalse('tsv_body' in org_project_response['objects'][1])
def test_project_orgs_dont_include_tsv(self):
''' The organization nested inside a project response also omits the
internal tsv_body search vector.
'''
OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=u"Code for San Francisco")
db.session.commit()
response = self.app.get('/api/projects')
response = json.loads(response.data)
self.assertFalse('tsv_body' in response['objects'][0]['organization'])
def test_project_search_includes_status(self):
''' The status field is searchable and included in results from the
project and org/project endpoints (matching is case-insensitive:
"alpha" finds status "Alpha")
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, status=u'Beta')
ProjectFactory(organization_name=organization.name, status=u'Alpha')
db.session.commit()
project_response = self.app.get('/api/projects?q=alpha')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 1)
self.assertEqual(project_response['objects'][0]['status'], 'Alpha')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=alpha')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 1)
self.assertEqual(org_project_response['objects'][0]['status'], 'Alpha')
def test_project_search_includes_name(self):
''' The name field is searchable and included in results from the
project and org/project endpoints
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, name=u'My Cool Project')
ProjectFactory(organization_name=organization.name, name=u'My Dumb Project')
db.session.commit()
project_response = self.app.get('/api/projects?q=cool')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 1)
self.assertEqual(project_response['objects'][0]['name'], 'My Cool Project')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=cool')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 1)
self.assertEqual(org_project_response['objects'][0]['name'], 'My Cool Project')
def test_project_search_includes_tags(self):
'''
The tags field is searchable and included in results from the project
and org/project endpoints (a word inside a multi-word tag matches)
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, tags=['mapping', 'philly'])
ProjectFactory(organization_name=organization.name, tags=['food stamps', 'health'])
db.session.commit()
project_response = self.app.get('/api/projects?q=stamps')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 1)
self.assertEqual(project_response['objects'][0]['tags'], ['food stamps', 'health'])
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=stamps')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 1)
self.assertEqual(org_project_response['objects'][0]['tags'], ['food stamps', 'health'])
def test_project_search_includes_organization_name(self):
'''
The parent organization's name is part of the project search index, and
org-name matches rank above project-field matches.
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, name=u"Project One")
ProjectFactory(organization_name=organization.name, name=u"Project Two", description=u"America")
organization = OrganizationFactory(name=u"Code for America")
ProjectFactory(organization_name=organization.name, name=u"Project Three")
ProjectFactory(organization_name=organization.name, name=u"Project Four", tags=u"San Francisco")
db.session.commit()
# Test that org_name matches return before project name
# ("San Francisco" matches Projects One/Two via their org and Project
# Four via its tags; org matches must come first).
project_response = self.app.get('/api/projects?q=Code+for+San+Francisco')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 3)
self.assertEqual(project_response['objects'][0]['name'], u'Project One')
self.assertEqual(project_response['objects'][1]['name'], u'Project Two')
self.assertEqual(project_response['objects'][2]['name'], u'Project Four')
self.assertTrue('San Francisco' in project_response['objects'][2]['tags'])
# Test that org name matches return before project description
# (also exercises an unencoded space-separated query string).
project_response = self.app.get('/api/projects?q=Code for America')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 3)
self.assertEqual(project_response['objects'][0]['name'], u'Project Three')
self.assertEqual(project_response['objects'][1]['name'], u'Project Four')
self.assertEqual(project_response['objects'][2]['name'], u'Project Two')
self.assertEqual(project_response['objects'][2]['description'], u'America')
def test_project_organzation_type_filter(self):
'''
Test searching for projects from certain types of organizations via the
organization_type query parameter.  Results are always ordered by
last_updated, independent of the order the types are listed in.
NOTE(review): "organzation" in the method name is a typo for
"organization"; left as-is because renaming would change test discovery.
'''
brigade = OrganizationFactory(name=u'Brigade Org', type=u'Brigade, midwest')
code_for_all = OrganizationFactory(name=u'Code for All Org', type=u'Code for All')
gov_org = OrganizationFactory(name=u'Gov Org', type=u'Government')
# Project names encode their age so ordering failures read clearly.
brigade_project = ProjectFactory(name=u'Today Brigade project', organization_name=brigade.name)
code_for_all_project = ProjectFactory(name=u'Yesterday Code for All project', organization_name=code_for_all.name, last_updated=datetime.now() - timedelta(days=1))
gov_project = ProjectFactory(name=u'Two days ago Gov project', organization_name=gov_org.name, last_updated=datetime.now() - timedelta(days=2))
brigade_project2 = ProjectFactory(name=u'Three days ago Brigade project', organization_name=brigade.name, last_updated=datetime.now() - timedelta(days=3))
code_for_all_project2 = ProjectFactory(name=u'Four days ago Code for All project', organization_name=code_for_all.name, last_updated=datetime.now() - timedelta(days=4))
gov_project2 = ProjectFactory(name=u'Five days ago Gov project', organization_name=gov_org.name, last_updated=datetime.now() - timedelta(days=5))
db.session.add(brigade_project)
db.session.add(code_for_all_project)
db.session.add(gov_project)
db.session.add(brigade_project2)
db.session.add(code_for_all_project2)
db.session.add(gov_project2)
db.session.commit()
# Test they return in order of last_updated
response = self.app.get('/api/projects')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 6)
self.assertEqual(response['objects'][0]['name'], 'Today Brigade project')
self.assertEqual(response['objects'][1]['name'], 'Yesterday Code for All project')
self.assertEqual(response['objects'][2]['name'], 'Two days ago Gov project')
self.assertEqual(response['objects'][3]['name'], 'Three days ago Brigade project')
self.assertEqual(response['objects'][4]['name'], 'Four days ago Code for All project')
self.assertEqual(response['objects'][5]['name'], 'Five days ago Gov project')
# Test they return in order of last_updated, no matter the search order
response = self.app.get('/api/projects?organization_type=Government,Code+for+All,Brigade')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 6)
self.assertEqual(response['objects'][0]['name'], 'Today Brigade project')
self.assertEqual(response['objects'][1]['name'], 'Yesterday Code for All project')
self.assertEqual(response['objects'][2]['name'], 'Two days ago Gov project')
self.assertEqual(response['objects'][3]['name'], 'Three days ago Brigade project')
self.assertEqual(response['objects'][4]['name'], 'Four days ago Code for All project')
self.assertEqual(response['objects'][5]['name'], 'Five days ago Gov project')
# Note: the Brigade org's type is "Brigade, midwest", so a filter on
# "Brigade" matches one element of a comma-separated type list.
response = self.app.get('/api/projects?organization_type=Brigade,Code+for+All')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 4)
self.assertEqual(response['objects'][0]['name'], 'Today Brigade project')
self.assertEqual(response['objects'][1]['name'], 'Yesterday Code for All project')
self.assertEqual(response['objects'][2]['name'], 'Three days ago Brigade project')
self.assertEqual(response['objects'][3]['name'], 'Four days ago Code for All project')
# # Different order, same results
response = self.app.get('/api/projects?organization_type=Code+for+All,Brigade')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 4)
self.assertEqual(response['objects'][0]['name'], 'Today Brigade project')
self.assertEqual(response['objects'][1]['name'], 'Yesterday Code for All project')
self.assertEqual(response['objects'][2]['name'], 'Three days ago Brigade project')
self.assertEqual(response['objects'][3]['name'], 'Four days ago Code for All project')
response = self.app.get('/api/projects?organization_type=Code+for+All,Government')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 4)
self.assertEqual(response['objects'][0]['name'], 'Yesterday Code for All project')
self.assertEqual(response['objects'][1]['name'], 'Two days ago Gov project')
self.assertEqual(response['objects'][2]['name'], 'Four days ago Code for All project')
self.assertEqual(response['objects'][3]['name'], 'Five days ago Gov project')
# # Different order, same results
response = self.app.get('/api/projects?organization_type=Government,Code+for+All')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 4)
self.assertEqual(response['objects'][0]['name'], 'Yesterday Code for All project')
self.assertEqual(response['objects'][1]['name'], 'Two days ago Gov project')
self.assertEqual(response['objects'][2]['name'], 'Four days ago Code for All project')
self.assertEqual(response['objects'][3]['name'], 'Five days ago Gov project')
def test_project_cascading_deletes(self):
''' Test that issues get deleted when their parent
project and org is deleted (DB-level ON DELETE CASCADE, exercised with
raw SQL so no ORM-side cascade logic can mask a schema problem)
'''
# set up test objects and delete a project
organization = OrganizationFactory(name=u'TEST ORG')
db.session.flush()
project = ProjectFactory(organization_name=organization.name, name=u'TEST PROJECT')
db.session.flush()
issue = IssueFactory(title=u'TEST ISSUE', project_id=project.id)
another_issue = IssueFactory(title=u'ANOTHER TEST ISSUE', project_id=project.id)
a_third_issue = IssueFactory(title=u'A THIRD TEST ISSUE', project_id=project.id)
db.session.commit()
# make sure the issues are in the db
issues = db.session.query(Issue).all()
self.assertTrue(len(issues) == 3)
# raw DELETE bypasses the ORM: only a DB-level cascade removes the issues
db.session.execute('DELETE FROM project')
db.session.commit()
issues = db.session.query(Issue).all()
self.assertFalse(len(issues))
# delete an organization
project = ProjectFactory(organization_name=organization.name, name=u'TEST PROJECT')
db.session.flush()
issue = IssueFactory(title=u'TEST ISSUE', project_id=project.id)
another_issue = IssueFactory(title=u'ANOTHER TEST ISSUE', project_id=project.id)
a_third_issue = IssueFactory(title=u'A THIRD TEST ISSUE', project_id=project.id)
db.session.add(issue)
db.session.add(another_issue)
db.session.add(a_third_issue)
db.session.commit()
# make sure the issues are in the db
issues = db.session.query(Issue).all()
self.assertTrue(len(issues) == 3)
# deleting the organization must cascade through project down to issues
db.session.execute('DELETE FROM organization')
db.session.commit()
issues = db.session.query(Issue).all()
self.assertFalse(len(issues))
def test_include_issues(self):
""" Test the include_issues flag: when true, issues are embedded inline;
when false or omitted, the issues field is a URL pointing at the
project's issues endpoint. """
project = ProjectFactory()
db.session.commit()
IssueFactory(project_id=project.id)
db.session.commit()
got = self.app.get("/api/projects?include_issues=True")
project = json.loads(got.data)['objects'][0]
self.assertTrue(isinstance(project['issues'], list))
got = self.app.get("/api/projects?include_issues=False")
project = json.loads(got.data)['objects'][0]
self.assertFalse(isinstance(project['issues'], list))
# The URL hard-codes project id 1 — valid because this is the only
# project created in this test's fresh database.
self.assertEqual("http://localhost/api/projects/1/issues", project["issues"])
got = self.app.get("/api/projects")
project = json.loads(got.data)['objects'][0]
self.assertFalse(isinstance(project['issues'], list))
self.assertEqual("http://localhost/api/projects/1/issues", project["issues"])
| 2.296875 | 2 |
storable/core.py | CowboyTim/python-storable | 8 | 12758267 | <filename>storable/core.py
#
# License
#
# python storable is distributed under the zlib/libpng license, which is OSS
# (Open Source Software) compliant.
#
# Copyright (C) 2009 <NAME>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
# <NAME> <<EMAIL>>
#
from functools import wraps
from io import BytesIO
from struct import calcsize, unpack
import logging
import sys
# Python 3 removed xrange; alias it to range so the py2-style loops below
# work on both interpreter lines.
if sys.version_info > (3, 0):
xrange = range
def id_():
    """Yield an endless sequence of consecutive integers starting at 1.

    Used to tag log records produced by ``maybelogged`` so the entry and
    exit lines of one call can be correlated.
    """
    counter = 1
    while True:
        yield counter
        counter += 1
# Shared generator handing out unique ids for correlating log lines.
ID_GENERATOR = id_()
# Module-level logger used by the optional deserialisation tracing.
LOG = logging.getLogger(__name__)
# Set to True *before importing this module* to wrap every deserialisation
# function with entry/exit logging (see maybelogged below).
DEBUG = False
def _guess_type(data):
"""
In Perl, the "scalar" type maps to different Python types. Strictly
speaking, the only *correct* output would be bytes objects. But this causes
a discrepancy when using "frozen" storables and non-frozen storables (unless
the generated test-data is wrong). For now, we will use the conversion
functions below to "guess" the type.
"""
try:
converted_result = float(data)
if converted_result.is_integer():
# use "data" again to avoid rounding errors
converted_result = int(data)
except ValueError:
converted_result = None
if converted_result is None:
try:
converted_result = data.decode('ascii')
except UnicodeDecodeError:
converted_result = None
return data if converted_result is None else converted_result
def maybelogged(f):
    """
    Optionally wrap *f* with entry/exit logging.

    When the module-level DEBUG flag is False (it must be set before this
    module is imported) the function is returned untouched, so there is no
    runtime cost.  Otherwise every call logs its arguments and its result,
    tagged with a unique id so nested calls can be told apart.
    """
    if not DEBUG:
        return f

    @wraps(f)
    def wrapper(*args, **kwargs):
        call_id = next(ID_GENERATOR)
        LOG.debug('[%s] Entering %s with args=%r, kwargs=%r',
                  call_id, f.__name__, args, kwargs)
        result = f(*args, **kwargs)
        LOG.debug('[%s] Result: %r', call_id, result)
        return result
    return wrapper
@maybelogged
def _read_size(fh, cache):
    """Read a platform-sized unsigned int (array/hash/string length)."""
    fmt = cache['size_unpack_fmt']
    (size,) = unpack(fmt, fh.read(calcsize(fmt)))
    return size
@maybelogged
def SX_OBJECT(fh, cache):
    """Back-reference to an already-stored object.

    The tag is always written in network order (per Storable.xs).  Returns
    a (0, tag) placeholder that handle_sx_object_refs() later replaces
    with the real object, and flags the cache so that pass runs.
    """
    tag = SX_NETINT(fh, cache)
    cache['has_sx_object'] = True
    return (0, tag)
@maybelogged
def SX_LSCALAR(fh, cache):
    """Large scalar: platform-sized length prefix followed by raw bytes."""
    return _guess_type(fh.read(_read_size(fh, cache)))
@maybelogged
def SX_LUTF8STR(fh, cache):
    """Large UTF-8 string: platform-sized length prefix, then encoded text."""
    raw = fh.read(_read_size(fh, cache))
    return raw.decode('utf-8')
@maybelogged
def SX_ARRAY(fh, cache):
    """Perl array: element count, then that many serialized items."""
    count = _read_size(fh, cache)
    return [process_item(fh, cache) for _ in xrange(count)]
@maybelogged
def SX_HASH(fh, cache):
    """Perl hash: pair count, then (value, length-prefixed key) pairs."""
    result = {}
    for _ in xrange(_read_size(fh, cache)):
        # Storable writes the value first, then the key.
        value = process_item(fh, cache)
        key = _guess_type(fh.read(_read_size(fh, cache)))
        result[key] = value
    return result
@maybelogged
def SX_REF(fh, cache):
    """Perl reference: no indirection in Python, so return the target."""
    return process_item(fh, cache)
@maybelogged
def SX_UNDEF(fh, cache):
    """Undefined scalar maps to None."""
    return None
@maybelogged
def SX_INTEGER(fh, cache):
    """Native integer in the writer's byte order and word size."""
    fmt = cache['int_unpack_fmt']
    (value,) = unpack(fmt, fh.read(calcsize(fmt)))
    return value
@maybelogged
def SX_DOUBLE(fh, cache):
    """Native floating point number in the writer's byte order and size."""
    fmt = cache['double_unpack_fmt']
    (value,) = unpack(fmt, fh.read(calcsize(fmt)))
    return value
@maybelogged
def SX_BYTE(fh, cache):
    """Small signed int stored as one unsigned byte offset by 128."""
    return _read_unsigned_byte(fh) - 128
@maybelogged
def SX_NETINT(fh, cache):
    """32-bit unsigned integer in network (big-endian) byte order."""
    raw = fh.read(calcsize('!I'))
    return unpack('!I', raw)[0]
@maybelogged
def SX_SCALAR(fh, cache):
    """Small scalar: one-byte length prefix followed by raw bytes."""
    length = _read_unsigned_byte(fh)
    return _guess_type(fh.read(length))
@maybelogged
def SX_UTF8STR(fh, cache):
    """Small UTF-8 string: one-byte length prefix, then encoded text."""
    length = _read_unsigned_byte(fh)
    return fh.read(length).decode('utf-8')
@maybelogged
def SX_TIED_ARRAY(fh, cache):
    """Tied array: tie magic is not modelled; return the inner object."""
    return process_item(fh, cache)
@maybelogged
def SX_TIED_HASH(fh, cache):
    """Tied hash: handled identically to a tied array."""
    return SX_TIED_ARRAY(fh, cache)
@maybelogged
def SX_TIED_SCALAR(fh, cache):
    """Tied scalar: handled identically to a tied array."""
    return SX_TIED_ARRAY(fh, cache)
@maybelogged
def SX_SV_UNDEF(fh, cache):
    """Perl's immortal PL_sv_undef maps to None."""
    return None
@maybelogged
def SX_SV_YES(fh, cache):
    """Perl's immortal PL_sv_yes maps to True."""
    return True
@maybelogged
def SX_SV_NO(fh, cache):
    """Perl's immortal PL_sv_no maps to False."""
    return False
@maybelogged
def SX_BLESS(fh, cache):
    """Blessed object: record the package name, then return the data.

    The class name is not attached to the returned value; it is only
    appended to cache['classes'] so later SX_IX_BLESS entries can refer
    to it by index.
    """
    name_length = _read_unsigned_byte(fh)
    package_name = fh.read(name_length)
    cache['classes'].append(package_name)
    return process_item(fh, cache)
@maybelogged
def SX_IX_BLESS(fh, cache):
    """Blessed object whose class was seen before; class given by index.

    The lookup also validates the index against cache['classes'] (it
    raises IndexError on corrupt input); the name itself is unused.
    """
    class_index = _read_unsigned_byte(fh)
    package_name = cache['classes'][class_index]
    return process_item(fh, cache)
@maybelogged
def SX_OVERLOAD(fh, cache):
    """Overloaded reference: overload magic is ignored; return the target."""
    return process_item(fh, cache)
@maybelogged
def SX_TIED_KEY(fh, cache):
    """Tied hash key: read the object and its key, discard the key."""
    data = process_item(fh, cache)
    process_item(fh, cache)  # the magic key itself is not used
    return data
@maybelogged
def SX_TIED_IDX(fh, cache):
    """Tied array index: read the object and its index, discard the index."""
    data = process_item(fh, cache)
    # Indexes are written big-endian by freeze/nfreeze; read and ignore.
    SX_NETINT(fh, cache)
    return data
@maybelogged
def SX_HOOK(fh, cache):
"""Object stored via a user-defined STORABLE_freeze hook.

Reads the flag byte(s), the class name (inline or by index), the frozen
string, and an optional list of back-references, then returns them as a
plain dict keyed 0..n instead of invoking STORABLE_thaw (see FIXME below).
Flag-bit names in the comments follow Storable.xs.
"""
flags = _read_unsigned_byte(fh)
# Recursively stored sub-objects precede this one; consume and drop them.
while flags & 0x40: # SHF_NEED_RECURSE
dummy = process_item(fh, cache)
flags = _read_unsigned_byte(fh)
if flags & 0x20: # SHF_IDX_CLASSNAME
# Class already seen: its name is given as an index into cache['classes'].
if flags & 0x04: # SHF_LARGE_CLASSLEN
# TODO: test
fmt = '>I'
indx = unpack(fmt, fh.read(calcsize(fmt)))[0]
else:
indx = _read_unsigned_byte(fh)
package_name = cache['classes'][indx]
else:
# First occurrence: class name is inline, preceded by its length.
if flags & 0x04: # SHF_LARGE_CLASSLEN
# TODO: test
# FIXME: is this actually possible?
class_size = _read_size(fh, cache)
else:
class_size = _read_unsigned_byte(fh)
package_name = fh.read(class_size)
cache['classes'].append(package_name)
arguments = {}
# The frozen string produced by STORABLE_freeze, stored under key 0.
if flags & 0x08: # SHF_LARGE_STRLEN
str_size = _read_size(fh, cache)
else:
str_size = _read_unsigned_byte(fh)
if str_size:
frozen_str = _guess_type(fh.read(str_size))
arguments[0] = frozen_str
# Optional list of object-cache references, stored under keys 1..n.
if flags & 0x80: # SHF_HAS_LIST
if flags & 0x10: # SHF_LARGE_LISTLEN
list_size = _read_size(fh, cache)
else:
list_size = _read_unsigned_byte(fh)
for i in xrange(list_size):
fmt = '>I'
indx_in_array = unpack(fmt, fh.read(calcsize(fmt)))[0]
arguments[i + 1] = cache['objects'].get(indx_in_array)
# FIXME: implement the real callback STORABLE_thaw() still, for now, just
# return the dictionary 'arguments' as data
type = flags & 0x03 # SHF_TYPE_MASK 0x03
data = arguments
if type == 3: # SHT_EXTRA
# TODO
pass
if type == 0: # SHT_SCALAR
# TODO
pass
if type == 1: # SHT_ARRAY
# TODO
pass
if type == 2: # SHT_HASH
# TODO
pass
return data
@maybelogged
def SX_FLAG_HASH(fh, cache):
"""Hash with per-entry flags (e.g. restricted hashes / UTF-8 keys).

Layout: hash-level flag byte, pair count, then for each pair a value,
a per-key flag byte, and a length-prefixed key.  All flag bytes are
currently read but ignored (see TODO).
"""
# TODO: NOT YET IMPLEMENTED!!!!!!
flags = _read_unsigned_byte(fh)
size = _read_size(fh, cache)
data = {}
for i in xrange(size):
value = process_item(fh, cache)
flags = _read_unsigned_byte(fh)
keysize = _read_size(fh, cache)
key = None
if keysize:
key = fh.read(keysize)
data[key] = value
return data
def SX_VSTRING(fh, cache):
# Perl vstring (e.g. v1.2.3) stored as a small scalar: strip the leading
# 'v' and split on dots into a tuple of components.
# NOTE(review): assumes SX_SCALAR yields text -- if _guess_type returned
# bytes or a number here, value[1:].split('.') would fail; confirm inputs.
value = SX_SCALAR(fh, cache)
return tuple(x for x in value[1:].split('.'))
def SX_LVSTRING(fh, cache):
# Large-form vstring: same decoding as SX_VSTRING but with a
# platform-sized length prefix (via SX_LSCALAR).
# NOTE(review): same text-only assumption as SX_VSTRING -- confirm inputs.
value = SX_LSCALAR(fh, cache)
return tuple(x for x in value[1:].split('.'))
# *AFTER* all the subroutines
# Dispatch table: one-byte Storable type tag -> reader function.
# Tag values and comments mirror the SX_* constants in Storable.xs.
engine = {
b'\x00': SX_OBJECT, # ( 0): Already stored object
b'\x01': SX_LSCALAR, # ( 1): Scalar (large binary) follows (length, data)
b'\x02': SX_ARRAY, # ( 2): Array forthcoming (size, item list)
b'\x03': SX_HASH, # ( 3): Hash forthcoming (size, key/value pair list)
b'\x04': SX_REF, # ( 4): Reference to object forthcoming
b'\x05': SX_UNDEF, # ( 5): Undefined scalar
b'\x06': SX_INTEGER, # ( 6): Integer forthcoming
b'\x07': SX_DOUBLE, # ( 7): Double forthcoming
b'\x08': SX_BYTE, # ( 8): (signed) byte forthcoming
b'\x09': SX_NETINT, # ( 9): Integer in network order forthcoming
b'\x0a': SX_SCALAR, # (10): Scalar (binary, small) follows (length, data)
b'\x0b': SX_TIED_ARRAY, # (11): Tied array forthcoming
b'\x0c': SX_TIED_HASH, # (12): Tied hash forthcoming
b'\x0d': SX_TIED_SCALAR, # (13): Tied scalar forthcoming
b'\x0e': SX_SV_UNDEF, # (14): Perl's immortal PL_sv_undef
b'\x0f': SX_SV_YES, # (15): Perl's immortal PL_sv_yes
b'\x10': SX_SV_NO, # (16): Perl's immortal PL_sv_no
b'\x11': SX_BLESS, # (17): Object is blessed
b'\x12': SX_IX_BLESS, # (18): Object is blessed, classname given by index
b'\x13': SX_HOOK, # (19): Stored via hook, user-defined
b'\x14': SX_OVERLOAD, # (20): Overloaded reference
b'\x15': SX_TIED_KEY, # (21): Tied magic key forthcoming
b'\x16': SX_TIED_IDX, # (22): Tied magic index forthcoming
b'\x17': SX_UTF8STR, # (23): UTF-8 string forthcoming (small)
b'\x18': SX_LUTF8STR, # (24): UTF-8 string forthcoming (large)
b'\x19': SX_FLAG_HASH, # (25): Hash with flags forthcoming (size, flags, key/flags/value triplet list)
b'\x1d': SX_VSTRING, # (29): vstring forthcoming (small)
b'\x1e': SX_LVSTRING, # (30): vstring forthcoming (large)
}
# Tag types that do NOT claim a slot in cache['objects'] (see process_item):
# back-references, tied wrappers and bless markers delegate to an inner item
# which takes the slot instead.
exclude_for_cache = {
b'\x00',
b'\x0b',
b'\x0c',
b'\x0d',
b'\x11',
b'\x12',
}
@maybelogged
def handle_sx_object_refs(cache, data):
    """
    Resolve SX_OBJECT back-references in place.

    Walks lists and dicts recursively, replacing every (0, tag) tuple
    placeholder with the real object recorded in cache['objects'].
    Scalars are left alone (nothing to resolve); containers are returned
    after mutation.
    """
    container_type = type(data)
    if container_type is list:
        entries = enumerate(data)
    elif container_type is dict:
        entries = iter(data.items())
    else:
        return
    for key, value in entries:
        value_type = type(value)
        if value_type is list or value_type is dict:
            handle_sx_object_refs(cache, value)
        elif value_type is tuple:
            data[key] = cache['objects'][value[1]]
    return data
@maybelogged
def process_item(fh, cache):
    """
    Read one serialized item by dispatching on its one-byte type tag.

    Most items claim the next slot in cache['objects'] so SX_OBJECT
    back-references can point at them; tags listed in exclude_for_cache
    (back-refs, tied wrappers, bless markers) do not occupy a slot.
    """
    magic_type = fh.read(1)
    if magic_type in exclude_for_cache:
        return engine[magic_type](fh, cache)
    # Reserve the slot number *before* parsing so nested items number
    # themselves after this one.
    slot = cache['objectnr']
    cache['objectnr'] += 1
    data = engine[magic_type](fh, cache)
    cache['objects'][slot] = data
    return data
@maybelogged
def thaw(frozen_data):
    """Deserialize a Perl Storable 'freeze' byte string and return the value."""
    stream = BytesIO(frozen_data)
    result = deserialize(stream)
    stream.close()
    return result
@maybelogged
def retrieve(filepath):
    """Deserialize a Perl Storable 'store' file.

    Returns None unless the file begins with the 4-byte 'pst0' magic.
    """
    result = None
    with open(filepath, 'rb') as stream:
        if stream.read(4) == b'pst0':
            result = deserialize(stream)
    return result
def _read_unsigned_byte(fh):
return unpack('B', fh.read(1))[0]
def skip_magic_header_if_present(fh):
    """Consume the optional 4-byte 'pst0' file magic; rewind if absent."""
    start = fh.tell()
    if fh.read(4) != b'pst0':
        fh.seek(start)
@maybelogged
def deserialize(fh):
    """Decode one Perl Storable value from binary file object *fh*.

    Reads the Storable header to determine byte order and the integer /
    pointer / double sizes used by the writing perl, builds the struct
    unpack format strings, decodes the root item via process_item(), and
    finally resolves any SX_OBJECT back-references seen along the way.
    """
    skip_magic_header_if_present(fh)
    # Low bit of the first header byte flags network (big-endian) byte
    # order; the remaining bits carry the Storable major version.
    magic_byte = _read_unsigned_byte(fh)
    is_network_byte_order = (magic_byte & 1) == 1
    major_version_number = magic_byte >> 1
    minor_version_number = _read_unsigned_byte(fh)
    nvsize = 8  # Size of double in bytes
    integer_formats = {
        2: 'H',
        4: 'I',
        8: 'Q',
    }
    double_formats = {
        4: 'f',
        8: 'd',
    }
    if is_network_byte_order:
        byteorder = '!'
        # TODO: unsure what these values should be when reading a net-order
        # file
        intsize = 4
        longsize = 8
        ptrsize = 4
    else:
        # Native-order files embed a byte-order string (see table below).
        size = _read_unsigned_byte(fh)
        archsize = fh.read(size)
        # 32-bit ppc: 4321
        # 32-bit x86: 1234
        # 64-bit x86_64: 12345678
        # 64-bit ppc: 87654321
        if archsize == b'1234' or archsize == b'12345678':
            byteorder = '<'
        else:
            byteorder = '>'
        x = fh.read(3)
        intsize, longsize, ptrsize = unpack('3B', x)
        if (major_version_number, minor_version_number) >= (2, 2):
            nvsize = _read_unsigned_byte(fh)
            if nvsize > 8:
                raise ValueError('Cannot handle 16 byte doubles')
    cache = {
        'objects': {},  # decoded items by sequential slot (see process_item)
        'objectnr': 0,  # next slot number to assign
        'classes': [],  # class names seen for blessed objects
        'has_sx_object': False,  # set when an SX_OBJECT back-ref is decoded
        'size_unpack_fmt': byteorder + integer_formats[intsize],
        'int_unpack_fmt': byteorder + integer_formats[longsize],
        'double_unpack_fmt': byteorder + double_formats[nvsize],
    }
    data = process_item(fh, cache)
    if cache['has_sx_object']:
        # Second pass: swap placeholder tuples for the cached objects.
        handle_sx_object_refs(cache, data)
    return data
| 1.890625 | 2 |
kitt/kitt.py | OiNutter/microbit-scripts | 1 | 12758268 | from microbit import *
display.scroll("I am the Knight Industries 2000")
# Display geometry / brightness limits for the 5x5 micro:bit LED matrix.
MAX_ROWS = 4  # NOTE(review): used as the max *column* (x) index despite the name
MAX_BRIGHTNESS = 9  # micro:bit pixel brightness range is 0..9
MIN_BRIGHTNESS = 2  # dim "trail" level left behind the moving scanner
def scan(reverse=False):
    """Sweep a lit pixel across row 2 of the LED matrix, leaving a dimming
    trail behind it (the KITT scanner effect).

    reverse=False sweeps left-to-right; reverse=True sweeps right-to-left.
    Runs 9 steps so the trail fully exits the 5-pixel-wide row.
    """
    for i in range(0, 9):
        brightness = MAX_BRIGHTNESS
        # Positions drawn this frame: the head plus its trail so far.
        row_range = range(0, i+1) if not reverse else range(i, -1, -1)
        counter = 0
        for j in row_range:
            x = i - j if not reverse else MAX_ROWS - j
            # Head is brightest; each trail pixel steps down, floored at
            # MIN_BRIGHTNESS once we are 2+ pixels behind the head.
            light_level = max(MIN_BRIGHTNESS, brightness) if counter >= 2 else MAX_BRIGHTNESS - counter
            print (x, light_level)  # NOTE(review): debug output left in
            if x <= MAX_ROWS and x >= 0:
                display.set_pixel(x, 2, light_level)
            counter += 1
            #if i >= 2:
            brightness -= 1
        print("-")  # NOTE(review): debug output left in
        if i < 8:
            sleep(100)
    # Leave the whole row at the dim trail level between sweeps.
    for x in range(0, MAX_ROWS+1):
        display.set_pixel(x, 2, MIN_BRIGHTNESS)
# Run the scanner back and forth forever (KITT-style sweep).
while True:
    scan()
    scan(reverse=True)
| 3.703125 | 4 |
tests/unit/test_utils/tests_example_datasets/test_example_get_games.py | RelevanceAI/RelevanceAI | 21 | 12758269 | import pytest
def test_get_games_dataset_subset():
    """The games example dataset honours the number_of_documents cap."""
    from relevanceai.utils.datasets import get_games_dataset

    documents = get_games_dataset(number_of_documents=100)
    assert len(documents) == 100
| 1.9375 | 2 |
catalog/bindings/csw/coordinates_type.py | NIVANorge/s-enda-playground | 0 | 12758270 | <gh_stars>0
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CoordinatesType:
    """Tables or arrays of tuples.

    May be used for text-encoding of values from a table. Actually just
    a string, but allows the user to indicate which characters are used
    as separators. The value of the 'cs' attribute is the separator for
    coordinate values, and the value of the 'ts' attribute gives the
    tuple separator (a single space by default); the default values may
    be changed to reflect local usage. Defaults to CSV within a tuple,
    space between tuples. However, any string content will be schema-
    valid.
    """
    # The raw coordinate text itself (element content).
    value: str = field(
        default="",
        metadata={
            "required": True,
        },
    )
    # Character used as the decimal point within a coordinate value.
    decimal: str = field(
        default=".",
        metadata={
            "type": "Attribute",
        },
    )
    # Separator between coordinate values inside one tuple.
    cs: str = field(
        default=",",
        metadata={
            "type": "Attribute",
        },
    )
    # Separator between tuples.
    ts: str = field(
        default=" ",
        metadata={
            "type": "Attribute",
        },
    )
| 2.90625 | 3 |
quickstart/rok/quickstart_rok.py | CharlesRngrd/toucan-connectors | 15 | 12758271 | import requests
from datetime import datetime, timedelta
import jwt
import base64
import json

secret = ''
# The secret provided by ROK is b64 encoded, we need to decode it for jwt
notb64_secret = base64.b64decode(secret)

# Registered JWT claims plus the claims agreed on with ROK.  exp/iat/nbf are
# unix timestamps serialized as strings.
data = {
    'aud': 'Rok-solution',
    'iss': '',
    'exp': str(int((datetime.now() + timedelta(minutes=10)).timestamp())),
    'email': '',
    'iat': str(int(datetime.now().timestamp())),
    'nbf': str(int(datetime.now().timestamp())),
}

token = jwt.encode(data, notb64_secret, algorithm='HS256')

# BUG FIX: the previous payload was "{'query':''}" (single quotes), which is
# not valid JSON even though the Content-Type header claims application/json.
response = requests.post(
    'https://demo.rok-solution.com/graphql',
    data=json.dumps({'query': ''}),
    headers={
        'DatabaseName': '',
        'JwtString': token,
        'Accept': 'application/json',
        'Content-Type': 'application/json',
    },
).text

print(response)
| 2.6875 | 3 |
functions/emailTrigger/main.py | Raghavi-Adoni/covid-gdgpune | 4 | 12758272 | <reponame>Raghavi-Adoni/covid-gdgpune<gh_stars>1-10
def sendCustomEmail(email, msg):
    """Send *msg* from the configured sender account to *email* via Gmail SMTPS.

    Credentials come from the local config module.  Returns True once the
    message has been handed to the SMTP server.
    """
    from config import senderEmail, senderPassword
    import smtplib, ssl

    # BUG FIX: the TLS context was created but never used; pass it to
    # SMTP_SSL so certificate verification uses the explicitly built context.
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as smtp:
        smtp.login(senderEmail, senderPassword)
        smtp.sendmail(senderEmail, email, msg)
    return True
def emailTrigger(data, context):
    """Cloud-function entry point for Firestore document updates.

    When the update touched the 'email' field, sends a welcome mail to
    the new address and returns True; otherwise returns False.
    """
    if 'email' not in data['updateMask']['fieldPaths']:
        return False
    recipient = data['value']['fields']['email']['stringValue']
    subject = 'Covid Help'
    body = "Hey,\n\nAs a developer community we are working on a support against COVID project. We hope we can help you with your requirements.\n\nYour feedback will help us improve the portal.\n\nThanks for registering your email with our covid help platform.\n\nThanks & Regards,\nTeam covid help"
    message = f'To:{recipient}\nSubject: {subject}\n\n{body}'
    sendCustomEmail(recipient, message)
    return True
sigmoid.py | rigovides/multiclass-logistic-regression | 2 | 12758273 | <reponame>rigovides/multiclass-logistic-regression
import numpy as np
def sigmoid(z):
    """Numerically stable logistic function 1 / (1 + exp(-z)).

    Accepts scalars or numpy arrays.  The naive form overflows in np.exp
    for large negative z; evaluating the mathematically equivalent
    exp(z) / (1 + exp(z)) on that branch keeps every intermediate finite.
    """
    z = np.asarray(z, dtype=float)
    # Clip each branch's argument so np.exp never overflows; np.where then
    # selects the appropriate stable form per element.
    pos = 1.0 / (1.0 + np.exp(-np.clip(z, 0.0, None)))  # valid for z >= 0
    ez = np.exp(np.clip(z, None, 0.0))                  # valid for z <= 0
    neg = ez / (1.0 + ez)
    out = np.where(z >= 0, pos, neg)
    return out.item() if out.ndim == 0 else out
pyxnat/tests/sessionmirror_test.py | Udolf15/pyxnat | 0 | 12758274 | import sys
import os.path as op
import pyxnat
from . import skip_if_no_network
# Locate the installed pyxnat package and put its sibling bin/ directory on
# sys.path so the sessionmirror script can be imported by the tests below.
_modulepath = op.dirname(op.abspath(pyxnat.__file__))
dd = op.join(op.split(_modulepath)[0], 'bin')
sys.path.append(dd)
@skip_if_no_network
def test_001_sessionmirror():
    """Copy experiment CENTRAL_E74609 into project 'nosetests3' on the same
    server using the bundled sessionmirror script."""
    from sessionmirror import create_parser, main
    parser = create_parser()
    cfg = op.join(op.dirname(op.abspath(__file__)), 'central.cfg')
    central = pyxnat.Interface(config=cfg)
    e = 'CENTRAL_E74609'
    # Source (--h1) and destination (--h2) use the same config; only the
    # target project differs.
    args = ['--h1', cfg, '--h2', cfg, '-e', e, '-p', 'nosetests3']
    args = parser.parse_args(args)
    main(args)
    # NOTE(review): e1 is fetched but never asserted against — presumably a
    # smoke check that the experiment is still queryable; confirm and assert.
    e1 = central.array.experiments(experiment_id=e,
                                   columns=['subject_label']).data[0]
@skip_if_no_network
def test_002_deletesubject():
    """Delete the experiment copy created by test_001 so the test project
    is left clean for the next run."""
    print('DELETING')
    cfg = op.join(op.dirname(op.abspath(__file__)), 'central.cfg')
    central = pyxnat.Interface(config=cfg)
    e = 'CENTRAL_E74609'
    # Look up the labels of the original experiment, then resolve the
    # mirrored copy inside 'nosetests3' through those labels.
    e0 = central.array.experiments(experiment_id=e,
                                   columns=['subject_label', 'label']).data[0]
    subject_label = e0['subject_label']
    experiment_label = e0['label']
    e1 = central.array.experiments(project_id='nosetests3',
                                   subject_label=subject_label,
                                   experiment_label=experiment_label,
                                   columns=['subject_id']).data[0]
    e2 = central.select.project('nosetests3').subject(e1['subject_ID']).experiment(e1['ID'])
    assert(e2.exists())
    e2.delete()
    assert(not e2.exists())
| 2.046875 | 2 |
levels/migrations/0001_initial.py | saberworks/massassi-django | 0 | 12758275 | <filename>levels/migrations/0001_initial.py
# Generated by Django 3.2.10 on 2022-01-03 06:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import levels.util
import massassi.models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the levels app.

    Creates the Level, LevelRating, LevelComment and LevelCategory tables,
    then wires Level's foreign keys (category plus created_by /
    last_modified_by audit users).  Do not hand-edit field definitions.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Level',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('file', models.FileField(null=True, upload_to=massassi.models.get_file_upload_to)),
                ('file_size', models.PositiveIntegerField(blank=True, null=True)),
                ('file_hash', models.CharField(blank=True, editable=False, max_length=40)),
                ('name', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('author', models.CharField(max_length=128)),
                ('email', models.EmailField(max_length=254)),
                ('dl_count', models.PositiveIntegerField(default=0)),
                ('comment_count', models.PositiveIntegerField(default=0)),
                ('rate_count', models.PositiveIntegerField(default=0)),
                ('rating', models.PositiveIntegerField(blank=True, default=None, null=True)),
                ('screenshot_1', models.ImageField(null=True, upload_to=levels.util.get_screenshot_1_upload_path)),
                ('screenshot_2', models.ImageField(null=True, upload_to=levels.util.get_screenshot_2_upload_path)),
            ],
            options={
                'db_table': 'levels',
            },
        ),
        migrations.CreateModel(
            name='LevelRating',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('ip', models.GenericIPAddressField(default='0.0.0.0')),
                ('rating', models.PositiveSmallIntegerField()),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('level', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='levels.level')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'level_ratings',
            },
        ),
        migrations.CreateModel(
            name='LevelComment',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('comment', models.TextField()),
                ('ip', models.GenericIPAddressField(default='0.0.0.0')),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('level', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='levels.level')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'level_comments',
            },
        ),
        migrations.CreateModel(
            name='LevelCategory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('path', models.CharField(max_length=16)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(blank=True, null=True)),
                ('enable_3dpreview', models.BooleanField(default=False)),
                ('game', models.CharField(max_length=16, null=True)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Level Categories',
                'db_table': 'level_categories',
            },
        ),
        migrations.AddField(
            model_name='level',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='levels.levelcategory'),
        ),
        migrations.AddField(
            model_name='level',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='level',
            name='last_modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 1.773438 | 2 |
autokeras/bayesian.py | shubhamwagh/autokeras | 0 | 12758276 | <reponame>shubhamwagh/autokeras
import random
import time
import warnings
from copy import deepcopy
from functools import total_ordering
from queue import PriorityQueue
import numpy as np
import math
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import linear_sum_assignment
from autokeras.net_transformer import transform
def layer_distance(a, b):
    """Normalized distance between two layer widths: |a - b| / max(a, b).

    Returns 0.0 when the widths are equal; in particular this guards the
    a == b == 0 case, which previously raised ZeroDivisionError.
    """
    if a == b:
        return 0.0
    return abs(a - b) * 1.0 / max(a, b)
def layers_distance(list_a, list_b):
    """Edit distance between two layer-width lists.

    Classic Levenshtein dynamic program with unit insert/delete cost and
    layer_distance() as the substitution cost.
    """
    n, m = len(list_a), len(list_b)
    f = np.zeros((n + 1, m + 1))
    # Base cases: distance to/from the empty prefix.
    for i in range(n + 1):
        f[i][0] = i
    for j in range(m + 1):
        f[0][j] = j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            f[i][j] = min(
                f[i][j - 1] + 1,
                f[i - 1][j] + 1,
                f[i - 1][j - 1] + layer_distance(list_a[i - 1], list_b[j - 1]),
            )
    return f[n][m]
def skip_connection_distance(a, b):
    """Distance between two skip connections given as (start, end, type).

    Connections of different types are maximally distant (1.0); otherwise
    the distance blends start-offset and length difference, normalized by
    the larger start and the larger span.
    """
    if a[2] != b[2]:
        return 1.0
    span_a = abs(a[1] - a[0])
    span_b = abs(b[1] - b[0])
    numerator = abs(a[0] - b[0]) + abs(span_a - span_b)
    return numerator / (max(a[0], b[0]) + max(span_a, span_b))
def skip_connections_distance(list_a, list_b):
    """Minimal-cost matching distance between two skip-connection lists.

    Solves the assignment problem over pairwise skip_connection_distance
    costs (Hungarian algorithm) and adds the list-length difference as a
    penalty for unmatched connections.
    """
    costs = np.zeros((len(list_a), len(list_b)))
    for row, conn_a in enumerate(list_a):
        for col, conn_b in enumerate(list_b):
            costs[row][col] = skip_connection_distance(conn_a, conn_b)
    rows, cols = linear_sum_assignment(costs)
    return costs[rows, cols].sum() + abs(len(list_a) - len(list_b))
def edit_distance(x, y, kernel_lambda):
    """Distance between two network descriptors.

    Sum of the conv-width and dense-width edit distances plus the
    kernel_lambda-weighted skip-connection matching distance.
    """
    conv = layers_distance(x.conv_widths, y.conv_widths)
    dense = layers_distance(x.dense_widths, y.dense_widths)
    skips = skip_connections_distance(x.skip_connections, y.skip_connections)
    return conv + dense + kernel_lambda * skips
class IncrementalGaussianProcess:
    """Gaussian-process regressor over network descriptors that supports
    incremental refitting as new (descriptor, metric) pairs arrive.

    The kernel is exp(-d), where d is the Bourgain-embedded edit distance
    between descriptors (skip connections weighted by kernel_lambda).
    """

    def __init__(self, kernel_lambda):
        self.alpha = 1e-10  # jitter added to the kernel diagonal for stability
        self._k_matrix = None          # kernel matrix K
        self._distance_matrix = None   # embedded pairwise distance matrix
        self._x = None                 # training descriptors
        self._y = None                 # training metric values
        self._first_fitted = False
        self._l_matrix = None          # lower Cholesky factor of K
        self._alpha_vector = None      # K^-1 y
        self.edit_distance_matrix = edit_distance_matrix
        self.kernel_lambda = kernel_lambda

    @property
    def kernel_matrix(self):
        return self._distance_matrix

    def fit(self, train_x, train_y):
        """Fit on the first call, incrementally refit on later calls."""
        if self.first_fitted:
            self.incremental_fit(train_x, train_y)
        else:
            self.first_fit(train_x, train_y)

    def incremental_fit(self, train_x, train_y):
        """Extend the kernel matrix with new points and refactorize."""
        if not self._first_fitted:
            raise ValueError("The first_fit function needs to be called first.")
        train_x, train_y = np.array(train_x), np.array(train_y)
        # Incrementally compute K by bordering the existing distance matrix.
        up_right_k = self.edit_distance_matrix(self.kernel_lambda, self._x, train_x)
        down_left_k = np.transpose(up_right_k)
        down_right_k = self.edit_distance_matrix(self.kernel_lambda, train_x)
        up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
        down_k = np.concatenate((down_left_k, down_right_k), axis=1)
        self._distance_matrix = np.concatenate((up_k, down_k), axis=0)
        self._distance_matrix = bourgain_embedding_matrix(self._distance_matrix)
        self._k_matrix = 1.0 / np.exp(self._distance_matrix)
        # Jitter only the diagonal entries belonging to the new points.
        diagonal = np.diag_indices_from(self._k_matrix)
        diagonal = (diagonal[0][-len(train_x):], diagonal[1][-len(train_x):])
        self._k_matrix[diagonal] += self.alpha
        self._x = np.concatenate((self._x, train_x), axis=0)
        self._y = np.concatenate((self._y, train_y), axis=0)
        self._l_matrix = cholesky(self._k_matrix, lower=True)  # Line 2
        self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3
        return self

    @property
    def first_fitted(self):
        return self._first_fitted

    def first_fit(self, train_x, train_y):
        """Initial fit on the first batch of training points."""
        train_x, train_y = np.array(train_x), np.array(train_y)
        self._x = np.copy(train_x)
        self._y = np.copy(train_y)
        self._distance_matrix = self.edit_distance_matrix(self.kernel_lambda, self._x)
        self._distance_matrix = bourgain_embedding_matrix(self._distance_matrix)
        self._k_matrix = 1.0 / np.exp(self._distance_matrix)
        self._k_matrix[np.diag_indices_from(self._k_matrix)] += self.alpha
        self._l_matrix = cholesky(self._k_matrix, lower=True)  # Line 2
        self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3
        self._first_fitted = True
        return self

    def predict(self, train_x):
        """Return (posterior mean, posterior std) for each descriptor."""
        k_trans = 1.0 / np.exp(self.edit_distance_matrix(self.kernel_lambda, train_x, self._x))
        y_mean = k_trans.dot(self._alpha_vector)  # Line 4 (y_mean = f_star)
        # compute inverse K_inv of K based on its Cholesky
        # decomposition L and its inverse L_inv
        l_inv = solve_triangular(self._l_matrix.T, np.eye(self._l_matrix.shape[0]))
        k_inv = l_inv.dot(l_inv.T)
        # Compute variance of predictive distribution.
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        y_var = np.ones(len(train_x), dtype=float)
        y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans)
        # Check if any of the variances is negative because of
        # numerical issues. If yes: set the variance to 0.
        y_var_negative = y_var < 0
        if np.any(y_var_negative):
            warnings.warn("Predicted variances smaller than 0. "
                          "Setting those variances to 0.")
            y_var[y_var_negative] = 0.0
        return y_mean, np.sqrt(y_var)
def edit_distance_matrix(kernel_lambda, train_x, train_y=None):
    """Pairwise edit-distance matrix between descriptor arrays.

    With train_y omitted, returns the symmetric matrix over train_x,
    computing each unordered pair once; otherwise returns the rectangular
    cross-distance matrix between train_x rows and train_y rows.
    """
    if train_y is None:
        n = train_x.shape[0]
        out = np.zeros((n, n))
        for i in range(n):
            for j in range(i + 1, n):
                d = edit_distance(train_x[i], train_x[j], kernel_lambda)
                out[i][j] = d
                out[j][i] = d
        return out
    out = np.zeros((train_x.shape[0], train_y.shape[0]))
    for i, x in enumerate(train_x):
        for j, y in enumerate(train_y):
            out[i][j] = edit_distance(x, y, kernel_lambda)
    return out
def vector_distance(a, b):
    """Euclidean distance between two equal-length vectors."""
    return np.linalg.norm(np.array(a) - np.array(b))
def bourgain_embedding_matrix(distance_matrix):
    """Bourgain-embed the points described by *distance_matrix* and return
    the pairwise Euclidean distances of the embedded coordinate vectors.

    Each point's embedding coordinate for a sampled subset is its minimum
    distance to that subset; k+1 subset sizes are sampled, t repetitions
    each.  Seeded for determinism.

    BUG FIX: the repetition loop previously ran `for t in range(t)`,
    shadowing and shrinking the repetition count on every outer iteration
    (and the sampled index set shadowed itself with `for s in s`).
    Distinct loop variables restore the intended k x t sampling rounds.
    """
    distance_matrix = np.array(distance_matrix)
    n = len(distance_matrix)
    if n == 1:
        return distance_matrix
    np.random.seed(123)
    distort_elements = []
    r = range(n)
    k = int(math.ceil(math.log(n) / math.log(2) - 1))
    t = int(math.ceil(math.log(n)))
    for i in range(0, k + 1):
        for rep in range(t):
            sample = np.random.choice(r, 2 ** i)
            for j in r:
                d = min(distance_matrix[j][idx] for idx in sample)
                if i == 0 and rep == 0:
                    distort_elements.append([d])
                else:
                    distort_elements[j].append(d)
    distort_matrix = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            distort_matrix[i][j] = distort_matrix[j][i] = vector_distance(
                distort_elements[i], distort_elements[j])
    return np.array(distort_matrix)
class BayesianOptimizer:
    """Bayesian optimizer with simulated-annealing tree search.

    Maintains a Gaussian-process surrogate (``gpr``) over previously
    trained architectures and searches network morphs of existing models
    for the best acquisition value (mean +/- beta * std depending on the
    metric direction), with temperature t controlling acceptance of
    non-optimal parents.

    gpr: A GaussianProcessRegressor for bayesian optimization.
    """
    def __init__(self, searcher, t_min, metric, kernel_lambda, beta):
        self.searcher = searcher  # provides trained models and their metrics
        self.t_min = t_min        # annealing stop temperature
        self.metric = metric      # exposes higher_better()
        self.gpr = IncrementalGaussianProcess(kernel_lambda)
        self.beta = beta          # exploration weight in the acquisition

    def fit(self, x_queue, y_queue):
        """Update the surrogate with new (descriptor, metric) pairs."""
        self.gpr.fit(x_queue, y_queue)

    def optimize_acq(self, model_ids, descriptors, timeout):
        """Search morphs of the given models for the best acquisition value.

        Returns (graph, father_id) for the chosen new architecture, or
        (None, None) when every candidate duplicated a known descriptor.
        Raises TimeoutError when the time budget is exhausted.
        """
        start_time = time.time()
        target_graph = None
        father_id = None
        descriptors = deepcopy(descriptors)
        # ReverseElem inverts ordering so the best metric pops first.
        elem_class = Elem
        if self.metric.higher_better():
            elem_class = ReverseElem
        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9  # annealing cooling rate per expanded parent
        opt_acq = self._get_init_opt_acq_value()
        remaining_time = timeout
        while not pq.empty() and t > t_min and remaining_time > 0:
            elem = pq.get()
            # Metropolis-style acceptance probability for expanding a
            # parent whose metric is worse than the current optimum.
            if self.metric.higher_better():
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                for temp_graph in transform(elem.graph):
                    # Skip morphs that duplicate an already-seen network.
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue
                    temp_acq_value = self.acq(temp_graph)
                    pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)
        if remaining_time < 0:
            raise TimeoutError
        # Did not found a not duplicated architecture
        if father_id is None:
            return None, None
        # Replay the winning morph sequence on a fresh copy of the parent.
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id

    def acq(self, graph):
        """Confidence-bound acquisition value for one candidate graph."""
        mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
        if self.metric.higher_better():
            return mean + self.beta * std
        return mean - self.beta * std

    def _get_init_opt_acq_value(self):
        # Worst possible value for the metric's direction.
        if self.metric.higher_better():
            return -np.inf
        return np.inf

    def _accept_new_acq_value(self, opt_acq, temp_acq_value):
        # True when temp_acq_value improves on opt_acq in the metric's direction.
        if temp_acq_value > opt_acq and self.metric.higher_better():
            return True
        if temp_acq_value < opt_acq and not self.metric.higher_better():
            return True
        return False
@total_ordering
class Elem:
    """Priority-queue entry (metric_value, father_id, graph), ordered
    ascending by metric_value so the smallest value pops first."""

    def __init__(self, metric_value, father_id, graph):
        self.metric_value = metric_value
        self.father_id = father_id
        self.graph = graph

    def __eq__(self, other):
        return self.metric_value == other.metric_value

    def __lt__(self, other):
        return self.metric_value < other.metric_value
class ReverseElem(Elem):
    """Elem with inverted ordering: the largest metric_value pops first."""

    def __lt__(self, other):
        return other.metric_value < self.metric_value
def contain(descriptors, target_descriptor):
    """True if target_descriptor (near-)duplicates an existing descriptor,
    i.e. some entry is within 1e-5 edit distance of it."""
    return any(
        edit_distance(descriptor, target_descriptor, 1) < 1e-5
        for descriptor in descriptors
    )
| 2.21875 | 2 |
tests/utils/test_common.py | WhiteOnBlackCode/proxion | 4 | 12758277 | <reponame>WhiteOnBlackCode/proxion
from proxion.util.common import *
def test_is_proxy_format():
    """is_proxy_format accepts only well-formed 'ip:port' strings."""
    # Valid boundary values.
    assert is_proxy_format('0.0.0.0:0')
    assert is_proxy_format('255.255.255.255:65535')
    # Rejected: empty/None, octet or port out of range, sign/letter noise.
    # (Replaced non-idiomatic `== True/== False` comparisons with plain
    # truth tests and dropped a duplicated '1.1.1.1:-20' assertion.)
    assert not is_proxy_format(None)
    assert not is_proxy_format('')
    assert not is_proxy_format('256.255.255.255:65535')
    assert not is_proxy_format('255.255.255.255:65536')
    assert not is_proxy_format('1.1.1.1:-20')
    assert not is_proxy_format('-1.1.1.1:20')
    assert not is_proxy_format('a1.1.1.1:20')
    assert not is_proxy_format('1.1.1.1:20a')
def test_is_ip_addr():
    """is_ip_address accepts only well-formed dotted-quad IPv4 strings."""
    assert is_ip_address('0.0.0.0')
    assert is_ip_address('255.255.255.255')
    # Rejected: empty/None, out-of-range octet, sign/letter noise.
    assert not is_ip_address(None)
    assert not is_ip_address('')
    assert not is_ip_address('256.255.255.255')
    assert not is_ip_address('-1.1.1.1')
    assert not is_ip_address('a1.1.1.1')
| 2.5 | 2 |
projects/SimpleCalcV2.py | MBLTech/p4ye | 0 | 12758278 | '''
###############################################
# ####################################### #
#### ######## Simple Calculator ########## ####
# ####################################### #
###############################################
## ##
##########################################
############ Version 2 #################
##########################################
## ##
'''
'''
Version 2 of SimpleCalc.py adds extra functionality but, due to my limited
approach, also has to remove some functions, which I hope will be restored
alongside these new features in a later version.
This version reports the total number of inputs, their sum and their average,
no matter how many inputs you give it, but at the same time it is unable
to calculate multiplication and division of the numbers.
'''
print("Type 'done' to Quit.")

# Running total of valid numeric inputs (renamed from 'sum', which
# shadowed the builtin) and the count of valid entries.
total = 0
count = 0

while True:
    num = input('Input your number: ')
    if num == 'done':
        print('goodbye')
        break
    try:
        fnum = float(num)
    except ValueError:  # narrowed from a bare except: only bad numbers
        print('bad input')
        continue
    total = total + fnum
    count = count + 1

print('----Total Inputs:', count)
print('----Sum:', total)
# BUG FIX: typing 'done' before any number used to crash with
# ZeroDivisionError; only print the average when there is data.
if count > 0:
    print('----Average:', total / count)
| 3.828125 | 4 |
test.py | cricketsamya/BluetoothPython | 0 | 12758279 | <filename>test.py
import struct
from bluepy import btle
class MyDelegate(btle.DefaultDelegate):
    """BLE notification delegate that prints every payload received."""

    def __init__(self):
        super().__init__()

    def handleNotification(self, cHandle, data):
        # cHandle identifies the source characteristic; data is raw bytes.
        print(data)
if __name__ == '__main__':
    # NOTE(review): 0x2a38 is the GATT "Body Sensor Location" UUID, not
    # Heart Rate Measurement (0x2a37) — confirm which was intended.
    heartrate_uuid = btle.UUID(0x2a38)
    p = btle.Peripheral("80:6c:1b:f0:fa:7b")  # hard-coded device MAC address
    p.setDelegate(MyDelegate())
    print("Connected")
    try:
        print("Setting Characteristics")
        ch = p.getCharacteristics(uuid=heartrate_uuid)[0]
        print("Setting Done, writing now")
        # Write 0x0001 to the characteristic — presumably enabling
        # notifications; TODO confirm this targets the right descriptor.
        ch.write(struct.pack('<bb', 0x01, 0x00))
        print("writing Done, looping now")
        while True:
            # Block up to 1s for a notification; MyDelegate prints the data.
            if p.waitForNotifications(1.0):
                print("Notification trigger")
                continue
            print("Waiting")
    finally:
        # Always release the BLE connection, even on error/Ctrl-C.
        p.disconnect()
| 2.890625 | 3 |
errorhandler/__init__.py | kanzihuang/errorhandler | 3 | 12758280 | # Copyright (c) 2008 Simplistix Ltd
# See license.txt for license details.
from logging import Handler,ERROR,getLogger
class ErrorHandler(Handler):
    """A logging handler that records whether a record at or above *level*
    was emitted, without producing any output itself.

    Typical use: attach to a logger, run some code, then check ``fired``
    to see whether any errors were logged.
    """

    fired = False  # class-level default; shadowed per-instance on emit()/reset()

    def __init__(self, level=ERROR, logger='', install=True):
        """level: minimum record level that trips the handler.
        logger: name of the logger to attach to ('' = root logger).
        install: attach to the logger immediately when True.
        """
        Handler.__init__(self)
        self.level = level
        self.logger = logger
        if install:
            self.install()

    def install(self):
        """Attach this handler to the configured logger."""
        self.setLevel(self.level)
        getLogger(self.logger).addHandler(self)

    def emit(self, record):
        # Just remember that something was logged; emit no output.
        self.fired = True

    def reset(self):
        """Clear the fired flag so the handler can be reused."""
        self.fired = False

    def remove(self):
        """Detach this handler from the logger it was installed on.

        BUG FIX: previously removed the handler from the *root* logger
        unconditionally, leaking the handler whenever install() had
        attached it to a named logger.
        """
        getLogger(self.logger).removeHandler(self)
| 2.59375 | 3 |
05-Data-Structures-Doubly-Linked-Lists/35-DLL-Prepend/src/DLL-Prepend.py | covuworie/data-structures-and-algorithms | 0 | 12758281 | from typing import Optional
class Node:
    """A doubly linked list node holding an int value and neighbor links."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.prev: Optional[Node] = None
        self.next: Optional[Node] = None
class DoublyLinkedList:
    """Minimal doubly linked list tracking head, tail and length."""

    def __init__(self, value: int) -> None:
        node = Node(value)
        self.head = node
        self.tail = node
        self.length = 1

    def print_list(self) -> None:
        """Print each value from head to tail, one per line."""
        current = self.head
        while current is not None:
            print(current.value)
            current = current.next

    def append(self, value: int) -> bool:
        """Add a node holding *value* at the tail; always returns True."""
        node = Node(value)
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            node.prev = self.tail
            self.tail.next = node
            self.tail = node
        self.length += 1
        return True

    def pop(self) -> Optional[Node]:
        """Detach and return the tail node, or None if the list is empty."""
        if self.length == 0:
            return None
        node = self.tail
        if self.length == 1:
            self.head = None
            self.tail = None
        else:
            self.tail = node.prev
            self.tail.next = None
            node.prev = None
        self.length -= 1
        return node

    def prepend(self, value: int) -> bool:
        """Add a node holding *value* at the head; always returns True."""
        node = Node(value)
        if self.length == 0:
            self.head = node
            self.tail = node
        else:
            node.next = self.head
            self.head.prev = node
            self.head = node
        self.length += 1
        return True
# Quick manual check: build [2], append 3, prepend 1 — prints 1, 2, 3.
my_doubly_linked_list = DoublyLinkedList(2)
my_doubly_linked_list.append(3)
my_doubly_linked_list.prepend(1)
my_doubly_linked_list.print_list()
| 4.0625 | 4 |
azure_functions_devops_build/repository/github_repository_manager.py | coolgeeck/delwar1 | 16 | 12758282 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
from ..exceptions import (
GithubContentNotFound,
GithubIntegrationRequestError,
GithubUnauthorizedError,
)
from ..base.base_github_manager import BaseGithubManager
class GithubRepositoryManager(BaseGithubManager):
    """Read and write repository files through the GitHub REST v3 API."""

    def check_github_repository(self, repository_fullname):
        """Return True if the repository exists and is accessible."""
        header_parameters = self.construct_github_request_header()
        request = self._client.get('/repos/{repo}'.format(repo=repository_fullname))
        response = self._client.send(request, header_parameters)
        # Simplified to match check_github_file: any 2xx means success
        # (previously an if/return True/return False chain).
        return response.status_code // 100 == 2

    def check_github_file(self, repository_fullname, file_path):
        """Return True if *file_path* exists in the repository."""
        header_parameters = self.construct_github_request_header()
        request = self._client.get('/repos/{repo}/contents/{path}'.format(
            repo=repository_fullname,
            path=file_path
        ))
        response = self._client.send(request, header_parameters)
        return response.status_code // 100 == 2

    def _raise_github_error(self, response, action, repository_fullname, file_path):
        """Raise the exception matching a failed 401/404 GitHub response.

        *action* is the verb used in the 401 message ('read' or 'write');
        messages are identical to the previously duplicated inline versions.
        Returns normally for other status codes so callers can raise their
        own final error.
        """
        if response.status_code == 401:
            raise GithubUnauthorizedError('Failed to {action} {repo}/{path}'.format(
                action=action,
                repo=repository_fullname,
                path=file_path
            ))
        if response.status_code == 404:
            raise GithubContentNotFound('Failed to find {repo}/{path}'.format(
                repo=repository_fullname,
                path=file_path
            ))

    def get_content(self, repository_fullname, file_path, get_metadata=True):
        """Fetch a file's metadata (default) or raw content.

        Returns the decoded JSON response on success; raises
        GithubUnauthorizedError / GithubContentNotFound /
        GithubIntegrationRequestError on failure.
        """
        header_parameters = self.construct_github_request_header()
        if get_metadata:  # Get files metadata
            header_parameters['Content-Type'] = 'application/json'
        else:  # Get files content
            header_parameters['Accept'] = 'application/vnd.github.v3.raw'

        request = self._client.get('/repos/{repo}/contents/{path}'.format(
            repo=repository_fullname,
            path=file_path
        ))
        response = self._client.send(request, header_parameters)

        # The response is a Json content
        if response.status_code // 100 == 2:
            return response.json()
        self._raise_github_error(response, 'read', repository_fullname, file_path)
        raise GithubIntegrationRequestError(response.status_code)

    def put_content(self, repository_fullname, file_path, data):
        """Create or update a file with the given commit payload *data*.

        Returns the raw response on success; raises the same exception
        family as get_content on failure.
        """
        header_parameters = self.construct_github_request_header()
        header_parameters['Content-Type'] = 'Application/Json'
        request = self._client.put(
            url='/repos/{repo}/contents/{path}'.format(repo=repository_fullname, path=file_path),
            headers=header_parameters,
            content=data
        )
        response = self._client.send(request)

        if response.status_code // 100 == 2:
            return response
        self._raise_github_error(response, 'write', repository_fullname, file_path)
        raise GithubIntegrationRequestError("{res.status_code} {res.url}".format(res=response))

    def commit_file(self, repository_fullname, file_path, commit_message, file_data, sha=None, encode='utf-8'):
        """Commit *file_data* to master, base64-encoded as the API requires.

        Pass *sha* of the existing blob when updating an existing file.
        """
        data = {
            "branch": "master",
            "message": "{message}".format(message=commit_message),
            "content": base64.b64encode(bytes(file_data.encode(encode))).decode('ascii'),
        }
        if sha:
            data["sha"] = sha
        return self.put_content(
            repository_fullname=repository_fullname,
            file_path=file_path,
            data=data
        )
| 2.390625 | 2 |
xrpl/models/base_model.py | mDuo13/xrpl-py | 0 | 12758283 | <reponame>mDuo13/xrpl-py
"""The base class for all model types."""
from __future__ import annotations
from abc import ABC
from dataclasses import fields
from enum import Enum
from typing import Any, Dict, Type, Union, get_type_hints
from xrpl.models.exceptions import XRPLModelException
from xrpl.models.required import REQUIRED
class BaseModel(ABC):
    """The base class for all model types."""

    @classmethod
    def is_dict_of_model(cls: Type[BaseModel], dictionary: Dict[str, Any]) -> bool:
        """
        Returns True if the input dictionary was derived by the `to_dict`
        method of an instance of this class. In other words, True if this is
        a dictionary representation of an instance of this class.

        NOTE: does not account for model inheritance, IE will only return True
        if dictionary represents an instance of this class, but not if
        dictionary represents an instance of a subclass of this class.

        Args:
            dictionary: The dictionary to check.

        Returns:
            True if dictionary is a dict representation of an instance of this
            class.
        """
        # Exact key-set comparison: missing or extra keys both disqualify.
        return isinstance(dictionary, dict) and set(get_type_hints(cls).keys()) == set(
            dictionary.keys()
        )

    @classmethod
    def from_dict(cls: Type[BaseModel], value: Dict[str, Any]) -> BaseModel:
        """
        Construct a new BaseModel from a dictionary of parameters.

        If not overridden, passes the dictionary as args to the constructor.

        Args:
            value: The value to construct the BaseModel from.

        Returns:
            A new BaseModel object, constructed using the given parameters.

        Raises:
            XRPLModelException: If the dictionary provided is invalid.
        """
        # returns a dictionary mapping class params to their types
        class_types = get_type_hints(cls)

        args = {}
        for param in value:
            if param not in class_types:
                raise XRPLModelException(
                    f"{param} not a valid parameter for {cls.__name__}"
                )

            # Deliberately an exact type match (not isinstance): values whose
            # type differs from the annotation are routed through the
            # special-case conversion below (e.g. dicts for nested models).
            if type(value[param]) == class_types[param]:
                # the type of the param provided matches the type expected for the param
                args[param] = value[param]
            else:
                args[param] = cls._from_dict_special_cases(
                    param, class_types[param], value[param]
                )

        init = cls._get_only_init_args(args)
        # Ignore type-checking on this for now to simplify subclass constructors
        # which might pass non kwargs.
        return cls(**init)  # type: ignore

    @classmethod
    def _from_dict_special_cases(
        cls: Type[BaseModel],
        param: str,
        param_type: Type[Any],
        param_value: Dict[str, Any],
    ) -> Union[str, Enum, BaseModel, Dict[str, Any]]:
        """Handles all the recursive/more complex cases for `from_dict`."""
        # Imported lazily to avoid circular imports with the model modules.
        from xrpl.models.amounts import Amount, IssuedCurrencyAmount
        from xrpl.models.currencies import XRP, Currency, IssuedCurrency
        from xrpl.models.transactions.transaction import Transaction

        # TODO: figure out how to make Unions work generically (if possible)
        if param_type == Amount:
            # special case, Union: an Amount is either an XRP drops string or
            # an issued-currency dict.
            if isinstance(param_value, str):
                return param_value
            if not isinstance(param_value, dict):
                raise XRPLModelException(
                    f"{param_type} requires a dictionary of params"
                )
            return IssuedCurrencyAmount.from_dict(param_value)

        if param_type == Currency:
            # special case, Union: choose XRP vs IssuedCurrency by key shape.
            if not isinstance(param_value, dict):
                raise XRPLModelException(
                    f"{param_type} requires a dictionary of params"
                )
            if "currency" in param_value and "issuer" in param_value:
                return IssuedCurrency.from_dict(param_value)
            if "currency" in param_value:
                # XRP.from_dict does not accept a "currency" key, so strip it.
                param_value_copy = {**param_value}
                del param_value_copy["currency"]
                return XRP.from_dict(param_value_copy)
            raise XRPLModelException(f"No valid type for {param}")

        if param_type == Transaction:
            # special case, multiple options (could be any Transaction type)
            if "transaction_type" not in param_value:
                raise XRPLModelException(
                    f"{param} not a valid parameter for {cls.__name__}"
                )
            type_str = param_value["transaction_type"]
            # safely convert type string into the actual type
            transaction_type = Transaction.get_transaction_type(type_str)
            param_value_copy = {**param_value}
            del param_value_copy["transaction_type"]
            return transaction_type.from_dict(param_value_copy)

        if param_type in BaseModel.__subclasses__():
            # any other BaseModel
            if not isinstance(param_value, dict):
                raise XRPLModelException(
                    f"{param_type} requires a dictionary of params"
                )
            # mypy doesn't know that the If checks that it's a subclass of BaseModel
            return param_type.from_dict(param_value)  # type: ignore

        if param_type in Enum.__subclasses__():
            # mypy doesn't know that the If checks that it's a subclass of Enum
            return param_type(param_value)  # type: ignore

        # No special case matched; pass the raw value through unchanged.
        return param_value

    @classmethod
    def _get_only_init_args(
        cls: Type[BaseModel], args: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Filter `args` down to the dataclass fields this class accepts."""
        init_keys = {field.name for field in fields(cls)}
        valid_args = {key: value for key, value in args.items() if key in init_keys}
        return valid_args

    def __post_init__(self: BaseModel) -> None:
        """Called by dataclasses immediately after __init__."""
        self.validate()

    def validate(self: BaseModel) -> None:
        """
        Raises if this object is invalid.

        Raises:
            XRPLModelException: if this object is invalid.
        """
        errors = self._get_errors()
        if len(errors) > 0:
            raise XRPLModelException(str(errors))

    def is_valid(self: BaseModel) -> bool:
        """
        Returns whether this BaseModel is valid.

        Returns:
            Whether this BaseModel is valid.
        """
        return len(self._get_errors()) == 0

    def _get_errors(self: BaseModel) -> Dict[str, str]:
        """
        Extended in subclasses to define custom validation logic.

        Returns:
            Dictionary of any errors found on self.
        """
        # REQUIRED is a sentinel used for fields that must be supplied.
        return {
            attr: f"{attr} is not set"
            for attr, value in self.__dict__.items()
            if value is REQUIRED
        }

    def to_dict(self: BaseModel) -> Dict[str, Any]:
        """
        Returns the dictionary representation of a BaseModel.

        If not overridden, returns the object dict with all non-None values.

        Returns:
            The dictionary representation of a BaseModel.
        """
        return {
            key: self._to_dict_elem(value)
            for key, value in self.__dict__.items()
            if value is not None
        }

    def _to_dict_elem(self: BaseModel, elem: Any) -> Any:
        """Recursively convert one attribute value for `to_dict`."""
        if isinstance(elem, BaseModel):
            return elem.to_dict()
        if isinstance(elem, Enum):
            return elem.value
        if isinstance(elem, list):
            return [
                self._to_dict_elem(sub_elem)
                for sub_elem in elem
                if sub_elem is not None
            ]
        return elem

    def __eq__(self: BaseModel, other: object) -> bool:
        """Compares a BaseModel to another object to determine if they are equal."""
        # NOTE: defining __eq__ without __hash__ makes instances unhashable,
        # which is the intended behavior for these (mutable) models.
        return isinstance(other, BaseModel) and self.to_dict() == other.to_dict()

    def __repr__(self: BaseModel) -> str:
        """Returns a string representation of a BaseModel object."""
        repr_items = [f"{key}={repr(value)}" for key, value in self.to_dict().items()]
        # Join the items so the result reads `Model(a=1, b=2)`. Previously the
        # list itself was interpolated, producing `Model(['a=1', 'b=2'])`.
        return f"{type(self).__name__}({', '.join(repr_items)})"
| 2.734375 | 3 |
ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py | willwill1101/ambari | 3 | 12758284 | <filename>ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py<gh_stars>1-10
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import os
import ambari_simplejson as json
import sys
from ambari_commons import shell
import threading
from FileCache import FileCache
from AgentException import AgentException
from PythonExecutor import PythonExecutor
from PythonReflectiveExecutor import PythonReflectiveExecutor
import Constants
import hostname
logger = logging.getLogger()
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """
  # NOTE(review): this module uses Python 2-only syntax (`except X, e`,
  # dict.has_key, `<>`, 0600 octal) — it targets the Python 2 agent runtime.
  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'
  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"
  # Keys of compressed entries inside clusterHostInfo.
  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  RACKS_KEY = "all_racks"
  IPV4_ADDRESSES_KEY = "all_ipv4_ips"
  AMBARI_SERVER_HOST = "ambari_server_host"
  # Commands whose failures are routine (frequent polls) and need no debug logging.
  DONT_DEBUG_FAILURES_FOR_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
  REFLECTIVELY_RUN_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS] # -- commands which run a lot and often (this increases their speed)
  def __init__(self, config, controller):
    """Set up temp/cache paths and register cache reset on agent registration.

    Args:
      config: agent configuration object (provides 'agent' section values).
      controller: agent controller; its registration listeners get the
        file-cache reset callback appended.
    """
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = Constants.AGENT_TMP_DIR
    self.file_cache = FileCache(config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    # Maps taskId -> pid while running, or -> cancel reason string once canceled.
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}
  def map_task_to_process(self, task_id, processId):
    """Record the OS pid executing the given task (used for cancellation)."""
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
      self.commands_in_progress[task_id] = processId
  def cancel_command(self, task_id, reason):
    """Kill the process tree of a running task and store the cancel reason.

    The pid entry is replaced by the reason string so that
    command_canceled_reason() can later report why the command was aborted.
    """
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress.keys():
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, " \
                    "reason - {reason} . Killing process {pid}"
                    .format(tid=str(task_id), reason=reason, pid=pid))
        shell.kill_process_with_children(pid)
      else:
        logger.warn("Unable to find pid by taskId = %s" % task_id)
  def get_py_executor(self, forced_command_name):
    """
    Wrapper for unit testing
    :return:
    """
    # Frequent commands (status checks) run in-process for speed.
    if forced_command_name in self.REFLECTIVELY_RUN_COMMANDS:
      return PythonReflectiveExecutor(self.tmp_dir, self.config)
    else:
      return PythonExecutor(self.tmp_dir, self.config)
  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name=None,
                 override_output_files=True, retry=False):
    """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.

    Resolves the script/hook files for the command, dumps the command to a
    json file, and runs pre-hook, script, and post-hook in order. Returns a
    dict with 'stdout', 'stderr', 'structuredOut' and 'exitcode'; never
    raises (failures are converted into an exitcode=1 result).
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass  # Status commands have no taskId
      if forced_command_name is not None:  # If not supplied as an argument
        command_name = forced_command_name
      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, 'scripts', script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        # forces a hash challenge on the directories to keep them updated, even
        # if the return type is not used
        self.file_cache.get_host_scripts_base_dir(server_url_prefix)
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script)
        script_tuple = (script_path, base_dir)
      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                     "structured-out-{0}.json".format(task_id))
      # We don't support anything else yet
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      handle = None
      # '__handle' is present only for background commands; it must be removed
      # before the command dict is serialized to json below.
      if command.has_key('__handle'):
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']
      json_path = self.dump_command_to_json(command, retry)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]
      logger_level = logging.getLevelName(logger.level)
      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1:
        raise AgentException("Background commands are supported without hooks only")
      python_executor = self.get_py_executor(forced_command_name)
      for py_file, current_base_dir in filtered_py_file_list:
        log_info_on_failure = not command_name in self.DONT_DEBUG_FAILURES_FOR_COMMANDS
        script_params = [command_name, json_path, current_base_dir, tmpstrucoutfile, logger_level, self.exec_tmp_dir]
        ret = python_executor.run_file(py_file, script_params,
                                       tmpoutfile, tmperrfile, timeout,
                                       tmpstrucoutfile, self.map_task_to_process,
                                       task_id, override_output_files, handle = handle, log_info_on_failure=log_info_on_failure)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        # Stop the chain on the first failing script/hook.
        if ret['exitcode'] != 0:
          break
      if not ret: # Something went wrong
        raise AgentException("No script has been executed")
      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          # Surface the cancel reason both in the returned dict and on disk.
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason
          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)
    except Exception, e: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Caught an exception while executing "\
        "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret
  def command_canceled_reason(self, task_id):
    """Return '\\nCommand aborted. <reason>' if the task was canceled, else None.

    A canceled task's commands_in_progress entry holds the reason string in
    place of the pid, which is how cancellation is detected here.
    """
    with self.commands_in_progress_lock:
      if self.commands_in_progress.has_key(task_id):#Background command do not push in this collection (TODO)
        logger.debug('Pop with taskId %s' % task_id)
        pid = self.commands_in_progress.pop(task_id)
        if not isinstance(pid, int):
          return '\nCommand aborted. ' + pid
    return None
  def requestComponentStatus(self, command):
    """
    Component status is determined by exit code, returned by runCommand().
    Exit code 0 means that component is running and any other exit code means that
    component is not running
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    return res
  def requestComponentSecurityState(self, command):
    """
    Determines the current security state of the component
    A command will be issued to trigger the security_status check and the result of this check will
    returned to the caller. If the component lifecycle script has no security_status method the
    check will return non zero exit code and "UNKNOWN" will be returned.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    security_check_res = self.runCommand(command, self.status_commands_stdout,
                                         self.status_commands_stderr, self.COMMAND_NAME_SECURITY_STATUS,
                                         override_output_files=override_output_files)
    result = 'UNKNOWN'
    # Walk the result defensively: any missing piece leaves result as UNKNOWN.
    if security_check_res is None:
      logger.warn("The return value of the security_status check was empty, the security status is unknown")
    elif 'exitcode' not in security_check_res:
      logger.warn("Missing 'exitcode' value from the security_status check result, the security status is unknown")
    elif security_check_res['exitcode'] != 0:
      logger.debug("The 'exitcode' value from the security_status check result indicated the check routine failed to properly execute, the security status is unknown")
    elif 'structuredOut' not in security_check_res:
      logger.warn("Missing 'structuredOut' value from the security_status check result, the security status is unknown")
    elif 'securityState' not in security_check_res['structuredOut']:
      logger.warn("Missing 'securityState' value from the security_status check structuredOut data set, the security status is unknown")
    else:
      result = security_check_res['structuredOut']['securityState']
    return result
  def resolve_script_path(self, base_dir, script):
    """
    Encapsulates logic of script location determination.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path
  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir
  def dump_command_to_json(self, command, retry=False):
    """
    Converts command to json file and returns file path
    """
    # Perform few modifications to stay compatible with the way in which
    public_fqdn = self.public_fqdn
    command['public_hostname'] = public_fqdn
    # Add cache dir to make it visible for commands
    command["hostLevelParams"]["agentCacheDir"] = self.config.get('agent', 'cache_dir')
    command["agentConfigParams"] = {"agent": {"parallel_execution": self.config.get_parallel_exec_option()}}
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      # On a retry the host info was already decompressed by the first attempt.
      if 'clusterHostInfo' in command and command['clusterHostInfo'] and not retry:
        command['clusterHostInfo'] = self.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
      if command_type == ActionQueue.AUTO_EXECUTION_COMMAND:
        file_path = os.path.join(self.tmp_dir, "auto_command-{0}.json".format(task_id))
    # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
  def decompressClusterHostInfo(self, clusterHostInfo):
    """Expand the server's compressed clusterHostInfo into full host lists.

    Role entries hold host *indexes* as range strings; ping ports, racks and
    IPs hold value:index-range mappings. Both are expanded against the
    all_hosts list and returned alongside the untouched metadata keys.
    """
    info = clusterHostInfo.copy()
    #Pop info not related to host roles
    hostsList = info.pop(self.HOSTS_LIST_KEY)
    pingPorts = info.pop(self.PING_PORTS_KEY)
    racks = info.pop(self.RACKS_KEY)
    ipv4_addresses = info.pop(self.IPV4_ADDRESSES_KEY)
    ambariServerHost = info.pop(self.AMBARI_SERVER_HOST)
    decompressedMap = {}
    for k,v in info.items():
      # Convert from 1-3,5,6-8 to [1,2,3,5,6,7,8]
      indexes = self.convertRangeToList(v)
      # Convert from [1,2,3,5,6,7,8] to [host1,host2,host3...]
      decompressedMap[k] = [hostsList[i] for i in indexes]
    #Convert from ['1:0-2,4', '42:3,5-7'] to [1,1,1,42,1,42,42,42]
    pingPorts = self.convertMappedRangeToList(pingPorts)
    racks = self.convertMappedRangeToList(racks)
    ipv4_addresses = self.convertMappedRangeToList(ipv4_addresses)
    #Convert all elements to str
    pingPorts = map(str, pingPorts)
    #Add ping ports to result
    decompressedMap[self.PING_PORTS_KEY] = pingPorts
    #Add hosts list to result
    decompressedMap[self.HOSTS_LIST_KEY] = hostsList
    #Add racks list to result
    decompressedMap[self.RACKS_KEY] = racks
    #Add ips list to result
    decompressedMap[self.IPV4_ADDRESSES_KEY] = ipv4_addresses
    #Add ambari-server host to result
    decompressedMap[self.AMBARI_SERVER_HOST] = ambariServerHost
    return decompressedMap
  # Converts from 1-3,5,6-8 to [1,2,3,5,6,7,8]
  def convertRangeToList(self, list):
    """Expand a sequence of range strings ("m-n" or "m") into a flat int list.

    NOTE(review): the parameter name shadows the builtin `list`; kept for
    compatibility with existing callers/tests.
    """
    resultList = []
    for i in list:
      ranges = i.split(',')
      for r in ranges:
        rangeBounds = r.split('-')
        if len(rangeBounds) == 2:
          if not rangeBounds[0] or not rangeBounds[1]:
            raise AgentException("Broken data in given range, expected - ""m-n"" or ""m"", got : " + str(r))
          resultList.extend(range(int(rangeBounds[0]), int(rangeBounds[1]) + 1))
        elif len(rangeBounds) == 1:
          resultList.append((int(rangeBounds[0])))
        else:
          raise AgentException("Broken data in given range, expected - ""m-n"" or ""m"", got : " + str(r))
    return resultList
  #Converts from ['1:0-2,4', '42:3,5-7'] to [1,1,1,42,1,42,42,42]
  def convertMappedRangeToList(self, list):
    """Expand 'value:index-ranges' strings into a list ordered by index.

    Numeric values are converted to int; the final list is ordered by the
    (sorted) index keys.
    """
    resultDict = {}
    for i in list:
      valueToRanges = i.split(":")
      if len(valueToRanges) <> 2:
        raise AgentException("Broken data in given value to range, expected format - ""value:m-n"", got - " + str(i))
      value = valueToRanges[0]
      rangesToken = valueToRanges[1]
      for r in rangesToken.split(','):
        rangeIndexes = r.split('-')
        if len(rangeIndexes) == 2:
          if not rangeIndexes[0] or not rangeIndexes[1]:
            raise AgentException("Broken data in given value to range, expected format - ""value:m-n"", got - " + str(r))
          start = int(rangeIndexes[0])
          end = int(rangeIndexes[1])
          for k in range(start, end + 1):
            resultDict[k] = value if not value.isdigit() else int(value)
        elif len(rangeIndexes) == 1:
          index = int(rangeIndexes[0])
          resultDict[index] = value if not value.isdigit() else int(value)
    # Rebuild in key order so values come out positionally by host index.
    resultList = dict(sorted(resultDict.items())).values()
    return resultList
| 1.828125 | 2 |
back/posts/views.py | Eidard/pocs-official-web | 6 | 12758285 | import os
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.http import JsonResponse, HttpResponse
from django.views import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db import transaction
from django.conf import settings
from .common import get_file_hash, remove_saved_files_and_empty_dirs, unmarkdown, trans_markdown_to_html_and_bleach, bleach_clean
from .forms import PostFormExceptFiles
from .validators import FileValidator
from accounts.models import Account
from accounts.serializers import AccountSerializerInPost
from .models import Post, PostFile
from .serializers import PostDetailSerializer, PostFileDetailSerializerForNonAnonymousUser
from board.models import Board
from board.serializers import BoardCategorySerializer
class PostCreateView(View):
    """Create a post with optional attachments (POST); dump all posts (GET)."""
    @method_decorator(login_required, name="dispatch")
    def post(self, request):
        """Validate the form and files, then atomically create the post.

        Any failure inside the transaction rolls back the DB rows and removes
        files already written to disk. Responses carry Korean user-facing
        messages (runtime strings; intentionally untranslated).
        """
        success_message = "글 생성에 성공했습니다."
        error_messages = {
            'wrong_request' : "잘못된 요청입니다. 다시 시도해주세요.",
            'not_permitted' : "글 생성 권한이 없습니다. 승인된 회원 계정으로 로그인 후 다시 시도해주세요.",
            'file_max_size_over' : f"첨부한 파일(들)의 총 용량이 글 하나당 저장 가능한 최대 용량({settings.MAX_FILE_UPLOAD_SIZE_TO_UNIT_NOTATION})을 넘어갑니다.",
            'invalid_board_id' : "입력한 보드가 존재하지 않습니다. 확인 후 다시 시도해주세요.",
            'not_exist_account' : "작성자의 회원 정보가 존재하지 않습니다. 관리자에게 연락해주세요.",
            'fail_create_post' : "글 생성에 실패했습니다. 확인 후 다시 시도해주세요."
        }
        data = request.POST
        if data is None:
            return JsonResponse({"message": error_messages['wrong_request']}, status=400)
        # Only active (approved) accounts may create posts.
        if not request.user.is_active:
            return JsonResponse({"message":error_messages['not_permitted']}, status=403)
        form = PostFormExceptFiles(data)
        if not form.is_valid():
            return JsonResponse({"message":form.errors}, status=400)
        board = Board.objects.filter(id=form.cleaned_data['board_id'])
        if not board.exists():
            return JsonResponse({"message":error_messages['invalid_board_id']}, status=404)
        account = Account.objects.filter(user_id=request.user.id)
        if not account.exists():
            return JsonResponse({"message":error_messages['not_exist_account']}, status=404)
        # Validate each attachment and the running total size against the
        # per-post upload cap.
        total_file_size = 0
        for file in request.FILES.getlist('files'):
            total_file_size += file.size
            fv = FileValidator(allowed_extensions=settings.ALLOWED_FILE_EXTENTIONS)
            try:
                if total_file_size > settings.MAX_FILE_UPLOAD_SIZE:
                    raise ValidationError(message=error_messages['file_max_size_over'])
                fv(file)
            except ValidationError as e:
                return JsonResponse({"message": e.message}, status=400)
        # Render markdown to sanitized HTML plus plain-text variants.
        md_content = form.cleaned_data['md_content']
        html_text = trans_markdown_to_html_and_bleach(md_content)
        plain_text = unmarkdown(md_content)
        background_image = settings.DEFAULT_IMAGE_RELATIVE_PATH if form.cleaned_data['background_image_url'] is None else form.cleaned_data['background_image_url']
        # Paths written to disk during the transaction; deleted on rollback.
        savedFilePaths = []
        try:
            with transaction.atomic():
                post = Post.objects.create(
                    title = bleach_clean(form.cleaned_data['title']),
                    content = html_text,
                    md_content = md_content,
                    plain_content = plain_text,
                    preview_content = plain_text[:128],
                    background_image_url = background_image,
                    board_id = board[0],
                    author_id = account[0],
                    hits = 0
                )
                savedFilePaths.append(post.background_image_real_relative_path)
                for f in request.FILES.getlist('files'):
                    ff = f.open()
                    # Hash is stored to detect re-uploads of identical files on edit.
                    file_hash = get_file_hash(ff)
                    file = PostFile.objects.create(
                        post_id = post,
                        title = f.name,
                        file = f,
                        hash = file_hash
                    )
                    ff.close()
                    # file.url starts with '/'; strip it to get a relative path.
                    savedFilePaths.append(file.file.url[1:])
                for tag in form.cleaned_data['tags'].split(','):
                    post.tags.add(tag.strip())
        except:
            # Deliberate broad catch: roll back any files written before failing.
            remove_saved_files_and_empty_dirs(savedFilePaths)
            return JsonResponse({"message":error_messages['fail_create_post']}, status=406)
        return JsonResponse({"message":success_message}, status=200)
    def get(self, request):
        """Return raw dumps of all posts and post files (debug-style listing)."""
        post = Post.objects.values()
        # post_tag = ', '.join(o.name for o in Post.tags.all())
        # print(post_tag)
        postfile = PostFile.objects.values()
        return JsonResponse({"list": list(post), "files": list(postfile)}, status=200)
class PostDetailView(View):
    """Update (POST), read (GET) or delete (DELETE) a single post."""
    @method_decorator(login_required, name="dispatch")
    def post(self, request, post_id):
        """Update a post; only the author or a superuser may edit.

        Attachments are reconciled by content hash: re-uploaded identical
        files are kept, new files are saved, files absent from the upload are
        deleted. Everything runs in one transaction with file cleanup on error.
        """
        success_message = "%(before_post_title)s 글을 수정하는데 성공했습니다"
        error_messages = {
            'wrong_request' : "잘못된 요청입니다. 다시 시도해주세요.",
            'not_exist_post' : "수정하려는 글이 존재하지 않습니다. 확인 후 다시 시도해주세요.",
            'not_permitted' : "해당 글 수정 권한이 없습니다. 작성자나 관리자 계정으로 로그인 후 다시 요청해주세요.",
            'file_max_size_over' : f"첨부한 파일(들)의 총 용량이 글 하나당 저장 가능한 최대 용량({settings.MAX_FILE_UPLOAD_SIZE_TO_UNIT_NOTATION})을 넘어갑니다.",
            'invalid_board_id' : "입력한 보드가 존재하지 않습니다. 확인 후 다시 시도해주세요.",
            'fail_update_post' : "'%(before_post_title)s' 글을 수정하는데 실패했습니다. 확인 후 다시 시도해주세요."
        }
        data = request.POST
        if data is None:
            return JsonResponse({"message":error_messages['wrong_request']}, status=400)
        post = Post.objects.filter(id=post_id)
        if not post.exists():
            return JsonResponse({"message":error_messages['not_exist_post']}, status=404)
        post = post[0]
        # Only the author or a superuser may edit.
        if post.author_id.id != request.user.id and not request.user.is_superuser:
            return JsonResponse({"message":error_messages['not_permitted']}, status=401)
        form = PostFormExceptFiles(data)
        if not form.is_valid():
            return JsonResponse({"message":form.errors}, status=400)
        board = Board.objects.filter(id=form.cleaned_data['board_id'])
        if not board.exists():
            return JsonResponse({"message":error_messages['invalid_board_id']}, status=404)
        # Validate each attachment and the running total size against the cap.
        total_file_size = 0
        for file in request.FILES.getlist('files'):
            total_file_size += file.size
            fv = FileValidator(allowed_extensions=settings.ALLOWED_FILE_EXTENTIONS)
            try:
                if total_file_size > settings.MAX_FILE_UPLOAD_SIZE:
                    raise ValidationError(message=error_messages['file_max_size_over'])
                fv(file)
            except ValidationError as e:
                return JsonResponse({"message": e.message}, status=400)
        md_content = form.cleaned_data['md_content']
        html_text = trans_markdown_to_html_and_bleach(md_content)
        plain_text = unmarkdown(md_content)
        before_post_title = post.title
        post.title = bleach_clean(form.cleaned_data['title'])
        post.content = html_text
        post.md_content = md_content
        post.plain_content = plain_text
        post.preview_content = plain_text[:128]
        post.board_id = board[0]
        # Old files scheduled for deletion once the transaction succeeds.
        removeFilePaths = []
        removeFilePaths.append(post.background_image_real_relative_path)
        post.background_image_url = settings.DEFAULT_IMAGE_RELATIVE_PATH if form.cleaned_data['background_image_url'] is None else form.cleaned_data['background_image_url']
        # New files written during the transaction; removed if it fails.
        savedFilePaths = []
        try:
            with transaction.atomic():
                post.save()
                files = PostFile.objects.filter(post_id=post_id)
                # fileList ends up holding the existing files NOT re-uploaded,
                # i.e. the ones to delete.
                fileList = list(files)
                for f in request.FILES.getlist('files'):
                    isItSameFile = False
                    try:
                        ff = f.open()
                        file_hash = get_file_hash(ff)
                        for file in files:
                            if file.hash == file_hash:
                                # Identical content already attached: keep it.
                                isItSameFile = True
                                fileList.remove(file)
                                ff.close()
                                break
                        if isItSameFile:
                            continue
                        fileInstance = PostFile.objects.create(
                            post_id = post,
                            title = f.name,
                            file = f,
                            hash = file_hash
                        )
                        savedFilePaths.append(fileInstance.file.url[1:])
                    finally:
                        # NOTE(review): `ff` is the handle from this iteration's
                        # f.open(); if f.open() itself raised, `ff` may be the
                        # previous iteration's handle (or unbound) — confirm.
                        if not ff.closed:
                            ff.close()
                for file in fileList:
                    removeFilePaths.append(file.file.url[1:])
                    file.delete()
                remove_saved_files_and_empty_dirs(removeFilePaths)
                post.tags.clear()
                for tag in data['tags'].split(','):
                    post.tags.add(tag.strip())
        except:
            # Deliberate broad catch: remove newly written files before failing.
            remove_saved_files_and_empty_dirs(savedFilePaths)
            return JsonResponse({"message" : error_messages['fail_update_post'] % {"before_post_title":before_post_title}}, status=406)
        return JsonResponse({"message":success_message % {"before_post_title":before_post_title}}, status=200)
    def get(self, request, post_id):
        """Return the post detail; increments the hit counter on each read."""
        error_messages = {
            "not_exist_post" : "해당 글이 존재하지 않습니다.",
            "data_load_fail" : "글을 불러오는데 실패했습니다. 다시 시도해주세요."
        }
        post = Post.objects.filter(id=post_id)
        if not post.exists():
            return JsonResponse({"message":error_messages['not_exist_post']}, status=404)
        post = post[0]
        response_data = PostDetailSerializer(post).data
        try:
            with transaction.atomic():
                # Active members get download metadata; anonymous/inactive
                # users only see file titles.
                if request.user.is_active:
                    fileInstances = PostFile.objects.filter(post_id=post_id)
                    files = [PostFileDetailSerializerForNonAnonymousUser(file).data for file in fileInstances]
                else:
                    fileInstances = PostFile.objects.filter(post_id=post_id).values('title')
                    files = list(fileInstances)
                tags = [x.name for x in post.tags.all()]
                post.hits += 1
                post.save(update_fields=['hits'])
        except:
            return JsonResponse({"message":error_messages['data_load_fail']}, status=406)
        response_data['tags'] = tags
        response_data['files'] = files
        response_data['author'] = AccountSerializerInPost(post.author_id).data
        response_data['board'] = BoardCategorySerializer(post.board_id).data
        return JsonResponse(response_data, status=200)
    @method_decorator(login_required, name="dispatch")
    def delete(self, request, post_id):
        """Delete a post with its files and tags; author or superuser only."""
        success_message = "'%(post_title)s' 글이 정상적으로 삭제되었습니다"
        error_messages = {
            "not_exist_post" : "해당 글이 존재하지 않습니다. 확인 후 다시 시도해주세요.",
            "not_permitted" : "해당 글 삭제 권한이 없습니다. 작성자나 관리자 계정으로 로그인 후 다시 시도해주세요.",
            "delete_fail" : "'%(post_title)s' 글을 삭제하는데 실패했습니다. 다시 시도해주세요."
        }
        post = Post.objects.filter(id=post_id)
        if not post.exists():
            return JsonResponse({"message" : error_messages['not_exist_post']}, status=404)
        post = post[0]
        if post.author_id.id != request.user.id and not request.user.is_superuser:
            return JsonResponse({"message" : error_messages['not_permitted']}, status=403)
        try:
            with transaction.atomic():
                # Remove on-disk files (background image + attachments) first,
                # then the DB rows.
                savedFilePaths = []
                savedFilePaths.append(post.background_image_real_relative_path)
                postFiles = PostFile.objects.filter(post_id=post_id)
                for file in postFiles:
                    savedFilePaths.append(file.file.url[1:])
                remove_saved_files_and_empty_dirs(savedFilePaths)
                post.tags.clear()
                post.delete()
        except:
            return JsonResponse({"message":error_messages['delete_fail'] % {"post_title":post.title}}, status=406)
        return JsonResponse({"message":success_message % {"post_title":post.title}}, status=200)
class PostFileDownloadView(View):
    """Stream one of a post's attachments to an authenticated, active member."""
    @method_decorator(login_required, name="dispatch")
    def get(self, request, post_id, file_name):
        """Return the attachment whose stored filename matches `file_name`.

        Responds 403 for inactive accounts, 404 when the post has no files,
        when no attachment matches, or when the matched file is missing on
        disk. (Korean messages are user-facing runtime strings.)
        """
        error_messages = {
            "not_permitted" : "파일 다운로드 권한이 없습니다. 인증된 회원 계정으로 로그인 후 다시 시도해주세요.",
            "not_exist_file_in_post" : "해당 글에 등록된 파일이 없습니다. 확인 후 다시 시도해주세요.",
            "not_exist_file_in_path" : "파일이 해당 경로에 존재하지 않습니다."
        }
        if not request.user.is_active:
            return JsonResponse({"message" : error_messages['not_permitted']}, status=403)
        postFiles = PostFile.objects.filter(post_id=post_id)
        if not postFiles.exists():
            return JsonResponse({"message" : error_messages['not_exist_file_in_post']}, status=404)
        for pf in postFiles:
            if pf.real_file_name == file_name:
                filePath = pf.file.path
                if os.path.exists(filePath):
                    try:
                        with open(filePath, 'rb') as f:
                            response = HttpResponse(f.read(), content_type="application/force-download")
                            # Serve under the original upload name, not the stored one.
                            response['Content-Disposition'] = f'inline; filename={pf.title}'
                            return response
                    except:
                        return JsonResponse({"message" : error_messages['not_exist_file_in_path']}, status=404)
        # No attachment matched the requested name (or its file is gone from
        # disk). Previously the view fell off the end and returned None, which
        # Django rejects with a ValueError; answer 404 explicitly instead.
        return JsonResponse({"message" : error_messages['not_exist_file_in_path']}, status=404)
tictactoe_api_messages.py | dculyba/boom-tic-tac-toe | 40 | 12758286 | #!/usr/bin/python
# Copyright (C) 2010-2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ProtoRPC message class definitions for TicTicToe API."""
from protorpc import messages
class BoardMessage(messages.Message):
    """ProtoRPC message definition to represent a board."""
    # Serialized board state; field number 1 is part of the wire format
    # and must not change.
    state = messages.StringField(1, required=True)
class ScoresListRequest(messages.Message):
    """ProtoRPC message definition to represent a scores query."""
    # Maximum number of scores to return (default 10).
    limit = messages.IntegerField(1, default=10)

    class Order(messages.Enum):
        # Sort either by timestamp or by outcome text.
        WHEN = 1
        TEXT = 2
    order = messages.EnumField(Order, 2, default=Order.WHEN)
class ScoreRequestMessage(messages.Message):
    """ProtoRPC message definition to represent a score to be inserted."""
    # Outcome of the finished game (required on insert).
    outcome = messages.StringField(1, required=True)
class ScoreResponseMessage(messages.Message):
    """ProtoRPC message definition to represent a score that is stored."""
    id = messages.IntegerField(1)
    outcome = messages.StringField(2)
    # Timestamp rendered as a string by the backend.
    played = messages.StringField(3)
class ScoresListResponse(messages.Message):
    """ProtoRPC message definition to represent a list of stored scores."""
    items = messages.MessageField(ScoreResponseMessage, 1, repeated=True)
| 2.6875 | 3 |
1.py | maheshwarkuchana/Skillenza-NLP-Challenge | 0 | 12758287 | <reponame>maheshwarkuchana/Skillenza-NLP-Challenge<gh_stars>0
"""Train one IsolationForest per category on count/TF-IDF features of `text`.

Reads offcampus_training.csv and pickles one model per category id into
the One_Class_Models directory.
"""
import os
import pickle

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest

df = pd.read_csv("offcampus_training.csv")

# Corpus-wide TF-IDF features; only their shape is reported below.
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2))
features = tfidf.fit_transform(df['text']).toarray()
labels = df.category
print(features.shape)

X = df.iloc[:, :]
y_train = df['category']

# The count vectorizer is always fit on the full corpus, so it is loop
# invariant: fit it once instead of refitting per category (the original
# refit it on every iteration with identical results).
count_vect = CountVectorizer()
count_vect.fit(X['text'])

# Portable output path handling: the original hard-coded a Windows "\\"
# separator, which on other platforms produced a literal backslash in the
# file name instead of a directory.
os.makedirs("One_Class_Models", exist_ok=True)

le_name_mapping = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}
for key in le_name_mapping.keys():
    one_class_dataframe = X.loc[X['category'] == key]
    # Last column of the frame -- presumably the `text` column; TODO confirm
    # against the CSV schema.
    X_train = one_class_dataframe.iloc[:, -1]
    X_train_counts = count_vect.transform(X_train)
    tfidf_transformer = TfidfTransformer()
    X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
    # IsolationForest ignores y; it is passed only for API symmetry.
    clf = IsolationForest().fit(X_train_tfidf, y_train)
    filename = os.path.join("One_Class_Models", str(key) + "_Model.pickle")
    # Close the handle deterministically (the original leaked an open file).
    with open(filename, 'wb') as model_file:
        pickle.dump(clf, model_file)
    print("Dumped " + str(key) + " Model")
| 2.828125 | 3 |
enzi/backend/__init__.py | Yummot/enzi | 1 | 12758288 | # -*- coding: utf-8 -*-
import logging
import platform
from enzi.backend.backend import *
from enzi.backend.ies import IES
from enzi.backend.questa import Questa
from enzi.backend.vivado import Vivado
# Public API of the enzi.backend package.
__all__ = ['KnownBackends', 'Questa', 'IES', 'Vivado',
           'Backend', 'BackendCallback', 'value_str_filter']

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)

# Current OS name (e.g. "Linux", "Windows"); used by KnownBackends to keep
# only backends whose supported_system includes this platform.
cur_system = platform.system()
class KnownBackends(object):
    """
    Factory class for backends.
    Currently, the available backends are: ies.
    TODO: more backends may be added, if we get acess to use them.
    """

    def __init__(self):
        # Collect all Backend subclasses keyed by lower-cased class name;
        # "known" backends are the subset supporting the current OS.
        known_backends = Backend.__subclasses__()

        def f(x): return (x.__name__.lower(), x)

        def g(x): return cur_system in x[1].supported_system

        self.allow_backends = dict(map(f, known_backends))
        self.known_backends = dict(filter(g, self.allow_backends.items()))
        # hard code 'vsim' to 'questa'
        self.known_backends['vsim'] = self.known_backends['questa']
        self.allow_backends['vsim'] = self.allow_backends['questa']

    def register_backend(self, backend):
        """
        register new backend

        :param backend: a subclass of Backend (the class itself, not an instance)
        :raises ValueError: if *backend* is not a Backend subclass
        """
        # Bug fix: the original inspected ``backend.__class__`` and stored
        # whatever object it was handed, which only matched *instances* --
        # contradicting the docstring and breaking ``get()``, which calls the
        # stored entry with (config, work_root) to instantiate it.  Register
        # the class itself, consistent with the entries built in __init__.
        if not (isinstance(backend, type) and issubclass(backend, Backend)):
            fmt = 'register_backend: backend(class:{}) must be a subclass of Backend'
            msg = fmt.format(backend)
            logger.error(msg)
            raise ValueError(msg)
        name = backend.__name__.lower()
        self.known_backends[name] = backend
        self.allow_backends[name] = backend

    def get(self, backend_name, config, work_root):
        """Instantiate the backend named *backend_name* with (config, work_root)."""
        if not backend_name:
            raise RuntimeError('No backend name specified.')
        backend_name = backend_name.lower()
        if backend_name in self.known_backends:
            return self.known_backends[backend_name](config, work_root)
        else:
            # the given backend name is not in support list.
            raise NameError('backend name {} not found'.format(backend_name))
| 2.15625 | 2 |
models/ovo.py | diNard/cvk-parser | 0 | 12758289 | <reponame>diNard/cvk-parser
from pony.orm import *
# NOTE(review): `db` is not defined in this module -- only `from pony.orm
# import *` is imported.  Presumably a pony Database() instance is placed in
# scope by whatever loads these model files; confirm.
class Ovo(db.Entity):
    """Pony ORM entity for one scraped 'Ovo' record (CVK parser)."""
    id = PrimaryKey(int, unsigned=True)
    url = Required(str, 256)
    region_id = Required(int, index=True, unsigned=True)
    peoples_count = Optional(int, unsigned=True)
    stations_count = Optional(int, unsigned=True)
    post_address = Optional(LongStr)
    bounds_desc = Required(LongStr)
    center_desc = Required(str, 256)
    place = Optional(str, 256)
    phones = Optional(str, 256)
    faxes = Optional(str, 256)
    # Scrape bookkeeping flags.
    processed = Required(bool, default=False)
    processed_update = Required(bool, default=False)

    def get_stations_url(self):
        """Return this record's URL with page code WP024 swapped for WP029."""
        return self.url.replace("WP024", "WP029")
| 2.234375 | 2 |
2017/18.py | m1el/advent-of-code | 0 | 12758290 | <gh_stars>0
from collections import defaultdict
from string import ascii_lowercase


def _value(token, regs):
    """Resolve *token* as a register name (single lowercase letter) or int literal."""
    return regs[token] if token in ascii_lowercase else int(token)


def run(prog):
    """Execute the Duet program (AoC 2017 day 18, part 1).

    Returns (and prints) the first recovered frequency, or None if the
    program terminates without recovering one.
    """
    regs = defaultdict(int)
    ip = 0
    while 0 <= ip < len(prog):
        inst = prog[ip]
        op = inst[0]
        if op == 'snd':
            # Generalized: the spec allows `snd X` with a literal operand;
            # the original only handled register operands here.
            regs['snd'] = _value(inst[1], regs)
        elif op == 'rcv':
            val = regs['snd']
            if val != 0:
                print(val)
                regs[inst[1]] = val
                return val
        elif op == 'set':
            regs[inst[1]] = _value(inst[2], regs)
        elif op == 'add':
            regs[inst[1]] += _value(inst[2], regs)
        elif op == 'mul':
            regs[inst[1]] *= _value(inst[2], regs)
        elif op == 'mod':
            regs[inst[1]] %= _value(inst[2], regs)
        elif op == 'jgz':
            if _value(inst[1], regs) > 0:
                ip += _value(inst[2], regs)
                continue
        else:
            raise Exception('unknown instruction ' + op)
        ip += 1
    return None


if __name__ == '__main__':
    # Reading the input only when run as a script makes `run` importable
    # and testable (the original read the file at import time).
    with open('18.txt') as fd:
        prog = [line.strip().split(' ') for line in fd.readlines()]
    run(prog)
| 2.90625 | 3 |
Lib/idlelib/idlever.py | deadsnakes/python3.3 | 0 | 12758291 | IDLE_VERSION = "3.3.6"
| 1.054688 | 1 |
FBDParser/patterns/_tables.py | jonix6/fbdparser | 7 | 12758292 | <reponame>jonix6/fbdparser
# -*- coding: utf-8 -*-
from ._global import _f
from ._textstyles import font, TextPatterns
"========================================表格类========================================="
class TablePatterns:
    """Regex templates for FBD table annotations (tables category).

    Each attribute is a verbose-regex fragment (built via ``_f``) matching
    one FBD table annotation: BG/ZB (table / sub-table), XB (continued
    table), BH (table row), BS (table lead), GD (cross-item alignment),
    WX (lineless table).  The ``#`` comments *inside* the raw strings are
    verbose-regex comments and are part of the pattern text.
    """
    # 表格注解(BG) — table annotation (BG)
    # 子表注解(ZB) — sub-table annotation (ZB)
    BG_suffix = ZB_suffix = _f(r'''
    (?:
        (?P<dx>  # 底线线型号
            F|  # 反线
            S|  # 双线
            W|  # 无线
            Z|  # 正线(缺省)
            D|  # 点线
            Q|  # 曲线
            =  # 双曲线
        )
        (?P<dz>  # 底线字号
            {_r[size]})?
    )?
    (?P<ds>  # 底线颜色
        {_r[color]})?
    ''')
    BG_prefix = _f(
        r'''(?:(?P<qd>  # 表格起点
            \({_r[length]}\)|!))?
        (?:(?P<bt>BT)|  # 表头
            SD{SD})?  # 换页时上顶线型号、颜色
        (?:XD{XD})?  # 换页时下底线型号、颜色
        (?P<xb>[;,]N)?  # 使用新的方式绘制表格线
        (?:[;,]HT{HT})?  # 汉字字号、字体
        (?:[;,]WT{WT})?  # 外文字号、字体
        (?:[;,]ST{ST})?  # 数字字号、字体
        (?:(?:;J|,HJ)(?P<hj>  # 行距
            {_r[length]}))?
        ''', SD=BG_suffix.replace('<d', '<s').replace('底线', '顶线'),
        XD=BG_suffix.replace('<d', '<x'),
        HT=font.replace('<z', '<h').replace('字', '汉字字'),
        WT=TextPatterns.ST_infix.replace('<z', '<w')
        .replace('<bt>', '<wb>').replace('字', '外文字'),
        ST=TextPatterns.ST_infix.replace('<z', '<s')
        .replace('<bt>', '<sb>').replace('字', '数字字'))
    # 续表注解(XB) — continued-table annotation (XB)
    XB_infix = _f(
        r'''(?:,HT{HT})?  # 汉字字号、字体
        (?:,WT{WT})?  # 外文字号、字体
        (?:,ST{ST})?  # 数字字号、字体
        (?:J(?P<hj>  # 行距
            {_r[length]}))?
        (?:;(?P<dw>  # 续表内容在表格顶线上位置
            M|(?:  # 居中
            Z|  # 左齐
            Y|  # 右齐
            D)  # 自动根据页码单右双左(缺省)
            -?{_r[length]})
        )?
        (?P<hs>\#)?  # 续表数字采用中文数字
        (?P<nr>.+)  # 续表内容
        ''',
        HT=font.replace('<z', '<h').replace('字', '汉字字'),
        WT=TextPatterns.ST_infix.replace('<z', '<w')
        .replace('<bt>', '<wb>').replace('字', '外文字'),
        ST=TextPatterns.ST_infix.replace('<z', '<s')
        .replace('<bt>', '<sb>').replace('字', '数字字'))
    # 表格跨项位标注解(GW) — cross-item position-mark annotation (GW)
    # 改排注解(GP) — re-layout annotation (GP)
    # 卧排表格注解(ZP) — rotated (landscape) table annotation (ZP)
    GW_infix = GP_infix = ZB_prefix = ZP_prefix = ''
    # 表行注解(BH) — table-row annotation (BH)
    BH_arg = _f(r'''
        ,{BG_suffix}  # 左线线型号、颜色
        (?:K(?P<kd>{_r[length]})?)?  # 栏宽
        (?:。(?P<ls>\d+))?  # 栏数
        (?P<dw>DW)?  # 各表行相应栏数的数字项对位(个位对齐)
        (?:(?P<pf>  # 内容排法
            CM|  # 撑满
            YQ|  # 右齐
            ZQ)  # 左齐
            (?P<sj>{_r[length]})?  # 对齐缩进
        )?
        ''', BG_suffix=BG_suffix.replace('<d', '<z').replace('底线', '左线'))
    BH_infix = _f(
        r'''(?:D{BG_suffix_s})?  # 顶线型号、颜色
        (?:G?(?P<hg>  # 行高
            {_r[length]})?|
            (?:TK(?P<sk>  # 顶线与内容之间的间距
            {_r[length]}))?
            (?:JK(?P<xk>  # 底线与内容之间的间距
            {_r[length]}))?
        )?
        (?P<hq>  # 本行所有内容的整体排法
            JZ|  # 居中(缺省)
            SQ|  # 上齐
            XQ)?  # 下齐
        (?:B(?P<dw>[0-8]\d\d\d))?  # 底纹
        (?P<cs>  # 各栏参数
            (?:{_r[BH_arg]})*)
        (?:{BG_suffix_y})?  # 右线型号、颜色
        ''', BG_suffix_s=BG_suffix.replace('<d', '<s').replace('底线', '顶线'),
        BG_suffix_y=BG_suffix.replace('<d', '<y').replace('底线', '右线'),
        BH_arg=BH_arg)
    # 表首注解(BS) — table-lead annotation (BS)
    BS_infix = _f(r'''
        (?P<qd>[ZY][SX])  # 起点(左/右+上/下)
        (?:X(?P<qx>  # 起点X偏移
            {_r[length]}))?
        (?:Y(?P<qy>  # 起点Y偏移
            {_r[length]}))?
        ''')
    BS_prefix = _f(r'''
        {BS_infix}-  # 起点
        (?P<zd>[ZY][SX])  # 终点
        (?:X(?P<zx>  # 终点X偏移
            {_r[length]}))?
        (?:Y(?P<zy>  # 终点Y偏移
            {_r[length]}))?
        ''', BS_infix=BS_infix)
    # 表格跨项对位注解(GD) — cross-item alignment annotation (GD)
    GD_infix = r'''(?P<wb>\d{,2})  # 位标数
    '''
    # 无线表注解(WX) — lineless-table annotation (WX)
    WX_arg = _f(r'''
        (?P<kd>{_r[length]})  # 栏宽
        (?:KG(?P<jj>{_r[length]}))?  # 与后一栏间的栏间距
        (?:。(?P<ls>[1-9]\d?))?  # 栏数
        (?P<dw>DW)?  # 该栏的数字项对位(个位对齐)
        (?P<pf>CM|YQ|JZ)?  # 内容排法
        ''')
    WX_prefix = _f(r'''
        (?:(?P<qd>  # 表格起点
            \({_r[length]}\)|!))?
        (?P<dw>DW)?  # 各表行相应栏数的数字项对位(个位对齐)
        (?P<kl>KL)?  # 允许跨栏
        (?P<pf>CM|YQ|JZ)?  # 内容排法
        (?P<cs>  # 栏说明
            {_r[WX_arg]}(?:,{_r[WX_arg]})*)
        ''', WX_arg=WX_arg)
| 2.15625 | 2 |
Desafio72.py | VictorCastao/Curso-em-Video-Python | 0 | 12758293 | print('=' * 12 + 'Desafio 72' + '=' * 12)
numeros = (
'Zero', 'Um', 'Dois', 'Três', 'Quatro', 'Cinco', 'Seis', 'Sete', 'Oito', 'Nove', 'Dez', 'Onze', 'Doze', 'Treze',
'Catorze', 'Quinze', 'Dezesseis', 'Dezessete', 'Dezoito', 'Dezenove', 'Vinte')
while True:
escolha = int(input('Digite um número entre 0 e 20: '))
while escolha < 0 or escolha > 20:
escolha = int(input('Número Inválido. Digite outro número: '))
print(f'O número escolhido foi "{numeros[escolha]}"')
resp = input("Deseja continuar? [S/N] ").strip().upper()
while resp[0] not in "SN":
resp = input("Deseja continuar? [S/N] ").strip().upper()
if resp == 'N':
break
| 4.03125 | 4 |
venv/Lib/site-packages/factom/exceptions.py | TRGG3R/Visual_FCT_Explorer | 13 | 12758294 | <reponame>TRGG3R/Visual_FCT_Explorer
def handle_error_response(resp):
    """Raise the FactomAPIError subclass matching a JSON-RPC error response.

    :param resp: an HTTP response object whose ``.json()`` body may contain
        an ``error`` object with ``message``, ``code`` and ``data`` keys.
    :raises FactomAPIError: always -- the subclass is selected by the
        JSON-RPC error code; unrecognized codes raise the generic base class.
    """
    codes = {
        -1: FactomAPIError,
        -32008: BlockNotFound,
        -32009: MissingChainHead,
        -32010: ReceiptCreationError,
        -32011: RepeatedCommit,
        -32600: InvalidRequest,
        -32601: MethodNotFound,
        -32602: InvalidParams,
        -32603: InternalError,
        -32700: ParseError,
    }
    error = resp.json().get('error', {})
    message = error.get('message')
    code = error.get('code', -1)
    data = error.get('data', {})
    # Robustness fix: an unknown error code used to raise a bare KeyError
    # here instead of a FactomAPIError; fall back to the base class.
    exception_cls = codes.get(code, FactomAPIError)
    raise exception_cls(message=message, code=code, data=data, response=resp)
class FactomAPIError(Exception):
    """Base error for Factom API failures.

    Carries the JSON-RPC error ``code``, ``message``, optional ``data``
    payload and the originating HTTP ``response``.
    """
    # Class-level fallbacks (kept for backward compatibility with code that
    # reads them off the class).
    response = None
    data = {}
    code = -1
    message = "An unknown error occurred"

    def __init__(self, message=None, code=None, data=None, response=None):
        self.response = response
        if message:
            self.message = message
        if code:
            self.code = code
        # Bug fix: the original used a mutable default argument (data={})
        # and left self.data pointing at the *shared class attribute* when
        # no data was given, so mutating one instance's data leaked into
        # every other instance.  Give each instance its own dict.
        self.data = dict(data) if data else {}

    def __str__(self):
        if self.code:
            return '{}: {}'.format(self.code, self.message)
        return self.message
class BlockNotFound(FactomAPIError):
    """JSON-RPC error -32008: the requested block does not exist."""
    pass


class MissingChainHead(FactomAPIError):
    """JSON-RPC error -32009: the chain has no head entry."""
    pass


class ReceiptCreationError(FactomAPIError):
    """JSON-RPC error -32010: a receipt could not be created."""
    pass


class RepeatedCommit(FactomAPIError):
    """JSON-RPC error -32011: this commit was already submitted."""
    pass


class InvalidRequest(FactomAPIError):
    """JSON-RPC error -32600: the request object is malformed."""
    pass


class MethodNotFound(FactomAPIError):
    """JSON-RPC error -32601: no such RPC method."""
    pass


class InvalidParams(FactomAPIError):
    """JSON-RPC error -32602: invalid method parameters."""
    pass


class InternalError(FactomAPIError):
    """JSON-RPC error -32603: internal JSON-RPC server error."""
    pass


class ParseError(FactomAPIError):
    """JSON-RPC error -32700: the request body could not be parsed."""
    pass
| 2.5625 | 3 |
setup.py | WYishai/suppyress | 0 | 12758295 | <reponame>WYishai/suppyress<filename>setup.py
from setuptools import setup, find_packages
# Bug fix: the README was opened in binary mode ("rb"), producing bytes,
# and str arguments were then passed to bytes.replace, which raises
# TypeError at import time -- read it as text instead.
with open("README.md", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

# Rewrite the relative LICENSE link so it resolves on PyPI.
long_description = long_description \
    .replace("](LICENSE)", "](https://github.com/WYishai/suppyress/blob/master/LICENSE)")
# Package metadata; install_requires is empty -- the library has no runtime
# dependencies.  (<NAME>/<EMAIL> are anonymization placeholders.)
setup(
    name="suppyress",
    version="0.1",
    packages=find_packages(),
    author="<NAME>",
    author_email="<EMAIL>",
    description="A tiny python library for running code and ignore any errors",
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=[],
    url="https://github.com/WYishai/suppyress",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
)
| 1.4375 | 1 |
app/display_modules/taxa_tree/tests/test_module.py | MetaGenScope/metagenscope-server | 0 | 12758296 | """Test suite for Taxa Tree display module."""
from app.display_modules.display_module_base_test import BaseDisplayModuleTest
from app.display_modules.taxa_tree import TaxaTreeDisplayModule
from app.display_modules.taxa_tree.models import TaxaTreeResult
from app.display_modules.taxa_tree.constants import MODULE_NAME
from app.tool_results.kraken import KrakenResultModule
from app.tool_results.kraken.tests.factory import create_kraken
from app.tool_results.krakenhll import KrakenHLLResultModule
from app.tool_results.krakenhll.tests.factory import create_krakenhll
from app.tool_results.metaphlan2 import Metaphlan2ResultModule
from app.tool_results.metaphlan2.tests.factory import create_metaphlan2
from .factory import generate_random_tree, TaxaTreeFactory
class TestTaxaTreeModule(BaseDisplayModuleTest):
    """Test suite for TaxaTree display module."""

    def test_get_taxa_tree(self):
        """Ensure getting a single TaxaTree behaves correctly."""
        ttree = TaxaTreeFactory()
        # Generic getter round-trip; the payload must expose one tree per
        # classifier tool.
        self.generic_getter_test(ttree, MODULE_NAME,
                                 verify_fields=('metaphlan2', 'kraken', 'krakenhll'))

    def test_add_taxa_tree(self):
        """Ensure TaxaTree model is created correctly."""
        # One randomly generated taxonomy tree per classifier tool.
        kwargs = {
            'metaphlan2': generate_random_tree(),
            'kraken': generate_random_tree(),
            'krakenhll': generate_random_tree(),
        }
        taxa_tree_result = TaxaTreeResult(**kwargs)
        self.generic_adder_test(taxa_tree_result, MODULE_NAME)

    def test_run_taxa_tree_sample(self):  # pylint: disable=invalid-name
        """Ensure TaxaTree run_sample produces correct results."""
        # Fabricated tool results keyed by each tool module's registered name.
        kwargs = {
            KrakenResultModule.name(): create_kraken(),
            KrakenHLLResultModule.name(): create_krakenhll(),
            Metaphlan2ResultModule.name(): create_metaphlan2(),
        }
        self.generic_run_sample_test(kwargs, TaxaTreeDisplayModule)
| 2.46875 | 2 |
events/views.py | eddyyonnie/Neighbours | 0 | 12758297 | <gh_stars>0
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Sum
from .models import Profile,Project,Ratings
from .forms import SignUpForm, ProfileUpdateForm, UserUpdateForm,LoginForm,NewProjectForm,RatingForm
from django.views.decorators.csrf import _EnsureCsrfCookie
from django.contrib import messages
from .serializer import ProfileSerializer,ProjectSerializer
from django.http import JsonResponse
from rest_framework import status
from .permissions import IsAdminOrReadOnly
def home(request):
    """Landing page: all projects plus every neighbourhood profile."""
    context = {
        "projects": Project.get_projects(),
        "user": request.user,
        "profile": Profile.get_profiles(),
    }
    return render(request, 'home.html', context)
def signup(request):
    """Register a new account, authenticate the user and redirect home."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save()
            user.refresh_from_db()  # reload so the auto-created profile is attached
            user.profile.birth_date = form.cleaned_data.get('birth_date')
            user.save()
            raw_password = form.cleaned_data.get('password1')
            # Bug fix: the password argument was an unfilled <PASSWORD>
            # placeholder (invalid syntax); authenticate with the password
            # the user just chose.
            user = authenticate(username=user.username, password=raw_password)
            login(request, user)
            return redirect('home')
    else:
        form = SignUpForm()
    return render(request, 'registration/signup.html', {'form': form})
def login_view(request):
    """Authenticate a user with the credentials posted from the login form."""
    if request.method == 'POST':
        # Bug fix: the form was constructed without request.POST, so it was
        # never bound and is_valid() always returned False (login impossible).
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # Bug fix: the password argument was an unfilled <PASSWORD>
            # placeholder (invalid syntax).
            user = authenticate(username=username, password=raw_password)
            # Guard against bad credentials: login(None) would raise.
            if user is not None:
                login(request, user)
                return redirect('home')
    else:
        form = LoginForm()
    context = {
        'form': form
    }
    return render(request, 'registration/login.html', context)
def profile(request, id):
    '''
    View that allows user to view other users profiles
    '''
    viewed_user = User.objects.get(id=id)
    context = {
        "projects": Project.objects.all().filter(owner_id=viewed_user.id),
        "profile": Profile.objects.all(),
        "current_user": request.user,
        "user": viewed_user,
    }
    return render(request, 'profile.html', context)
@login_required
def own_profile(request):
    '''
    Directs Current User to their own Profile.
    '''
    current_user = request.user
    context = {
        'projects': Project.objects.all().filter(owner_id=current_user.id),
        'profile': Profile.objects.all(),
        "user": current_user,
        "current_user": current_user,
    }
    return render(request, 'profile.html', context)
@login_required
def edit_profile(request):
    """Let the logged-in user edit their account and profile details."""
    user = request.user
    if request.method == 'POST':
        form = ProfileUpdateForm(request.POST, request.FILES, instance=user.profile)
        user_form = UserUpdateForm(request.POST, instance=user)
        if user_form.is_valid() and form.is_valid():
            user_form.save()
            profile = form.save(commit=False)
            profile.user = user
            profile.save()
            messages.info(request, 'You\'ve successfully updated your account!')
            return redirect('home')
    else:
        # Bug fix: the unbound forms had their instances swapped
        # (ProfileUpdateForm was given the User and UserUpdateForm the
        # Profile), so the edit page rendered without any initial data.
        form = ProfileUpdateForm(instance=request.user.profile)
        user_form = UserUpdateForm(instance=request.user)
    context = {
        'user': user,
        'user_form': user_form,
        'form': form
    }
    return render(request, 'edit-profile.html', context)
@login_required
def new_project(request):
    """Create a project owned by the current user's profile."""
    if request.method == 'POST':
        form = NewProjectForm(request.POST, request.FILES)
        if form.is_valid():
            created = form.save(commit=False)
            created.owner = request.user.profile
            created.save()
            return redirect('home')
    else:
        form = NewProjectForm()
    return render(request, 'new_project.html', {"form": form})
@login_required
def project(request, project_id):
    """Show one project with its ratings; accept a new rating via POST."""
    current_project = Project.get_project_id(project_id)
    ratings = Ratings.get_rating_by_projects(project_id)
    if request.method != 'POST':
        form = RatingForm()
    else:
        form = RatingForm(request.POST)
        if form.is_valid():
            new_rating = form.save(commit=False)
            new_rating.project = current_project
            new_rating.user = request.user
            new_rating.save()
            # Redirect back so a refresh doesn't resubmit the rating.
            return redirect('project', project_id=project_id)
    return render(request, 'project.html', {'project': current_project, 'form': form, 'ratings': ratings})
def search(request):
    """Search projects by the non-empty ``search`` query parameter."""
    search_term = request.GET.get('search')
    if search_term:
        matches = Project.search_projects(search_term)
        message = f'{search_term}'
        return render(request, 'search.html', {'message': message, 'projects': matches})
    message = 'Enter term to search'
    return render(request, 'search.html', {'message': message})
class ProfileList(APIView):
    """Read-only API endpoint returning every profile."""
    def get(self, request, format=None):
        serialized = ProfileSerializer(Profile.objects.all(), many=True)
        return Response(serialized.data)
class ProjectList(APIView):
    """Read-only API endpoint returning every project."""
    def get(self, request, format=None):
        serialized = ProjectSerializer(Project.objects.all(), many=True)
        return Response(serialized.data)
def alerts(request):
    """Render the static alerts page."""
    # NOTE(review): the template name includes "events/templates/" -- Django
    # loaders normally expect a path relative to a templates/ directory
    # (e.g. "events/Alerts.html"); confirm this actually resolves.
    return render(request, 'events/templates/Alerts.html', {})
| 2.0625 | 2 |
metrics/kvret/evaluator.py | HKUNLP/UnifiedSKG | 191 | 12758298 | <reponame>HKUNLP/UnifiedSKG<gh_stars>100-1000
# encoding=utf8
from collections import OrderedDict
import json
import nltk
from datasets import load_metric
def load_entities(kvret_entity_file_path):
    """Load the KVRET entity file and build a canonicalization map.

    Every known entity surface form is mapped to an underscore-joined form
    (spaces -> "_"), except distances ("<n> miles") and temperatures
    ("<n>f"), which map to themselves.

    @param kvret_entity_file_path: the path of kvret_entities.json
    @return: OrderedDict mapping entity string -> canonical entity string
    """
    under_scored_entity_dict = OrderedDict()

    with open(kvret_entity_file_path) as f:
        entity = json.load(f)

    for sub_class_name, sub_class_entity_list in entity.items():
        if sub_class_name == 'poi':
            # A POI contributes its address, name and type independently.
            for entity_item in sub_class_entity_list:
                under_scored_entity_dict[str(entity_item['address'])] = (
                    str(entity_item['address']).replace(" ", "_"))
                under_scored_entity_dict[str(entity_item['poi'])] = (str(entity_item['poi']).replace(" ", "_"))
                under_scored_entity_dict[str(entity_item['type'])] = (str(entity_item['type']).replace(" ", "_"))
        elif sub_class_name == "distance":
            for entity_item in sub_class_entity_list:
                under_scored_entity_dict[str(entity_item) + " miles"] = str(entity_item) + " miles"
        elif sub_class_name == "temperature":
            for entity_item in sub_class_entity_list:
                under_scored_entity_dict[str(entity_item) + "f"] = str(entity_item) + "f"
        else:
            for entity_item in sub_class_entity_list:
                under_scored_entity_dict[str(entity_item)] = (str(entity_item).replace(" ", "_"))

    # add missing entities,
    missed_entities = ["yoga", "tennis", "swimming", "football", " lab ", "doctor", "optometrist", "dentist", "1st",
                       "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th",
                       "11th", "12th", "13th", "14th", "15th", "16th", "17th", "18th", "19th", "20th", "Jill",
                       "Jack"]
    for missed_entity in missed_entities:
        under_scored_entity_dict[str(missed_entity)] = (missed_entity)

    # special handle of "HR": drop the bare token and keep only the
    # space-padded form.  Robustness fix: pop() with a default instead of
    # `del`, which raised KeyError for entity files without an "HR" entry.
    under_scored_entity_dict.pop('HR', None)
    under_scored_entity_dict[' HR '] = ' HR '

    return under_scored_entity_dict
def postprocess_text(preds, responses, metric_name):
    """Strip and reshape predictions/references into the layout *metric_name* expects."""
    stripped_preds = [p.strip() for p in preds]
    stripped_refs = [r.strip() for r in responses]

    if metric_name == "rouge":
        # rougeLSum expects newline after each sentence
        stripped_preds = ["\n".join(nltk.sent_tokenize(p)) for p in stripped_preds]
        stripped_refs = ["\n".join(nltk.sent_tokenize(r)) for r in stripped_refs]
    elif metric_name == "sacrebleu":
        # sacrebleu wants one list of references per prediction.
        stripped_refs = [[r] for r in stripped_refs]
    elif metric_name == "bleu":
        # bleu wants tokenized predictions and nested tokenized references.
        stripped_preds = [p.split(" ") for p in stripped_preds]
        stripped_refs = [[r.split(" ")] for r in stripped_refs]

    return stripped_preds, stripped_refs
class EvaluateTool(object):
    """Computes KVRET metrics for model predictions.

    BLEU is computed on every split; the response-entity-hit metric is
    added on the test split only.
    """
    def __init__(self, args):
        # Run configuration; kept for parity with other evaluators
        # (not read within this class).
        self.args = args

    def evaluate(self, preds, golds, section):
        """Return a dict of metric name -> value for *preds* vs *golds*.

        :param preds: list of predicted response strings
        :param golds: list of gold items; each must carry "seq_out",
            "intent" and (on the first item) "entities_file"
        :param section: one of "train", "dev", "test"
        """
        summary = {}
        assert len(golds) > 0
        # All gold items are assumed to share one entity file.
        global_entities = load_entities(golds[0]["entities_file"])
        metric_list = []
        if section in ["train", "dev"]:
            metric_list = ["bleu"]
        elif section == "test":
            metric_list = ["bleu", "metrics/kvret/response_entity_hit.py"]
        for metric_name in metric_list:
            metric = load_metric(metric_name)
            if metric_name == "metrics/kvret/response_entity_hit.py":
                # Entity-hit needs the gold response plus its intent.
                gold_responses = [
                    {
                        "response": item["seq_out"],
                        "intents": [item["intent"]],
                    }
                    for item in golds
                ]
                res = metric.compute(
                    **{
                        "predictions": preds,
                        "references": gold_responses,
                        "global_entities": global_entities,
                    }
                )
                summary.update(res)
            else:
                gold_responses = [item["seq_out"] for item in golds]
                # Reshape texts into the layout this metric expects.
                processed_preds, processed_golds = postprocess_text(
                    preds, gold_responses, metric_name
                )
                res = metric.compute(
                    predictions=processed_preds,
                    references=processed_golds,
                )
                summary[metric_name] = res[metric_name]
        return summary
project/db/models/course_type.py | samsonosiomwan/Devs-Prime-Api | 0 | 12758299 | <filename>project/db/models/course_type.py<gh_stars>0
import uuid
from django.db import models
class CourseType(models.Model):
    """Lookup table of course types, identified by a unique name."""
    # Client-side generated UUID primary key; not editable via forms/admin.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255, blank=False, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save

    def __str__(self):
        return self.name
| 2.390625 | 2 |
month02/day13/home_server.py | Amiao-miao/all-codes | 1 | 12758300 | from socket import *
from multiprocessing import Process
HOST="0.0.0.0"  # listen on all interfaces
PORT=8888  # UDP port the server binds to
ADDR=(HOST,PORT)  # address tuple passed to socket.bind()
def main():
    """Create a UDP socket and bind it to ADDR.

    Note: only sets up the socket; no receive/serve loop is implemented yet.
    """
    # Resource fix: use the socket as a context manager so the descriptor is
    # closed even if bind() raises (the original leaked it, and named the
    # socket "sort").
    with socket(AF_INET, SOCK_DGRAM) as sock:
        sock.bind(ADDR)
# Start the server only when executed as a script (not on import).
if __name__ == '__main__':
    main()
lore/models/base.py | RahulHP/lore | 0 | 12758301 | <gh_stars>0
from __future__ import absolute_import
import json
import logging
import os.path
from os.path import join
import pickle
import re
import lore.ansi
import lore.estimators
from lore.env import require
from lore.util import timer, timed
# Declare (and verify at import time) the third-party packages this module
# relies on, before importing them.
require(
    lore.dependencies.TABULATE +
    lore.dependencies.SKLEARN +
    lore.dependencies.SHAP
)
import shap
from tabulate import tabulate
from sklearn.model_selection import RandomizedSearchCV

logger = logging.getLogger(__name__)

# FileExistsError only exists on Python 3; alias it to OSError on Python 2
# so Base.save()'s `except FileExistsError` works on both.
try:
    FileExistsError
except NameError:
    FileExistsError = OSError
class Base(object):
    """Abstract base class for lore models.

    Couples a data ``pipeline`` with an ``estimator`` and provides
    fit/predict/evaluate/score, randomized hyper parameter search,
    save/load/upload/download of pickled fittings, and SHAP explanations.
    """

    def __init__(self, pipeline=None, estimator=None):
        self.name = self.__module__ + '.' + self.__class__.__name__
        self._estimator = None
        self.estimator = estimator  # assigned through the property below
        self.fitting = None  # fitting number; set by fit()/load()
        self.pipeline = pipeline
        self._shap_explainer = None  # built lazily, never pickled

    def __getstate__(self):
        # Exclude the SHAP explainer from pickles; it is rebuilt on demand.
        state = dict(self.__dict__)
        state['_shap_explainer'] = None
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        # Backfill attributes that older pickles were written without.
        backward_compatible_defaults = {
            '_shap_explainer': None,
        }
        for key, default in backward_compatible_defaults.items():
            if key not in self.__dict__.keys():
                self.__dict__[key] = default

    @property
    def estimator(self):
        return self._estimator

    @estimator.setter
    def estimator(self, value):
        self._estimator = value

        # Keras models require access to the pipeline during build,
        # and fit for extended functionality
        if hasattr(self._estimator, 'model'):
            self._estimator.model = self

    def fit(self, test=True, score=True, **estimator_kwargs):
        """Fit the estimator on the pipeline's encoded training data.

        :param test: also record an evaluation on the test set in stats
        :param score: also record the score on the test set in stats
        :param estimator_kwargs: forwarded verbatim to ``estimator.fit``
        """
        self.fitting = self.__class__.last_fitting() + 1

        self.stats = self.estimator.fit(
            x=self.pipeline.encoded_training_data.x,
            y=self.pipeline.encoded_training_data.y,
            validation_x=self.pipeline.encoded_validation_data.x,
            validation_y=self.pipeline.encoded_validation_data.y,
            **estimator_kwargs
        )
        if test:
            self.stats['test'] = self.evaluate(self.pipeline.test_data)
        if score:
            self.stats['score'] = self.score(self.pipeline.test_data)
        self.save(stats=self.stats)
        logger.info(
            '\n\n' + tabulate([self.stats.keys(), self.stats.values()], tablefmt="grid", headers='firstrow') + '\n\n')

    @timed(logging.INFO)
    def predict(self, dataframe):
        """Encode *dataframe*, predict, and decode back to the output space."""
        predictions = self.estimator.predict(self.pipeline.encode_x(dataframe))
        return self.pipeline.output_encoder.reverse_transform(predictions)

    @timed(logging.INFO)
    def evaluate(self, dataframe):
        """Run the estimator's evaluation on the encoded *dataframe*."""
        return self.estimator.evaluate(
            self.pipeline.encode_x(dataframe),
            self.pipeline.encode_y(dataframe)
        )

    @timed(logging.INFO)
    def score(self, dataframe):
        """Return the estimator's score on the encoded *dataframe*."""
        return self.estimator.score(
            self.pipeline.encode_x(dataframe),
            self.pipeline.encode_y(dataframe)
        )

    def hyper_parameter_search(
        self,
        param_distributions,
        n_iter=10,
        scoring=None,
        fit_params=None,
        n_jobs=1,
        iid=True,
        refit=True,
        cv=None,
        verbose=0,
        pre_dispatch='2*n_jobs',
        random_state=None,
        error_score='raise',
        return_train_score=True
    ):
        """Random search hyper params.

        Runs sklearn's RandomizedSearchCV over *param_distributions* and
        replaces ``self.estimator`` with the best estimator found.

        Fixes vs the original:
        - ``fit_params`` was a shared mutable default (``{}``);
        - ``pre_dispatch`` defaulted to ``'2*njobs'``; joblib evaluates that
          string with only ``n_jobs`` in scope, so the typo raised a
          NameError once dispatching happened (sklearn's documented default
          is ``'2*n_jobs'``);
        - removed a no-op ``if scoring is None: scoring = None``.
        """
        if fit_params is None:
            fit_params = {}
        result = RandomizedSearchCV(
            self.estimator,
            param_distributions,
            n_iter=n_iter,
            scoring=scoring,
            n_jobs=n_jobs,
            iid=iid,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            random_state=random_state,
            error_score=error_score,
            return_train_score=return_train_score
        ).fit(
            self.pipeline.encoded_training_data.x,
            y=self.pipeline.encoded_training_data.y,
            validation_x=self.pipeline.encoded_validation_data.x,
            validation_y=self.pipeline.encoded_validation_data.y,
            **fit_params
        )
        self.estimator = result.best_estimator_
        return result

    @classmethod
    def local_path(cls):
        """Directory under MODELS_DIR where this model's fittings live."""
        return join(lore.env.MODELS_DIR, cls.remote_path())

    @classmethod
    def remote_path(cls):
        """Relative (module/class) path used both locally and remotely."""
        return join(cls.__module__, cls.__name__)

    @classmethod
    def last_fitting(cls):
        """Highest numbered fitting saved so far, or 0 when none exist."""
        if not os.path.exists(cls.local_path()):
            return 0

        fittings = [int(d) for d in os.listdir(cls.local_path()) if re.match(r'^\d+$', d)]
        if not fittings:
            return 0

        return sorted(fittings)[-1]

    def fitting_path(self):
        """Directory for the current fitting (defaults to the latest)."""
        if self.fitting is None:
            self.fitting = self.last_fitting()
        return join(self.local_path(), str(self.fitting))

    def model_path(self):
        return join(self.fitting_path(), 'model.pickle')

    def remote_model_path(self):
        return join(self.remote_path(), 'model.pickle')

    def save(self, stats=None):
        """Pickle the model and dump params.json (plus optional stats.json)."""
        if self.fitting is None:
            raise ValueError("This model has not been fit yet. There is no point in saving.")

        if not os.path.exists(self.fitting_path()):
            try:
                os.makedirs(self.fitting_path())
            except FileExistsError as ex:
                pass  # race to create

        with timer('pickle model'):
            with open(self.model_path(), 'wb') as f:
                pickle.dump(self, f)

        with open(join(self.fitting_path(), 'params.json'), 'w') as f:
            params = {}
            for child in [self.estimator, self.pipeline]:
                param = child.__module__ + '.' + child.__class__.__name__
                params[param] = {}
                if hasattr(child, '__getstate__'):
                    state = child.__getstate__()
                else:
                    state = child.__dict__
                for key, value in state.items():
                    if not key.startswith('_'):
                        # repr() keeps arbitrary values JSON-serializable
                        params[param][key] = value.__repr__()
            json.dump(params, f, indent=2, sort_keys=True)

        if stats:
            with open(join(self.fitting_path(), 'stats.json'), 'w') as f:
                json.dump(stats, f, indent=2, sort_keys=True)

    @classmethod
    def load(cls, fitting=None):
        """Unpickle a saved fitting (the latest when *fitting* is None)."""
        model = cls()
        if fitting is None:
            model.fitting = model.last_fitting()
        else:
            model.fitting = int(fitting)

        with timer('unpickle model'):
            with open(model.model_path(), 'rb') as f:
                loaded = pickle.load(f)
                loaded.fitting = model.fitting
                return loaded

    def upload(self):
        """Save as fitting 0 and push the pickle to remote storage."""
        self.fitting = 0
        self.save()
        lore.io.upload(self.model_path(), self.remote_model_path())

    @classmethod
    def download(cls, fitting=0):
        """Fetch a remote pickle into the local cache and load it."""
        model = cls(None, None)
        model.fitting = int(fitting)
        lore.io.download(model.remote_model_path(), model.model_path(), cache=True)
        return cls.load(fitting)

    def shap_values(self, i, nsamples=1000):
        """SHAP values for row *i* of the encoded test set, plus its decoded row."""
        instance = self.pipeline.encoded_test_data.x.iloc[i, :]
        display = self.pipeline.decode(instance.to_frame().transpose()).iloc[0, :]
        return self.shap_explainer.shap_values(instance, nsamples=nsamples), display

    def shap_force_plot(self, i, nsamples=1000):
        return shap.force_plot(*self.shap_values(i, nsamples))

    @property
    def shap_explainer(self):
        """Lazily built KernelExplainer over a 100-row training sample."""
        if self._shap_explainer is None:
            with timer('fitting shap'):
                shap_data = self.pipeline.encoded_training_data.x.sample(100)

                def f(X):
                    return self.estimator.predict([X[:, i] for i in range(X.shape[1])]).flatten()

                self._shap_explainer = shap.KernelExplainer(f, shap_data)

        return self._shap_explainer
| 1.953125 | 2 |
setup.py | compunova/kozinaki | 4 | 12758302 | <reponame>compunova/kozinaki
from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md for use as the long description."""
    # Specify the encoding explicitly: the text-mode default is platform
    # dependent (e.g. cp1252 on Windows) and breaks on non-ASCII characters.
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata and pinned dependencies.
# NOTE(review): the pins date from the azure 2.0.0rc6 era (requests 2.11,
# cryptography 1.4); confirm compatibility before upgrading any of them.
setup(
    name='kozinaki',
    description='OpenStack multi-cloud driver for AWS, Azure',
    url='https://github.com/compunova/kozinaki.git',
    author='Compunova',
    author_email='<EMAIL>',
    version='0.1.8',
    long_description=readme(),
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'haikunator',
        'requests==2.11',
        'oauth2client==3.0.0',
        'azure==2.0.0rc6',
        'boto3',
        'google-cloud',
        'cryptography==1.4',
        'Fabric3',
        'Jinja2',
        'PyYAML',
        'terminaltables',
        'apache-libcloud',
        'click',
        'click-spinner',
        'click-didyoumean'
    ],
    entry_points={
        'console_scripts': [
            'kozinaki-manage=kozinaki.manage.__main__:main',
        ],
    }
)
| 1.375 | 1 |
Leetcode-cn/292.nim-游戏.py | joey66666/Codeyard | 0 | 12758303 | #
# @lc app=leetcode.cn id=292 lang=python3
#
# [292] Nim 游戏
#
# https://leetcode-cn.com/problems/nim-game/description/
#
# algorithms
# Easy (69.62%)
# Likes: 501
# Dislikes: 0
# Total Accepted: 102.9K
# Total Submissions: 146K
# Testcase Example: '4'
#
# 你和你的朋友,两个人一起玩 Nim 游戏:
#
#
# 桌子上有一堆石头。
# 你们轮流进行自己的回合,你作为先手。
# 每一回合,轮到的人拿掉 1 - 3 块石头。
# 拿掉最后一块石头的人就是获胜者。
#
#
# 假设你们每一步都是最优解。请编写一个函数,来判断你是否可以在给定石头数量为 n 的情况下赢得游戏。如果可以赢,返回 true;否则,返回 false
# 。
#
#
#
# 示例 1:
#
#
# 输入:n = 4
# 输出:false
# 解释:如果堆中有 4 块石头,那么你永远不会赢得比赛;
# 因为无论你拿走 1 块、2 块 还是 3 块石头,最后一块石头总是会被你的朋友拿走。
#
#
# 示例 2:
#
#
# 输入:n = 1
# 输出:true
#
#
# 示例 3:
#
#
# 输入:n = 2
# 输出:true
#
#
#
#
# 提示:
#
#
# 1
#
#
#
# @lc code=start
# 1. Solution1, 数学推理, Time: O(1), Space: O(1), Runtime: 44%
# - 剩下石头是`4`的倍数则会输
class Solution:
    def canWinNim(self, n: int) -> bool:
        """First player wins unless the pile size is a multiple of 4."""
        return bool(n % 4)
# @lc code=end
| 3.3125 | 3 |
examples/general/plot_14_glm_components.py | AiAiHealthcare/mne-nirs | 46 | 12758304 | """
.. _tut-fnirs-glm-components:
GLM and Design Matrix Parameters
================================
This tutorial describes the various design choices available when analysing
fNIRS data with a GLM approach.
.. sidebar:: Nilearn
If you use MNE-NIRS to conduct a GLM analysis please cite Nilearn.
This package relies on Nilearn for the underlying computation.
Without Nilearn this would not be possible.
For how to accurately cite Nilearn see:
http://nilearn.github.io/authors.html#citing
There are subtle differences between the GLM analysis procedures
available in the different fNIRS software packages (Homer, NIRS-SPM, etc).
This document aims to clarify the features available for GLM analysis
in the MNE-NIRS software, and demonstrate how you can modify the default
analysis parameters to best suit your experiment.
It also endeavours to motivate some of the design choices that were made
when designing this software.
Please raise a GitHub issue if there is an analysis design you would
like to use but can not determine how to do with MNE-NIRS.
The MNE-NIRS GLM analysis framework is entirely based on the Nilearn package.
Their excellent software forms the basis of the analysis described in this tutorial.
As such, you may also wish to read
`their documentation <http://nilearn.github.io>`__
to familiarise yourself with different concepts used in MNE-NIRS.
Specifically this tutorial is heavily based on the following Nilearn examples,
but placed within an fNIRS context.
* `Nilearn: Understanding parameters of the first-level model <http://nilearn.github.io/auto_examples/04_glm_first_level/plot_first_level_details.html>`__.
* `Nilearn: Example of hemodynamic response functions <https://nilearn.github.io/auto_examples/04_glm_first_level/plot_hrf.html>`__.
Accordingly, in this tutorial we will access nilearn functions directly to illustrate
various choices available in your analysis.
However, this is just to illustrate various points. In reality (see all other tutorials),
MNE-NIRS will wrap all required Nilearn functions so you don't need to access them directly.
.. contents:: Page contents
:local:
:depth: 2
"""
# sphinx_gallery_thumbnail_number = 1
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
# Import common libraries
import os
import numpy as np
import mne
# Import MNE-NIRS processing
from mne_nirs.experimental_design import make_first_level_design_matrix, \
longest_inter_annotation_interval, drift_high_pass
# Import Nilearn
from nilearn.glm import first_level
from nilearn.plotting import plot_design_matrix
# Import Plotting Library
import matplotlib.pyplot as plt
import matplotlib as mpl
# %%
# Haemodynamic Response Function
# ---------------------------------------------------------------------
#
# Various Haemodynamic Response Functions (HRFs) are provided for use
# when analysing your data. A summary of these functions in the context
# of fMRI is provided in the Nilearn tutorial
# `Nilearn: Example of hemodynamic response functions. <https://nilearn.github.io/auto_examples/04_glm_first_level/plot_hrf.html>`__.
# This example heavily borrows from that example but expands the description
# within an fNIRS context.
#
# To illustrate underlying concepts we will use Nilearn functions directly,
# but for analysing actual data you should use the MNE-NIRS
# :func:`mne_nirs.experimental_design.make_first_level_design_matrix`
# wrapper.
# %%
# HRF Model Selection
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: FIR Models
#
# MNE-NIRS also supports FIR GLM models.
# See :ref:`MNE-NIRS FIR GLM tutorial <tut-fnirs-fir>`.
#
# Two standard HRF models are provided. The SPM and Glover models.
# These differ in their response dynamics.
# Both are plotted on top of each other below for comparison.
# Note that they differ in their peak timing and undershoot.
time_length = 30
glover_timecourse = first_level.glover_hrf(1, oversampling=50, time_length=time_length)
spm_timecourse = first_level.spm_hrf(1, oversampling=50, time_length=time_length)
sample_times = np.linspace(0, time_length, num=len(glover_timecourse))
plt.plot(sample_times, glover_timecourse, label="Glover")
plt.plot(sample_times, spm_timecourse, label="SPM")
plt.xlabel("Time (s)")
plt.ylabel("Amplitude (AU)")
plt.legend()
# %%
# Regressor Computation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# These functions are not used directly in the GLM analysis.
# Instead they are used as the basis to compute a regressor which is
# utilised in the GLM fit.
# This is done by convolving the HRF model with a boxcar function that
# distills information
# about the experimental design. Specifically the stimulus onset times
# are used to indicate when a response begins, and a duration is used
# to specify the time over which the model should be convolved.
#
# Modifying the duration changes the regressor timecourse. Below we demonstrate
# how this varies for several duration values with the Glover HRF.
# Convenience functions so we don't need to repeat code below
def generate_stim(onset, amplitude, duration, hrf_model, maxtime=30):
    """Build a boxcar stimulus and its HRF-convolved regressor.

    Returns a tuple ``(frame_times, stim, signal)``: the sample times,
    the raw boxcar timecourse, and the regressor nilearn computes by
    convolving the event with the requested HRF model.
    """
    # Generate signal with specified duration and onset
    frame_times = np.linspace(0, maxtime, 601)
    # One column per event holding (onset, duration, amplitude) -- the
    # layout nilearn's compute_regressor expects for exp_condition.
    exp_condition = np.array((onset, duration, amplitude)).reshape(3, 1)
    # Boxcar: amplitude between onset and onset + duration, zero elsewhere.
    stim = np.zeros_like(frame_times)
    stim[(frame_times > onset) * (frame_times <= onset + duration)] = amplitude
    signal, name = first_level.compute_regressor(
        exp_condition, hrf_model, frame_times, con_id="main", oversampling=16
    )
    return frame_times, stim, signal
def plot_regressor(onset, amplitude, duration, hrf_model):
    """Plot a stimulus boxcar and its convolved regressor on the current axes.

    Thin wrapper over generate_stim() used by the tutorial cells below;
    draws onto matplotlib's implicit current figure.
    """
    frame_times, stim, signal = generate_stim(
        onset, amplitude, duration, hrf_model)
    # Shaded boxcar for the stimulus, line for the resulting regressor.
    plt.fill(frame_times, stim, "k", alpha=0.5, label="stimulus")
    plt.plot(frame_times, signal.T[0], label="Regressor")
    plt.xlabel("Time (s)")
    plt.ylabel("Amplitude (AU)")
    plt.legend(loc=1)
    plt.title(hrf_model)
    return None
# Generate an event of 1 second duration that occurs at time zero.
onset, amplitude, duration = 0.0, 1.0, 1.0
hrf_model = "glover"
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# If the duration is increased we see the resulting regressor
# is modified, and the transformation is not a simple scaling.
#
# For a 3 second duration:
duration = 3
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# Or for a 5 second duration:
duration = 5
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# Or for a 15 second duration:
duration = 15
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# We can plot multiple durations together to see how the
# resulting regressor varies as a function of this parameter.
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=0, vmax=40)
for n in [1, 3, 5, 10, 15, 20, 25, 30, 35]:
frame_times, stim, signal = generate_stim(
onset, amplitude, n, hrf_model, maxtime=50)
plt.plot(frame_times, signal.T[0], label="Regressor", c=cmap(norm(n)))
plt.xlabel("Time (s)")
plt.ylabel("Amplitude (AU)")
plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap))
# %%
# Inclusion in Design matrix
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: Derivative and dispersion terms
#
# You can also include derivative and dispersion terms to model
# differences between your data and the model. This is done by simply
# specifying your selected model plus the additional terms.
# For example, ``spm + derivative`` or
# ``glover + derivative + dispersion``.
#
# As mentioned above, we don't directly compute these regressors for
# each condition. Instead the function ``make_first_level_design_matrix``
# conveniently does this for us.
#
# As an example we will import a measurement and generate a
# design matrix for it. We will specify that we wish to use a Glover
# HRF convolved with a 3 second duration.
# See the :ref:`MNE-NIRS fNIRS GLM tutorial <tut-fnirs-hrf>` for more details.
#
# First we import the example data, crop to just the first few minutes,
# and give names to the annotations.
fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data().crop(tmax=300)
# raw_intensity.resample(0.7)
raw_intensity.annotations.rename({'1.0': 'Control',
'2.0': 'Tapping/Left',
'3.0': 'Tapping/Right'})
raw_intensity.annotations.delete(raw_intensity.annotations.description == '15.0')
raw_intensity.annotations.set_durations(5)
# %%
#
# Next we generate the design matrix and plot it.
# This representation of the regressor is transposed,
# time goes down the vertical
# axis and is specified in scan number (fMRI hangover) or sample.
# There is no colorbar for this plot, as specified in Nilearn.
#
# We can see that when each event occurs the model value increases before returning to baseline.
# This is the same information as was shown in the time courses above, except displayed differently
# with color representing amplitude.
design_matrix = make_first_level_design_matrix(raw_intensity,
# Ignore drift model for now, see section below
drift_model='polynomial',
drift_order=0,
# Here we specify the HRF and duration
hrf_model='glover',
stim_dur=3.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
#
# As before we can explore the effect of modifying the duration,
# the resulting regressor for each annotation is elongated.
design_matrix = make_first_level_design_matrix(raw_intensity,
# Ignore drift model for now, see section below
drift_model='polynomial',
drift_order=0,
# Here we specify the HRF and duration
hrf_model='glover',
stim_dur=13.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
#
# Depending on your experimental design the resulting responses
# may overlap (for example an event related design).
# This is not an issue, the design matrix can handle overlapping responses.
design_matrix = make_first_level_design_matrix(raw_intensity,
# Ignore drift model for now, see section below
drift_model='polynomial',
drift_order=0,
# Here we specify the HRF and duration
hrf_model='glover',
stim_dur=30.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
# Drift Regressors
# ---------------------------------------------------------------------
#
# Aspects of the measured signal may change over time in a manner
# unrelated to the neural response we wish to measure.
# For example, the measurement room may warm up and result in a steady
# increase in the signal over the measurement duration.
# These signal changes that are unrelated to our feature of interest are
# termed drifts, and can be included in the design matrix and the GLM
# fitting as drift regressors.
#
# In the examples above a single drift regressor was used to model a constant
# offset in the data. This is also termed a zero order polynomial regressor.
# Two types of regressors are provided for in MNE-NIRS thanks to Nilearn.
# Polynomial and cosine drift regressors.
#
# .. note::
#
# Remember that the GLM can fit a negative coefficient,
# so a decreasing drift can be modeled by the increasing drift
# regressor with a negative coefficient.
# %%
# Polynomial Drift Regressors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the example above a polynomial drift regressor is included.
# In this case we can specify the order of the polynomials to be included.
# A zero order polynomial will fit a constant, a first order will fit an
# increasing function, and so on.
# As an example we demonstrate how to include up to a fifth order polynomial.
# You can observe that with increasing polynomial order,
# higher frequency components will be regressed from the signal.
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_model='polynomial',
drift_order=5)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
# Cosine Drift Regressors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# An alternative way to specify drift regressors is via the cosine drift model.
# This may be more intuitive as you can specify regressors up to a certain cut off
# frequency. Effectively regressing out frequency components below a limit,
# which may be interpreted as a high pass filter.
# In the example below we demonstrate how to regress our signals up to 0.01 Hz.
# We observe that the function has included 6 drift regressors in the design matrix.
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_model='cosine',
high_pass=0.01)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
#
# As described above, including additional regressor components will remove
# higher frequency components. So we can increase the high pass cut off and
# this should add more regressors.
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_model='cosine',
high_pass=0.03)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
# Selecting Drift Regressors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The aim of the drift regressors is to remove signal components unrelated
# to the expected neural response. As the expected response can be computed
# based on annotation timing and expected brain responses
# (see :ref:`frequency commentary <tut-fnirs-freq>`)
# the high pass cut off can be set on first principles.
#
# The Nilearn documentation states that
# "The cutoff period (1/high_pass) should be set as the longest period between two trials of the same condition multiplied by 2.
# For instance, if the longest period is 32s, the high_pass frequency shall be 1/64 Hz ~ 0.016 Hz."
# `(reference) <http://nilearn.github.io/auto_examples/04_glm_first_level/plot_first_level_details.html#changing-the-drift-model>`__.
#
# To assist in selecting a high pass value a few convenience functions are included in MNE-NIRS.
# First we can query what the longest ISI is per annotation, but first we must be sure
# to remove annotations we aren't interested in (in this experiment the trigger
# 15 is not of interest).
raw_original = mne.io.read_raw_nirx(fnirs_raw_dir)
raw_original.annotations.delete(raw_original.annotations.description == '15.0')
isis, names = longest_inter_annotation_interval(raw_original)
print(isis)
# %%
#
# We see that the longest period between two trials is 435 seconds. Which multiplied
# by two is 870 seconds. So a high pass value of 1/870 or 0.001 Hz is appropriate.
# We can also use the function
# :func:`mne_nirs.experimental_design.make_first_level_design_matrix`
# to suggest the high pass value. Note however, that you should not blindly follow
# this function's suggestion, as each experiment is different. Instead use this as
# a sanity check on your own calculations.
print(drift_high_pass(raw_original))
# %%
#
# For example, if all conditions were evoking the same response it may make more
# sense to include them as a single condition when computing the ISI.
# This would be achieved by renaming the triggers.
raw_original.annotations.rename({'2.0': 'Tapping', '3.0': 'Tapping'})
raw_original.annotations.delete(raw_original.annotations.description == '1.0')
isis, names = longest_inter_annotation_interval(raw_original)
print(isis)
print(drift_high_pass(raw_original))
| 2.265625 | 2 |
src/tasks/fizz_buzz.py | JNMaree/solvdoku | 0 | 12758305 | <gh_stars>0
# Encapsulate the pairs of int multiples to related string monikers
class MultipleMoniker:
    """Pair an integer multiple with the moniker printed for it."""

    def __init__(self, multiple, moniker) -> None:
        self.mul = multiple  # divisor to test numbers against
        self.mon = moniker   # word emitted for numbers divisible by mul


class FizzBuzz:
    """Generate a FizzBuzz-style sequence over a configurable range.

    Each integer from ``start`` to the maximum (inclusive) is replaced
    by the concatenated monikers of every registered multiple that
    divides it; numbers that match no multiple keep their string form.
    """

    def __init__(self, max_int, start=1) -> None:
        self.start = start    # first integer in the sequence
        self.maxi = max_int   # last integer in the sequence (inclusive)
        # Per-instance state: the original stored mmPair/array as mutable
        # class attributes, so add_multiple_moniker() on one instance
        # silently leaked into every other instance.
        self.mmPair = [MultipleMoniker(3, "Fizz"), MultipleMoniker(5, "Buzz")]
        self.array = []       # generated sequence of strings
        self.init_with_max()

    def init_with_max(self, max_i=0):
        """(Re)generate the sequence up to and including self.maxi.

        A non-zero ``max_i`` replaces the stored maximum first.
        """
        if max_i != 0:
            self.maxi = max_i
        generated = []
        for number in range(self.start, self.maxi + 1):
            # Concatenate the monikers of all matching multiples, in
            # registration order; fall back to the number itself.
            label = "".join(pair.mon for pair in self.mmPair
                            if number % pair.mul == 0)
            generated.append(label if label else format(number))
        self.array = generated

    def __str__(self):
        """Render the sequence as a single comma-separated line."""
        ret_str = f"FizzBuzz({self.maxi}):"
        for item in self.array:
            ret_str += item + ", "
        return ret_str

    def add_multiple_moniker(self, multiple, moniker):
        """Register an extra multiple/moniker pair for future generations."""
        self.mmPair.append(MultipleMoniker(multiple, moniker))
def main():
    """Exercise the FizzBuzz class with a few sample runs."""
    print("TEST_1:")
    demo_a = FizzBuzz(42)
    print(demo_a)
    print("TEST_2:")
    demo_b = FizzBuzz(15)
    print(demo_b)
    # Register "Fuzz" for multiples of 7, then regenerate a longer run.
    demo_a.add_multiple_moniker(7, "Fuzz")
    demo_a.init_with_max(105)
    print(demo_a)
if __name__ == "__main__":
main() | 3.28125 | 3 |
gunicorn.py | iamjdcollins/districtwebsite | 0 | 12758306 | import glob
import multiprocessing
import os
import json
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Load Secrets
def load_secrets(file=None):
    """Load the JSON secrets file and return it as a dict.

    Args:
        file: path to the secrets file; defaults to ``.secrets.json``
            next to this module.

    Raises:
        ImproperlyConfigured: when the secrets file does not exist.
    """
    # Resolve the default lazily (a None sentinel) instead of evaluating
    # os.path.join at definition time; explicit encoding so the read does
    # not depend on the locale default.
    if file is None:
        file = os.path.join(BASE_DIR, '.secrets.json')
    try:
        with open(file, encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        raise ImproperlyConfigured(
            'Secrets file not found. Please create the secrets file or correct'
            ' the configuration.'
        )
secrets = load_secrets()
# Get a secret
def get_secret(key, secrets=None):
    """Look up *key* in the secrets mapping.

    The strings 'True'/'False' are converted to real booleans; any other
    value is returned unchanged.  When *secrets* is omitted the secrets
    file is (re)loaded via load_secrets().

    Raises:
        ImproperlyConfigured: when *key* is not present in the secrets.
    """
    # Lazy default: the original evaluated `secrets or load_secrets()` at
    # function-definition time, freezing whatever the module had loaded.
    if secrets is None:
        secrets = load_secrets()
    try:
        val = secrets[key]
        if val == 'True':
            val = True
        elif val == 'False':
            val = False
        return val
    except KeyError:
        error_msg = (
            "ImproperlyConfigured: Set {0} environment variable"
        ).format(key)
        raise ImproperlyConfigured(error_msg)
def watch_extra_files():
    """Collect template and Python source paths for gunicorn's reloader.

    Returns the set of files matching the glob patterns below, relative
    to the current working directory.
    """
    patterns = [
        {'path': '**/*.html', 'recursive': True, },
        {'path': '**/*.py', 'recursive': True, },
    ]
    watched = set()
    for spec in patterns:
        matches = glob.glob(spec['path'], recursive=spec['recursive'])
        watched |= set(matches)
    return watched
# Gunicorn server settings (module is loaded by gunicorn via --config).
proc_name = 'districtwebsites'
pidfile = '/var/run/gunicorn/www_slcschools_org.pid'
worker_tmp_dir = '/srv/gunicorn/www_slcschools_org'
bind = 'unix:/var/run/gunicorn/www_slcschools_org.sock'
# NOTE(review): 3*cores+1 is heavier than the commonly cited 2*cores+1
# heuristic -- presumably deliberate; confirm against server capacity.
workers = multiprocessing.cpu_count() * 3 + 1
worker_class = 'sync'
timeout = 3600  # one hour; very generous for sync workers
raw_env = [
    'DJANGO_SETTINGS_MODULE={0}'.format(get_secret('DJANGO_SETTINGS_MODULE')),
]
# When GUNICORN_RELOAD is enabled, also watch templates and sources so
# edits restart the workers.
reload = get_secret('GUNICORN_RELOAD')
if reload:
    reload_extra_files = watch_extra_files()
openstates/openstates-master/openstates/mo/__init__.py | Jgorsick/Advocacy_Angular | 0 | 12758307 | import datetime
from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from billy.scrape.utils import url_xpath
from .bills import MOBillScraper
from .legislators import MOLegislatorScraper
from .committees import MOCommitteeScraper
from .votes import MOVoteScraper
# Billy scraper metadata for Missouri.  Session names scraped from the
# state site must match a `_scraped_name` below or appear in
# `_ignored_scraped_sessions`.
metadata = dict(
    name='Missouri',
    abbreviation='mo',
    legislature_name='Missouri General Assembly',
    legislature_url='http://www.moga.mo.gov/',
    capitol_timezone='America/Chicago',
    chambers={
        'upper': {
            'name': 'Senate',
            'title': 'Senator'
        },
        'lower': {
            'name': 'House',
            'title': 'Representative'
        },
    },
    terms=[
        {
            'name': '2011-2012',
            'sessions': ['2012'],
            'start_year': 2011,
            'end_year': 2012,
        },
        {
            'name': '2013-2014',
            'sessions': ['2013', '2014'],
            'start_year': 2013,
            'end_year': 2014,
        },
        {
            'name': '2015-2016',
            'sessions': ['2015', '2016'],
            'start_year': 2015,
            'end_year': 2016,
        },
    ],
    # General Assembly sessions convene the Wed. following the first Mon.
    # of January and adjourn May 30.
    # http://www.house.mo.gov/content.aspx?info=/info/howbill.htm
    session_details={
        '2012': {
            'type': 'primary',
            'start_date': datetime.date(2012, 1, 4),
            'end_date': datetime.date(2012, 5, 30),
            'display_name': '2012 Regular Session',
            '_scraped_name': '2012 - 96th General Assembly - 2nd Regular Session',
        },
        '2013': {
            'type': 'primary',
            'start_date': datetime.date(2013, 1, 9),
            'end_date': datetime.date(2013, 5, 30),
            'display_name': '2013 Regular Session',
            '_scraped_name': '2013 - 97th General Assembly - 1st Regular Session',
        },
        '2014': {
            'type': 'primary',
            'start_date': datetime.date(2014, 1, 8),
            'end_date': datetime.date(2014, 5, 30),
            'display_name': '2014 Regular Session',
            # Fixed key: was misspelled '_scaped_name', so the 2014 session
            # name never matched (it was instead caught by the ignored list).
            '_scraped_name': '2014 - 97th General Assembly - 2nd Regular Session',
        },
        '2015': {
            'type': 'primary',
            'start_date': datetime.date(2015, 1, 7),
            'end_date': datetime.date(2015, 5, 30),
            'display_name': '2015 Regular Session',
            '_scraped_name': '2015 - 98th General Assembly - 1st Regular Session',
        },
        '2016': {
            'type': 'primary',
            'start_date': datetime.date(2016, 1, 6),
            'end_date': datetime.date(2016, 5, 30),
            'display_name': '2016 Regular Session',
        },
    },
    feature_flags=['subjects', 'influenceexplorer'],
    _ignored_scraped_sessions=[
        '2014 - 97th General Assembly - 2nd Regular Session',
        '2013 - 97th General Assembly - 1st Regular Session',
        '2012 - 96th General Assembly - 2nd Regular Session',
        '2011 - 96th General Assembly - 1st Regular Session',
        '2010 - 95th General Assembly - 2nd Regular Session',
        '2009 - 95th General Assembly - 1st Regular Session',
        '2008 - 94th General Assembly - 2nd Regular Session',
        '2007 - 94th General Assembly - 1st Regular Session',
        '2006 - 93rd General Assembly - 2nd Regular Session',
        '2005 - 93rd General Assembly - 1st Regular Session',
        '2004 - 92nd General Assembly - 2nd Regular Session',
        '2003 - 92nd General Assembly - 1st Regular Session',
        '2002 - 91st General Assembly - 2nd Regular Session',
        '2001 - 91st General Assembly - 1st Regular Session',
        '2000 - 90th General Assembly - 2nd Regular Session',
        '1999 - 90th General Assembly - 1st Regular Session',
        '1998 - 89th General Assembly - 2nd Regular Session',
        '1997 - 89th General Assembly - 1st Regular Session',
        '1996 - 88th General Assembly - 2nd Regular Session',
        '1995 - 88th General Assembly - 1st Regular Session'
    ]
)
def session_list():
    # Scrape the list of legislative sessions from the Missouri Senate's
    # "past sessions" page (the link text inside the #list div).
    sessions = url_xpath('http://www.senate.mo.gov/pastsessions.htm',
                         '//div[@id="list"]/li/a/text()')
    return sessions
def extract_text(doc, data):
    """Extract plain bill text from PDF *data*, dropping line numbers.

    Non-ASCII characters are discarded; returns bytes.
    """
    plain = pdfdata_to_text(data)
    numbered_stripped = text_after_line_numbers(plain)
    return numbered_stripped.encode('ascii', 'ignore')
| 2.28125 | 2 |
examples/backends/plot_unmix_optim_torch.py | kguerda-idris/POT | 830 | 12758308 | # -*- coding: utf-8 -*-
r"""
=================================
Wasserstein unmixing with PyTorch
=================================
In this example we estimate mixing parameters from distributions that minimize
the Wasserstein distance. In other words we suppose that a target
distribution :math:`\mu^t` can be expressed as a weighted sum of source
distributions :math:`\mu^s_k` with the following model:
.. math::
\mu^t = \sum_{k=1}^K w_k\mu^s_k
where :math:`\mathbf{w}` is a vector of size :math:`K` and belongs in the
distribution simplex :math:`\Delta_K`.
In order to estimate this weight vector we propose to optimize the Wasserstein
distance between the model and the observed :math:`\mu^t` with respect to
the vector. This leads to the following optimization problem:
.. math::
\min_{\mathbf{w}\in\Delta_K} \quad W \left(\mu^t,\sum_{k=1}^K w_k\mu^s_k\right)
This minimization is done in this example with a simple projected gradient
descent in PyTorch. We use the automatic backend of POT that allows us to
compute the Wasserstein distance with :any:`ot.emd2` with
differentiable losses.
"""
# Author: <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 2
import numpy as np
import matplotlib.pylab as pl
import ot
import torch
##############################################################################
# Generate data
# -------------
#%% Data
nt = 100
nt1 = 10 #
ns1 = 50
ns = 2 * ns1
rng = np.random.RandomState(2)
xt = rng.randn(nt, 2) * 0.2
xt[:nt1, 0] += 1
xt[nt1:, 1] += 1
xs1 = rng.randn(ns1, 2) * 0.2
xs1[:, 0] += 1
xs2 = rng.randn(ns1, 2) * 0.2
xs2[:, 1] += 1
xs = np.concatenate((xs1, xs2))
# Sample reweighting matrix H
H = np.zeros((ns, 2))
H[:ns1, 0] = 1 / ns1
H[ns1:, 1] = 1 / ns1
# each columns sums to 1 and has weights only for samples form the
# corresponding source distribution
M = ot.dist(xs, xt)
##############################################################################
# Plot data
# ---------
#%% plot the distributions
pl.figure(1)
pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5)
pl.scatter(xs1[:, 0], xs1[:, 1], label='Source $\mu^s_1$', alpha=0.5)
pl.scatter(xs2[:, 0], xs2[:, 1], label='Source $\mu^s_2$', alpha=0.5)
pl.title('Sources and Target distributions')
pl.legend()
##############################################################################
# Optimization of the model wrt the Wasserstein distance
# ------------------------------------------------------
#%% Weights optimization with gradient descent
# convert numpy arrays to torch tensors
H2 = torch.tensor(H)
M2 = torch.tensor(M)
# weights for the source distributions
w = torch.tensor(ot.unif(2), requires_grad=True)
# uniform weights for target
b = torch.tensor(ot.unif(nt))
lr = 2e-3 # learning rate
niter = 500 # number of iterations
losses = [] # loss along the iterations
# loss for the minimal Wasserstein estimator
def get_loss(w):
    # Objective for the minimal-Wasserstein estimator.  NOTE: closes over
    # the module-level tensors H2 (sample reweighting matrix), b (uniform
    # target weights) and M2 (pairwise cost matrix) defined above.
    a = torch.mv(H2, w)  # distribution reweighting
    return ot.emd2(a, b, M2)  # squared Wasserstein 2
# Projected gradient descent on the mixture weights: autograd supplies the
# gradient of the Wasserstein loss; each step is followed by a projection
# back onto the probability simplex so w stays a valid mixture.
for i in range(niter):
    loss = get_loss(w)
    losses.append(float(loss))
    loss.backward()
    with torch.no_grad():
        w -= lr * w.grad  # gradient step
        w[:] = ot.utils.proj_simplex(w)  # projection on the simplex
        w.grad.zero_()  # clear accumulated gradients for the next iteration
##############################################################################
# Estimated weights and convergence of the objective
# ---------------------------------------------------
we = w.detach().numpy()
print('Estimated mixture:', we)
pl.figure(2)
pl.semilogy(losses)
pl.grid()
pl.title('Wasserstein distance')
pl.xlabel("Iterations")
##############################################################################
# Plotting the reweighted source distribution
# ------------------------------------------
pl.figure(3)
# compute source weights
ws = H.dot(we)
pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5)
pl.scatter(xs[:, 0], xs[:, 1], color='C3', s=ws * 20 * ns, label='Weighted sources $\sum_{k} w_k\mu^s_k$', alpha=0.5)
pl.title('Target and reweighted source distributions')
pl.legend()
| 2.859375 | 3 |
summons.py | justindz/Mines-RPG | 1 | 12758309 | from elements import Elements
from enemy import Goal, GoalType
from ability import EffectType, Effect
from ai.single_target_attack import SingleTargetAttack
from ai.single_target_heal import SingleTargetHeal
from ai.status_effect import StatusEffect
from summon import Summon
from ai.explode import Explode
# Registry of summonable creatures, keyed by summon id.  Each entry builds a
# Summon from: a stat-growth dict (keys 'h', 's', 'm' -- presumably
# health/speed-or-stamina/mana; confirm against the Summon constructor), a
# list of AI abilities, a list of Goals driving behaviour, and optional
# keyword stat/resistance tweaks.
summons = {
    'coal_golem':
        Summon('Coal Golem',
               {'h': 0, 's': 0, 'm': 8},
               [
                   SingleTargetAttack('Punch', 0, 0.05, [Effect(EffectType.damage_health, Elements.earth, _dice_value=4)]),
                   SingleTargetAttack('Slam', 3, 0.05, [Effect(EffectType.damage_health, Elements.earth, _dice_value=8)])
               ],
               [Goal(GoalType.damage_opponent, 400)],
               strength_growth=7,
               earth_res=0.1),
    'blood_golem':
        Summon('Blood Golem',
               {'h': 5, 's': 0, 'm': 0},
               [
                   # NOTE(review): the bleed uses Elements.earth while the geyser
                   # heal uses Elements.water -- verify the element choice.
                   SingleTargetAttack('Exsanguinate', 0, 0.05, [Effect(EffectType.bleed, Elements.earth, _effect_turns=1, _dot_value=2)]),
                   Explode('Blood Geyser', [Effect(EffectType.restore_health, Elements.water, _dice_value=4)])
               ],
               [
                   Goal(GoalType.enrage, 0),
                   Goal(GoalType.damage_opponent, 400)
               ],
               water_res=0.1,
               electricity_res_growth=0.01,
               dot_res=0.05,
               dot_res_growth=0.03,
               health_regen=1,
               health_regen_growth=1,
               shock_limit=3,
               confusion_limit=5),
    'wisp':
        Summon('Will-o-the-Wisp',
               {'h': 0, 's': 0, 'm': 5},
               [
                   # Debuffs all four bonus stats for two turns.
                   StatusEffect('Misdirection', 3, [
                       Effect(EffectType.debuff, Elements.earth, _stat='bonus_strength', _dice_value=4, _effect_turns=2, _status_effect_name='Weakened'),
                       Effect(EffectType.debuff, Elements.fire, _stat='bonus_fire', _dice_value=4, _effect_turns=2, _status_effect_name='Stupefied'),
                       Effect(EffectType.debuff, Elements.electricity, _stat='bonus_dexterity', _dice_value=4, _effect_turns=2, _status_effect_name='Exhausted'),
                       Effect(EffectType.debuff, Elements.water, _stat='bonus_willpower', _dice_value=4, _effect_turns=2, _status_effect_name='Discouraged'),
                   ]),
                   SingleTargetAttack('Foolish Fire', 0, 0.08, [Effect(EffectType.damage_health, Elements.fire, _dice_value=2)]),
               ],
               [
                   Goal(GoalType.debuff_opponent, 450),
                   Goal(GoalType.damage_opponent, 400)
               ],
               health=7,
               health_growth=4),
}
| 2.171875 | 2 |
trunk/MOPS_Area.py | n5iln/railmops | 1 | 12758310 | <filename>trunk/MOPS_Area.py<gh_stars>1-10
'''
Areas Class: Defines a territory or coverage
Model Operations Processing System. Copyright <NAME> 2009-2010. Licenced under the EUPL.
You may not use this work except in compliance with the Licence. You may obtain a copy of the
Licence at http://ec.europa.eu/idabc/eupl or as attached with this application (see Licence file).
Unless required by applicable law or agreed to in writing, software distributed under the Licence
is distributed on an 'AS IS' basis WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed
or implied. See the Licence governing permissions and limitations under the Licence.
'''
import MOPS_Element
class cAreas(MOPS_Element.cElement):
"""details about areas. areas belong to railroads and are geographical in nature. stations are
linked to areas.
"""
extract_code = 'select * from area'
extract_header = 'id|code|name|railroad\n'
    def adarea(self, message):
        """Add a new area code to the system.

        Message fields: area code, area name, railroad code.  The area
        code must be non-blank, within the configured size limit and not
        already on file; the railroad must already exist.  Every
        validation failure prints an operator message and aborts.
        """
        if self.show_access(message, 'ADAREA area;area name;^railroad^', 'S') != 0:
            return
        # area code: mandatory, size-limited -----------------------------------
        area, rc = self.extract_field(message, 0, 'AREA CODE')
        if rc > 0:
            return
        if len(area) > self.areasize:
            print('* AREA CODE ENTERED IS GREATER THAN THE ALLOWED SIZE')
            return
        if len(area) == 0:
            print('* NO AREA CODE ENTERED: A BLANK CODE IS NOT ALLOWED')
            return
        # check it does not already exist --------------------------------------
        data = (area,)
        sql = 'select id from area where area = ?'
        count, dummy = self.db_read(sql, data)
        if count < 0:
            return  # db_read already reported the error
        if count != 0:
            print('* AREA CODE ALREADY EXISTS')
            return
        # area name ------------------------------------------------------------
        area_name, rc = self.extract_field(message, 1, 'AREA NAME')
        if rc > 0:
            return
        # railroad: must reference an existing railroad record ------------------
        railroad, rc = self.extract_field(message, 2, 'RAILROAD CODE')
        if rc > 0:
            return
        data = (railroad, )
        sql = 'select name from railroad where railroad = ?'
        count, ds_railroads = self.db_read(sql, data)
        if count < 0:
            return
        if count == 0:
            print('* RAILROAD CODE DOES NOT EXIST (' + railroad + ')')
            return
        else:
            for row in ds_railroads:
                railroad_name = row[0]
        # carry out the update and report the change ----------------------------
        data = (area, area_name, railroad)
        sql = 'insert into area values (null, ?, ?, ?)'
        if self.db_update(sql, data) != 0:
            return
        print('NEW AREA ADDED SUCCESSFULLY')
        print(area + area_name + '(' + railroad + railroad_name + ')')
        return
    def charea(self, message):
        """Amend an area's name and/or move it to a different railroad.

        Message fields: area code (mandatory), optional new area name,
        optional new railroad code.  Omitted fields keep their current
        values, which are read from the database first.
        """
        if self.show_access(message, 'CHAREA area;(area name);(^railroad^)', 'S') != 0:
            return
        # area code: must identify an existing area -----------------------------
        area, rc = self.extract_field(message, 0, 'AREA CODE')
        if rc > 0:
            return
        # read the current name and owning railroad -----------------------------
        data = (area,)
        sql = 'select name, railroad from area where area = ?'
        count, ds_areas = self.db_read(sql, data)
        if count < 0:
            return  # db_read already reported the error
        if count == 0:
            print('* AREA CODE DOES NOT EXIST')
            return
        for row in ds_areas:
            area_name = row[0]
            railroad = row[1]
        # optional replacement area name ----------------------------------------
        value, rc = self.extract_field(message, 1, '')
        if rc == 0:
            area_name = value
        # optional replacement railroad; always re-validated against the
        # railroad table (even when unchanged) ----------------------------------
        value, rc = self.extract_field(message, 2, '')
        if rc == 0:
            railroad = value
        data = (railroad, )
        sql = 'select name from railroad where railroad = ?'
        count, ds_railroads = self.db_read(sql, data)
        if count < 0:
            return
        if count == 0:
            print('* RAILROAD CODE DOES NOT EXIST (' + railroad + ')')
            return
        else:
            for row in ds_railroads:
                railroad_name = row[0]
        # carry out the update and report the change ----------------------------
        data = (area_name, railroad, area)
        sql = 'update area set name = ?, railroad = ? where area = ?'
        if self.db_update(sql, data) != 0:
            return
        print('AREA DETAILS CHANGED SUCCESSFULLY')
        print(area + area_name + '(' + railroad + railroad_name + ')')
        return
def dxarea (self, message):
"""deletes an area entry from the list. validates that there are no stations attached to
the area
"""
if self.show_access(message, 'DXAREA area', 'S') != 0:
return
#area code----------------------------------------------------------------------------------
area, rc = self.extract_field(message, 0, 'AREA CODE')
if rc > 0:
return
data = (area,)
#validate the change------------------------------------------------------------------------
sql = 'select id from area where area = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* AREA CODE DOES NOT EXIST')
return
#make sure that there is not a station linked to the area-----------------------------------
sql = 'select id from station where area = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count > 0:
print('* STATIONS ARE ATTACHED TO THIS AREA - CANNOT DELETE')
return
#process the change-------------------------------------------------------------------------
if self.db_update('delete from area where area = ?', data) == 0:
print('AREA ' + area + ' SUCCESSFULLY DELETED')
return
    def liarea(self, message):
        """List basic area details to the screen, optionally filtered by railroad.

        message format: LIAREA (sort[0/1/2]);(^railroad^)
        sort 0 = railroad then area (default), 1 = area code, 2 = area name.
        Output is paginated 20 lines at a time; entering 'x' stops the listing.
        """
        if self.show_access(message, 'LIAREA (sort[0/1/2]);(^railroad^)', 'R') != 0:
            return
        #work out the various parameters------------------------------------------------------------
        value, rc = self.extract_field(message, 0, '')
        if rc == 0:
            sort_order = value
        else:
            sort_order = '0'  # default: sort by railroad, then area
        value, rc = self.extract_field(message, 1, '')
        if rc == 0:
            filter_railroad = value
        else:
            filter_railroad = ''  # empty filter means show all railroads
        # build the column titles (78-char screen width, 3 separating spaces)-------------------------
        area_name_size = 78 - self.areasize - self.railsize - 30 - 3
        titles = self.x_field('AREA====', self.areasize) + ' ' + \
            self.x_field('AREA NAME==============================', area_name_size) + ' ' + \
            self.x_field('RAIL====', self.railsize) + ' ' + \
            self.x_field('RAILROAD NAME==========================', 30)
        # get the extract data----------------------------------------------------------------------
        if sort_order == '1':
            sql = 'select area.area, area.name, railroad.railroad, railroad.name ' +\
                'from area, railroad where area.railroad = railroad.railroad ' +\
                'order by area.area'
        elif sort_order == '2':
            sql = 'select area.area, area.name, railroad.railroad, railroad.name ' +\
                'from area, railroad where area.railroad = railroad.railroad ' +\
                'order by area.name'
        else:
            sql = 'select area.area, area.name, railroad.railroad, railroad.name ' +\
                'from area, railroad where area.railroad = railroad.railroad ' +\
                'order by railroad.railroad, area.area'
        count, ds_areas = self.db_read(sql, '')
        if count < 0:
            return
        #report the extracted data------------------------------------------------------------------
        line_count = 0   # lines shown since the last pagination pause
        counter = 0      # total records displayed
        for row in ds_areas:
            rail = row[2]
            if filter_railroad == '' or filter_railroad == rail:
                if line_count == 0:
                    # re-print the column titles at the top of each page
                    print(titles)
                counter = counter + 1
                print(self.x_field(row[0], self.areasize) + " " +
                      self.x_field(row[1], area_name_size) + " " +
                      self.x_field(row[2], self.railsize) + " " +
                      self.x_field(row[3], 30))
                line_count = line_count + 1
                if line_count > 20:
                    line_count = 0
                    # NOTE(review): raw_input is Python 2 only - confirm the
                    # target interpreter before porting to Python 3 (input()).
                    reply = raw_input('+')
                    if reply == 'x':
                        break
        print(' ** END OF DATA: ' + str(counter) + ' RECORDS DISPLAYED **')
        return
    def prarea(self, message, Params):
        """Print basic area details, optionally filtered by railroad.

        message format: PRAREA (sort[0/1/2]);(^railroad^)
        sort 0 = railroad then area (default), 1 = area code, 2 = area name.
        Params is passed through to self.print_report.
        """
        if self.show_access(message, 'PRAREA (sort[0/1/2]);(^railroad^)', 'R') != 0:
            return
        #work out the various parameters------------------------------------------------------------
        value, rc = self.extract_field(message, 0, '')
        if rc == 0:
            sort_order = value
        else:
            sort_order = '0'  # default: sort by railroad, then area
        value, rc = self.extract_field(message, 1, '')
        if rc == 0:
            filter_railroad = value
        else:
            filter_railroad = ''  # empty filter means print all railroads
        # build the column titles-------------------------------------------------------------------
        # NOTE(review): width is 80 here vs 78 in liarea - presumably printer
        # vs screen width; confirm this difference is intentional.
        area_name_size = 80 - self.areasize - self.railsize - 30 - 3
        titles = self.x_field('AREA====', self.areasize) + ' ' + \
            self.x_field('AREA NAME==============================', area_name_size) + ' ' + \
            self.x_field('RAIL====', self.railsize) + ' ' + \
            self.x_field('RAILROAD NAME==========================', 30)
        # get the extract data----------------------------------------------------------------------
        if sort_order == '1':
            sql = 'select area.area, area.name, railroad.railroad, railroad.name ' +\
                'from area, railroad where area.railroad = railroad.railroad ' +\
                'order by area.area'
            report_desc = 'AREAS SORTED BY AREA CODE'
        elif sort_order == '2':
            sql = 'select area.area, area.name, railroad.railroad, railroad.name ' +\
                'from area, railroad where area.railroad = railroad.railroad ' +\
                'order by area.name'
            report_desc = 'AREAS SORTED BY AREA NAME'
        else:
            sql = 'select area.area, area.name, railroad.railroad, railroad.name ' +\
                'from area, railroad where area.railroad = railroad.railroad ' +\
                'order by railroad.railroad, area.area'
            report_desc = 'AREAS SORTED BY RAILROAD, AREA'
        count, ds_areas = self.db_read(sql, '')
        if count < 0:
            return
        #build the extracted data-------------------------------------------------------------------
        # self.temp maps a sort key to a pre-formatted line for print_report.
        # NOTE(review): duplicate keys (e.g. two areas with the same name under
        # sort 2) overwrite each other, silently dropping rows - confirm.
        self.temp = {}
        for row in ds_areas:
            rail = row[2]
            if filter_railroad == '' or filter_railroad == rail:
                print_line = self.x_field(row[0], self.areasize) + ' ' +\
                    self.x_field(row[1], area_name_size) + ' ' +\
                    self.x_field(row[2], self.railsize) + ' ' +\
                    self.x_field(row[3], 30)
                if sort_order == '1':
                    self.temp[row[0]] = print_line
                elif sort_order == '2':
                    self.temp[row[1]] = print_line
                else:
                    self.temp[row[2] + row[0]] = print_line
        #--------------------------------------------------------------------------------------------
        #report the extracted data
        self.print_report (titles = titles,
                           report_id = 'PRAREA',
                           report_name = report_desc,
                           Params = Params)
        return
| 2.921875 | 3 |
deep_disfluency/tagger/deep_tagger_module.py | askender/deep_disfluency | 0 | 12758311 | <filename>deep_disfluency/tagger/deep_tagger_module.py
from __future__ import division
from deep_tagger import DeepDisfluencyTagger
import fluteline
class DeepTaggerModule(fluteline.Consumer):
    """A fluteline incremental concurrent Consumer module which
    consumes update increments as word dictionaries, e.g.:
    {'id': 1,
    'start_time': 0.44,
    'end_time': 0.77,
    'word': 'hello'}
    And produces the same update increments but with the POS tags
    and disfluency tags:
    {'id': 1,
    'start_time': 0.44,
    'end_time': 0.77,
    'word': 'john',
    'pos_tag' : 'NN'
    'disf_tag' : '<f/>'}
    These will be updated as diffs.

    NOTE(review): this module uses Python 2 print statements; it will not
    run unmodified on Python 3.
    """
    def __init__(self,
                 config_file="experiments/experiment_configs.csv",
                 config_number=35,
                 saved_model_dir="experiments/035/epoch_6",
                 use_timing_data=True):
        super(DeepTaggerModule, self).__init__()
        # The wrapped tagger holds the incremental model state; this module
        # only mirrors its word graph and translates fluteline updates.
        self.disf_tagger = DeepDisfluencyTagger(
            config_file=config_file,
            config_number=config_number,
            saved_model_dir=saved_model_dir,
            use_timing_data=use_timing_data
        )
        print "Deep Tagger Module ready"
        # Highest word id consumed so far; -1 means nothing consumed yet.
        self.latest_word_ID = -1
        # Local copy of the word updates, kept in step with the tagger.
        self.word_graph = []
    # def enter(self):
    def consume(self, word_update):
        """ Will get an update like:
        {'id': 1,
        'start_time': 0.44,
        'end_time': 0.77,
        'word': 'hello'}
        Add it to the tagger's word graph either at the end, or
        rolling back first and then add it.
        """
        try:
            # print "RECEIVING", word_update
            if word_update['id'] <= self.latest_word_ID:
                # rollback needed: the incoming id revises an already-seen
                # word, so undo everything from that id onwards in both the
                # tagger and our local mirror.
                # TODO should be consistent
                backwards = (self.latest_word_ID - word_update['id']) + 1
                self.disf_tagger.rollback(backwards)
                self.word_graph = self.word_graph[:
                                                  len(self.word_graph) -
                                                  backwards]
            self.latest_word_ID = word_update['id']
            self.word_graph.append(word_update)
            timing = word_update['end_time'] - word_update['start_time']
            word = word_update['word']
            # tag_new_word may retag earlier words too; it returns one tag
            # per (re)tagged word ending at the newest one.
            new_tags = self.disf_tagger.tag_new_word(word, timing=timing)
            start_id = self.latest_word_ID - (len(new_tags) - 1)
            word_update_indices = range(start_id, self.latest_word_ID + 1)
            # print "\nnew tags:"
            for idx, new_tag in zip(word_update_indices, new_tags):
                # update the disf tag and pos tag for new tag updates
                self.word_graph[idx]['disf_tag'] = new_tag
                # the tagger's internal graph is offset by its context window
                pos_idx = idx + (self.disf_tagger.window_size - 1)
                self.word_graph[idx]['pos_tag'] = \
                    self.disf_tagger.word_graph[pos_idx][1]
                # print self.word_graph[idx]
                # output the new tags for the updated word
                self.output.put(self.word_graph[idx])
        except BaseException:
            # NOTE(review): BaseException also swallows KeyboardInterrupt and
            # SystemExit, and the error itself is discarded - consider
            # narrowing to Exception and logging the traceback.
            print "Disfluency tagger failed to update with new word"
| 2.734375 | 3 |
bot/plugins/bonk.py | Preocts/twitch-chat-bot | 62 | 12758312 | from __future__ import annotations
from typing import Match
from bot.config import Config
from bot.data import command
from bot.data import esc
from bot.data import format_msg
@command('!bonk')
async def cmd_bonk(config: Config, match: Match[str]) -> str:
    """Reply with a 'bonk' directed at the given target (default marsha_socks)."""
    _, _, target = match['msg'].partition(' ')
    target = target.strip()
    if not target:
        target = 'marsha_socks'
    return format_msg(
        match,
        f'awcBonk awcBonk awcBonk {esc(target)} awcBonk awcBonk awcBonk',
    )
| 2.21875 | 2 |
zimpy/generators/csv_image_provider.py | matthewzimmer/carnd-behavioral-cloning | 0 | 12758313 | <filename>zimpy/generators/csv_image_provider.py
import random
import cv2
import numpy as np
from zimpy.camera_preprocessor import preprocess_image
from scipy import misc
import os
import math
def load_image(imagepath):
    """Load a driving-log image, forcing the lookup into the local IMG/ directory.

    Only the basename of ``imagepath`` is kept: the file is always read from
    ``IMG/<basename>`` relative to the current working directory, regardless
    of the absolute path recorded in the CSV.
    """
    path, file_name = os.path.split(imagepath)
    imagepath = 'IMG/' + file_name
    # image_array = cv2.imread(imagepath, 1)
    image_array = misc.imread(imagepath)
    # NOTE(review): scipy.misc.imread raises IOError for a missing file rather
    # than returning None, so this branch may be unreachable - confirm.
    if image_array is None:
        print('File Not Found: {}'.format(imagepath))
    return image_array
def augment_brightness_camera_images(image):
    """Return a copy of an RGB image with its brightness randomly scaled.

    The image is converted to HSV, the V channel is multiplied by a random
    factor in [0.25, 1.25), and the result is converted back to RGB.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    factor = .25 + np.random.uniform()
    # print(factor)
    hsv[:, :, 2] = hsv[:, :, 2] * factor
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def batch_generator(X, Y, label, num_epochs, batch_size=32, output_shape=None, flip_images=True, classifier=None):
    """Endless Keras-style generator yielding (images, steering_angles) batches.

    X: array of 'left:center:right:z_score' path strings, one per sample.
    Y: corresponding steering angles.
    label: dataset name, used only for logging.
    num_epochs: total planned epochs; used to ramp probabilities in the
        (currently disabled) camera-selection logic.
    output_shape: target image shape passed to preprocess_image.
    flip_images: when True, each image has a 50% chance of being mirrored
        (with its steering angle negated) as augmentation.
    classifier: optional model that is saved each time the full population
        has been sampled, to support manual early stopping.

    NOTE(review): ``mode`` is hard-coded to 1 below, so the entire
    left/right camera steering-augmentation branch is dead code and only the
    center image is ever used - confirm whether that is intentional.
    """
    population = len(X)
    counter = 0
    _index_in_epoch = 0   # cursor into the (shuffled) population
    _tot_epochs = 0       # completed passes over the whole population
    batch_size = min(batch_size, population)
    batch_count = int(math.ceil(population / batch_size))
    print('Batch generating against the {} dataset with population {} and shape {}'.format(label, population, X.shape))
    while True:
        counter += 1
        print('batch gen iter {}'.format(counter))
        for i in range(batch_count):
            start_i = _index_in_epoch
            _index_in_epoch += batch_size
            if _index_in_epoch >= population:
                # Save the classifier to support manual early stoppage
                if classifier is not None:
                    classifier.save()
                print(' sampled entire population. reshuffling deck and resetting all counters.')
                # reshuffle X and Y in unison and restart from the top
                perm = np.arange(population)
                np.random.shuffle(perm)
                X = X[perm]
                Y = Y[perm]
                start_i = 0
                _index_in_epoch = batch_size
                _tot_epochs += 1
            end_i = _index_in_epoch
            X_batch = []
            y_batch = []
            for j in range(start_i, end_i):
                steering_angle = Y[j]
                image_path = None
                # unpack the colon-separated camera paths for this sample
                left = X[j].split(':')[0]
                center = X[j].split(':')[1]
                right = X[j].split(':')[2]
                z_score = X[j].split(':')[3]
                # print('angle: {} z_score: {}'.format(steering_angle, z_score))
                mode = 1  # 1 = center camera only; everything below the else is disabled
                if mode == 1:
                    image_path = center  # center camera
                else:
                    # This algorithm was inspired by <NAME>'s algorithm:
                    # randomly substitute a side camera and scale the steering
                    # angle to synthesize recovery data for turning samples.
                    if steering_angle < -0.01:
                        chance = random.random()
                        if chance > 0.75:
                            image_path = left
                            augmented_steering = steering_angle*3.0
                            # print('L1 - real:{} - aug:{} - LEFT 3x'.format(steering_angle, augmented_steering))
                            steering_angle = augmented_steering
                        else:
                            if chance > 0.5:
                                image_path = left
                                augmented_steering = steering_angle*2.0
                                # print('L2 - real:{} - aug:{} - LEFT 2x'.format(steering_angle, augmented_steering))
                                steering_angle = augmented_steering
                            else:
                                if chance > 0.25:
                                    image_path = center
                                    augmented_steering = steering_angle*1.5
                                    # print('L3 - real:{} - aug:{} - CENTER 1.5x'.format(steering_angle, augmented_steering))
                                    steering_angle = augmented_steering
                                else:
                                    # progressively increase chances of introducing raw center
                                    if True or random.random() > (1. - _tot_epochs / num_epochs):
                                        # print('L4 - {} - CENTER'.format(steering_angle))
                                        image_path = center
                                    else:
                                        print('L5 - {} - SKIPPED'.format(steering_angle))
                    # else:
                    if steering_angle > 0.01:
                        chance = random.random()
                        if chance > 0.75:
                            image_path = right
                            augmented_steering = steering_angle*3.0
                            # print('R1 - real:{} - aug:{} - RIGHT 3x'.format(steering_angle, augmented_steering))
                            steering_angle = augmented_steering
                        else:
                            if chance > 0.5:
                                image_path = right
                                augmented_steering = steering_angle*2.0
                                # print('R2 - real:{} - aug:{} - RIGHT 2x'.format(steering_angle, augmented_steering))
                                steering_angle = augmented_steering
                            else:
                                if chance > 0.25:
                                    image_path = center
                                    augmented_steering = steering_angle*1.5
                                    # print('R3 - real:{} - aug:{} - CENTER 1.5x'.format(steering_angle, augmented_steering))
                                    steering_angle = augmented_steering
                                else:
                                    if True or random.random() > (1. - _tot_epochs / num_epochs):
                                        image_path = center
                                        # print('R4 - real:{} - aug:{} - CENTER 1x'.format(steering_angle, steering_angle))
                                    else:
                                        print('R5 - {} - SKIPPED'.format(steering_angle))
                    else:
                        # progressively increase chances of introducing raw center
                        if True or random.random() > (1. - _tot_epochs / num_epochs):
                            # print('C1 - {} - CENTER'.format(steering_angle))
                            image_path = center
                        else:
                            print('C2 - {} - SKIPPED'.format(steering_angle))
                if image_path is not None:
                    image = load_image(image_path)
                    if image is not None:
                        image = preprocess_image(image, output_shape=output_shape)
                        # 50% chance to mirror the frame (and negate steering)
                        if flip_images and random.random() > 0.5:
                            X_batch.append(np.fliplr(image))
                            y_batch.append(-steering_angle)
                        else:
                            X_batch.append(image)
                            y_batch.append(steering_angle)
            yield np.array(X_batch), np.array(y_batch)
| 2.875 | 3 |
unused_url_category.py | taylancode/Work-projects | 0 | 12758314 | <reponame>taylancode/Work-projects
#Import modules
import requests
import xml.etree.ElementTree as ET
import sys
#Disable urllib warnings
requests.packages.urllib3.disable_warnings()
#Gets all existing URL categories in specific device group
def getcategories(fwip, key):
    """Append the name of every custom URL category in the device group to ``urlcats``.

    Relies on module-level globals set in __main__: ``dgrp`` (device group)
    and ``urlcats`` (output list).
    NOTE(review): the xpath string contains '<EMAIL>' placeholders that look
    like anonymization damage to the original '/config/...' path - the call
    will fail against a real Panorama until the xpath is restored.
    """
    try:
        #API call to get all existing URL categories in device group
        r = requests.post(
            f"https://{fwip}/api/?key={key}&type=config&action=get&xpath=/<EMAIL>/devices/entry[<EMAIL>']/device-group/entry[@name='{dgrp}']/profiles/custom-url-category", verify=False)
        if r.status_code == 200:
            root = ET.fromstring(r.text)
            #Iterate over XML and append all names to list
            for entry in root.findall("./result/custom-url-category/entry"):
                names = entry.get('name')
                urlcats.append(names)
    # NOTE(review): bare except hides the real error (connection, auth, XML);
    # consider catching requests.RequestException / ET.ParseError and logging.
    except:
        print("API call failed, please check connectivity is good and API key is correct.")
#Gets all members of every URL filtering profile
def getprofiles(fwip, key):
    """Collect every category referenced by any URL filtering profile into ``pfilecat``.

    A category counts as 'used' whether its action is block, alert or
    continue. Relies on module-level globals ``dgrp`` and ``pfilecat``.
    NOTE(review): the xpath contains an '<EMAIL>' placeholder - see
    getcategories.
    """
    try:
        #API call displays all URL filtering profiles of device group and their category members
        r = requests.post(
            f"https://{fwip}/api/?key={key}&type=config&action=get&xpath=/config/devices/entry[<EMAIL>']/device-group/entry[@name='{dgrp}']/profiles/url-filtering", verify=False)
        if r.status_code == 200:
            root = ET.fromstring(r.text)
            #If category exists, it will be in one of these lists. Lists are appended to be iterated over later
            for entry in root.findall("./result/url-filtering/entry"):
                for blocked in entry.findall(".block/member"):
                    blockedpfiles = blocked.text
                    pfilecat.append(blockedpfiles)
                for alert in entry.findall(".alert/member"):
                    alertpfiles = alert.text
                    pfilecat.append(alertpfiles)
                for cont in entry.findall(".continue/member"):
                    contpfiles = cont.text
                    pfilecat.append(contpfiles)
    # NOTE(review): bare except hides the real error - see getcategories.
    except:
        print("API call failed, please check connectivity is good and API key is correct.")
#Iterates over all security rules and appends URL category members if one/more is specified
def getsecrules(fwip, key):
    """Collect every URL category referenced by a post-rulebase security rule into ``secrules``.

    Rules whose category is the literal 'any' are skipped. Relies on
    module-level globals ``dgrp`` and ``secrules``.
    NOTE(review): the xpath contains an '<EMAIL>' placeholder - see
    getcategories.
    """
    try:
        #API call to gather all security rules to be iterated over
        r = requests.post(
            f"https://{fwip}/api/?key={key}&type=config&action=get&xpath=/config/devices/entry[<EMAIL>']/device-group/entry[@name='{dgrp}']/post-rulebase/security", verify=False)
        if r.status_code == 200:
            root = ET.fromstring(r.text)
            #Iterates over XML data to find URL category members
            for i in root.findall("./result/security/rules/entry"):
                for rule in i.findall('./category/member'):
                    rules = rule.text
                    #Appends to list if any are found
                    if rules != 'any':
                        secrules.append(rules)
                    else:
                        continue
    # NOTE(review): bare except hides the real error - see getcategories.
    except:
        print("API call failed, please check connectivity is good and API key is correct.")
if __name__ == '__main__':
    #Gather variables interactively (Panorama address, device group, API key)
    print("Enter Panorama IP/FQDN:")
    fwip = input("> ")
    print("Enter Device Group:")
    dgrp = input("> ")
    print("Enter API Key: ")
    key = input("> ")
    #Lists to be appended to (shared as globals with the functions above)
    pfilecat = []
    secrules = []
    urlcats = []
    unused = []
    #To count unused categories
    count = 0
    #Calling every function
    getcategories(fwip, key)
    getprofiles(fwip, key)
    getsecrules(fwip, key)
    #For every existing category, it checks if it exists in any rule or url profile
    #If it doesn't exist, it will append to unused list
    # NOTE(review): this is a *substring* test ('i in str(cat)'), not an exact
    # membership test ('cat in secrules'); a rule using 'social-networking'
    # marks 'social-networking-2' as used too - confirm this is intended.
    for cat in urlcats:
        ifrule = any(i in str(cat) for i in secrules)
        ifurlprofile = any(i in str(cat) for i in pfilecat)
        if ifrule is False:
            if ifurlprofile is False:
                unused.append(cat)
    #Write all unused categories to text file, prints output if unused list is empty
    with open('unusedcat.txt', 'w') as f:
        if not unused:
            print("\nNo unused categories")
        else:
            for i in unused:
                f.write(i)
                f.write('\n')
                count += 1
            print("\n"+str(count)+" unused categories added to list")
    # NOTE(review): the 'with' block has already closed the file; this close
    # (and the sys.exit() below) are redundant.
    f.close()
    sys.exit()
qiniu/rs/test/rs_token_test.py | jeremybai/Easylink | 1 | 12758315 | <filename>qiniu/rs/test/rs_token_test.py
# -*- coding: utf-8 -*-
import unittest
import os
import json
from base64 import urlsafe_b64decode as decode
from base64 import urlsafe_b64encode as encode
from hashlib import sha1
import hmac
import urllib
from qiniu import conf
from qiniu import rs
# Credentials and test fixtures come from the environment so secrets are not
# checked into source control.
conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY")
conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY")
bucket_name = os.getenv("QINIU_TEST_BUCKET")
domain = os.getenv("QINIU_TEST_DOMAIN")
key = 'QINIU_UNIT_TEST_PIC'  # fixed object key exercised by the download test
class TestToken(unittest.TestCase):
    """Round-trip tests for Qiniu upload (put) and download (get) policy tokens.

    NOTE(review): this is Python 2 code - hmac.new() with str keys and
    urllib.urlopen() both changed in Python 3.
    """

    def test_put_policy(self):
        # Build a put-policy with every optional field populated so the
        # decoded token can be checked field-by-field.
        policy = rs.PutPolicy(bucket_name)
        policy.endUser = "hello!"
        policy.returnUrl = "http://localhost:1234/path?query=hello"
        policy.returnBody = "$(sha1)"
        # Do not specify the returnUrl and callbackUrl at the same time
        policy.callbackUrl = "http://1.2.3.4/callback"
        policy.callbackBody = "$(bucket)"
        policy.saveKey = "$(sha1)"
        policy.insertOnly = 1
        policy.detectMime = 1
        policy.fsizeLimit = 1024
        policy.persistentNotifyUrl = "http://4.3.2.1/persistentNotifyUrl"
        policy.persistentOps = "avthumb/flash"
        # token format is "<access_key>:<encoded_sign>:<encoded_policy>"
        tokens = policy.token().split(':')
        # chcek first part of token
        self.assertEqual(conf.ACCESS_KEY, tokens[0])
        # decode the policy payload and verify each field survived the round trip
        data = json.loads(decode(tokens[2]))
        # check if same
        self.assertEqual(data["scope"], bucket_name)
        self.assertEqual(data["endUser"], policy.endUser)
        self.assertEqual(data["returnUrl"], policy.returnUrl)
        self.assertEqual(data["returnBody"], policy.returnBody)
        self.assertEqual(data["callbackUrl"], policy.callbackUrl)
        self.assertEqual(data["callbackBody"], policy.callbackBody)
        self.assertEqual(data["saveKey"], policy.saveKey)
        self.assertEqual(data["exclusive"], policy.insertOnly)
        self.assertEqual(data["detectMime"], policy.detectMime)
        self.assertEqual(data["fsizeLimit"], policy.fsizeLimit)
        self.assertEqual(
            data["persistentNotifyUrl"], policy.persistentNotifyUrl)
        self.assertEqual(data["persistentOps"], policy.persistentOps)
        # recompute the HMAC-SHA1 signature and compare with the token's middle part
        new_hmac = encode(hmac.new(conf.SECRET_KEY, tokens[2], sha1).digest())
        self.assertEqual(new_hmac, tokens[1])

    def test_get_policy(self):
        # Sign a private download URL and verify it actually serves content.
        base_url = rs.make_base_url(domain, key)
        policy = rs.GetPolicy()
        private_url = policy.make_request(base_url)
        f = urllib.urlopen(private_url)
        body = f.read()
        f.close()
        # a real image should be substantially larger than 100 bytes
        self.assertEqual(len(body) > 100, True)
class Test_make_base_url(unittest.TestCase):
    def test_unicode(self):
        """Byte-string and unicode keys must produce the same URL (Python 2)."""
        url1 = rs.make_base_url('1.com', '你好')
        url2 = rs.make_base_url('1.com', u'你好')
        assert url1 == url2
if __name__ == "__main__":
unittest.main()
| 2.328125 | 2 |
api-lesson-intro/flask-rest/app.py | AkanshDivker/flask-api-tutorial | 0 | 12758316 | from flask import Flask
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, help='Name of a guest')
parser.add_argument('id', type=int, help='Numerical ID of a guest in the list')
# A list containing names of guests
guest_list = ['Akansh', '<NAME>', '<NAME>']
# GET request to read all items in our list
class GuestList(Resource):
    """Return every guest name currently stored."""

    def get(self):
        return {'names': list(guest_list)}, 200
# GET request to check for a specific guest
class GuestById(Resource):
    """Look up a single guest by list index."""

    def get(self):
        """Return the guest at the requested index.

        Guards against a missing or out-of-range ``id`` so a bad request
        returns a clean 404 instead of crashing with TypeError/IndexError.
        """
        args = parser.parse_args()
        id = args['id']
        if id is None or not 0 <= id < len(guest_list):
            return {'message': 'Guest not found'}, 404
        return {'name': guest_list[id]}
# POST request to create a new guest into our list
class GuestAdd(Resource):
    """Append a new guest to the list."""

    def post(self):
        """Add the supplied name; reject a missing/empty name instead of storing None."""
        args = parser.parse_args()
        name = args['name']
        if not name:
            return {'message': 'A name must be provided'}, 400
        guest_list.append(name)
        return {'message': 'Guest added'}
# POST request to delete a name from our list
class GuestDelete(Resource):
    """Remove a guest by list index."""

    def post(self):
        """Delete the guest at the given index, returning 404 for a bad index."""
        args = parser.parse_args()
        id = args['id']
        if id is None or not 0 <= id < len(guest_list):
            return {'message': 'Guest not found'}, 404
        guest_list.pop(id)
        return {'message': 'Guest removed'}
# PUT request to update an existing guest in our list
# (the original comment said POST, but the handler is ``put``)
class GuestUpdate(Resource):
    """Replace the name of an existing guest."""

    def put(self):
        """Update the guest at the given index, validating both index and name."""
        args = parser.parse_args()
        id = args['id']
        name = args['name']
        if id is None or not 0 <= id < len(guest_list):
            return {'message': 'Guest not found'}, 404
        if not name:
            return {'message': 'A name must be provided'}, 400
        guest_list[id] = name
        return {'message': 'Guest updated'}
# Map each resource class onto its URL endpoint
api.add_resource(GuestList, '/guests')
api.add_resource(GuestById, '/guest')
api.add_resource(GuestAdd, '/guest/add')
api.add_resource(GuestDelete, '/guest/delete')
api.add_resource(GuestUpdate, '/guest/update')  # NOTE(review): handled via PUT, not POST

if __name__ == '__main__':
    app.run()
| 3.21875 | 3 |
PythonCode/1002.py | CrystianPrintes20/ProjetoUri | 0 | 12758317 | <reponame>CrystianPrintes20/ProjetoUri
#Questão: Area do circulo
# Area of a circle with pi fixed at 3.14159, as required by URI problem 1002.
r = float(input())
a = 3.14159 * r ** 2
print("A=%0.4f" % a)
common/Graph.py | Aquaware/Miners | 1 | 12758318 | <reponame>Aquaware/Miners
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.lines import Line2D
from matplotlib.dates import date2num
from matplotlib.dates import DateFormatter
def color(index):
    """Return a named CSS4 color, cycling through the palette by index."""
    names = ['red', 'blue', 'green', 'magenta', 'pink', 'gold', 'orangered',
             'yellowgreen', 'cyan', 'darkgrey', 'blue']
    palette = [mcolors.CSS4_COLORS[n] for n in names]
    return palette[int(index % len(palette))]
def lineStyle(index):
    """Return a matplotlib line style, cycling solid/dashed/dashdot by index."""
    choices = ['solid', 'dashed', 'dashdot']
    position = int(index % len(choices))
    return choices[position]
def style(index):
    """Return [color, linestyle]; the line style advances once every 10 colors."""
    line = lineStyle(int(index / 10))
    return [color(index), line]
def makeFig(rows, cols, size):
    """Create a matplotlib figure with a rows x cols grid of axes.

    size is a (width, height) pair in inches. Returns (figure, axes).
    """
    width, height = size[0], size[1]
    return plt.subplots(rows, cols, figsize=(width, height))
class Graph:
    """Thin convenience wrapper around a single matplotlib Axes."""

    def __init__(self, ax):
        # ax: the matplotlib Axes this wrapper draws on
        self.ax = ax

    def setDateFormat(self, form='%H:%M'):
        """Format x-axis tick labels as dates/times using the given strftime form."""
        self.ax.xaxis.set_major_formatter(DateFormatter(form))

    def setTitle(self, title, xlabel, ylabel):
        """Set the axes title and both axis labels."""
        self.ax.set_title(title)
        self.ax.set_xlabel(xlabel)
        self.ax.set_ylabel(ylabel)

    def scatter(self, x, y, prop):
        """Scatter plot; prop must supply 'color' and 'width'."""
        self.ax.scatter(x, y, c=prop['color'], lw=prop['width'])

    def plot(self, x, y, prop):
        """Line plot; prop must supply 'color', 'style' and 'width'."""
        self.ax.plot(x, y, color=prop['color'], linestyle=prop['style'], lw=prop['width'])

    def box(self, xrange, yrange, color_index, alpha):
        """Shade a vertical band between xrange[0] and xrange[1].

        When yrange is None the band spans the full axes height; otherwise
        yrange = (y_low, y_high) in data coordinates is normalised against the
        current y limits. Fixes the original implementation, which called the
        non-existent ``get_ylimi``/``vspan``, dereferenced ``yrange`` even
        when it was None, and computed y0 from yrange[1] instead of yrange[0].
        """
        if yrange is None:
            y0, y1 = 0.0, 1.0
        else:
            bottom, top = self.ax.get_ylim()
            y0 = (yrange[0] - bottom) / (top - bottom)
            y1 = (yrange[1] - bottom) / (top - bottom)
        self.ax.axvspan(xrange[0], xrange[1], ymin=y0, ymax=y1,
                        color=color(color_index), alpha=alpha)

    def point(self, point, color, alpha, size):
        """Draw a single circular marker at point = (x, y).

        The alpha argument is now honoured (the original accepted but ignored it).
        """
        self.ax.scatter(point[0], point[1], s=size, marker='o', c=color, alpha=alpha)

    def xLimit(self, xrange):
        """Set the x-axis limits to (xrange[0], xrange[1])."""
        self.ax.set_xlim(xrange[0], xrange[1])

    def yLimit(self, yrange):
        """Set the y-axis limits to (yrange[0], yrange[1])."""
        self.ax.set_ylim(yrange[0], yrange[1])

    def drawLegend(self, lines, markers):
        """Build a legend outside the axes from descriptor tuples.

        markers: iterable of (color_index, label, marker_type) or None.
        lines:   iterable of (style_index, label) or None.
        """
        elements = []
        if markers is not None:
            for (color_index, label, marker_type) in markers:
                e = Line2D([0], [0], marker=marker_type, color=color(color_index), label=label)
                elements.append(e)
        if lines is not None:
            for (style_index, label) in lines:
                sty = style(style_index)
                e = Line2D([0], [0], marker='o', color=sty[0], linestyle=sty[1], linewidth=5,
                           label=label, markerfacecolor=sty[0], markersize=0)
                elements.append(e)
        self.ax.legend(handles=elements, bbox_to_anchor=(1.05, 1.0),
                       loc='upper left', borderaxespad=0)

    def marking(self, x, y, mark_flag, prop):
        """Mark the points whose mark_flag matches a status.

        prop is an iterable of (status, color, alpha, size); each point i with
        mark_flag[i] == status gets a marker with that style.
        """
        for status, color, alpha, size in prop:
            for i in range(len(y)):
                if mark_flag[i] == status:
                    self.point([x[i], y[i]], color, alpha, size)
| 2.53125 | 3 |
flask_appbuilder/security/sqla/apis/permission_view_menu/api.py | stungkit/Flask-AppBuilder | 0 | 12758319 | from flask_appbuilder import ModelRestApi
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.sqla.models import PermissionView
class PermissionViewMenuApi(ModelRestApi):
    """REST API exposing permission/resource (view menu) associations.

    CRUD endpoints under ``security/permissions-resources``, backed by the
    PermissionView model; requires a logged-in browser session.
    """

    resource_name = "security/permissions-resources"
    openapi_spec_tag = "Security Permissions on Resources (View Menus)"
    # permission checks still use the model's class name, not the resource name
    class_permission_name = "PermissionViewMenu"
    datamodel = SQLAInterface(PermissionView)
    allow_browser_login = True
    list_columns = ["id", "permission.name", "view_menu.name"]
    show_columns = list_columns
    # create/update only take the two foreign keys
    add_columns = ["permission_id", "view_menu_id"]
    edit_columns = add_columns
| 2.1875 | 2 |
python/gtsam/examples/PlanarManipulatorExample.py | xxiao-1/gtsam | 2 | 12758320 | """
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: <NAME>, et al. (see THANKS for the full author list)
See LICENSE for the license information
Kinematics of three-link manipulator with GTSAM poses and product of exponential maps.
Author: <NAME>
"""
# pylint: disable=invalid-name, E1101
from __future__ import print_function
import math
import unittest
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
import gtsam
import gtsam.utils.plot as gtsam_plot
from gtsam import Pose2
from gtsam.utils.test_case import GtsamTestCase
def vector3(x, y, z):
    """Create 3D double numpy array."""
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # float is the documented replacement and yields an identical float64 array.
    return np.array([x, y, z], dtype=float)
def compose(*poses):
    """Compose all Pose2 transforms given as arguments from left to right."""
    def pair(accumulated, nxt):
        return accumulated.compose(nxt)
    return reduce(pair, poses)
def vee(M):
    """Pose2 vee operator: map a 3x3 se(2) matrix to its (x, y, theta) vector."""
    return vector3(M[0, 2], M[1, 2], M[1, 0])
def delta(g0, g1):
    """Difference between the x, y, theta components of two SE(2) poses.

    This is a coordinate-wise difference (g1 - g0), not a Lie-group Logmap.
    """
    return vector3(g1.x() - g0.x(), g1.y() - g0.y(), g1.theta() - g0.theta())
def trajectory(g0, g1, N=20):
    """ Create an interpolated trajectory in SE(2), treating x,y, and theta separately.
        g0 and g1 are the initial and final pose, respectively.
        N is the number of *intervals*
        Returns N+1 poses
    """
    e = delta(g0, g1)
    # N intervals require N+1 sample points; the original linspace(0, 1, N)
    # produced only N poses, contradicting the documented contract above.
    return [Pose2(g0.x()+e[0]*t, g0.y()+e[1]*t, g0.theta()+e[2]*t) for t in np.linspace(0, 1, N + 1)]
class ThreeLinkArm(object):
    """Planar three-link manipulator.

    Link lengths L1, L2, L3; the joint twists xi_i are expressed in the
    spatial frame at the zero configuration and drive the
    product-of-exponentials forward kinematics (poe/con). fk composes the
    same chain link-by-link; jacobian gives the analytic tool Jacobian.
    """

    def __init__(self):
        self.L1 = 3.5
        self.L2 = 3.5
        self.L3 = 2.5
        # Twists of the three revolute joints at the zero configuration.
        self.xi1 = vector3(0, 0, 1)
        self.xi2 = vector3(self.L1, 0, 1)
        self.xi3 = vector3(self.L1+self.L2, 0, 1)
        # Tool pose at the zero configuration: arm stretched straight up.
        self.sXt0 = Pose2(0, self.L1+self.L2 + self.L3, math.radians(90))

    def fk(self, q):
        """ Forward kinematics.
            Takes numpy array of joint angles, in radians.
            Returns the Pose2 of the tool in the spatial frame.
        """
        sXl1 = Pose2(0, 0, math.radians(90))
        l1Zl1 = Pose2(0, 0, q[0])
        l1Xl2 = Pose2(self.L1, 0, 0)
        l2Zl2 = Pose2(0, 0, q[1])
        l2Xl3 = Pose2(self.L2, 0, 0)
        l3Zl3 = Pose2(0, 0, q[2])
        l3Xt = Pose2(self.L3, 0, 0)
        return compose(sXl1, l1Zl1, l1Xl2, l2Zl2, l2Xl3, l3Zl3, l3Xt)

    def jacobian(self, q):
        """ Calculate manipulator Jacobian (d[x, y, theta]/dq).
            Takes numpy array of joint angles, in radians.
        """
        a = q[0]+q[1]
        b = a+q[2]
        # NOTE(review): the middle column uses L1 where the analytic form
        # suggests L2; it is numerically identical here because L1 == L2,
        # but confirm before changing link lengths.
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        return np.array([[-self.L1*math.cos(q[0]) - self.L2*math.cos(a)-self.L3*math.cos(b),
                          -self.L1*math.cos(a)-self.L3*math.cos(b),
                          - self.L3*math.cos(b)],
                         [-self.L1*math.sin(q[0]) - self.L2*math.sin(a)-self.L3*math.sin(b),
                          -self.L1*math.sin(a)-self.L3*math.sin(b),
                          - self.L3*math.sin(b)],
                         [1, 1, 1]], float)

    def poe(self, q):
        """ Forward kinematics via the product of exponentials.
            Takes numpy array of joint angles, in radians.
        """
        l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
        l2Zl2 = Pose2.Expmap(self.xi2 * q[1])
        l3Zl3 = Pose2.Expmap(self.xi3 * q[2])
        return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)

    def con(self, q):
        """ Forward kinematics, conjugation form.
            Takes numpy array of joint angles, in radians.
        """
        def expmap(x, y, theta):
            """Implement exponential map via conjugation with axis (x,y)."""
            return compose(Pose2(x, y, 0), Pose2(0, 0, theta), Pose2(-x, -y, 0))

        l1Zl1 = expmap(0.0, 0.0, q[0])
        l2Zl2 = expmap(0.0, self.L1, q[1])
        l3Zl3 = expmap(0.0, self.L1+self.L2, q[2])
        return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)

    def ik(self, sTt_desired, e=1e-9):
        """ Inverse kinematics by iterative Jacobian pseudo-inverse.
            Takes desired Pose2 of tool T with respect to base S.
            Optional e: error norm threshold for convergence.
        """
        q = np.radians(vector3(30, -30, 45))  # well within workspace
        error = vector3(100, 100, 100)

        while np.linalg.norm(error) > e:
            error = delta(sTt_desired, self.fk(q))
            J = self.jacobian(q)
            q -= np.dot(np.linalg.pinv(J), error)

        # return result in interval [-pi,pi)
        return np.remainder(q+math.pi, 2*math.pi)-math.pi

    def manipulator_jacobian(self, q):
        """ Calculate manipulator Jacobian.
            Takes numpy array of joint angles, in radians.
            Returns the manipulator Jacobian of differential twists. When multiplied with
            a vector of joint velocities, will yield a single differential twist which is
            the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose.
            Just like always, differential twists can be hatted and multiplied with spatial
            coordinates of a point to give the spatial velocity of the point.
        """
        l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
        l2Zl2 = Pose2.Expmap(self.xi2 * q[1])

        # each column is the joint twist transported by the preceding joints
        p1 = self.xi1
        sTl1 = l1Zl1
        p2 = sTl1.Adjoint(self.xi2)
        sTl2 = compose(l1Zl1, l2Zl2)
        p3 = sTl2.Adjoint(self.xi3)
        differential_twists = [p1, p2, p3]
        return np.stack(differential_twists, axis=1)

    def plot(self, fignum, q):
        """ Plot arm.
            Takes figure number, and numpy array of joint angles, in radians.
        """
        fig = plt.figure(fignum)
        axes = fig.gca()

        sXl1 = Pose2(0, 0, math.radians(90))
        p1 = sXl1.translation()
        gtsam_plot.plot_pose2_on_axes(axes, sXl1)

        def plot_line(p, g, color):
            # draw a segment from p to g's translation; return the new endpoint
            q = g.translation()
            line = np.append(p[np.newaxis], q[np.newaxis], axis=0)
            axes.plot(line[:, 0], line[:, 1], color)
            return q

        l1Zl1 = Pose2(0, 0, q[0])
        l1Xl2 = Pose2(self.L1, 0, 0)
        sTl2 = compose(sXl1, l1Zl1, l1Xl2)
        p2 = plot_line(p1, sTl2, 'r-')
        gtsam_plot.plot_pose2_on_axes(axes, sTl2)

        l2Zl2 = Pose2(0, 0, q[1])
        l2Xl3 = Pose2(self.L2, 0, 0)
        sTl3 = compose(sTl2, l2Zl2, l2Xl3)
        p3 = plot_line(p2, sTl3, 'g-')
        gtsam_plot.plot_pose2_on_axes(axes, sTl3)

        l3Zl3 = Pose2(0, 0, q[2])
        l3Xt = Pose2(self.L3, 0, 0)
        sTt = compose(sTl3, l3Zl3, l3Xt)
        plot_line(p3, sTt, 'b-')
        gtsam_plot.plot_pose2_on_axes(axes, sTt)
# Create common example configurations (joint angles in radians),
# shared by the unit tests below.
Q0 = vector3(0, 0, 0)                    # rest configuration
Q1 = np.radians(vector3(-30, -45, -90))  # example configuration used in the tests
Q2 = np.radians(vector3(-90, 90, 0))     # example configuration used in the tests
class TestPose2SLAMExample(GtsamTestCase):
    """Unit tests for the ThreeLinkArm kinematics functions used below.

    NOTE(review): despite the name, these tests exercise the arm kinematics,
    not Pose2 SLAM; the name is kept to avoid breaking test discovery
    configuration that may reference it.
    """

    def setUp(self):
        self.arm = ThreeLinkArm()

    def assertPose2Equals(self, actual, expected, tol=1e-2):
        """Helper function that prints out actual and expected if not equal."""
        equal = actual.equals(expected, tol)
        if not equal:
            raise self.failureException(
                "Poses are not equal:\n{}!={}".format(actual, expected))

    def test_fk_arm(self):
        """Make sure forward kinematics is correct for some known test configurations."""
        # at rest
        expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
        sTt = self.arm.fk(Q0)
        self.assertIsInstance(sTt, Pose2)
        self.assertPose2Equals(sTt, expected)

        # -30, -45, -90
        expected = Pose2(5.78, 1.52, math.radians(-75))
        sTt = self.arm.fk(Q1)
        self.assertPose2Equals(sTt, expected)

    def test_jacobian(self):
        """Test Jacobian calculation."""
        # np.float was a deprecated alias for the builtin float (removed in
        # NumPy 1.24); using float directly is identical in behavior.
        # at rest
        expected = np.array([[-9.5, -6, -2.5], [0, 0, 0], [1, 1, 1]], float)
        J = self.arm.jacobian(Q0)
        np.testing.assert_array_almost_equal(J, expected)

        # at -90, 90, 0
        expected = np.array([[-6, -6, -2.5], [3.5, 0, 0], [1, 1, 1]], float)
        J = self.arm.jacobian(Q2)
        np.testing.assert_array_almost_equal(J, expected)

    def test_con_arm(self):
        """Make sure POE is correct for some known test configurations."""
        # at rest
        expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
        sTt = self.arm.con(Q0)
        self.assertIsInstance(sTt, Pose2)
        self.assertPose2Equals(sTt, expected)

        # -30, -45, -90
        expected = Pose2(5.78, 1.52, math.radians(-75))
        sTt = self.arm.con(Q1)
        self.assertPose2Equals(sTt, expected)

    def test_poe_arm(self):
        """Make sure POE is correct for some known test configurations."""
        # at rest
        expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
        sTt = self.arm.poe(Q0)
        self.assertIsInstance(sTt, Pose2)
        self.assertPose2Equals(sTt, expected)

        # -30, -45, -90
        expected = Pose2(5.78, 1.52, math.radians(-75))
        sTt = self.arm.poe(Q1)
        self.assertPose2Equals(sTt, expected)

    def test_ik(self):
        """Check iterative inverse kinematics function."""
        # at rest
        actual = self.arm.ik(Pose2(0, 2*3.5 + 2.5, math.radians(90)))
        np.testing.assert_array_almost_equal(actual, Q0, decimal=2)

        # -30, -45, -90
        sTt_desired = Pose2(5.78, 1.52, math.radians(-75))
        actual = self.arm.ik(sTt_desired)
        self.assertPose2Equals(self.arm.fk(actual), sTt_desired)
        np.testing.assert_array_almost_equal(actual, Q1, decimal=2)

    def test_manipulator_jacobian(self):
        """Test Jacobian calculation."""
        # at rest
        expected = np.array([[0, 3.5, 7], [0, 0, 0], [1, 1, 1]], float)
        J = self.arm.manipulator_jacobian(Q0)
        np.testing.assert_array_almost_equal(J, expected)

        # at -90, 90, 0
        expected = np.array(
            [[0, 0, 3.5], [0, -3.5, -3.5], [1, 1, 1]], float)
        J = self.arm.manipulator_jacobian(Q2)
        np.testing.assert_array_almost_equal(J, expected)
def run_example():
    """ Use trajectory interpolation and then trajectory tracking a la Murray
        to move a 3-link arm on a straight line.
    """
    # Create arm
    arm = ThreeLinkArm()

    # Get initial pose using forward kinematics
    q = np.radians(vector3(30, -30, 45))
    sTt_initial = arm.fk(q)

    # Create interpolated trajectory in task space to desired goal pose
    sTt_goal = Pose2(2.4, 4.3, math.radians(0))
    poses = trajectory(sTt_initial, sTt_goal, 50)

    # Setup figure and plot initial pose
    fignum = 0
    fig = plt.figure(fignum)
    axes = fig.gca()
    axes.set_xlim(-5, 5)
    axes.set_ylim(0, 10)
    gtsam_plot.plot_pose2(fignum, arm.fk(q))

    # For all poses in interpolated trajectory, calculate dq to move to next pose.
    # We do this by calculating the local Jacobian J and doing dq = inv(J)*delta(sTt, pose).
    for pose in poses:
        sTt = arm.fk(q)
        error = delta(sTt, pose)
        J = arm.jacobian(q)
        # NOTE(review): uses a plain inverse, so this fails if J is singular
        # along the trajectory -- pinv would be the robust choice; confirm
        # the example trajectory avoids singular configurations.
        q += np.dot(np.linalg.inv(J), error)
        arm.plot(fignum, q)
        plt.pause(0.01)

    # Keep the final frame visible for a while.
    plt.pause(10)
if __name__ == "__main__":
    # Run the interactive demo first (blocks for ~10 s), then the unit tests.
    run_example()
    unittest.main()
| 2.328125 | 2 |
MiddleKit/Core/BoolAttr.py | PeaceWorksTechnologySolutions/w4py | 18 | 12758321 | from BasicTypeAttr import BasicTypeAttr
class BoolAttr(BasicTypeAttr):
    """Model attribute holding a boolean value.

    All behavior is inherited from BasicTypeAttr; this subclass exists so the
    attribute type can be selected by class name.
    """
    pass
| 1.773438 | 2 |
attendees/persons/migrations/0006_attendee.py | xjlin0/attendees32 | 0 | 12758322 | # Generated by Django 3.0.2 on 2020-01-13 02:58
from uuid import uuid4

import django.contrib.postgres.indexes
import django.utils.timezone
import model_utils.fields
import partial_date.fields
from django.conf import settings
from django.db import migrations, models
from private_storage.fields import PrivateFileField
from private_storage.storage.files import PrivateFileSystemStorage

from attendees.persons.models import Utility
from attendees.persons.models.enum import GenderEnum
class Migration(migrations.Migration):
    """Create the Attendee model (UUID pk, soft delete, JSONB infos) plus its
    pghistory shadow table AttendeesHistory, and run the project's default
    trigger SQL for both tables.
    """

    # NOTE(review): the AddIndex operation below references
    # django.contrib.postgres.indexes.GinIndex, but this module only imports
    # django.utils.timezone -- confirm django.contrib.postgres.indexes is
    # imported, otherwise loading this migration raises AttributeError.

    dependencies = [
        ('persons', '0005_folk'),
        ('whereabouts', '0004_division'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pghistory', '0003_auto_20201023_1636'),
    ]

    operations = [
        migrations.CreateModel(
            name='Attendee',
            fields=[
                ('id', models.UUIDField(default=uuid4, editable=False, primary_key=True, serialize=False)),
                ('user', models.OneToOneField(blank=True, default=None, null=True, on_delete=models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('is_removed', models.BooleanField(default=False)),
                ('division', models.ForeignKey(default=0, blank=False, null=False, on_delete=models.SET(0), to='whereabouts.Division')),
                ('first_name', models.CharField(blank=True, db_index=True, max_length=25, null=True)),
                ('last_name', models.CharField(blank=True, db_index=True, max_length=25, null=True)),
                ('first_name2', models.CharField(blank=True, db_index=True, max_length=12, null=True)),
                ('last_name2', models.CharField(blank=True, db_index=True, max_length=8, null=True)),
                ('gender', models.CharField(choices=GenderEnum.choices(), default=GenderEnum.UNSPECIFIED, max_length=11)),
                ('photo', PrivateFileField(blank=True, null=True, storage=PrivateFileSystemStorage(), upload_to='attendee_portrait', verbose_name='Photo')),
                ('actual_birthday', models.DateField(blank=True, null=True)),
                ('estimated_birthday', partial_date.fields.PartialDateField(blank=True, null=True, help_text='1998, 1998-12 or 1992-12-31, please enter 1800 if year not known')),
                ('deathday', models.DateField(blank=True, null=True)),
                # ('progressions', models.JSONField(blank=True, default=dict, help_text='Example: {"Christian": true, "baptized": {"time": "12/31/2020", "place":"SF"}}. Please keep {} here even no data', null=True)),
                ('infos', models.JSONField(blank=True, default=Utility.attendee_infos, help_text=('Example: {"fixed": {"food_pref": "peanut allergy", "nick_name": "John"}}.Please keep {} here even no data'), null=True)),
            ],
            options={
                'db_table': 'persons_attendees',
                'ordering': ['last_name', 'first_name'],
            },
            bases=(Utility, models.Model),
        ),
        # Install the project's standard triggers/defaults for the new table.
        migrations.RunSQL(Utility.default_sql('persons_attendees')),
        # migrations.RunSQL(
        #     sql="""
        #     ALTER TABLE persons_attendees DROP COLUMN full_name;
        #     ALTER TABLE persons_attendees ADD COLUMN full_name VARCHAR(70)
        #               GENERATED ALWAYS AS (TRIM(
        #                   COALESCE(first_name, '') || ' ' ||
        #                   COALESCE(last_name, '')  || ' ' ||
        #                   COALESCE(last_name2, '') ||
        #                   COALESCE(first_name2, '')
        #               )) STORED;
        #     CREATE INDEX attendee_full_name_raw
        #               ON persons_attendees (full_name);
        #     """,
        #     # reverse_sql="",
        # ),  # switching to use opencc for language conversion in Attendee.save()
        migrations.AddIndex(
            model_name='attendee',
            index=django.contrib.postgres.indexes.GinIndex(fields=['infos'], name='attendee_infos_gin'),
        ),
        # migrations.AddIndex(
        #     model_name='attendee',
        #     index=django.contrib.postgres.indexes.GinIndex(fields=['progressions'], name='attendee_progressions_gin'),
        # ),
        migrations.CreateModel(
            name='AttendeesHistory',
            fields=[
                ('pgh_id', models.BigAutoField(primary_key=True, serialize=False)),
                ('pgh_created_at', models.DateTimeField(auto_now_add=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('is_removed', models.BooleanField(default=False)),
                ('pgh_obj', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.DO_NOTHING, related_name='history', to='persons.attendee')),
                ('id', models.UUIDField(db_index=True, default=uuid4, editable=False, serialize=False)),
                ('division', models.ForeignKey(db_constraint=False, default=0, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', related_query_name='+', to='whereabouts.division')),
                ('gender', models.CharField(choices=GenderEnum.choices(), default=GenderEnum['UNSPECIFIED'], max_length=11)),
                ('pgh_label', models.TextField(help_text='The event label.')),
                ('user', models.ForeignKey(blank=True, db_constraint=False, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', related_query_name='+', to=settings.AUTH_USER_MODEL)),
                ('pgh_context', models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='pghistory.context')),
                ('infos', models.JSONField(blank=True, default=Utility.attendee_infos, help_text='Example: {"fixed": {"food_pref": "peanut allergy", "nick_name": "John"}}.Please keep {} here even no data', null=True)),
                ('first_name', models.CharField(blank=True, max_length=25, null=True)),
                ('last_name', models.CharField(blank=True, max_length=25, null=True)),
                ('first_name2', models.CharField(blank=True, max_length=12, null=True)),
                ('last_name2', models.CharField(blank=True, max_length=8, null=True)),
                ('photo', PrivateFileField(blank=True, null=True, storage=PrivateFileSystemStorage(), upload_to='attendee_portrait', verbose_name='Photo')),
                ('actual_birthday', models.DateField(blank=True, null=True)),
                ('estimated_birthday', partial_date.fields.PartialDateField(blank=True, help_text='1998, 1998-12 or 1992-12-31, please enter 1800 if year not known', null=True)),
                ('deathday', models.DateField(blank=True, null=True)),
            ],
            options={
                'db_table': 'persons_attendeeshistory',
            },
        ),
        migrations.RunSQL(Utility.pgh_default_sql('persons_attendeeshistory')),
    ]
| 1.789063 | 2 |
IntelligentUAVPathPlanningSimulationSystem/core/main.py | wangwei39120157028/IntelligentUAVPathPlanningSimulationSystem-Drone | 208 | 12758323 | # -*- coding:utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from appUI.MainWindow import main
if __name__ == "__main__":
    # Launch the simulator's main window (defined in appUI.MainWindow).
    main()
| 1.648438 | 2 |
harvester/cli.py | MITLibraries/oai-pmh-harvester | 4 | 12758324 | #!/usr/bin/env python
from datetime import date, timedelta
import logging
import sys
import click
from sickle import Sickle
from sickle.iterator import OAIItemIterator
from sickle.oaiexceptions import NoRecordsMatch
from smart_open import open
# Default harvest window: from yesterday until tomorrow, as ISO dates.
# date.isoformat() yields exactly the 'YYYY-MM-DD' form the CLI expects.
yesterday = (date.today() - timedelta(days=1)).isoformat()
tomorrow = (date.today() + timedelta(days=1)).isoformat()
@click.command()
@click.option('--host',
              default="https://dspace.mit.edu/oai/request",
              help='hostname of OAI-PMH server to harvest from')
@click.option('--from_date',
              default=yesterday,
              help='from date format: YYYY-MM-DD')
@click.option('--until',
              default=tomorrow,
              help='until date format: YYYY-MM-DD')
@click.option('--format',
              default='oai_dc',
              help='Add metadata type (e.g. mods, mets, oai_dc, qdc, ore)')
@click.option('--set', default=None, help='set to be harvested.')
@click.option('--out', default='out.xml', help='Filepath to write output')
@click.option('--verbose', help='Enable debug output', is_flag=True)
def harvest(host, from_date, until, format, out, set, verbose):
    """Harvest records from an OAI-PMH endpoint into a single XML file.

    Lists all identifiers matching the date window (and optional set), then
    fetches each record individually and streams it into ``out`` wrapped in
    a <records> root element. Exits early if no identifiers match.
    """
    # NOTE: ``format`` and ``set`` shadow Python builtins, but renaming them
    # would change the CLI option names click derives from the parameters.
    counter = 0  # number of records actually written out
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.info("OAI-PMH harvesting from %s", host)
    logging.info("From date = %s", from_date)
    logging.info("Until date = %s", until)
    logging.info("Metadata format = %s", format)
    logging.info("Outfile = %s", out)

    mysickle = Sickle(host, iterator=OAIItemIterator)
    params = {'metadataPrefix': format,
              'from': from_date,
              'until': until
              }
    if set is not None:
        params['set'] = set
    try:
        responses = mysickle.ListIdentifiers(
            **params)
    except NoRecordsMatch:
        logging.info("No records harvested: the combination of the values of "
                     "the arguments results in an empty list.")
        sys.exit()

    # Materialize the identifier list first so we can log the total count
    # before fetching any full records.
    identifier_list = []
    for records in responses:
        identifier_list.append(records.identifier)
    logging.info(f"Identifier count to harvest: {len(identifier_list)}")

    # smart_open supports local paths and remote schemes (e.g. s3://).
    with open(out, 'wb') as f:
        f.write('<records>'.encode())
        for identifier in identifier_list:
            r = mysickle.GetRecord(identifier=identifier,
                                   metadataPrefix=format)
            f.write(r.raw.encode('utf8'))
            logging.debug(counter)
            logging.debug(r.raw)
            counter += 1
        f.write('</records>'.encode())
    logging.info("Total records harvested: %i", counter)
if __name__ == "__main__":
    # click parses sys.argv and invokes harvest() with the parsed options.
    harvest()
| 2.21875 | 2 |
examples/dispatcher.py | nicoddemus/osbrain | 0 | 12758325 | import time
from osbrain import run_agent
from osbrain import run_nameserver
def rep_handler(agent, message):
    """REP-socket handler: hand out the next pending task.

    Pops and returns the last task from ``agent.tasks``; returns None once
    the list is empty, which tells the requesting worker to shut down.
    """
    return agent.tasks.pop() if agent.tasks else None
def request_work(agent):
    """Worker loop body: request a task from the dispatcher and execute it.

    An empty/None reply means no work is left, so the worker shuts itself
    down. Otherwise the task value is a duration: sleep for that long to
    simulate work, then push a completion message to the results sink.
    """
    x = agent.send_recv('dispatcher', 'READY!')
    if not x:
        agent.shutdown()
        return
    time.sleep(x)
    agent.send('results', '%s finished with %s' % (agent.name, x))
if __name__ == '__main__':
    ns = run_nameserver()

    # Sink agent: logs every result message pushed by the workers.
    results = run_agent('Results')
    results_addr = results.bind('PULL', handler=lambda x, y: x.log_info(y))

    # Dispatcher hands out tasks (sleep durations) over a REP socket.
    dispatcher = run_agent('Dispatcher')
    dispatcher.set_attr(tasks=[1, 1, 1, 1, 5, 1, 1, 1, 1, 5])
    dispatcher_addr = dispatcher.bind('REP', alias='rep', handler=rep_handler)

    # Five workers repeatedly poll for work (period 0 = as fast as possible)
    # until the dispatcher runs out of tasks and they shut themselves down.
    for i in range(5):
        worker = run_agent('Worker%s' % i)
        worker.connect(results_addr, alias='results')
        worker.connect(dispatcher_addr, alias='dispatcher')
        worker.each(0., request_work)

    # Wait until only the dispatcher and results agents remain, then stop.
    while len(ns.agents()) > 2:
        time.sleep(0.1)

    ns.shutdown()
| 2.328125 | 2 |
examples/wolf_sheep/post/plot_usage.py | janursa/cpp_py_ABM | 1 | 12758326 | <reponame>janursa/cpp_py_ABM<filename>examples/wolf_sheep/post/plot_usage.py
import plotly.graph_objs as go
import plotly.express as px
import numpy as np
import pandas as pd
from plotly.subplots import make_subplots
from copy import deepcopy
class SETTINGS:
    """Static plot configuration: input files, labels and styling."""
    # Input CSV files, one per implementation variant (C++, Python, mixed).
    cpu_file_names = ['cpus_cpp_004.csv', 'cpus_py_004.csv', 'cpus_cppy_004.csv']
    memory_file_names = ['memorys_cpp_004.csv', 'memorys_py_004.csv', 'memorys_cppy_004.csv']
    # xs = [1,2,3,4]
    xs = [0, 1, 2]  # x positions of the three variants on the plot
    # postfixes = ['Cpp','Py','Cppy','Pyy']
    labels = ['Cpp', 'Py', 'Cppy']  # tick labels, parallel to xs / file lists
    line_types = ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
    colors = ['DarkSlateGrey', 'black', 'DarkViolet']
    markers = ['circle', 'cross', 'diamond']
    fig_name = "usages.csv"  # output figure base name (saved as <name>.svg)
    ytitle = 'Megabytes/Seconds'
    xtitle = 'Model'
def plot(fig, stack_data, i, name, x):
    """Add one scatter trace of per-variant means with min/max error bars.

    Args:
        fig: plotly Figure to draw into (returned for chaining).
        stack_data: one sequence of samples per x position.
        i: index into SETTINGS.colors / SETTINGS.markers for this trace.
        name: legend label of the trace.
        x: intended x positions -- currently unused because the ``x=``
           argument of the trace is commented out below.
    """
    means = []
    max_stds = []   # distance from mean up to the max sample
    min_stds = []   # distance from mean down to the min sample
    for data in stack_data:
        mean_value = np.mean(data)
        std_upp = max(data) - mean_value
        std_down = mean_value - min(data)
        means.append(mean_value)
        max_stds.append(std_upp)
        min_stds.append(std_down)
    fig.add_trace(go.Scatter(
        # x=x,
        y=means,
        name=name,
        error_y=dict(
            type='data',
            symmetric=False,
            array=max_stds,
            arrayminus=min_stds,
            width=10,
            thickness=4
        ),
        mode='markers',
        marker=dict(size=10,
                    color=SETTINGS.colors[i]),
        marker_symbol=SETTINGS.markers[i]
    )
        # line = dict(width=3, dash=line_types[0],color =colors[0] )
    )
    return fig
def update_layout(fig):
    """Apply the shared figure styling (size, fonts, axes, legend, margins)
    and return the figure."""
    fig.update_layout(
        width=450,  # TODO: to go
        height=500,  # TODO: to go
        xaxis=dict(
            title=SETTINGS.xtitle,
            titlefont=dict(
                family='Times New Roman',
                size=30,
                color='black'
            ),
            showgrid=True,
            mirror=True,
            showline=True,
            # zeroline = False,
            linecolor='black',
            gridwidth=20,
            tickfont=dict(
                family='Times New Roman',
                size=30,
                color='black'
            )
        ),
        yaxis=dict(
            title=SETTINGS.ytitle,
            titlefont=dict(
                family='Times New Roman',
                size=30,
                color='black'
            ),
            showgrid=True,
            mirror=True,
            showline=True,
            # zeroline = False,
            linecolor='black',
            gridwidth=20,
            tickfont=dict(
                family='Times New Roman',
                size=30,
                color='black'
            )
        ),
        # Transparent backgrounds so the SVG can sit on any page color.
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        legend=dict(
            orientation="h",
            x=0,
            y=1.2,
            font=dict(
                family='Times New Roman',
                size=30,
                color='#000'
            ),
            bordercolor='DarkSeaGreen'
            # borderwidth=1
        ),
        margin=dict(
            l=50,
            r=50,
            b=100,
            t=100,
            pad=4
        )
    )
    # Re-apply y-axis styling with a fixed tick spacing of 30 units.
    fig.update_yaxes(
        automargin=True,
        showgrid=True,
        mirror=True,
        showline=True,
        linecolor='black',
        gridwidth=20,
        dtick=30,
        tickfont=dict(
            family='Times New Roman',
            size=30,
            color='black'
        ),
        zeroline=False)
    return fig
def change_unit(data, type_t):
    """Convert a raw usage sample to plot units.

    Args:
        data: raw sample(s) (scalar or pandas Series).
        type_t: 'CPU' (scaled by 1/1000) or 'Memory' (bytes -> megabytes).

    Returns:
        The scaled value(s).

    Raises:
        ValueError: for an unrecognized ``type_t``. The original silently
            returned None here, which hid typos in the caller.
    """
    if type_t == 'CPU':
        return data / 1000
    if type_t == 'Memory':
        return data / 1000000
    raise ValueError('unknown usage type: {!r}'.format(type_t))
if __name__ == '__main__':
    # Load memory/CPU samples for each implementation variant.
    stack_memory = []
    stack_cpu = []
    for i in range(len(SETTINGS.labels)):
        label = SETTINGS.labels[i]
        cpu_file_name = SETTINGS.cpu_file_names[i]
        memory_file_name = SETTINGS.memory_file_names[i]
        cpu_data = pd.read_csv(cpu_file_name)
        memory_data = pd.read_csv(memory_file_name)
        if label == 'Cpp':
            # Only the C++ variant's CPU column needs the extra 1/1000 scaling
            # -- presumably sampled in different units; TODO confirm.
            cpu_data['CPU'] = change_unit(cpu_data['CPU'], 'CPU')
        stack_cpu.append(cpu_data['CPU'])
        stack_memory.append(change_unit(memory_data['Memory'], 'Memory'))

    # One trace each for memory and CPU; CPU is nudged left by 0.2 so the
    # error bars do not overlap (though the traces' x= is currently unused).
    fig = go.Figure()
    fig = plot(fig, stack_memory, i=0, name='Memory', x=SETTINGS.xs)
    fig = plot(fig, stack_cpu, i=2, name='CPU', x=[x - 0.2 for x in SETTINGS.xs])
    fig = update_layout(fig)
    fig.update_xaxes(automargin=True, showgrid=False, zeroline=False,
                     tickvals=SETTINGS.xs, ticktext=SETTINGS.labels, range=[SETTINGS.xs[0] - 0.5, SETTINGS.xs[-1] + 0.5])
    # fig.update_yaxes(range = [0,220])
    fig.write_image(SETTINGS.fig_name + '.svg')
| 2.1875 | 2 |
dbReports/iondb/rundb/login/middleware.py | sequencer2014/TS | 0 | 12758327 | <gh_stars>0
# Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
import logging
log = logging.getLogger(__name__)
class BasicAuthMiddleware:
    """
    Simple HTTP-Basic auth for testing webservices
    Used for optional HTTP Auth - validated in django not at apache layer.
    Use in conjunction with session auth to provide alternatives for users.
    """

    def process_request(self, request):
        """Log a user in from an HTTP Basic ``Authorization`` header, if any.

        Always returns None so the request falls through to other
        authentication mechanisms regardless of the outcome.
        """
        auth_header = request.META.get('HTTP_AUTHORIZATION', None)
        if auth_header is None:
            # Do nothing, fall through to other methods
            return None

        auth = auth_header.split()
        if len(auth) == 2:
            # NOTE: We are only support basic authentication for now.
            if auth[0].lower() == "basic":
                import base64
                try:
                    # Split on the FIRST colon only: RFC 7617 allows the
                    # password to contain colons, which the original
                    # split(':') (unlimited splits) corrupted. Also guard
                    # against undecodable base64 payloads.
                    username, password = base64.b64decode(auth[1]).split(':', 1)
                except (TypeError, ValueError):
                    log.debug("Malformed basic auth credentials received")
                    return None
                user = authenticate(username=username, password=password)
                if user is not None:
                    if user.is_active:
                        login(request, user)
                    else:
                        log.debug("Failed login attempt for user '%s'. INACTIVE USER", user)
                else:
                    log.debug("Attempt to auth with '%s' auth: NOT SUPPORTED", auth[0])
        else:
            log.debug("Unrecognized HTTP_AUTHORIZATION header received: '%s'", request.META['HTTP_AUTHORIZATION'])

        return None
| 2.6875 | 3 |
mode/examples/Basics/Shape/LoadDisplayOBJ/LoadDisplayOBJ.pyde | kazimuth/processing.py | 4 | 12758328 | <reponame>kazimuth/processing.py<gh_stars>1-10
"""
Load and Display an OBJ Shape.
The loadShape() command is used to read simple SVG (Scalable Vector Graphics)
files and OBJ (Object) files into a Processing sketch. This example loads an
OBJ file of a rocket and displays it to the screen.
"""
ry = 0
def setup():
    # 3D canvas is required to render the OBJ model.
    size(640, 360, P3D)
    global rocket
    # Load the model once; it is redrawn every frame in draw().
    rocket = loadShape("rocket.obj")
def draw():
    """Render one frame: the rocket under default lights, slowly spinning."""
    # Without this declaration, "ry += 0.02" below makes ry a local variable
    # and the first frame raises UnboundLocalError.
    global ry
    background(0)
    lights()
    translate(width / 2, height / 2 + 100, -200)
    rotateZ(PI)  # flip the model upright
    rotateY(ry)
    shape(rocket)
    ry += 0.02  # advance the spin for the next frame
| 3.015625 | 3 |
tasks/day_09.py | scyberboy/adventofcode_2021 | 0 | 12758329 | import operator
import os
import re
import sys
import time
from collections import deque
from functools import reduce
import numpy as np
def read_input() -> list[list[int]]:
    """Read the height map from stdin as rows of digits.

    Each input line like '2199943210' becomes a row of ints, and the whole
    grid is padded with a border of 9s (the maximum height) on all four
    sides so low-point detection never has to special-case the edges.
    """
    grid = []
    for raw_line in sys.stdin:
        grid.append([int(digit) for digit in '9' + raw_line.strip() + '9'])
    width = len(grid[0])
    grid.insert(0, [9] * width)
    grid.append([9] * width)
    return grid
def is_low_point(height_map: np.array, x_curr: int, y_curr: int) -> bool:
    """Return True iff the cell is strictly lower than all four orthogonal
    neighbours (the grid's 9-border guarantees the neighbours exist)."""
    here = height_map[x_curr, y_curr]
    neighbours = (
        height_map[x_curr, y_curr - 1],
        height_map[x_curr, y_curr + 1],
        height_map[x_curr - 1, y_curr],
        height_map[x_curr + 1, y_curr],
    )
    return bool(here < min(neighbours))
def find_low_points(height_map: np.array) -> list[(int, int), int]:
    """Scan the interior of the padded height map and collect every low
    point as [(row, col), height]. The 9-border rows/columns are skipped."""
    rows, cols = height_map.shape
    found = []
    for row in range(1, rows - 1):
        for col in range(1, cols - 1):
            if is_low_point(height_map, row, col):
                found.append([(row, col), height_map[row, col]])
    return found
def find_solution_a(low_points: list[(int, int), int]) -> int:
    """Part 1 answer: the total risk level, i.e. (height + 1) summed over
    every low point."""
    return sum(height + 1 for _, height in low_points)
def generate_basin(height_map: np.array, x_y: (int, int)) -> list[(int, int)]:
    """Flood-fill outward from a low point and return the basin's cells.

    Expands to any 4-neighbour that is not height 9 and is strictly higher
    than the current cell (heights rise away from the low point), stopping
    at ridges of 9. Returns the basin as a list of (row, col) tuples.
    """
    basin = set()
    candidates = deque()
    candidates.append(x_y)
    # 4-connectivity offsets: left, down, right, up.
    neigh_offsets = [(0, -1), (1, 0), (0, 1), (-1, 0)]
    while len(candidates) > 0:
        # use DFS (depth first search) by simulating store in stack (FIFO)
        x_curr, y_curr = candidates.pop()
        # use BFS (breadth first search) by simulating store in queue (FILO)
        # x_curr, y_curr = candidates.popleft()
        # NOTE: this is not optimal (maybe due to python implementation)
        # but that's the fact, BFS is 3 times slower than DFS
        # both methods should work
        basin.add((x_curr, y_curr))
        for x_off, y_off in neigh_offsets:
            candi_x = x_curr + x_off
            candi_y = y_curr + y_off
            cur_val = height_map[x_curr][y_curr]
            candi_val = height_map[candi_x][candi_y]
            # A cell may be pushed more than once before being visited;
            # the set membership check keeps the fill from looping forever.
            if candi_val != 9 and candi_val > cur_val and\
                    (candi_x, candi_y) not in basin:
                candidates.append((candi_x, candi_y))
    return list(basin)
def find_solution_b(height_map: np.array, low_points: list[(int, int), int]) -> int:
    """Part 2 answer: the product of the sizes of the three largest basins,
    one basin grown from each low point."""
    sizes = []
    for coordinates, _ in low_points:
        members = generate_basin(height_map, coordinates)
        if len(members) > 0:
            sizes.append(len(members))
    sizes.sort(reverse=True)
    product = 1
    for size in sizes[:3]:
        product *= size
    return product
def do_main():
    """Read the puzzle input from stdin, time each phase with
    time.process_time(), and print the answers for both parts."""
    prev_time = time.process_time()
    print("start reading input...")
    data = read_input()
    cur_time = time.process_time()
    diff = cur_time - prev_time
    prev_time = cur_time
    print("[{}] took: {} sec.".format(cur_time, diff))
    # print("input data: {}".format(data))

    print("generate low points...")
    height_map = np.array(data)
    low_points = find_low_points(height_map)
    cur_time = time.process_time()
    diff = cur_time - prev_time
    prev_time = cur_time
    print("[{}] took: {} sec.".format(cur_time, diff))

    print("find_solution_a...")
    result_a = find_solution_a(low_points)
    print("result_a:", result_a)
    cur_time = time.process_time()
    diff = cur_time - prev_time
    prev_time = cur_time
    print("[{}] took: {} sec.".format(cur_time, diff))

    print("find_solution_b...")
    result_b = find_solution_b(height_map, low_points)
    print("result_b:", result_b)
    cur_time = time.process_time()
    diff = cur_time - prev_time
    # prev_time = cur_time
    print("[{}] took: {} sec.".format(cur_time, diff))
if __name__ == "__main__":
    # execute only if run as a script
    filename = os.path.basename(__file__)
    # Extract the day number from a file name like "day_09.py".
    day_nr = re.search(r"\d+", filename).group()
    print("day_nr:", day_nr)
    do_main()
| 3.09375 | 3 |
taskrun/cluster_task.py | gaomy3832/taskrun | 8 | 12758330 | """
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior
* written permission.
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import os
import subprocess
import re
import threading
from .task import Task
class ClusterTask(Task):
    """
    This class is a Task that runs as a cluster process.

    The task is submitted synchronously to the cluster scheduler (SGE's
    qsub, LSF's bsub, or Slurm's srun) via a local subprocess, so execute()
    blocks until the cluster job completes. execute() and kill() coordinate
    through an internal lock.
    """

    @staticmethod
    def supported_modes():
        """
        Returns a list of supported modes.
        """
        return ['sge', 'lsf', 'slurm']

    def __init__(self, manager, name, command, mode):
        """
        This instantiates a ClusterTask object with a subprocess command

        Args:
          manager (TaskManager) : passed to Task.__init__()
          name (str)            : passed to Task.__init__()
          command (str)         : the command to be run
          mode (str)            : name of cluster scheduler
        """
        super().__init__(manager, name)
        self._command = command
        assert mode in self.supported_modes(), 'invalid scheduler name: ' + mode
        self._mode = mode
        self._stdout_file = None
        self._stderr_file = None
        self._log_file = None
        self._queues = set()
        self._cluster_resources = dict()
        self._cluster_options = list()
        self.stdout = None
        self.stderr = None
        self.returncode = None
        self._proc = None
        # Serializes process creation in execute() against kill().
        self._lock = threading.Lock()

    @property
    def command(self):
        """
        Returns:
          (str) : the process's command
        """
        return self._command

    @command.setter
    def command(self, value):
        """
        Sets the process's command

        Args:
          value (str) : the new command
        """
        self._command = value

    @property
    def stdout_file(self):
        """
        Returns:
          (str) : filename for stdout text
        """
        return self._stdout_file

    @stdout_file.setter
    def stdout_file(self, filename):
        """
        Sets the filename of the stdout text

        Args:
          filename (str) : a filename for stdout text
        """
        self._stdout_file = filename

    @property
    def stderr_file(self):
        """
        Returns:
          (str) : filename for stderr text
        """
        return self._stderr_file

    @stderr_file.setter
    def stderr_file(self, filename):
        """
        Sets the filename of the stderr text.

        Args:
          filename (str) : a filename for stderr text
        """
        self._stderr_file = filename

    @property
    def log_file(self):
        """
        Returns:
          (str) : filename for job log
        """
        return self._log_file

    @log_file.setter
    def log_file(self, filename):
        """
        Sets the filename of the job log.

        Args:
          filename (str) : a filename for job log
        """
        self._log_file = filename

    @property
    def queues(self):
        """
        Returns:
          (set<str>) : the queues allowed to run in
        """
        return self._queues

    @queues.setter
    def queues(self, value):
        """
        Sets the allowed queues to run in

        Args:
          value (strs): the queues
        """
        self._queues = set(value)

    @property
    def cluster_resources(self):
        """
        Returns:
          (dict<str,str>) : the resources in the cluster
        """
        return self._cluster_resources

    @cluster_resources.setter
    def cluster_resources(self, value):
        """
        Sets the cluster resources for this task

        Args:
          value (strs): the resources
        """
        self._cluster_resources = dict(value)

    @property
    def cluster_options(self):
        """
        Returns:
          (dict<str,str>) : the options in the cluster
        """
        return self._cluster_options

    @cluster_options.setter
    def cluster_options(self, value):
        """
        Sets the cluster options for this task

        Args:
          value (strs): the options
        """
        self._cluster_options = list(value)

    def describe(self):
        """
        See Task.describe()
        """
        return self._build_command()

    def _build_command(self):
        """
        This builds the command string for this cluster task.

        Returns:
          (str) : the full command line
        """
        # SGE cluster task
        if self._mode == 'sge':
            cmd = ['qsub',
                   '-V',             # copy full environment
                   '-b', 'yes',      # execute binary file
                   '-sync', 'yes',   # wait for job to complete before exiting
                   '-cwd',           # use current working directory
                   '-N', self.name]  # name of the task
            # Unset stdout/stderr destinations are sent to /dev/null.
            if self._stdout_file:
                cmd.extend(['-o', self._stdout_file])
            else:
                cmd.extend(['-o', os.devnull])
            if self._stderr_file:
                cmd.extend(['-e', self._stderr_file])
            else:
                cmd.extend(['-e', os.devnull])
            if len(self._queues) > 0:
                cmd.extend(['-q', ','.join(self._queues)])
            if len(self._cluster_resources) > 0:
                cmd.extend(['-l', ','.join(
                    ['{0}={1}'.format(k, v)
                     for k, v in self._cluster_resources.items()])])
            cmd.append(self._command)
            return ' '.join(cmd)

        # LSF cluster task
        if self._mode == 'lsf':
            cmd = ['bsub', '-J', self.name]  # name of the task
            if self._stdout_file:
                cmd.extend(['-o', self._stdout_file])
            else:
                cmd.extend(['-o', os.devnull])
            if self._stderr_file:
                cmd.extend(['-e', self._stderr_file])
            else:
                cmd.extend(['-e', os.devnull])
            if len(self._queues) > 0:
                cmd.extend(['-q', ','.join(self._queues)])
            if len(self._cluster_resources) > 0:
                cmd.extend(
                    ['{0} {1}'.format(k, v) for k, v in self._cluster_resources.items()])
            cmd.append("--")
            # Escape double quotes since the whole line runs through a shell.
            cmd.append(re.sub('"', '\\"', self._command))
            return ' '.join(cmd)

        # Slurm cluster task
        if self._mode == 'slurm':
            cmd = ['srun', '-vv', '-J', self.name]
            if self._stdout_file:
                cmd.extend(['-o', self._stdout_file])
            else:
                cmd.extend(['-o', os.devnull])
            if self._stderr_file:
                cmd.extend(['-e', self._stderr_file])
            else:
                cmd.extend(['-e', os.devnull])
            if self._cluster_options:
                cmd.extend(self._cluster_options)
            cmd.append(self._command)
            return ' '.join(cmd)

        # __init__ asserts the mode, so this branch is unreachable.
        assert False, 'programmer error :('
        return None

    def execute(self):
        """
        See Task.execute()
        """
        # Only process creation is under the lock; communicate() must run
        # outside it so kill() (which also takes the lock) can interrupt.
        with self._lock:
            # If we're killed at this point, don't bother starting a new process.
            if self.killed:
                return None

            # format stderr output
            if self._log_file:
                stderr_fd = open(self._log_file, 'w')
            else:
                stderr_fd = subprocess.PIPE

            # start the command
            cmd = self._build_command()
            self._proc = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=stderr_fd, shell=True,
                start_new_session=True)

        # wait for the process to finish, collect output
        self.stdout, self.stderr = self._proc.communicate()
        if self.stdout is not None:
            self.stdout = self.stdout.decode('utf-8')
        if self.stderr is not None:
            self.stderr = self.stderr.decode('utf-8')

        # close the output file
        if self._log_file:
            # pylint: disable=maybe-no-member
            stderr_fd.close()

        # check the return code
        self.returncode = self._proc.returncode
        if self.returncode == 0:
            return None
        return self.returncode

    def kill(self):
        """
        See Task.kill()
        This implementation calls Popen.terminate()
        """
        with self._lock:
            # Don't kill if already completed or already killed
            if self.returncode is None and not self.killed:
                self.killed = True
                # there is a chance the proc hasn't been created yet
                if self._proc is not None:
                    try:
                        self._proc.terminate()
                    except ProcessLookupError:
                        # Process already exited between the check and the
                        # terminate call; nothing to do.
                        pass
| 1.5 | 2 |
src/average_of_numbers.py | Wanzaz/projects | 3 | 12758331 | print("Zadávejte celá čísla za každým Enter: nebo jen Enter pro ukončení")
total = 0
count = 0
while True:
line = input("číslo: ")
if line:
try:
number = int(line)
except ValueError as err:
print(err)
continue
total += number
count += 1
else:
break
if count:
print("počet =", count, "celkem =", total, "průměr =", total/count)
| 3.640625 | 4 |
js2py/internals/prototypes/jsboolean.py | renesugar/Js2Py | 1,926 | 12758332 | from __future__ import unicode_literals
from ..conversions import *
from ..func_utils import *
class BooleanPrototype:
    """Implementations of the JavaScript Boolean.prototype methods.

    ``this`` may be a primitive bool or a wrapped Boolean object; both
    methods follow the ECMAScript requirement that they throw a TypeError
    when applied to a non-Boolean receiver.
    """

    def toString(this, args):
        # Spec: not generic -- reject any receiver whose [[Class]] is not
        # 'Boolean'.
        if GetClass(this) != 'Boolean':
            raise MakeError('TypeError',
                            'Boolean.prototype.toString is not generic')
        if is_object(this):
            # Unwrap a Boolean wrapper object to its primitive value.
            this = this.value
        return u'true' if this else u'false'

    def valueOf(this, args):
        if GetClass(this) != 'Boolean':
            raise MakeError('TypeError',
                            'Boolean.prototype.valueOf is not generic')
        if is_object(this):
            this = this.value
        return this
| 2.46875 | 2 |
scripts/make_pdfs_both_masks.py | StevenM1/pystain | 0 | 12758333 | import h5py
import glob
import pandas
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import skimage
from skimage import feature
import scipy as sp
from scipy import ndimage
from natsort import natsorted
# Open the shared mask file; top-level keys are subject ids.
masks = h5py.File('/home/gdholla1/data/post_mortem/masks.hd5f')
subject_ids = masks.keys()
# NOTE(review): the line below overrides the full subject list and restricts
# processing to a single subject -- presumably left in for debugging; confirm.
subject_ids = ['14037']
for subject_id in subject_ids[:]:
    print subject_id
    from matplotlib.backends.backend_pdf import PdfPages
    # One multi-page PDF per subject, one page per slice.
    pdf = PdfPages('/home/gdholla1/projects/pm_quant/pdfs_both_masks/%s.pdf' % (subject_id))
    data = h5py.File('/home/gdholla1/data/post_mortem/%s.hd5f' % str(subject_id), 'r')
    slices = natsorted(data.keys())
    # Union of all stain names appearing in any slice, naturally sorted.
    stains = natsorted(np.unique(np.concatenate([e.keys() for e in data.values()])))
    n_slices = len(slices)
    n_stains = len(stains)
    for i, slice in enumerate(slices[:]):
        print slice
        fig, axs = plt.subplots(1, n_stains)
        fig.set_size_inches(n_stains * 10, 6)
        for ax in axs.ravel():
            ax.set_axis_off()
        if 'SMI32' in data[slice].keys():
            im = data[slice]['SMI32'].value
            # Edge map of the SMI32 stain: Canny edges, dilated, then masked
            # so only edge pixels are drawn when overlaid on other stains.
            diff_mag = feature.canny(im.astype(float), sigma=25, use_quantiles=True, low_threshold=0.7, high_threshold=0.8)
            diff_mag = diff_mag > 0
            diff_mag = ndimage.binary_dilation(diff_mag, iterations=1)
            diff_mag = np.ma.masked_equal(diff_mag, 0)
        for j, stain in enumerate(stains[:]):
            print stain
            plt.sca(axs[j])
            plt.title('{stain} - z = {slice}'.format(**locals()))
            if stain in data[slice].keys():
                im = data[slice][stain].value
                # Smooth the stain image before display.
                im = ndimage.gaussian_filter(im, 10)
                plt.imshow(im.T, origin='lower', cmap=plt.cm.hot)
                if 'SMI32' in data[slice].keys():
                    plt.imshow(diff_mag.T, origin='lower', cmap=plt.cm.hot, vmin=0, vmax=1)
            # Outline PARV-derived masks in blue and SMI32-derived masks in
            # green on the current axis.
            if (slice in masks[subject_id].keys()) and ('PARV' in masks[subject_id][slice].keys()):
                for key, mask in masks[subject_id][slice]['PARV'].items():
                    plt.contour(mask.value.T, origin='lower', linewidths=1, colors=['blue'], levels=[0, 1])
            if (slice in masks[subject_id].keys()) and ('SMI32' in masks[subject_id][slice].keys()):
                for key, mask in masks[subject_id][slice]['SMI32'].items():
                    print key, mask.value.sum()
                    plt.contour(mask.value.T, origin='lower', linewidths=1, colors=['green'], levels=[0, 1])
        plt.savefig(pdf, format='pdf')
        plt.close(fig)
    data.close()
    pdf.close()
masks.close()
| 1.875 | 2 |
model.py | android-energy/energy | 0 | 12758334 | <filename>model.py
import threading
import time
import Queue
from collections import namedtuple
from threading import _Event
class ComponentModel(object):
    """
    Base class for a component that runs on its own worker thread.

    Events are delivered through a thread-safe queue (on_event) and handled
    on the worker thread; subclasses implement the event handling, the
    periodic update and the current-value accessor.
    """
    def __init__(self):
        # self._tick = 1.0 #seconds
        self._running = False
        self._queue = Queue.Queue()
        self._thread = threading.Thread(target=self._loop)
    def get_current(self):
        # Subclasses return their current reading; summed by the owner.
        raise NotImplementedError()
    def _handle_event(self, event):
        # Called on the worker thread for each queued event.
        raise NotImplementedError()
    def _update(self):
        # Called once per loop iteration after event handling.
        raise NotImplementedError()
    def _loop(self):
        # Worker loop: drain at most one queued event per iteration, then
        # update.  NOTE(review): with the sleep commented out this loop
        # busy-spins; re-enabling self._tick/time.sleep would cut CPU usage.
        while self._running:
            try:
                event = self._queue.get_nowait()
                self._handle_event(event)
            except Queue.Empty:
                pass
            self._update()
            # time.sleep(self._tick)
    def on_event(self, event):
        # May be called from any thread; the queue provides the hand-off.
        self._queue.put(event)
    def start(self):
        # Idempotent: a second start() while running is a no-op.
        if self._running:
            return
        self._running = True
        self._thread.start()
        print '{s.__class__} started'.format(s=self)
    def stop(self):
        # Idempotent: stopping an already-stopped component is a no-op.
        if not self._running:
            return
        self._running = False
        self._thread.join()
        print '{s.__class__} halt'.format(s=self)
class Model(ComponentModel):
    """
    Top-level model: routes events to registered sub-components and
    aggregates their current draw.  The device runs on the caller's thread
    via run(); each component runs its own worker thread.
    """
    def __init__(self, device):
        # BUG FIX: super(self.__class__, ...) recurses forever as soon as
        # Model is subclassed (self.__class__ is then the subclass).  Always
        # name the class explicitly with the Python 2 two-argument form.
        super(Model, self).__init__()
        self._components = {}
        self.device = device
    def add_component(self, event, component):
        # Register *component* to receive events whose name contains *event*.
        self._components[event] = component
    def get_current(self):
        # Total current is the sum over all registered components.
        return sum([c.get_current() for c in self._components.values()])
    def _handle_event(self, event):
        # TODO: accept regex
        # Substring match: a component keyed 'foo' receives any event whose
        # name contains 'foo'.
        for k, v in self._components.items():
            if k in event:
                v.on_event(k)
    def _update(self):
        # The aggregate model has no periodic work of its own.
        pass
    def start(self):
        # Start every component's worker thread, then our own loop.
        for component in self._components.values():
            component.start()
        super(Model, self).start()
    def run(self):
        # Blocking entry point: starts all threads then hands control to
        # the device's event loop.
        self.start()
        self.device.run(model=self)
    def stop(self):
        for component in self._components.values():
            component.stop()
        super(Model, self).stop()
        #self.device.stop()
| 2.734375 | 3 |
src/python/frontend/user_identifier.py | UV-CDAT/uvcmetrics | 3 | 12758335 | user = 'Golaz' | 0.972656 | 1 |
enn_ppo/enn_ppo/test_sample_recorder.py | WillDudley/incubator | 0 | 12758336 | <reponame>WillDudley/incubator<filename>enn_ppo/enn_ppo/test_sample_recorder.py
import numpy as np
import tempfile
from enn_ppo.sample_recorder import SampleRecorder, Sample, Trace
def test_serde_sample() -> None:
    """Round-trip a Sample through serialize()/deserialize() and check equality."""
    # Representative sample: ragged per-entity feature arrays, per-action
    # masks and probability tables, plus scalar reward/step/episode.
    sample = Sample(
        entities={
            "hero": np.array([[1.0, 2.0, 0.3, 100.0, 10.0]]),
            "enemy": np.array(
                [
                    [4.0, -2.0, 0.3, 100.0],
                    [5.0, -2.0, 0.3, 100.0],
                    [6.0, -2.0, 0.3, 100.0],
                ]
            ),
            "box": np.array(
                [
                    [0.0, 0.0, 0.3, 100.0],
                    [1.0, 0.0, 0.3, 100.0],
                    [2.0, 0.0, 0.3, 100.0],
                ]
            ),
        },
        action_masks={
            "move": np.array([0]),
            "shoot": np.array([0]),
            "explode": np.array([4, 5, 6]),
        },
        probabilities={
            "move": np.array([[0.5, 0.2, 0.3, 0.0]]),
            "shoot": np.array([[0.9, 0.1]]),
            "explode": np.array([[0.3, 0.7], [0.2, 0.8], [0.1, 0.9]]),
        },
        reward=0.3124125987123489,
        step=13,
        episode=4213,
    )
    serialized = sample.serialize()
    deserialized = Sample.deserialize(serialized)
    # Equality must hold field-by-field, including the numpy arrays.
    assert deserialized == sample
def test_sampe_recorder() -> None:
    """Record two samples to disk and verify the deserialized Trace contents.

    NOTE(review): the name looks like a typo for ``test_sample_recorder``;
    renaming would change the collected pytest id, so it is only flagged here.
    """
    sample = Sample(
        entities={
            "hero": np.array([[1.0, 2.0, 0.3, 100.0, 10.0]]),
            "enemy": np.array(
                [
                    [4.0, -2.0, 0.3, 100.0],
                    [5.0, -2.0, 0.3, 100.0],
                    [6.0, -2.0, 0.3, 100.0],
                ]
            ),
            "box": np.array(
                [
                    [0.0, 0.0, 0.3, 100.0],
                    [1.0, 0.0, 0.3, 100.0],
                    [2.0, 0.0, 0.3, 100.0],
                ]
            ),
        },
        action_masks={
            "move": np.array([0]),
            "shoot": np.array([0]),
            "explode": np.array([4, 5, 6]),
        },
        probabilities={
            "move": np.array([[0.5, 0.2, 0.3, 0.0]]),
            "shoot": np.array([[0.9, 0.1]]),
            "explode": np.array([[0.3, 0.7], [0.2, 0.8], [0.1, 0.9]]),
        },
        reward=0.3124125987123489,
        step=50,
        episode=4213,
    )
    with tempfile.NamedTemporaryFile(mode="wb", delete=False) as f:
        sample_recorder = SampleRecorder(f.name, act_space=None, obs_space=None)  # type: ignore
        sample_recorder.record(sample)
        # modify the sample in place; the recorder must have captured the
        # original values, not a live reference
        sample.reward = 1.0
        sample.entities["hero"][0][3] = 200
        sample_recorder.record(sample)
        sample_recorder.close()
    with open(f.name, "rb") as f:
        trace = Trace.deserialize(f.read())
    assert len(trace.samples) == 2
    assert trace.samples[0].reward == 0.3124125987123489
    assert trace.samples[1].reward == 1.0
    assert trace.samples[0].entities["hero"][0][3] == 100
    assert trace.samples[1].entities["hero"][0][3] == 200
| 2.015625 | 2 |
kinect_smoothing/utils.py | ostadabbas/HW-HuP | 47 | 12758337 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def plot_image_frame(image_frame):
    """
    Display each image of a sequence in its own figure window.

    :param image_frame: list of images; a nested list entry is plotted
        using its first element
    """
    for frame_idx, frame in enumerate(image_frame):
        plt.figure()
        # Some frames come wrapped in a list; unwrap to the first image.
        frame = frame[0] if isinstance(frame, list) else frame
        plt.imshow(frame)
        plt.title('frame: ' + str(frame_idx))
        plt.show()
def plot_trajectories(pose_frame):
    """
    Plot the x/y(/z) coordinate of every joint against time-step t.

    :param pose_frame: numpy-array, (time_step, joint_num, coordinate_dim);
        the z subplot is only drawn when coordinate_dim > 2
    """
    pose_frame = np.array(pose_frame)
    timestep, joint_num, dim = pose_frame.shape
    # Default joint names for the 4-joint arm chain.  FIX: previously the
    # loop iterated over this hard-coded list, which raised IndexError for
    # joint_num < 4 and silently dropped joints for joint_num > 4.  Now we
    # iterate over the actual joint count and fall back to a numeric label.
    joints = ['neck', 'shoulder', 'elbow', 'hand']
    labels = [joints[ii] if ii < len(joints) else str(ii)
              for ii in range(joint_num)]
    plt.figure(figsize=(12, 7))
    t = np.arange(timestep)
    for ii, mark in enumerate(labels):
        plt.subplot(331)
        plt.plot(t, pose_frame[:, ii, 0], label=mark)
        plt.xlabel('t')
        plt.ylabel('x')
        plt.subplot(332)
        plt.plot(t, pose_frame[:, ii, 1], label=mark)
        plt.xlabel('t')
        plt.ylabel('y')
        if dim > 2:
            plt.subplot(333)
            plt.plot(t, pose_frame[:, ii, 2], label=mark)
            plt.xlabel('t')
            plt.ylabel('z')
    plt.subplots_adjust(wspace=0.5, hspace=0)
    plt.legend(loc=(1, 0.4))
    plt.show()
def plot_trajectory_3d(trajectory):
    """
    Draw a single 3-D trajectory as a connected line with point markers.

    :param trajectory: numpy-array, shape of (time_step, 3); columns are
        interpreted as (x, y, z)
    """
    fig = plt.figure()
    axes = Axes3D(fig)
    axes.plot3D(trajectory[:, 0], trajectory[:, 1], zs=trajectory[:, 2],
                marker='o', color='b')
    plt.show()
| 2.8125 | 3 |
kubernetes_typed/client/models/v1_service_spec.py | nikhiljha/kubernetes-typed | 0 | 12758338 | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1ServiceSpecDict generated type."""
from typing import TypedDict, Dict, List
from kubernetes_typed.client import V1ServicePortDict, V1SessionAffinityConfigDict
# Typed-dict mirror of the Kubernetes V1ServiceSpec API object; with
# total=False every key is optional, matching the optional API fields.
V1ServiceSpecDict = TypedDict(
    "V1ServiceSpecDict",
    {
        "allocateLoadBalancerNodePorts": bool,
        "clusterIP": str,
        "clusterIPs": List[str],
        "externalIPs": List[str],
        "externalName": str,
        "externalTrafficPolicy": str,
        "healthCheckNodePort": int,
        "internalTrafficPolicy": str,
        "ipFamilies": List[str],
        "ipFamilyPolicy": str,
        "loadBalancerClass": str,
        "loadBalancerIP": str,
        "loadBalancerSourceRanges": List[str],
        "ports": List[V1ServicePortDict],
        "publishNotReadyAddresses": bool,
        "selector": Dict[str, str],
        "sessionAffinity": str,
        "sessionAffinityConfig": V1SessionAffinityConfigDict,
        "type": str,
    },
    total=False,
)
| 1.835938 | 2 |
benchmarks/benchmarks/bench_linalg.py | mpmkp2020/numpy_demo | 0 | 12758339 | from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1
import numpy_demo as np
class Eindot(Benchmark):
    # Benchmarks for dot/einsum/inner/matmul/tensordot on fixed-size float
    # arrays.  The timed method bodies must contain only the operation under
    # measurement, so no code changes beyond comments are appropriate here.
    def setup(self):
        # a: (150, 400); transposed and contiguous-transposed copies let the
        # *_trans_* benchmarks compare strided vs contiguous memory layouts.
        self.a = np.arange(60000.0).reshape(150, 400)
        self.ac = self.a.copy()
        self.at = self.a.T
        self.atc = self.a.T.copy()
        self.b = np.arange(240000.0).reshape(400, 600)
        self.c = np.arange(600)
        self.d = np.arange(400)
        # 3-D operands for the einsum/tensordot contractions.
        self.a3 = np.arange(480000.).reshape(60, 80, 100)
        self.b3 = np.arange(192000.).reshape(80, 60, 40)
    def time_dot_a_b(self):
        np.dot(self.a, self.b)
    def time_dot_d_dot_b_c(self):
        np.dot(self.d, np.dot(self.b, self.c))
    def time_dot_trans_a_at(self):
        np.dot(self.a, self.at)
    def time_dot_trans_a_atc(self):
        np.dot(self.a, self.atc)
    def time_dot_trans_at_a(self):
        np.dot(self.at, self.a)
    def time_dot_trans_atc_a(self):
        np.dot(self.atc, self.a)
    def time_einsum_i_ij_j(self):
        np.einsum('i,ij,j', self.d, self.b, self.c)
    def time_einsum_ij_jk_a_b(self):
        np.einsum('ij,jk', self.a, self.b)
    def time_einsum_ijk_jil_kl(self):
        np.einsum('ijk,jil->kl', self.a3, self.b3)
    def time_inner_trans_a_a(self):
        np.inner(self.a, self.a)
    def time_inner_trans_a_ac(self):
        np.inner(self.a, self.ac)
    def time_matmul_a_b(self):
        np.matmul(self.a, self.b)
    def time_matmul_d_matmul_b_c(self):
        np.matmul(self.d, np.matmul(self.b, self.c))
    def time_matmul_trans_a_at(self):
        np.matmul(self.a, self.at)
    def time_matmul_trans_a_atc(self):
        np.matmul(self.a, self.atc)
    def time_matmul_trans_at_a(self):
        np.matmul(self.at, self.a)
    def time_matmul_trans_atc_a(self):
        np.matmul(self.atc, self.a)
    def time_tensordot_a_b_axes_1_0_0_1(self):
        np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1]))
class Linalg(Benchmark):
    # Parameterised benchmark: one linalg operation x one dtype per run.
    params = [['svd', 'pinv', 'det', 'norm'],
              TYPES1]
    param_names = ['op', 'type']
    def setup(self, op, typename):
        # Silence overflow/invalid warnings from ill-conditioned inputs so
        # they don't perturb the timing.
        np.seterr(all='ignore')
        self.func = getattr(np.linalg, op)
        if op == 'cholesky':
            # we need a positive definite
            self.a = np.dot(get_squares_()[typename],
                            get_squares_()[typename].T)
        else:
            self.a = get_squares_()[typename]
        # check that dtype is supported at all
        try:
            self.func(self.a[:2, :2])
        except TypeError:
            # asv treats NotImplementedError in setup as "skip this combo".
            raise NotImplementedError()
    def time_op(self, op, typename):
        self.func(self.a)
class Lstsq(Benchmark):
    # Least-squares solve benchmark on a fixed float64 system.
    def setup(self):
        self.a = get_squares_()['float64']
        self.b = get_indexes_rand()[:100].astype(np.float64)
    def time_numpy_demo_linalg_lstsq_a__b_float64(self):
        # rcond=-1 keeps the legacy cutoff behaviour (no FutureWarning).
        np.linalg.lstsq(self.a, self.b, rcond=-1)
| 2.328125 | 2 |
astroquery/alma/tests/test_alma_remote.py | DBerke/astroquery | 0 | 12758340 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import tempfile
import shutil
import numpy as np
import pytest
from datetime import datetime
import os
from urllib.parse import urlparse
import re
from unittest.mock import Mock, patch
from astropy import coordinates
from astropy import units as u
from astroquery.utils.commons import ASTROPY_LT_4_1
from .. import Alma
from .. import _url_list, _test_url_list
# ALMA tests involving staging take too long, leading to travis timeouts
# TODO: make this a configuration item
SKIP_SLOW = True

# Legacy (pre-ObsCore) result-column names; kept for reference by the
# commented-out colname assertion in test_doc_example below.
all_colnames = {'Project code', 'Source name', 'RA', 'Dec', 'Band',
                'Frequency resolution', 'Integration', 'Release date',
                'Frequency support', 'Velocity resolution', 'Pol products',
                'Observation date', 'PI name', 'PWV', 'Member ous id',
                'Asdm uid', 'Project title', 'Project type', 'Scan intent',
                'Spatial resolution', 'Largest angular scale',
                'QA2 Status', 'Group ous id', 'Pub'}
def get_client():
    """Return an ``Alma`` instance pointed at the CADC test service."""
    client = Alma()
    # Redirect queries to a different test site.  The alma package
    # __init__.py mentions test sites, but the intended selection
    # mechanism is not documented.
    from .. import core
    core.ALMA_TAP_PATH = 'obscore'
    client.archive_url = 'https://alma.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/'
    return client
@pytest.mark.remote_data
class TestAlma:
    """Remote-data integration tests against the (CADC-mirrored) ALMA archive.

    Many assertions encode service-side counts/dates that have historically
    drifted; comments record those drifts rather than pinning exact values.
    """
    def setup_class(cls):
        pass
        # new test server
        # this server seems not to serve a help page?
        # Alma.archive_url = "https://2016-03.asa-test.alma.cl/aq/"
        # starting somewhere between Nov 2015 and Jan 2016, the beta server
        # stopped serving the actual data, making all staging attempts break

    @pytest.fixture()
    def temp_dir(self, request):
        # Per-test scratch directory, removed again via the finalizer.
        my_temp_dir = tempfile.mkdtemp()
        def fin():
            shutil.rmtree(my_temp_dir)
        request.addfinalizer(fin)
        return my_temp_dir

    def test_public(self):
        # public=True/False must filter on the data_rights column.
        alma = get_client()
        results = alma.query(payload=None, public=True, maxrec=100)
        assert len(results) == 100
        for row in results:
            assert row['data_rights'] == 'Public'
        results = alma.query(payload=None, public=False, maxrec=100)
        assert len(results) == 100
        for row in results:
            assert row['data_rights'] == 'Proprietary'

    def test_SgrAstar(self, temp_dir):
        alma = get_client()
        alma.cache_location = temp_dir
        result_s = alma.query_object('Sgr A*', legacy_columns=True)
        assert '2013.1.00857.S' in result_s['Project code']
        # "The Brick", g0.253, is in this one
        # assert b'2011.0.00217.S' in result_c['Project code'] # missing cycle 1 data

    def test_docs_example(self, temp_dir):
        alma = get_client()
        alma.cache_location = temp_dir
        rslt = alma.query(payload=dict(obs_creator_name='*Ginsburg*'))
        assert 'ADS/JAO.ALMA#2013.1.00269.S' in rslt['obs_publisher_did']

    def test_freq(self):
        alma = get_client()
        payload = {'frequency': '85..86'}
        result = alma.query(payload)
        assert len(result) > 0
        for row in result:
            # returned em_min and em_max are in m
            assert row['frequency'] >= 85
            assert row['frequency'] <= 100
            assert '3' in row['band_list']

    @pytest.mark.skipif("SKIP_SLOW",
                        reason="Extremely slow due to limitations of "
                               "the implementation")
    def test_bands(self):
        alma = get_client()
        payload = {'band_list': ['5', '7']}
        result = alma.query(payload)
        assert len(result) > 0
        for row in result:
            assert ('5' in row['band_list']) or ('7' in row['band_list'])

    def test_equivalent_columns(self):
        # this test is to ensure that queries using original column names
        # return the same results as the ones that use ObsCore names
        alma = get_client()
        # original
        result_orig = alma.query(payload={'project_code': '2011.0.00131.S'},
                                 legacy_columns=True)
        result_obscore = alma.query(payload={'proposal_id': '2011.0.00131.S'},
                                    legacy_columns=True)
        assert len(result_orig) == len(result_obscore)
        for row in result_orig:
            assert row['Project code'] == '2011.0.00131.S'
        for row in result_obscore:
            assert row['Project code'] == '2011.0.00131.S'

    def test_alma_source_name(self):
        alma = get_client()
        payload = {'source_name_alma': 'GRB021004'}
        result = alma.query(payload)
        assert len(result) > 0
        for row in result:
            assert 'GRB021004' == row['target_name']

    @pytest.mark.skipif("SKIP_SLOW", reason="Known issue")
    def test_ra_dec(self):
        alma = get_client()
        payload = {'ra_dec': '181.0192d -0.01928d'}
        result = alma.query(payload)
        assert len(result) > 0

    @pytest.mark.skipif("SKIP_SLOW")
    def test_m83(self, temp_dir, recwarn):
        alma = get_client()
        alma.cache_location = temp_dir
        m83_data = alma.query_object('M83', science=True, legacy_columns=True)
        uids = np.unique(m83_data['Member ous id'])
        link_list = alma.stage_data(uids)
        # On Feb 8, 2016 there were 83 hits. This number should never go down.
        # Except it has. On May 18, 2016, there were 47.
        assert len(link_list) >= 47
        # test re-staging
        # (has been replaced with warning)
        # with pytest.raises(requests.HTTPError) as ex:
        #    link_list = alma.stage_data(uids)
        # assert ex.value.args[0] == ('Received an error 405: this may indicate you have '
        #                             'already staged the data.  Try downloading the '
        #                             'file URLs directly with download_files.')
        # log.warning doesn't actually make a warning
        # link_list = alma.stage_data(uids)
        # w = recwarn.pop()
        # assert (str(w.message) == ('Error 405 received.  If you have previously staged the '
        #                            'same UIDs, the result returned is probably correct,'
        #                            ' otherwise you may need to create a fresh astroquery.Alma instance.'))

    @pytest.mark.skipif("SKIP_SLOW", reason="Known issue")
    def test_stage_data(self, temp_dir, recwarn):
        alma = get_client()
        alma.cache_location = temp_dir
        result_s = alma.query_object('Sgr A*', legacy_columns=True)
        # astropy < 4.1 returned bytes columns; branch keeps both working.
        if ASTROPY_LT_4_1:
            assert b'2013.1.00857.S' in result_s['Project code']
            assert b'uid://A002/X40d164/X1b3' in result_s['Asdm uid']
            assert b'uid://A002/X391d0b/X23d' in result_s['Member ous id']
            match_val = b'uid://A002/X40d164/X1b3'
        else:
            assert '2013.1.00857.S' in result_s['Project code']
            assert 'uid://A002/X40d164/X1b3' in result_s['Asdm uid']
            assert 'uid://A002/X391d0b/X23d' in result_s['Member ous id']
            match_val = 'uid://A002/X40d164/X1b3'
        match = result_s['Asdm uid'] == match_val
        uid = result_s['Member ous id'][match]
        # this is temporary to switch back to ALMA servers
        # del alma.dataarchive_url
        # alma.archive_url = 'http://almascience.org'
        result = alma.stage_data(uid)
        found = False
        for url in result['URL']:
            if 'uid___A002_X40d164_X1b3' in url:
                found = True
                break
        assert found, 'URL to uid___A002_X40d164_X1b3 expected'

    def test_stage_data_listall(self, temp_dir, recwarn):
        """
        test for expanded capability created in #1683
        """
        alma = get_client()
        alma.cache_location = temp_dir
        uid = 'uid://A001/X12a3/Xe9'
        result1 = alma.stage_data(uid, expand_tarfiles=False)
        result2 = alma.stage_data(uid, expand_tarfiles=True)
        expected_names = [
            '2017.1.01185.S_uid___A002_Xd28a9e_X71b8.asdm.sdm.tar',
            '2017.1.01185.S_uid___A002_Xd28a9e_X7b4d.asdm.sdm.tar',
            '2017.1.01185.S_uid___A002_Xd29c1f_X1f74.asdm.sdm.tar',
            '2017.1.01185.S_uid___A002_Xd29c1f_X5cf.asdm.sdm.tar']
        expected_names_with_aux = expected_names + \
            ['2017.1.01185.S_uid___A001_X12a3_Xe9_auxiliary.tar']
        for name in expected_names_with_aux:
            assert name in result1['name']
        for res in result1:
            p = re.compile(r'.*(uid__.*)\.asdm.*')
            if res['name'] in expected_names:
                assert 'application/x-tar' == res['type']
                assert res['id'] == p.search(res['name']).group(1)
            else:
                assert res['type'] in ['application/x-tar', 'application/x-votable+xml;content=datalink', 'text/plain']
                assert res['id'] == 'None'
            assert 'UNKNOWN' == res['permission']
            assert res['mous_uid'] == uid
        # expanded tarfile listing must be a superset of the tar listing
        assert len(result2) > len(result1)

    def test_stage_data_json(self, temp_dir, recwarn):
        """
        test for json returns
        """
        alma = get_client()
        alma.cache_location = temp_dir
        uid = 'uid://A001/X12a3/Xe9'
        # this is temporary to switch back to ALMA servers
        # alma.archive_url = 'http://almascience.org'
        result = alma.stage_data(uid, return_json=False)
        assert len(result) > 0
        with pytest.raises(AttributeError):
            # this no longer works
            alma.stage_data(uid, return_json=True)

    def test_data_proprietary(self):
        # public
        alma = get_client()
        assert not alma.is_proprietary('uid://A001/X12a3/Xe9')
        IVOA_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
        now = datetime.utcnow().strftime(IVOA_DATE_FORMAT)[:-3]
        # any observation released in the future is proprietary by definition
        query = "select top 1 obs_id from ivoa.obscore where " \
                "obs_release_date > '{}'".format(now)
        result = alma.query_tap(query)
        assert len(result.table) == 1
        # proprietary
        assert alma.is_proprietary(result.table[0][0])
        # non existent
        with pytest.raises(AttributeError):
            alma.is_proprietary('uid://NON/EXI/STING')

    def test_data_info(self, temp_dir):
        alma = get_client()
        alma.cache_location = temp_dir
        uid = 'uid://A001/X12a3/Xe9'
        data_info = alma.get_data_info(uid, expand_tarfiles=True)
        for file in data_info:
            # TODO found files that do not match info.
            # assert u.isclose(file['content_length']*u.B,
            #                  alma._HEADER_data_size([file['access_url']])[1]),\
            #     'File {} size: datalink and head do not match'.\
            #     format(file['access_url'])
            pass
        # compare with tarball version
        data_info_tar = alma.get_data_info(uid, expand_tarfiles=False)
        assert len(data_info) > len(data_info_tar)
        # size is the same - not working because service inconsistencies
        # assert sum(data_info['content_length']) == \
        #     sum(data_info_tar['content_length'])
        # check smallest file downloads correctly
        file = 'member.uid___A001_X12a3_Xe9.README.txt'
        for url in data_info['access_url']:
            if file in url:
                file_url = url
                break
        assert file_url
        alma.download_files([file_url], temp_dir)
        assert os.stat(os.path.join(temp_dir, file)).st_size
        # mock downloading an entire program
        download_files_mock = Mock()
        alma.download_files = download_files_mock
        alma.retrieve_data_from_uid([uid])
        comparison = download_files_mock.mock_calls[0][1] == data_info_tar[
            'access_url']
        assert comparison.all()

    def test_download_data(self, temp_dir):
        # test only fits files from a program
        def myrequests(op, file_url, **kwargs):
            # this is to avoid downloading the actual files
            if op == 'HEAD':
                return Mock(headers={'Content-Type': 'fits'})
            else:
                return file_url.split('/')[-1]
        alma = get_client()
        alma.cache_location = temp_dir
        uid = 'uid://A001/X12a3/Xe9'
        data_info = alma.get_data_info(uid, expand_tarfiles=True)
        fitsre = re.compile(r'.*\.fits$')
        alma._request = Mock(side_effect=myrequests)
        urls = [x['access_url'] for x in data_info
                if fitsre.match(x['access_url'])]
        results = alma.download_files(urls, temp_dir)
        alma._request.assert_called()
        assert len(results) == len(urls)
        # each url triggers 2 calls: HEAD and GET
        assert len(urls)*2 == len(alma._request.mock_calls)

    def test_download_and_extract(self, temp_dir):
        def myrequests(op, file_url, **kwargs):
            # this is to avoid downloading the actual files
            if op == 'HEAD':
                return Mock(headers={'Content-Type': 'fits'})
            else:
                return file_url.split('/')[-1]
        alma = get_client()
        alma.cache_location = temp_dir
        alma._request = Mock(side_effect=myrequests)
        alma._cycle0_tarfile_content_table = {'ID': ''}
        uid = 'uid://A001/X12a3/Xe9'
        data_info = alma.get_data_info(uid, expand_tarfiles=False)
        aux_tar_file = [x for x in data_info['access_url'] if 'auxiliary' in x]
        assert 1 == len(aux_tar_file)
        # there are no FITS files in the auxiliary file
        assert not alma.download_and_extract_files(aux_tar_file)
        # download python scripts now
        downloaded = alma.download_and_extract_files(aux_tar_file,
                                                     regex=r'.*\.py')
        assert len(downloaded) > 1
        assert len(downloaded)*2 == len(alma._request.mock_calls)
        # ASDM files cannot be expanded.
        asdm_url = [x for x in data_info['access_url'] if 'asdm' in x][0]
        tarfile_handle_mock = Mock()
        mock_content_file1 = Mock(path='/tmp/')
        # mocking attribute name is trickier and it requires the name to
        # be set separately.
        mock_content_file1.name = 'foo.py'
        mock_content_file2 = Mock(path='/tmp/')
        mock_content_file2.name = 'blah.txt'
        tarfile_handle_mock.getmembers.return_value = \
            [mock_content_file1, mock_content_file2]
        tarfile_pkg_mock = Mock()
        tarfile_pkg_mock.open.return_value = tarfile_handle_mock
        with patch('astroquery.alma.core.tarfile', tarfile_pkg_mock):
            with patch('astroquery.alma.core.os.remove') as delete_mock:
                downloaded_asdm = alma.download_and_extract_files(
                    [asdm_url], include_asdm=True, regex=r'.*\.py')
                delete_mock.assert_called_once_with(asdm_url.split('/')[-1])
        assert downloaded_asdm == [os.path.join(temp_dir, 'foo.py')]

    @pytest.mark.skipif("SKIP_SLOW", reason="Known issue")
    def test_doc_example(self, temp_dir):
        alma = get_client()
        alma.cache_location = temp_dir
        alma2 = get_client()
        alma2.cache_location = temp_dir
        m83_data = alma.query_object('M83', legacy_columns=True)
        # the order can apparently sometimes change
        # These column names change too often to keep testing.
        # assert set(m83_data.colnames) == set(all_colnames)
        galactic_center = coordinates.SkyCoord(0 * u.deg, 0 * u.deg,
                                               frame='galactic')
        gc_data = alma.query_region(galactic_center, 1 * u.deg)
        # assert len(gc_data) >= 425 # Feb 8, 2016
        assert len(gc_data) >= 50  # Nov 16, 2016
        uids = np.unique(m83_data['Member ous id'])
        if ASTROPY_LT_4_1:
            assert b'uid://A001/X11f/X30' in uids
            X30 = (m83_data['Member ous id'] == b'uid://A001/X11f/X30')
            X31 = (m83_data['Member ous id'] == b'uid://A002/X3216af/X31')
        else:
            assert 'uid://A001/X11f/X30' in uids
            X30 = (m83_data['Member ous id'] == 'uid://A001/X11f/X30')
            X31 = (m83_data['Member ous id'] == 'uid://A002/X3216af/X31')
        assert X30.sum() == 4  # Jul 13, 2020
        assert X31.sum() == 4  # Jul 13, 2020
        mous1 = alma.stage_data('uid://A001/X11f/X30')
        totalsize_mous1 = mous1['size'].sum() * u.Unit(mous1['size'].unit)
        assert (totalsize_mous1.to(u.B) > 1.9*u.GB)
        mous = alma2.stage_data('uid://A002/X3216af/X31')
        totalsize_mous = mous['size'].sum() * u.Unit(mous['size'].unit)
        # More recent ALMA request responses do not include any information
        # about file size, so we have to allow for the possibility that all
        # file sizes are replaced with -1
        assert (totalsize_mous.to(u.GB).value > 52)

    def test_query(self, temp_dir):
        alma = get_client()
        alma.cache_location = temp_dir
        result = alma.query(payload={'start_date': '<11-11-2011'},
                            public=False, legacy_columns=True, science=True)
        # Nov 16, 2016: 159
        # Apr 25, 2017: 150
        # Jul 2, 2017: 160
        # May 9, 2018: 162
        # March 18, 2019: 171 (seriously, how do they keep changing history?)
        # with SIA2 numbers are different (cardinality?) assert len(result) == 171
        test_date = datetime.strptime('11-11-2011', '%d-%m-%Y')
        for row in result['Observation date']:
            assert test_date > datetime.strptime(row, '%d-%m-%Y'), \
                'Unexpected value: {}'.format(row)
        # Not in the help - no need to support it.
        # result = alma.query(payload={'member_ous_id': 'uid://A001/X11a2/X11'},
        #                     science=True)
        # assert len(result) == 1

    @pytest.mark.skipif("SKIP_SLOW", reason="ra dec search known issue")
    def test_misc(self):
        # miscellaneous set of common tests
        alma = get_client()
        #
        # alma.query_region(coordinate=orionkl_coords, radius=4 * u.arcmin,
        #                   public=False, science=False)
        result = alma.query_object('M83', public=True, science=True)
        assert len(result) > 0
        result = alma.query(payload={'pi_name': '*Bally*'}, public=False,
                            maxrec=10)
        assert result
        result.write('/tmp/alma-onerow.txt', format='ascii')
        for row in result:
            assert 'Bally' in row['obs_creator_name']
        result = alma.query(payload=dict(project_code='2016.1.00165.S'),
                            public=False, cache=False)
        assert result
        for row in result:
            assert '2016.1.00165.S' == row['proposal_id']
        result = alma.query(payload=dict(project_code='2017.1.01355.L',
                                         source_name_alma='G008.67'),)
        assert result
        for row in result:
            assert '2017.1.01355.L' == row['proposal_id']
            assert 'Public' == row['data_rights']
            assert 'G008.67' in row['target_name']
        result = alma.query_region(
            coordinates.SkyCoord('5:35:14.461 -5:21:54.41', frame='fk5',
                                 unit=(u.hour, u.deg)), radius=0.034 * u.deg)
        assert result
        result = alma.query_region(
            coordinates.SkyCoord('5:35:14.461 -5:21:54.41', frame='fk5',
                                 unit=(u.hour, u.deg)), radius=0.034 * u.deg,
            payload={'energy.frequency-asu': '215 .. 220'})
        result = alma.query(payload=dict(project_code='2012.*',
                                         public_data=True))
        assert result
        for row in result:
            assert '2012.' in row['proposal_id']
            assert 'Public' == row['data_rights']
        result = alma.query(payload={'frequency': '96 .. 96.5'})
        assert result
        for row in result:
            # TODO not sure how to test this
            pass
        result = alma.query_object('M83', band_list=[3, 6, 8])
        assert result
        for row in result:
            assert row['band_list'] in ['3', '6', '8']
        result = alma.query(payload={'pi_name': '*Ginsburg*',
                                     'band_list': '6'})
        assert result
        for row in result:
            assert '6' == row['band_list']
            assert 'ginsburg' in row['obs_creator_name'].lower()

    @pytest.mark.skipif("SKIP_SLOW")
    def test_user(self):
        # miscellaneous set of tests from current users
        alma = get_client()
        rslt = alma.query({'band_list': [6], 'project_code': '2012.1.*'},
                          legacy_columns=True)
        for row in rslt:
            print(row['Project code'])
            print(row['Member ous id'])

    # As of April 2017, these data are *MISSING FROM THE ARCHIVE*.
    # This has been reported, as it is definitely a bug.
    @pytest.mark.xfail
    @pytest.mark.bigdata
    @pytest.mark.skipif("SKIP_SLOW")
    def test_cycle1(self, temp_dir):
        # About 500 MB
        alma = get_client()
        alma.cache_location = temp_dir
        target = 'NGC4945'
        project_code = '2012.1.00912.S'
        payload = {'project_code': project_code,
                   'source_name_alma': target, }
        result = alma.query(payload=payload)
        assert len(result) == 1
        # Need new Alma() instances each time
        a1 = alma()
        uid_url_table_mous = a1.stage_data(result['Member ous id'])
        a2 = alma()
        uid_url_table_asdm = a2.stage_data(result['Asdm uid'])
        # I believe the fixes as part of #495 have resulted in removal of a
        # redundancy in the table creation, so a 1-row table is OK here.
        # A 2-row table may not be OK any more, but that's what it used to
        # be...
        assert len(uid_url_table_asdm) == 1
        assert len(uid_url_table_mous) >= 2  # now is len=3 (Nov 17, 2016)
        # URL should look like:
        # https://almascience.eso.org/dataPortal/requests/anonymous/944120962/ALMA/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar
        # https://almascience.eso.org/rh/requests/anonymous/944222597/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar
        small = uid_url_table_mous['size'] < 1
        urls_to_download = uid_url_table_mous[small]['URL']
        uri = urlparse(urls_to_download[0])
        assert uri.path == ('/dataPortal/requests/anonymous/{0}/ALMA/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar'  # noqa
                            .format(a1._staging_log['staging_page_id']))
        # THIS IS FAIL
        # '2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar'
        left = uid_url_table_mous['URL'][0].split("/")[-1]
        assert left == '2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar'
        right = uid_url_table_mous['uid'][0]
        assert right == 'uid://A002/X5a9a13/X528'
        assert left[15:-15] == right.replace(":", "_").replace("/", "_")
        data = alma.download_and_extract_files(urls_to_download)
        assert len(data) == 6

    @pytest.mark.skipif("SKIP_SLOW")
    @pytest.mark.skip("Not working anymore")
    def test_cycle0(self, temp_dir):
        # About 20 MB
        alma = get_client()
        alma.cache_location = temp_dir
        target = 'NGC4945'
        project_code = '2011.0.00121.S'
        payload = {'project_code': project_code,
                   'source_name_alma': target, }
        result = alma.query(payload=payload, legacy_columns=True)
        assert len(result) == 1
        alma1 = alma()
        alma2 = alma()
        uid_url_table_mous = alma1.stage_data(result['Member ous id'])
        uid_url_table_asdm = alma2.stage_data(result['Asdm uid'])
        assert len(uid_url_table_asdm) == 1
        assert len(uid_url_table_mous) == 32
        assert uid_url_table_mous[0]['URL'].split("/")[-1] == '2011.0.00121.S_2012-08-16_001_of_002.tar'
        assert uid_url_table_mous[0]['uid'] == 'uid://A002/X327408/X246'
        small = uid_url_table_mous['size'] < 1
        urls_to_download = uid_url_table_mous[small]['URL']
        # Check that all URLs show up in the Cycle 0 table
        for url in urls_to_download:
            tarfile_name = os.path.split(url)[-1]
            assert tarfile_name in alma._cycle0_tarfile_content['ID']
        data = alma.download_and_extract_files(urls_to_download)
        # There are 10 small files, but only 8 unique
        assert len(data) == 8

    def test_keywords(self, temp_dir):
        alma = get_client()
        alma.help_tap()
        result = alma.query_tap(
            "select * from ivoa.obscore where s_resolution <0.1 and "
            "science_keyword in ('High-mass star formation', 'Disks around "
            "high-mass stars')")
        assert len(result) >= 72
        # TODO why is it failing
        # assert 'Orion_Source_I' in result['target_name']
@pytest.mark.remote_data
def test_project_metadata():
    # The service returns the proposal abstract verbatim; the expected text
    # below must stay byte-identical to the archived abstract.
    alma = get_client()
    metadata = alma.get_project_metadata('2013.1.00269.S')
    assert metadata == ['Sgr B2, a high-mass molecular cloud in our Galaxy\'s '
                        'Central Molecular Zone, is the most extreme site of '
                        'ongoing star formation in the Local Group in terms '
                        'of its gas content, temperature, and velocity '
                        'dispersion. If any cloud in our galaxy is analogous '
                        'to the typical cloud at the universal peak of star '
                        'formation at z~2, this is it. We propose a 6\'x6\' '
                        'mosaic in the 3mm window targeting gas thermometer '
                        'lines, specifically CH3CN and its isotopologues. We '
                        'will measure the velocity dispersion and temperature '
                        'of the molecular gas on all scales (0.02 - 12 pc, '
                        '0.5" - 5\') within the cloud, which will yield '
                        'resolved measurements of the Mach number and the '
                        'sonic scale of the gas.  We will assess the relative '
                        'importance of stellar feedback and turbulence on the '
                        'star-forming gas, determining how extensive the '
                        'feedback effects are within an ultradense '
                        'environment. The observations will provide '
                        'constraints on the inputs to star formation theories '
                        'and will determine their applicability in extremely '
                        'dense, turbulent, and hot regions. Sgr B2 will be '
                        'used as a testing ground for star formation theories '
                        'in an environment analogous to high-z starburst '
                        'clouds in which they must be applied.']
@pytest.mark.remote_data
@pytest.mark.parametrize('dataarchive_url', _test_url_list)
@pytest.mark.skip('Not working for now - Investigating')
def test_staging_postfeb2020(dataarchive_url):
    """Staging via the post-Feb-2020 archive returns mous_uid and tar names."""
    alma = get_client()
    tbl = alma.stage_data('uid://A001/X121/X4ba')
    assert 'mous_uid' in tbl.colnames
    assert '2013.1.00269.S_uid___A002_X9de499_X3d6c.asdm.sdm.tar' in tbl['name']
@pytest.mark.remote_data
@pytest.mark.parametrize('dataarchive_url', _url_list)
@pytest.mark.skip('Not working for now - Investigating')
def test_staging_uptofeb2020(dataarchive_url):
    """Staging via the pre-Feb-2020 archive: file names come from the URL column."""
    alma = get_client()
    tbl = alma.stage_data('uid://A001/X121/X4ba')
    assert 'mous_uid' in tbl.colnames
    # The older table has no 'name' column; derive names from the URLs.
    names = [x.split("/")[-1] for x in tbl['URL']]
    assert '2013.1.00269.S_uid___A002_X9de499_X3d6c.asdm.sdm.tar' in names
@pytest.mark.remote_data
@pytest.mark.parametrize('dataarchive_url', _test_url_list)
def test_staging_stacking(dataarchive_url):
    """stage_data accepts several uids in a single call without raising."""
    alma = get_client()
    alma.stage_data(['uid://A001/X13d5/X1d', 'uid://A002/X3216af/X31',
                     'uid://A001/X12a3/X240'])
| 1.773438 | 2 |
ocdskingfisherprocess/cli/commands/upgrade_database.py | aguilerapy/kingfisher-process | 0 | 12758341 | import ocdskingfisherprocess.cli.commands.base
import redis
class UpgradeDataBaseCLICommand(ocdskingfisherprocess.cli.commands.base.CLICommand):
    """CLI command that creates the database schema, optionally dropping it first."""

    command = 'upgrade-database'

    def configure_subparser(self, subparser):
        """Register the --deletefirst flag on this command's subparser."""
        subparser.add_argument("--deletefirst", help="Delete Database First", action="store_true")

    def run_command(self, args):
        """Upgrade the database; with --deletefirst also drop tables and Redis state."""
        verbose = not args.quiet
        if args.deletefirst:
            if verbose:
                print("Dropping Database")
            self.database.delete_tables()
            if self.config.is_redis_available():
                if verbose:
                    print("Dropping Redis")
                connection = redis.Redis(
                    host=self.config.redis_host,
                    port=self.config.redis_port,
                    db=self.config.redis_database,
                )
                # Remove the queued-work key so stale jobs don't survive a schema reset.
                connection.delete('kingfisher_work')
        if verbose:
            print("Upgrading/Creating Database")
        self.database.create_tables()
| 2.453125 | 2 |
get-grades.py | taylorchenca/ecampus-automation | 1 | 12758342 | import time
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://ecampus.scu.edu/")

# Credentials: fill these in before running.
userid = ''
pwd = ''


def retry_until_loaded(action, wait_seconds=1):
    """Keep invoking *action* until it succeeds.

    Selenium raises while the page is still loading (target elements do not
    exist yet), so retry after a short pause until the action completes.
    """
    while True:
        try:
            action()
            return
        except Exception:
            print("Page loading. Will try again")
            time.sleep(wait_seconds)


def log_in():
    # Fill in the eCampus login form and submit it.
    driver.find_element_by_id("userid").send_keys(userid)
    # BUG FIX: the password field must receive the `pwd` variable; the
    # previous placeholder token was not valid Python.
    driver.find_element_by_id("pwd").send_keys(pwd)
    driver.find_element_by_name("Submit").click()


def open_academics_page():
    # Navigate directly to the "My Academics" page.
    driver.get('https://ecampus.scu.edu/psc/csprd92/EMPLOYEE/SA/c/SA_LEARNER_SERVICES.SSS_MY_ACAD.GBL?Page=SSS_MY_ACAD&Action=U')


def open_transcript_link():
    # Click the "unofficial transcript" link on the academics page.
    driver.find_element_by_id('win0divDERIVED_SSSACA2_SS_UNOFF_TRSC_LINK').click()


def request_unofficial_transcript():
    # Choose the "unofficial" report type and submit the request.
    driver.find_element_by_id('DERIVED_AA2_TSCRPT_TYPE3').click()
    driver.find_element_by_xpath("//option[@value='UNOFF']").click()
    time.sleep(2)
    driver.find_element_by_id('GO').click()


retry_until_loaded(log_in)
retry_until_loaded(open_academics_page, wait_seconds=3)
retry_until_loaded(open_transcript_link)
retry_until_loaded(request_unofficial_transcript, wait_seconds=5)
# driver.close()
| 3.0625 | 3 |
proteinsolver/utils/compression.py | ostrokach/proteinsolver | 27 | 12758343 | <reponame>ostrokach/proteinsolver
import io
import brotli
import pyarrow as pa
def compress(array: pa.Array) -> bytes:
    """Serialize *array* to the Arrow file format and brotli-compress the bytes."""
    batch = pa.RecordBatch.from_arrays([array], ["array"])
    sink = io.BytesIO()
    writer = pa.RecordBatchFileWriter(sink, batch.schema)
    writer.write_batch(batch)
    writer.close()
    return brotli.compress(sink.getvalue())
def decompress(pybytes: bytes) -> pa.Array:
    """Inverse of compress(): brotli-decompress and read back the single column."""
    source = io.BytesIO(brotli.decompress(pybytes))
    batch = pa.RecordBatchFileReader(source).get_batch(0)
    return batch.column(0)
| 2.25 | 2 |
src/blip_sdk/extensions/artificial_intelligence/ai_model/content_type.py | mirlarof/blip-sdk-python | 2 | 12758344 | <filename>src/blip_sdk/extensions/artificial_intelligence/ai_model/content_type.py<gh_stars>1-10
class ContentType:
    """AI Model content types (iris MIME type strings)."""
    # MIME type for AI model publishing payloads.
    MODEL_PUBLISHING = 'application/vnd.iris.ai.model-publishing+json'
    # MIME type for AI model training payloads.
    MODEL_TRAINING = 'application/vnd.iris.ai.model-training+json'
| 1.414063 | 1 |
pydummy/entities/en/name.py | Devgaze/pydummy | 0 | 12758345 | # -*- coding: utf-8 -*-
from pydummy.core.dummydata import DummyData
class DummyName(DummyData):
    """Dummy data source providing a fixed set of ten placeholder first names."""

    def __init__(self):
        # Fixed sample names; order matters only for reproducibility.
        self.data = [
            'Mike', 'Julia',
            'Harold', 'Sharon',
            'Ian', 'Jessica',
            'Bob', 'Elisabeth',
            'Charles', 'Fionna',
        ]
| 2.984375 | 3 |
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-0/charm/hooks/relations/nrpe-external-master/provides.py | KellenRenshaw/hotsos | 6 | 12758346 | import datetime
from charms.reactive import hook
from charms.reactive import RelationBase
from charms.reactive import scopes
class NrpeExternalMasterProvides(RelationBase):
    """Provides side of the ``nrpe-external-master`` Juju relation.

    Manages the relation's ``.available`` state flag and writes NRPE check
    and Nagios service definition files to disk for the monitoring host.
    """
    scope = scopes.GLOBAL
    @hook('{provides:nrpe-external-master}-relation-{joined,changed}')
    def changed_nrpe(self):
        """Raise the ``.available`` state when the relation is joined/changed."""
        self.set_state('{relation_name}.available')
    @hook('{provides:nrpe-external-master}-relation-{broken,departed}')
    def broken_nrpe(self):
        """Clear the ``.available`` state when the relation goes away."""
        self.remove_state('{relation_name}.available')
    def add_check(self, args, name=None, description=None, context=None,
                  servicegroups=None, unit=None):
        """Write an NRPE check command file and a Nagios service export file.

        args: list of command-line tokens for the check command.
        name: check identifier used in both file names and definitions.
        NOTE(review): unit defaults to None but is dereferenced immediately
        below - callers must always pass it; confirm before relying on it.
        """
        # Juju unit names contain '/', which is not valid in file/host names.
        unit = unit.replace('/', '-')
        check_tmpl = """
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
command[%(check_name)s]=%(check_args)s
"""
        service_tmpl = """
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {
    use active-service
    host_name %(context)s-%(unit_name)s
    service_description %(description)s
    check_command check_nrpe!%(check_name)s
    servicegroups %(servicegroups)s
}
"""
        check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name)
        with open(check_filename, "w") as fh:
            fh.write(check_tmpl % {
                'check_args': ' '.join(args),
                'check_name': name,
            })
        service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % (
            unit, name)
        with open(service_filename, "w") as fh:
            fh.write(service_tmpl % {
                # Fall back to the context name when no servicegroup is given.
                'servicegroups': servicegroups or context,
                'context': context,
                'description': description,
                'check_name': name,
                'unit_name': unit,
            })
    def updated(self):
        """Push a fresh timestamp to the remote side and clear ``.available``."""
        relation_info = {
            'timestamp': datetime.datetime.now().isoformat(),
        }
        self.set_remote(**relation_info)
        self.remove_state('{relation_name}.available')
| 1.789063 | 2 |
Grid-1m-oversampler.py | cjaddison82/Survey-Resources | 0 | 12758347 | #INPUT FORMAT E N DEPTH
#OUTPUT CSV AT METRE ONLY
try:
from math import *
except:
print "import error"
#T = int(1) #flag for while loop - removed depricated code
#E = [] #new array to read from the RPL file - depricated
#N = []
#KP = []
gridE = [] #new array to write logfile values to
gridN = []
gridZ = []
OUTPUT1 = open("D:/temp/OUTPUT2.TXT", "w")
previous = str("0")
fileGRID = open("D:/SHEFA_acceptedXYZ/XYZExportFanoe.xyz", "r") #filename of grid file being processed
for line in fileGRID:
splitline = line.split(" ")#split on space because of supplied data, might be comma or semicolon in future
gridE = int(float((splitline[0])))
gridN = int(float((splitline[1])))
gridZ = float((splitline[2]))
checker = str(gridE)#+str(gridN)
if checker == previous:
pass
else:
OUTPUT1.write(str(gridE)+","+str(gridN)+","+str(gridZ)+"\n\r")
previous = str(gridE)#+str(gridN)
#print "new line written to file"
fileGRID.close()
OUTPUT1.close()
| 2.734375 | 3 |
yatube/posts/tests/test_views.py | EugeneSal/hw05-ver2 | 0 | 12758348 | from http import HTTPStatus
from django import forms
from django.core.cache import cache
from django.test import Client, TestCase
from django.urls import reverse
from ..models import Follow, Group, Post, User, Comment
# , Profile
# Fixture sizing for the paginator tests: POSTS_COUNT posts are created in
# total, PAGE_COUNT are shown per page (the second page gets the remainder).
POSTS_COUNT = 13
PAGE_COUNT = 10
class PostPagesTests(TestCase):
    """View tests: templates and context for the posts app pages."""

    @classmethod
    def setUpClass(cls):
        """Create a user, a group and one post shared by all tests in the class."""
        super().setUpClass()
        cls.user = User.objects.create_user(username='TestUser')
        # cls.profile = Profile.objects.create(user=cls.user)
        cls.group = Group.objects.create(title='TestGroup',
                                         slug='test_slug',
                                         description='Test description')
        cls.post = Post.objects.create(
            text='Тестовый пост длинна котого больше 15 символов',
            author=cls.user, group=cls.group
        )
        # Mapping of template name -> URL, used by the template tests below.
        cls.templates_pages_names = {
            'posts/index.html': reverse('index'),
            'posts/group.html': reverse('group', kwargs={
                'slug': cls.group.slug}),
            'posts/create.html': reverse('post_create'),
            'posts/follow.html': reverse('follow_index')
        }

    def setUp(self):
        """Prepare an anonymous client and a logged-in client."""
        self.guest_client = Client()
        self.authorized_user = Client()
        self.authorized_user.force_login(self.user)

    def test_pages_use_correct_template(self):
        """Each URL renders with the expected template."""
        cache.clear()
        for template, reverse_name in self.templates_pages_names.items():
            with self.subTest(reverse_name=reverse_name):
                response = self.authorized_user.get(reverse_name)
                self.assertTemplateUsed(response, template)

    def test_context_in_template_index(self):
        """The index template gets the correct context.

        A post created with a group appears on the site's main page.
        """
        cache.clear()
        response = self.authorized_user.get(reverse('index'))
        last_post = response.context['page_obj'][0]
        self.assertEqual(last_post, self.post)

    def get_context(self, name):
        # Helper: assert that a post-like object matches the fixture post.
        self.assertEqual(name.group, PostPagesTests.post.group)
        self.assertEqual(name.text, PostPagesTests.post.text)

    def test_context_in_template_group(self):
        """The group template gets the correct context.

        A post created with a group appears on that group's page.
        """
        response = self.authorized_user.get(reverse('group', kwargs={
            'slug': self.group.slug}))
        test_group = response.context['group']
        test_post = response.context['page_obj'][0]
        self.assertEqual(test_group, self.group)
        self.assertEqual(test_post, self.post)
        self.get_context(test_post)
        self.assertEqual(Post.objects.first().text, self.post.text)
        self.assertEqual(Post.objects.first().group, self.post.group)

    def test_context_in_template_new_post(self):
        """The post-creation form has the expected fields.

        Anonymous users are redirected to the login page.
        """
        response = self.authorized_user.get(reverse('post_create'))
        form_fields = {'text': forms.fields.CharField,
                       'group': forms.fields.ChoiceField}
        for value, expect in form_fields.items():
            with self.subTest(value=value):
                form_field = response.context['form'].fields[value]
                self.assertIsInstance(form_field, expect)
        response = self.guest_client.get(reverse('post_create'))
        urls = '/auth/login/?next=/create/'
        self.assertRedirects(response, urls, status_code=HTTPStatus.FOUND)

    def test_context_in_template_post_edit(self):
        """The post-edit form has the expected fields."""
        response = self.authorized_user.get(reverse('post_edit', kwargs={
            'post_id': self.post.id}))
        form_fields = {'text': forms.fields.CharField, }
        for value, expect in form_fields.items():
            with self.subTest(value=value):
                form_field = response.context['form'].fields[value]
                self.assertIsInstance(form_field, expect)

    def test_context_in_template_profile(self):
        """The profile template gets the correct context."""
        response = self.authorized_user.get(reverse('profile', kwargs={
            'username': self.user.username, }))
        profile = {'author': self.post.author}
        for value, expect in profile.items():
            with self.subTest(value=value):
                context = response.context[value]
                self.assertEqual(context, expect)
        test_page = response.context['page_obj'][0]
        self.assertEqual(test_page, self.user.posts.all()[0])

    def test_context_in_template_post(self):
        """The post-detail template gets the correct context."""
        response = self.authorized_user.get(reverse('post_detail', kwargs={
            'post_id': self.post.id}))
        profile = {'author': self.post.author, 'post': self.post}
        for value, expect in profile.items():
            with self.subTest(value=value):
                context = response.context[value]
                self.assertEqual(context, expect)

    def test_post_not_in_wrong_group(self):
        """A post does not appear in the wrong group and appears in its own."""
        cache.clear()
        Group.objects.create(title='new_group', slug='new_slug')
        response = self.authorized_user.get(reverse('group', kwargs={
            'slug': 'new_slug'}))
        group = response.context['group']
        post = group.posts.count()
        self.assertEqual(post, 0)
        self.assertEqual(len(response.context['page_obj'].object_list), 0)
        response = self.authorized_user.get(reverse('index'))
        post = response.context['page_obj'][0]
        group = post.group
        self.assertEqual(group, self.group)

    def test_wrong_uri_returns_404(self):
        """An unknown URL returns 404 and uses the custom 404 template."""
        response = self.authorized_user.get('chtoto/poshlo/ne.tak')
        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
        self.assertTemplateUsed(response, 'misc/404.html')

    def test_wrong_uri_returns_500(self):
        """The custom 500 template is used (original docstring wrongly said 404)."""
        response = self.authorized_user.get(reverse('page500'))
        self.assertTemplateUsed(response, 'misc/500.html')
class PaginatorTest(TestCase):
    """Paginator tests: page sizes on the index, group and profile pages."""

    @classmethod
    def setUpClass(cls):
        """Create POSTS_COUNT posts so pagination splits them over two pages."""
        super().setUpClass()
        cls.user = User.objects.create_user(username='TestUser')
        # cls.profile = Profile.objects.create(user=cls.user)
        cls.authorized_user = Client()
        cls.authorized_user.force_login(cls.user)
        cls.group = Group.objects.create(title='TestGroup',
                                         slug='test_slug',
                                         description='Test description')
        for counts in range(POSTS_COUNT):
            # NOTE(review): the text is not an f-string, so every post gets
            # the literal '{counts}' placeholder - harmless for these tests
            # but probably not what was intended.
            cls.post = Post.objects.create(
                author=cls.user, text='Тестовый пост под номером {counts}',
                group=cls.group)
        cls.templates_pages_names = {
            'posts/index.html': reverse('index'),
            'posts/group.html': reverse('group', kwargs={
                'slug': cls.group.slug}),
            'posts/profile.html': reverse('profile', kwargs={
                'username': cls.user.username})}

    def test_first_page_have_ten_posts(self):
        """The first paginator page shows PAGE_COUNT (10) posts."""
        cache.clear()
        for address, reverse_name in self.templates_pages_names.items():
            with self.subTest(adress=address):
                response = self.authorized_user.get(reverse_name)
        # NOTE(review): this assertion is outside the loop, so only the last
        # fetched page is actually checked - confirm this is intended.
        self.assertEqual(len(response.context.get('page_obj').object_list),
                         PAGE_COUNT)

    def test_second_page_have_three_posts(self):
        """The second paginator page shows the remaining 3 posts."""
        for address, reverse_name in self.templates_pages_names.items():
            with self.subTest(adress=address):
                response = self.authorized_user.get(reverse_name + '?page=2')
                self.assertEqual(len(
                    response.context.get('page_obj').object_list),
                    POSTS_COUNT - PAGE_COUNT)
class TestCache(TestCase):
    """Tests for the cached index page."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create_user(username='TestUser')
        cls.authorized_user = Client()
        cls.authorized_user.force_login(cls.user)
        cls.group = Group.objects.create(title='TestGroup',
                                         slug='test_slug',
                                         description='Test description')
        cls.post = Post.objects.create(author=cls.user, group=cls.group,
                                       text='text')

    def test_cache_index(self):
        """The index page is served from cache until the cache is cleared.

        A newly created post does not change the cached response; after
        cache.clear() the new post becomes visible.
        """
        response = self.authorized_user.get(reverse('index'))
        Post.objects.create(author=self.user, text='test cache text',
                            group=self.group)
        response1 = self.authorized_user.get(reverse('index'))
        # Still cached: the new post must not be visible yet.
        self.assertEqual(response.content, response1.content)
        cache.clear()
        response3 = self.authorized_user.get(reverse('index'))
        self.assertNotEqual(response3.content, response1.content)
        self.assertEqual(response3.context['page_obj'][0].text,
                         'test cache text')
        self.assertEqual(len(response3.context['page_obj'].object_list), 2)
class TestFollow(TestCase):
    """Tests for the follow/unfollow feature and the follow feed."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create_user(username='TestAuthor')
        cls.group = Group.objects.create(title='TestGroup',
                                         slug='test_slug',
                                         description='Test description')
        cls.follow_user = User.objects.create_user(username='TestUser')

    def setUp(self):
        # The logged-in client acts as the follower, not the author.
        self.authorized_user = Client()
        self.authorized_user.force_login(self.follow_user)

    def test_follow(self):
        """Following works and a Follow record is created."""
        follow_count1 = Follow.objects.count()
        follow = Follow.objects.filter(author=self.user, user=self.follow_user)
        self.assertEqual(follow.first(), None)
        response = self.authorized_user.get(reverse('profile_follow', kwargs={
            'username': self.user.username}))
        follow_count2 = Follow.objects.count()
        self.assertEqual(follow_count2, follow_count1 + 1)
        follow = Follow.objects.first()
        self.assertEqual(Follow.objects.count(), 1)
        self.assertEqual(follow.author, self.user)
        self.assertEqual(follow.user, self.follow_user)
        self.assertEqual(response.status_code, HTTPStatus.FOUND)

    def test_unfollow(self):
        """A follower can unfollow."""
        self.authorized_user.get(reverse('profile_follow', kwargs={
            'username': self.user.username}))
        self.authorized_user.get(reverse('profile_unfollow', kwargs={
            'username': self.user.username}))
        self.assertFalse(Follow.objects.exists())

    def test_follow_index(self):
        """A new post appears in the follower's feed."""
        Post.objects.create(author=self.user, text='test follow text',
                            group=self.group)
        self.authorized_user.get(reverse('profile_follow', kwargs={
            'username': self.user.username}))
        response = self.authorized_user.get(reverse('follow_index'))
        post = response.context['post']
        self.assertEqual(post.text, 'test follow text')
        self.assertEqual(post.author, self.user)
        self.assertEqual(post.group.id, self.group.id)

    def test_not_follow_index(self):
        """Posts do not appear in the feed of a non-follower."""
        Post.objects.create(author=self.user, text='test follow text',
                            group=self.group)
        response = self.authorized_user.get(reverse('follow_index'))
        self.assertEqual(response.context['paginator'].count, 0)

    def test_following_self(self):
        """A user cannot follow themselves."""
        self.assertEqual(Follow.objects.all().count(), 0)
        self.authorized_user.get(reverse('profile_follow', kwargs={
            'username': self.follow_user.username}))
        self.assertEqual(Follow.objects.all().count(), 0)
        self.authorized_user.get(reverse('profile_follow', kwargs={
            'username': self.user.username}))
        self.assertEqual(Follow.objects.all().count(), 1)
class TestComments(TestCase):
    """Tests for adding comments to a post."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create_user(username='TestUser')
        cls.comment_user = User.objects.create_user(username='TestCommentUser')
        cls.post = Post.objects.create(text='test text', author=cls.user)
        cls.url_comment = reverse('add_comment', kwargs={
            'post_id': cls.post.id})

    def setUp(self):
        self.anonymous = Client()
        self.authorized_user = Client()
        self.authorized_user.force_login(self.comment_user)

    def test_comment_anonymous(self):
        """An anonymous user is redirected to the login page when commenting."""
        response = self.anonymous.get(self.url_comment)
        urls = '/auth/login/?next={}'.format(self.url_comment)
        self.assertRedirects(response, urls, status_code=HTTPStatus.FOUND)

    def test_comment_authorized(self):
        """An authorized user can post a comment."""
        response = self.authorized_user.post(self.url_comment, {
            'text': 'test comment'}, follow=True)
        self.assertContains(response, 'test comment')
        self.assertEqual(Comment.objects.count(), 1)
| 2.234375 | 2 |
ceti/utils.py | aramirezarg/ceti | 0 | 12758349 | import frappe
from frappe.utils import get_defaults
from frappe import _
from frappe.utils import cint, flt, cstr
from frappe.utils import num2words
#
# convert currency to words
#
def money_in_words(number, main_currency=None, fraction_currency=None):
    """Spell out *number* as a currency amount in words.

    Currency and fraction names default to the frappe site defaults and the
    Currency doctype. Returns "" for invalid or negative input.
    """
    try:
        # note: `flt` returns 0 for invalid input and we don't want that
        number = float(number)
    except ValueError:
        return ""
    number = flt(number)
    if number < 0:
        return ""
    d = get_defaults()
    if not main_currency:
        main_currency = d.get('currency', 'INR')
    if not fraction_currency:
        fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction", cache=True) or _("Cent")
    # Decimal precision is taken from the currency's configured number format.
    number_format = frappe.db.get_value("Currency", main_currency, "number_format", cache=True) or \
        frappe.db.get_default("number_format") or "#,###.##"
    fraction_length = get_number_format_info(number_format)[2]
    n = "%.{0}f".format(fraction_length) % number
    numbers = n.split('.')
    main, fraction = numbers if len(numbers) > 1 else [n, '00']
    if len(fraction) < fraction_length:
        zeros = '0' * (fraction_length - len(fraction))
        fraction += zeros
    # The Indian "#,##,###.##" format implies lakh/crore wording, not millions.
    in_million = True
    if number_format == "#,##,###.##": in_million = False
    # 0.00
    if main == '0' and fraction in ['00', '000']:
        out = "{0} {1}".format(main_currency, _('Zero'))
    # 0.XX
    elif main == '0':
        out = _(in_words(fraction, in_million).title()) + ' ' + fraction_currency
    else:
        out = main_currency + ' ' + _(in_words(main, in_million).title())
        if cint(fraction):
            out = f"{out} {_('with')} {fraction}/100" # {fraction_currency}"
    return out # + ' ' + _('only.')
# Maps a currency number-format string to a tuple of
# (decimal separator, group/thousands separator, number of decimal places).
number_format_info = {
    "#,###.##": (".", ",", 2),
    "#.###,##": (",", ".", 2),
    "# ###.##": (".", " ", 2),
    "# ###,##": (",", " ", 2),
    "#'###.##": (".", "'", 2),
    "#, ###.##": (".", ", ", 2),
    "#,##,###.##": (".", ",", 2),
    "#,###.###": (".", ",", 3),
    "#.###": ("", ".", 0),
    "#,###": ("", ",", 0)
}


def get_number_format_info(format):
    """Return ``(decimal_sep, group_sep, precision)`` for *format*.

    Falls back to the "#,###.##" convention for unknown formats. Uses
    ``dict.get`` with an explicit default instead of ``... or default`` so
    that a falsy (but valid) mapping value could never be silently replaced.
    """
    # Note: the parameter name shadows the builtin ``format``; it is kept
    # unchanged for backward compatibility with keyword callers.
    return number_format_info.get(format, (".", ",", 2))
def in_words(integer, in_million=True):
    """Return *integer* spelled out in words via ``num2words``.

    Uses the Indian numbering locale ('en_IN': lakh/crore) when *in_million*
    is False, otherwise the current frappe language. Falls back to plain
    English when the locale is unsupported or the number is too large.
    """
    locale = 'en_IN' if not in_million else frappe.local.lang
    integer = int(integer)
    try:
        ret = num2words(integer, lang=locale)
    except (NotImplementedError, OverflowError):
        # Unsupported locale, or the number exceeds what the locale backend
        # can handle: fall back to English (merged the two identical handlers).
        ret = num2words(integer, lang='en')
    return ret.replace('-', ' ')
| 2.6875 | 3 |
blueprints/dev-utils/kshim/kshim.py | Inokinoki/craft | 0 | 12758350 | <reponame>Inokinoki/craft<gh_stars>0
import info
from Package.CMakePackageBase import *
class subinfo(info.infoclass):
    """Craft blueprint metadata for kshim: source targets and dependencies."""

    def setTargets(self):
        # Development target tracks the upstream git repository.
        self.svnTargets["master"] = "https://invent.kde.org/vonreth/kshim.git"
        # Release tarballs hosted on the KDE Craft mirror.
        for ver in ["0.1.0"]:
            self.targets[ver] = f"https://files.kde.org/craft/sources/libs/kshimgn/kshimgen-v{ver}.tar.xz"
            self.targetInstSrc[ver] = f"kshimgen-v{ver}"
        self.targetDigests["0.1.0"] = (['1a46c599ca54e112fd37c39a60e5b97b6b20997e2114fe3cd422274c75ebcd22'], CraftHash.HashAlgorithm.SHA256)
        self.defaultTarget = '0.1.0'

    def setDependencies(self):
        self.runtimeDependencies["dev-utils/mingw-w64"] = None
def setDependencies(self):
self.runtimeDependencies["dev-utils/mingw-w64"] = None
class Package(CMakePackageBase):
    """Craft blueprint package for kshim/kshimgen.

    Builds with CMake when a recent cmake is available; otherwise falls back
    to the project's Python bootstrap script.
    """

    def __init__(self, **args):
        CMakePackageBase.__init__(self)
        if self.buildTarget == "master":
            self.subinfo.options.package.disableBinaryCache = True
            # can't be bootstrapped
            self.__botstrap = False
        else:
            # None = "not decided yet"; resolved lazily in _botstrap below.
            self.__botstrap = None

    @property
    def _botstrap(self):
        # Lazily decide whether to bootstrap: needed when cmake is absent or
        # older than 3.8. ("botstrap" is presumably a typo for "bootstrap";
        # left unchanged because the attribute is name-mangled/private.)
        if self.__botstrap is None:
            cmakeVer = CraftCore.cache.getVersion("cmake")
            self.__botstrap = not cmakeVer or cmakeVer < "3.8"
        return self.__botstrap

    def configure(self):
        # Bootstrapping skips the CMake configure step entirely.
        if not self._botstrap:
            return super().configure()
        else:
            return True

    def make(self):
        # Either a normal CMake build, or run the Python bootstrap script.
        if not self._botstrap:
            return super().make()
        else:
            utils.createDir(self.buildDir())
            return utils.system([sys.executable, os.path.join(self.sourceDir(), "bootstrap.py")], cwd=self.buildDir())

    def install(self):
        # Either a normal CMake install, or copy the bootstrapped executable.
        if not self._botstrap:
            return super().install()
        else:
            return utils.copyFile(os.path.join(self.buildDir(), f"kshimgen{CraftCore.compiler.executableSuffix}"), os.path.join(self.installDir(), "bin", f"kshimgen{CraftCore.compiler.executableSuffix}"))
| 2.046875 | 2 |